/*	$NetBSD: pciide.c,v 1.196 2003/08/17 15:52:06 bouyer Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.196 2003/08/17 15:52:06 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
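
/*
 * Illustrative only: with WDCDEBUG compiled in, probe/DMA tracing can be
 * enabled at run time (e.g. from ddb or a debugger) by setting the mask,
 * for instance:
 *	wdcdebug_pciide_mask = DEBUG_PROBE | DEBUG_DMA;
 */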
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/pciide_i31244_reg.h>
123 #include <dev/pci/pciide_sii3112_reg.h>
124 #include <dev/pci/cy82c693var.h>
125
126 #include "opt_pciide.h"
127
128 static const char dmaerrfmt[] =
129 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
130
131 /* inlines for reading/writing 8-bit PCI registers */
132 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
133 int));
134 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
135 int, u_int8_t));
136
137 static __inline u_int8_t
138 pciide_pci_read(pc, pa, reg)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 {
143
144 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
145 ((reg & 0x03) * 8) & 0xff);
146 }
147
148 static __inline void
149 pciide_pci_write(pc, pa, reg, val)
150 pci_chipset_tag_t pc;
151 pcitag_t pa;
152 int reg;
153 u_int8_t val;
154 {
155 pcireg_t pcival;
156
157 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
158 pcival &= ~(0xff << ((reg & 0x03) * 8));
159 pcival |= (val << ((reg & 0x03) * 8));
160 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
161 }
162
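/*
 * Usage sketch (illustrative only; the offset 0x4c and the caller are
 * hypothetical, not taken from this driver): read and set a single bit in
 * a byte-wide config register without disturbing the rest of the dword:
 *	u_int8_t v = pciide_pci_read(pa->pa_pc, pa->pa_tag, 0x4c);
 *	pciide_pci_write(pa->pa_pc, pa->pa_tag, 0x4c, v | 0x01);
 */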
163 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164
165 void sata_setup_channel __P((struct channel_softc*));
166
167 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void piix_setup_channel __P((struct channel_softc*));
169 void piix3_4_setup_channel __P((struct channel_softc*));
170 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
171 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
172 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
173
174 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void amd7x6_setup_channel __P((struct channel_softc*));
176
177 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void apollo_setup_channel __P((struct channel_softc*));
179
180 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cmd0643_9_setup_channel __P((struct channel_softc*));
183 void cmd_channel_map __P((struct pci_attach_args *,
184 struct pciide_softc *, int));
185 int cmd_pci_intr __P((void *));
186 void cmd646_9_irqack __P((struct channel_softc *));
187 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
188 void cmd680_setup_channel __P((struct channel_softc*));
189 void cmd680_channel_map __P((struct pci_attach_args *,
190 struct pciide_softc *, int));
191
192 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void cmd3112_setup_channel __P((struct channel_softc*));
194
195 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void cy693_setup_channel __P((struct channel_softc*));
197
198 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void sis_setup_channel __P((struct channel_softc*));
200 void sis96x_setup_channel __P((struct channel_softc*));
201 static int sis_hostbr_match __P(( struct pci_attach_args *));
202 static int sis_south_match __P(( struct pci_attach_args *));
203
204 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
205 void acer_setup_channel __P((struct channel_softc*));
206 int acer_pci_intr __P((void *));
207
208 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
209 void pdc202xx_setup_channel __P((struct channel_softc*));
210 void pdc20268_setup_channel __P((struct channel_softc*));
211 int pdc202xx_pci_intr __P((void *));
212 int pdc20265_pci_intr __P((void *));
213 static void pdc20262_dma_start __P((void*, int, int));
214 static int pdc20262_dma_finish __P((void*, int, int, int));
215
216 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
217 void opti_setup_channel __P((struct channel_softc*));
218
219 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
220 void hpt_setup_channel __P((struct channel_softc*));
221 int hpt_pci_intr __P((void *));
222
223 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
224 void acard_setup_channel __P((struct channel_softc*));
225 int acard_pci_intr __P((void *));
226
227 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
228 void serverworks_setup_channel __P((struct channel_softc*));
229 int serverworks_pci_intr __P((void *));
230
231 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
232 void sl82c105_setup_channel __P((struct channel_softc*));
233
234 void pciide_channel_dma_setup __P((struct pciide_channel *));
235 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
236 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
237 void pciide_dma_start __P((void*, int, int));
238 int pciide_dma_finish __P((void*, int, int, int));
239 void pciide_irqack __P((struct channel_softc *));
240 void pciide_print_modes __P((struct pciide_channel *));
241
242 void artisea_chip_map __P((struct pciide_softc*, struct pci_attach_args *));
243
244 struct pciide_product_desc {
245 u_int32_t ide_product;
246 int ide_flags;
247 const char *ide_name;
248 /* map and setup chip, probe drives */
249 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
250 };
251
252 /* Flags for ide_flags */
253 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
254 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
255
/* Default product description for devices not known by this driver */
257 const struct pciide_product_desc default_product_desc = {
258 0,
259 0,
260 "Generic PCI IDE controller",
261 default_chip_map,
262 };
263
264 const struct pciide_product_desc pciide_intel_products[] = {
265 { PCI_PRODUCT_INTEL_82092AA,
266 0,
267 "Intel 82092AA IDE controller",
268 default_chip_map,
269 },
270 { PCI_PRODUCT_INTEL_82371FB_IDE,
271 0,
272 "Intel 82371FB IDE controller (PIIX)",
273 piix_chip_map,
274 },
275 { PCI_PRODUCT_INTEL_82371SB_IDE,
276 0,
277 "Intel 82371SB IDE Interface (PIIX3)",
278 piix_chip_map,
279 },
280 { PCI_PRODUCT_INTEL_82371AB_IDE,
281 0,
282 "Intel 82371AB IDE controller (PIIX4)",
283 piix_chip_map,
284 },
285 { PCI_PRODUCT_INTEL_82440MX_IDE,
286 0,
287 "Intel 82440MX IDE controller",
288 piix_chip_map
289 },
290 { PCI_PRODUCT_INTEL_82801AA_IDE,
291 0,
292 "Intel 82801AA IDE Controller (ICH)",
293 piix_chip_map,
294 },
295 { PCI_PRODUCT_INTEL_82801AB_IDE,
296 0,
297 "Intel 82801AB IDE Controller (ICH0)",
298 piix_chip_map,
299 },
300 { PCI_PRODUCT_INTEL_82801BA_IDE,
301 0,
302 "Intel 82801BA IDE Controller (ICH2)",
303 piix_chip_map,
304 },
305 { PCI_PRODUCT_INTEL_82801BAM_IDE,
306 0,
307 "Intel 82801BAM IDE Controller (ICH2-M)",
308 piix_chip_map,
309 },
310 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
311 0,
312 "Intel 82801CA IDE Controller (ICH3)",
313 piix_chip_map,
314 },
315 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
316 0,
317 "Intel 82801CA IDE Controller (ICH3)",
318 piix_chip_map,
319 },
320 { PCI_PRODUCT_INTEL_82801DB_IDE,
321 0,
322 "Intel 82801DB IDE Controller (ICH4)",
323 piix_chip_map,
324 },
325 { PCI_PRODUCT_INTEL_82801DBM_IDE,
326 0,
327 "Intel 82801DBM IDE Controller (ICH4-M)",
328 piix_chip_map,
329 },
330 { PCI_PRODUCT_INTEL_82801EB_IDE,
331 0,
332 "Intel 82801EB IDE Controller (ICH5)",
333 piix_chip_map,
334 },
335 { PCI_PRODUCT_INTEL_31244,
336 0,
337 "Intel 31244 Serial ATA Controller",
338 artisea_chip_map,
339 },
340 { 0,
341 0,
342 NULL,
343 NULL
344 }
345 };
346
347 const struct pciide_product_desc pciide_amd_products[] = {
348 { PCI_PRODUCT_AMD_PBC756_IDE,
349 0,
350 "Advanced Micro Devices AMD756 IDE Controller",
351 amd7x6_chip_map
352 },
353 { PCI_PRODUCT_AMD_PBC766_IDE,
354 0,
355 "Advanced Micro Devices AMD766 IDE Controller",
356 amd7x6_chip_map
357 },
358 { PCI_PRODUCT_AMD_PBC768_IDE,
359 0,
360 "Advanced Micro Devices AMD768 IDE Controller",
361 amd7x6_chip_map
362 },
363 { PCI_PRODUCT_AMD_PBC8111_IDE,
364 0,
365 "Advanced Micro Devices AMD8111 IDE Controller",
366 amd7x6_chip_map
367 },
368 { 0,
369 0,
370 NULL,
371 NULL
372 }
373 };
374
375 const struct pciide_product_desc pciide_nvidia_products[] = {
376 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
377 0,
378 "NVIDIA nForce IDE Controller",
379 amd7x6_chip_map
380 },
381 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
382 0,
383 "NVIDIA nForce2 IDE Controller",
384 amd7x6_chip_map
385 },
386 { 0,
387 0,
388 NULL,
389 NULL
390 }
391 };
392
393 const struct pciide_product_desc pciide_cmd_products[] = {
394 { PCI_PRODUCT_CMDTECH_640,
395 0,
396 "CMD Technology PCI0640",
397 cmd_chip_map
398 },
399 { PCI_PRODUCT_CMDTECH_643,
400 0,
401 "CMD Technology PCI0643",
402 cmd0643_9_chip_map,
403 },
404 { PCI_PRODUCT_CMDTECH_646,
405 0,
406 "CMD Technology PCI0646",
407 cmd0643_9_chip_map,
408 },
409 { PCI_PRODUCT_CMDTECH_648,
410 IDE_PCI_CLASS_OVERRIDE,
411 "CMD Technology PCI0648",
412 cmd0643_9_chip_map,
413 },
414 { PCI_PRODUCT_CMDTECH_649,
415 IDE_PCI_CLASS_OVERRIDE,
416 "CMD Technology PCI0649",
417 cmd0643_9_chip_map,
418 },
419 { PCI_PRODUCT_CMDTECH_680,
420 IDE_PCI_CLASS_OVERRIDE,
421 "Silicon Image 0680",
422 cmd680_chip_map,
423 },
424 { PCI_PRODUCT_CMDTECH_3112,
425 IDE_PCI_CLASS_OVERRIDE,
426 "Silicon Image SATALink 3112",
427 cmd3112_chip_map,
428 },
429 { 0,
430 0,
431 NULL,
432 NULL
433 }
434 };
435
436 const struct pciide_product_desc pciide_via_products[] = {
437 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
438 0,
439 NULL,
440 apollo_chip_map,
441 },
442 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
443 0,
444 NULL,
445 apollo_chip_map,
446 },
447 { 0,
448 0,
449 NULL,
450 NULL
451 }
452 };
453
454 const struct pciide_product_desc pciide_cypress_products[] = {
455 { PCI_PRODUCT_CONTAQ_82C693,
456 IDE_16BIT_IOSPACE,
457 "Cypress 82C693 IDE Controller",
458 cy693_chip_map,
459 },
460 { 0,
461 0,
462 NULL,
463 NULL
464 }
465 };
466
467 const struct pciide_product_desc pciide_sis_products[] = {
468 { PCI_PRODUCT_SIS_5597_IDE,
469 0,
470 NULL,
471 sis_chip_map,
472 },
473 { 0,
474 0,
475 NULL,
476 NULL
477 }
478 };
479
480 const struct pciide_product_desc pciide_acer_products[] = {
481 { PCI_PRODUCT_ALI_M5229,
482 0,
483 "Acer Labs M5229 UDMA IDE Controller",
484 acer_chip_map,
485 },
486 { 0,
487 0,
488 NULL,
489 NULL
490 }
491 };
492
493 const struct pciide_product_desc pciide_promise_products[] = {
494 { PCI_PRODUCT_PROMISE_ULTRA33,
495 IDE_PCI_CLASS_OVERRIDE,
496 "Promise Ultra33/ATA Bus Master IDE Accelerator",
497 pdc202xx_chip_map,
498 },
499 { PCI_PRODUCT_PROMISE_ULTRA66,
500 IDE_PCI_CLASS_OVERRIDE,
501 "Promise Ultra66/ATA Bus Master IDE Accelerator",
502 pdc202xx_chip_map,
503 },
504 { PCI_PRODUCT_PROMISE_ULTRA100,
505 IDE_PCI_CLASS_OVERRIDE,
506 "Promise Ultra100/ATA Bus Master IDE Accelerator",
507 pdc202xx_chip_map,
508 },
509 { PCI_PRODUCT_PROMISE_ULTRA100X,
510 IDE_PCI_CLASS_OVERRIDE,
511 "Promise Ultra100/ATA Bus Master IDE Accelerator",
512 pdc202xx_chip_map,
513 },
514 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
515 IDE_PCI_CLASS_OVERRIDE,
516 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
517 pdc202xx_chip_map,
518 },
519 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
520 IDE_PCI_CLASS_OVERRIDE,
521 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
522 pdc202xx_chip_map,
523 },
524 { PCI_PRODUCT_PROMISE_ULTRA133,
525 IDE_PCI_CLASS_OVERRIDE,
526 "Promise Ultra133/ATA Bus Master IDE Accelerator",
527 pdc202xx_chip_map,
528 },
529 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
530 IDE_PCI_CLASS_OVERRIDE,
531 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
532 pdc202xx_chip_map,
533 },
534 { PCI_PRODUCT_PROMISE_MBULTRA133,
535 IDE_PCI_CLASS_OVERRIDE,
536 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
537 pdc202xx_chip_map,
538 },
539 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
540 IDE_PCI_CLASS_OVERRIDE,
541 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
542 pdc202xx_chip_map,
543 },
544 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
545 IDE_PCI_CLASS_OVERRIDE,
546 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
547 pdc202xx_chip_map,
548 },
549 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
550 IDE_PCI_CLASS_OVERRIDE,
551 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
552 pdc202xx_chip_map,
553 },
554 { 0,
555 0,
556 NULL,
557 NULL
558 }
559 };
560
561 const struct pciide_product_desc pciide_opti_products[] = {
562 { PCI_PRODUCT_OPTI_82C621,
563 0,
564 "OPTi 82c621 PCI IDE controller",
565 opti_chip_map,
566 },
567 { PCI_PRODUCT_OPTI_82C568,
568 0,
569 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
570 opti_chip_map,
571 },
572 { PCI_PRODUCT_OPTI_82D568,
573 0,
574 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
575 opti_chip_map,
576 },
577 { 0,
578 0,
579 NULL,
580 NULL
581 }
582 };
583
584 const struct pciide_product_desc pciide_triones_products[] = {
585 { PCI_PRODUCT_TRIONES_HPT366,
586 IDE_PCI_CLASS_OVERRIDE,
587 NULL,
588 hpt_chip_map,
589 },
590 { PCI_PRODUCT_TRIONES_HPT372,
591 IDE_PCI_CLASS_OVERRIDE,
592 NULL,
593 hpt_chip_map
594 },
595 { PCI_PRODUCT_TRIONES_HPT374,
596 IDE_PCI_CLASS_OVERRIDE,
597 NULL,
598 hpt_chip_map
599 },
600 { 0,
601 0,
602 NULL,
603 NULL
604 }
605 };
606
607 const struct pciide_product_desc pciide_acard_products[] = {
608 { PCI_PRODUCT_ACARD_ATP850U,
609 IDE_PCI_CLASS_OVERRIDE,
610 "Acard ATP850U Ultra33 IDE Controller",
611 acard_chip_map,
612 },
613 { PCI_PRODUCT_ACARD_ATP860,
614 IDE_PCI_CLASS_OVERRIDE,
615 "Acard ATP860 Ultra66 IDE Controller",
616 acard_chip_map,
617 },
618 { PCI_PRODUCT_ACARD_ATP860A,
619 IDE_PCI_CLASS_OVERRIDE,
620 "Acard ATP860-A Ultra66 IDE Controller",
621 acard_chip_map,
622 },
623 { 0,
624 0,
625 NULL,
626 NULL
627 }
628 };
629
630 const struct pciide_product_desc pciide_serverworks_products[] = {
631 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
632 0,
633 "ServerWorks OSB4 IDE Controller",
634 serverworks_chip_map,
635 },
636 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
637 0,
638 "ServerWorks CSB5 IDE Controller",
639 serverworks_chip_map,
640 },
641 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
642 0,
643 "ServerWorks CSB6 RAID/IDE Controller",
644 serverworks_chip_map,
645 },
646 { 0,
647 0,
648 NULL,
649 }
650 };
651
652 const struct pciide_product_desc pciide_symphony_products[] = {
653 { PCI_PRODUCT_SYMPHONY_82C105,
654 0,
655 "Symphony Labs 82C105 IDE controller",
656 sl82c105_chip_map,
657 },
658 { 0,
659 0,
660 NULL,
661 }
662 };
663
664 const struct pciide_product_desc pciide_winbond_products[] = {
665 { PCI_PRODUCT_WINBOND_W83C553F_1,
666 0,
667 "Winbond W83C553F IDE controller",
668 sl82c105_chip_map,
669 },
670 { 0,
671 0,
672 NULL,
673 }
674 };
675
676 struct pciide_vendor_desc {
677 u_int32_t ide_vendor;
678 const struct pciide_product_desc *ide_products;
679 };
680
681 const struct pciide_vendor_desc pciide_vendors[] = {
682 { PCI_VENDOR_INTEL, pciide_intel_products },
683 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
684 { PCI_VENDOR_VIATECH, pciide_via_products },
685 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
686 { PCI_VENDOR_SIS, pciide_sis_products },
687 { PCI_VENDOR_ALI, pciide_acer_products },
688 { PCI_VENDOR_PROMISE, pciide_promise_products },
689 { PCI_VENDOR_AMD, pciide_amd_products },
690 { PCI_VENDOR_OPTI, pciide_opti_products },
691 { PCI_VENDOR_TRIONES, pciide_triones_products },
692 { PCI_VENDOR_ACARD, pciide_acard_products },
693 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
694 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
695 { PCI_VENDOR_WINBOND, pciide_winbond_products },
696 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
697 { 0, NULL }
698 };
699
700 /* options passed via the 'flags' config keyword */
701 #define PCIIDE_OPTIONS_DMA 0x01
702 #define PCIIDE_OPTIONS_NODMA 0x02
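
/*
 * Illustrative only: these bits correspond to the "flags" value of the
 * pciide attachment in a kernel config file, e.g.
 *	pciide* at pci? dev ? function ? flags 0x0002	# never use DMA
 */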
703
704 int pciide_match __P((struct device *, struct cfdata *, void *));
705 void pciide_attach __P((struct device *, struct device *, void *));
706
707 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
708 pciide_match, pciide_attach, NULL, NULL);
709
710 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
711 int pciide_mapregs_compat __P(( struct pci_attach_args *,
712 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
713 int pciide_mapregs_native __P((struct pci_attach_args *,
714 struct pciide_channel *, bus_size_t *, bus_size_t *,
715 int (*pci_intr) __P((void *))));
716 void pciide_mapreg_dma __P((struct pciide_softc *,
717 struct pci_attach_args *));
718 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
719 void pciide_mapchan __P((struct pci_attach_args *,
720 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
721 int (*pci_intr) __P((void *))));
722 int pciide_chan_candisable __P((struct pciide_channel *));
723 void pciide_map_compat_intr __P(( struct pci_attach_args *,
724 struct pciide_channel *, int, int));
725 int pciide_compat_intr __P((void *));
726 int pciide_pci_intr __P((void *));
727 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
728
729 const struct pciide_product_desc *
730 pciide_lookup_product(id)
731 u_int32_t id;
732 {
733 const struct pciide_product_desc *pp;
734 const struct pciide_vendor_desc *vp;
735
736 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
737 if (PCI_VENDOR(id) == vp->ide_vendor)
738 break;
739
740 if ((pp = vp->ide_products) == NULL)
741 return NULL;
742
743 for (; pp->chip_map != NULL; pp++)
744 if (PCI_PRODUCT(id) == pp->ide_product)
745 break;
746
747 if (pp->chip_map == NULL)
748 return NULL;
749 return pp;
750 }
751
752 int
753 pciide_match(parent, match, aux)
754 struct device *parent;
755 struct cfdata *match;
756 void *aux;
757 {
758 struct pci_attach_args *pa = aux;
759 const struct pciide_product_desc *pp;
760
	/*
	 * Check the class code to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
766 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
767 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
768 return (1);
769 }
770
	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers. Let's see if we can deal with them anyway.
	 */
775 pp = pciide_lookup_product(pa->pa_id);
776 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
777 return (1);
778 }
779
780 return (0);
781 }
782
783 void
784 pciide_attach(parent, self, aux)
785 struct device *parent, *self;
786 void *aux;
787 {
788 struct pci_attach_args *pa = aux;
789 pci_chipset_tag_t pc = pa->pa_pc;
790 pcitag_t tag = pa->pa_tag;
791 struct pciide_softc *sc = (struct pciide_softc *)self;
792 pcireg_t csr;
793 char devinfo[256];
794 const char *displaydev;
795
796 aprint_naive(": disk controller\n");
797
798 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
799 sc->sc_pp = pciide_lookup_product(pa->pa_id);
800 if (sc->sc_pp == NULL) {
801 sc->sc_pp = &default_product_desc;
802 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
803 displaydev = devinfo;
804 } else
805 displaydev = sc->sc_pp->ide_name;
806
807 /* if displaydev == NULL, printf is done in chip-specific map */
808 if (displaydev)
809 aprint_normal(": %s (rev. 0x%02x)\n", displaydev,
810 PCI_REVISION(pa->pa_class));
811
812 sc->sc_pc = pa->pa_pc;
813 sc->sc_tag = pa->pa_tag;
814
815 /* Set up DMA defaults; these might be adjusted by chip_map. */
816 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
817 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
818
819 #ifdef WDCDEBUG
820 if (wdcdebug_pciide_mask & DEBUG_PROBE)
821 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
822 #endif
823 sc->sc_pp->chip_map(sc, pa);
824
825 if (sc->sc_dma_ok) {
826 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
827 csr |= PCI_COMMAND_MASTER_ENABLE;
828 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
829 }
830 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
831 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
832 }
833
834 /* tell whether the chip is enabled or not */
835 int
836 pciide_chipen(sc, pa)
837 struct pciide_softc *sc;
838 struct pci_attach_args *pa;
839 {
840 pcireg_t csr;
841 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
842 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
843 PCI_COMMAND_STATUS_REG);
844 aprint_normal("%s: device disabled (at %s)\n",
845 sc->sc_wdcdev.sc_dev.dv_xname,
846 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
847 "device" : "bridge");
848 return 0;
849 }
850 return 1;
851 }
852
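/*
 * Map a channel at its legacy (compatibility) I/O addresses; compatchan
 * selects which of the two fixed compat channels this is.
 */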
853 int
854 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
855 struct pci_attach_args *pa;
856 struct pciide_channel *cp;
857 int compatchan;
858 bus_size_t *cmdsizep, *ctlsizep;
859 {
860 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
861 struct channel_softc *wdc_cp = &cp->wdc_channel;
862
863 cp->compat = 1;
864 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
865 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
866
867 wdc_cp->cmd_iot = pa->pa_iot;
868 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
869 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
870 aprint_error("%s: couldn't map %s channel cmd regs\n",
871 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
872 return (0);
873 }
874
875 wdc_cp->ctl_iot = pa->pa_iot;
876 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
877 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
878 aprint_error("%s: couldn't map %s channel ctl regs\n",
879 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
880 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
881 PCIIDE_COMPAT_CMD_SIZE);
882 return (0);
883 }
884
885 return (1);
886 }
887
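/*
 * Map the command and control registers of a native-PCI channel from the
 * device's BARs, establishing the (shared) PCI interrupt on first use.
 */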
888 int
889 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
890 struct pci_attach_args * pa;
891 struct pciide_channel *cp;
892 bus_size_t *cmdsizep, *ctlsizep;
893 int (*pci_intr) __P((void *));
894 {
895 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
896 struct channel_softc *wdc_cp = &cp->wdc_channel;
897 const char *intrstr;
898 pci_intr_handle_t intrhandle;
899
900 cp->compat = 0;
901
902 if (sc->sc_pci_ih == NULL) {
903 if (pci_intr_map(pa, &intrhandle) != 0) {
904 aprint_error("%s: couldn't map native-PCI interrupt\n",
905 sc->sc_wdcdev.sc_dev.dv_xname);
906 return 0;
907 }
908 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
909 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
910 intrhandle, IPL_BIO, pci_intr, sc);
911 if (sc->sc_pci_ih != NULL) {
912 aprint_normal("%s: using %s for native-PCI interrupt\n",
913 sc->sc_wdcdev.sc_dev.dv_xname,
914 intrstr ? intrstr : "unknown interrupt");
915 } else {
916 aprint_error(
917 "%s: couldn't establish native-PCI interrupt",
918 sc->sc_wdcdev.sc_dev.dv_xname);
919 if (intrstr != NULL)
920 aprint_normal(" at %s", intrstr);
921 aprint_normal("\n");
922 return 0;
923 }
924 }
925 cp->ih = sc->sc_pci_ih;
926 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
927 PCI_MAPREG_TYPE_IO, 0,
928 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
929 aprint_error("%s: couldn't map %s channel cmd regs\n",
930 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
931 return 0;
932 }
933
934 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
935 PCI_MAPREG_TYPE_IO, 0,
936 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
937 aprint_error("%s: couldn't map %s channel ctl regs\n",
938 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
939 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
940 return 0;
941 }
942 /*
943 * In native mode, 4 bytes of I/O space are mapped for the control
944 * register, the control register is at offset 2. Pass the generic
945 * code a handle for only one byte at the right offset.
946 */
947 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
948 &wdc_cp->ctl_ioh) != 0) {
949 aprint_error("%s: unable to subregion %s channel ctl regs\n",
950 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
951 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
953 return 0;
954 }
955 return (1);
956 }
957
958 void
959 pciide_mapreg_dma(sc, pa)
960 struct pciide_softc *sc;
961 struct pci_attach_args *pa;
962 {
963 pcireg_t maptype;
964 bus_addr_t addr;
965
966 /*
967 * Map DMA registers
968 *
969 * Note that sc_dma_ok is the right variable to test to see if
970 * DMA can be done. If the interface doesn't support DMA,
971 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
972 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
973 * non-zero if the interface supports DMA and the registers
974 * could be mapped.
975 *
976 * XXX Note that despite the fact that the Bus Master IDE specs
977 * XXX say that "The bus master IDE function uses 16 bytes of IO
978 * XXX space," some controllers (at least the United
979 * XXX Microelectronics UM8886BF) place it in memory space.
980 */
981 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
982 PCIIDE_REG_BUS_MASTER_DMA);
983
984 switch (maptype) {
985 case PCI_MAPREG_TYPE_IO:
986 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
987 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
988 &addr, NULL, NULL) == 0);
989 if (sc->sc_dma_ok == 0) {
990 aprint_normal(
991 ", but unused (couldn't query registers)");
992 break;
993 }
994 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
995 && addr >= 0x10000) {
996 sc->sc_dma_ok = 0;
997 aprint_normal(
998 ", but unused (registers at unsafe address "
999 "%#lx)", (unsigned long)addr);
1000 break;
1001 }
1002 /* FALLTHROUGH */
1003
1004 case PCI_MAPREG_MEM_TYPE_32BIT:
1005 sc->sc_dma_ok = (pci_mapreg_map(pa,
1006 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
1007 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
1008 sc->sc_dmat = pa->pa_dmat;
1009 if (sc->sc_dma_ok == 0) {
1010 aprint_normal(", but unused (couldn't map registers)");
1011 } else {
1012 sc->sc_wdcdev.dma_arg = sc;
1013 sc->sc_wdcdev.dma_init = pciide_dma_init;
1014 sc->sc_wdcdev.dma_start = pciide_dma_start;
1015 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
1016 }
1017
1018 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1019 PCIIDE_OPTIONS_NODMA) {
1020 aprint_normal(
1021 ", but unused (forced off by config file)");
1022 sc->sc_dma_ok = 0;
1023 }
1024 break;
1025
1026 default:
1027 sc->sc_dma_ok = 0;
1028 aprint_normal(
1029 ", but unsupported register maptype (0x%x)", maptype);
1030 }
1031 }
1032
1033 int
1034 pciide_compat_intr(arg)
1035 void *arg;
1036 {
1037 struct pciide_channel *cp = arg;
1038
1039 #ifdef DIAGNOSTIC
1040 /* should only be called for a compat channel */
1041 if (cp->compat == 0)
1042 panic("pciide compat intr called for non-compat chan %p", cp);
1043 #endif
1044 return (wdcintr(&cp->wdc_channel));
1045 }
1046
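/*
 * Native-PCI interrupt handler: the channels share a single PCI interrupt,
 * so poll every non-compat channel that is actually waiting for one.
 */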
1047 int
1048 pciide_pci_intr(arg)
1049 void *arg;
1050 {
1051 struct pciide_softc *sc = arg;
1052 struct pciide_channel *cp;
1053 struct channel_softc *wdc_cp;
1054 int i, rv, crv;
1055
1056 rv = 0;
1057 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1058 cp = &sc->pciide_channels[i];
1059 wdc_cp = &cp->wdc_channel;
1060
		/* If a compat channel, skip. */
		if (cp->compat)
			continue;
		/* If this channel is not waiting for an interrupt, skip. */
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
			continue;
1067
1068 crv = wdcintr(wdc_cp);
1069 if (crv == 0)
1070 ; /* leave rv alone */
1071 else if (crv == 1)
1072 rv = 1; /* claim the intr */
1073 else if (rv == 0) /* crv should be -1 in this case */
1074 rv = crv; /* if we've done no better, take it */
1075 }
1076 return (rv);
1077 }
1078
1079 void
1080 pciide_channel_dma_setup(cp)
1081 struct pciide_channel *cp;
1082 {
1083 int drive;
1084 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1085 struct ata_drive_datas *drvp;
1086
1087 for (drive = 0; drive < 2; drive++) {
1088 drvp = &cp->wdc_channel.ch_drive[drive];
1089 /* If no drive, skip */
1090 if ((drvp->drive_flags & DRIVE) == 0)
1091 continue;
1092 /* setup DMA if needed */
1093 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1094 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1095 sc->sc_dma_ok == 0) {
1096 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1097 continue;
1098 }
1099 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1100 != 0) {
1101 /* Abort DMA setup */
1102 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1103 continue;
1104 }
1105 }
1106 }
1107
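/*
 * Allocate, map and load the physical region descriptor (PRD) table for
 * one drive, and create the DMA map later used for its data transfers.
 */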
1108 int
1109 pciide_dma_table_setup(sc, channel, drive)
1110 struct pciide_softc *sc;
1111 int channel, drive;
1112 {
1113 bus_dma_segment_t seg;
1114 int error, rseg;
1115 const bus_size_t dma_table_size =
1116 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1117 struct pciide_dma_maps *dma_maps =
1118 &sc->pciide_channels[channel].dma_maps[drive];
1119
1120 /* If table was already allocated, just return */
1121 if (dma_maps->dma_table)
1122 return 0;
1123
1124 /* Allocate memory for the DMA tables and map it */
1125 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1126 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1127 BUS_DMA_NOWAIT)) != 0) {
1128 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1129 "allocate", drive, error);
1130 return error;
1131 }
1132 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1133 dma_table_size,
1134 (caddr_t *)&dma_maps->dma_table,
1135 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1136 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1137 "map", drive, error);
1138 return error;
1139 }
1140 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1141 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1142 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1143 /* Create and load table DMA map for this disk */
1144 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1145 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1146 &dma_maps->dmamap_table)) != 0) {
1147 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1148 "create", drive, error);
1149 return error;
1150 }
1151 if ((error = bus_dmamap_load(sc->sc_dmat,
1152 dma_maps->dmamap_table,
1153 dma_maps->dma_table,
1154 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1155 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1156 "load", drive, error);
1157 return error;
1158 }
1159 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1160 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1161 DEBUG_PROBE);
1162 /* Create a xfer DMA map for this drive */
1163 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1164 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1165 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1166 &dma_maps->dmamap_xfer)) != 0) {
1167 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1168 "create xfer", drive, error);
1169 return error;
1170 }
1171 return 0;
1172 }
1173
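/*
 * Load the data buffer into the xfer DMA map, fill in one PRD entry per
 * segment and program the bus-master registers for the coming transfer.
 */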
1174 int
1175 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1176 void *v;
1177 int channel, drive;
1178 void *databuf;
1179 size_t datalen;
1180 int flags;
1181 {
1182 struct pciide_softc *sc = v;
1183 int error, seg;
1184 struct pciide_dma_maps *dma_maps =
1185 &sc->pciide_channels[channel].dma_maps[drive];
1186
1187 error = bus_dmamap_load(sc->sc_dmat,
1188 dma_maps->dmamap_xfer,
1189 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1190 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1191 if (error) {
1192 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1193 "load xfer", drive, error);
1194 return error;
1195 }
1196
1197 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1198 dma_maps->dmamap_xfer->dm_mapsize,
1199 (flags & WDC_DMA_READ) ?
1200 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1201
1202 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1203 #ifdef DIAGNOSTIC
1204 /* A segment must not cross a 64k boundary */
1205 {
1206 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1207 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1208 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1209 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1210 printf("pciide_dma: segment %d physical addr 0x%lx"
1211 " len 0x%lx not properly aligned\n",
1212 seg, phys, len);
1213 panic("pciide_dma: buf align");
1214 }
1215 }
1216 #endif
1217 dma_maps->dma_table[seg].base_addr =
1218 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1219 dma_maps->dma_table[seg].byte_count =
1220 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1221 IDEDMA_BYTE_COUNT_MASK);
1222 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1223 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1224 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1225
1226 }
1227 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1228 htole32(IDEDMA_BYTE_COUNT_EOT);
1229
1230 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1231 dma_maps->dmamap_table->dm_mapsize,
1232 BUS_DMASYNC_PREWRITE);
1233
1234 /* Maps are ready. Start DMA function */
1235 #ifdef DIAGNOSTIC
1236 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1237 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1238 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1239 panic("pciide_dma_init: table align");
1240 }
1241 #endif
1242
1243 /* Clear status bits */
1244 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1245 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1246 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1247 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1248 /* Write table addr */
1249 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1250 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1251 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1252 /* set read/write */
1253 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1254 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1255 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1256 /* remember flags */
1257 dma_maps->dma_flags = flags;
1258 return 0;
1259 }
1260
1261 void
1262 pciide_dma_start(v, channel, drive)
1263 void *v;
1264 int channel, drive;
1265 {
1266 struct pciide_softc *sc = v;
1267
1268 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1269 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1270 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1271 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1272 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1273 }
1274
1275 int
1276 pciide_dma_finish(v, channel, drive, force)
1277 void *v;
1278 int channel, drive;
1279 int force;
1280 {
1281 struct pciide_softc *sc = v;
1282 u_int8_t status;
1283 int error = 0;
1284 struct pciide_dma_maps *dma_maps =
1285 &sc->pciide_channels[channel].dma_maps[drive];
1286
1287 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1288 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1289 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1290 DEBUG_XFERS);
1291
1292 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1293 return WDC_DMAST_NOIRQ;
1294
1295 /* stop DMA channel */
1296 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1297 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1298 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1299 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1300
1301 /* Unload the map of the data buffer */
1302 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1303 dma_maps->dmamap_xfer->dm_mapsize,
1304 (dma_maps->dma_flags & WDC_DMA_READ) ?
1305 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1306 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1307
1308 if ((status & IDEDMA_CTL_ERR) != 0) {
1309 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1310 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1311 error |= WDC_DMAST_ERR;
1312 }
1313
1314 if ((status & IDEDMA_CTL_INTR) == 0) {
1315 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1316 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1317 drive, status);
1318 error |= WDC_DMAST_NOIRQ;
1319 }
1320
1321 if ((status & IDEDMA_CTL_ACT) != 0) {
1322 /* data underrun, may be a valid condition for ATAPI */
1323 error |= WDC_DMAST_UNDER;
1324 }
1325 return error;
1326 }
1327
1328 void
1329 pciide_irqack(chp)
1330 struct channel_softc *chp;
1331 {
1332 struct pciide_channel *cp = (struct pciide_channel*)chp;
1333 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1334
1335 /* clear status bits in IDE DMA registers */
1336 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1337 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1338 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1339 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1340 }
1341
/* some common code used by several chip_map functions */
1343 int
1344 pciide_chansetup(sc, channel, interface)
1345 struct pciide_softc *sc;
1346 int channel;
1347 pcireg_t interface;
1348 {
1349 struct pciide_channel *cp = &sc->pciide_channels[channel];
1350 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1351 cp->name = PCIIDE_CHANNEL_NAME(channel);
1352 cp->wdc_channel.channel = channel;
1353 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1354 cp->wdc_channel.ch_queue =
1355 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		aprint_error("%s %s channel: "
		    "can't allocate memory for command queue\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1360 return 0;
1361 }
1362 aprint_normal("%s: %s channel %s to %s mode\n",
1363 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1364 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1365 "configured" : "wired",
1366 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1367 "native-PCI" : "compatibility");
1368 return 1;
1369 }
1370
/* some common code used by several chip_map functions to map a channel */
1372 void
1373 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1374 struct pci_attach_args *pa;
1375 struct pciide_channel *cp;
1376 pcireg_t interface;
1377 bus_size_t *cmdsizep, *ctlsizep;
1378 int (*pci_intr) __P((void *));
1379 {
1380 struct channel_softc *wdc_cp = &cp->wdc_channel;
1381
1382 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1383 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1384 pci_intr);
1385 else
1386 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1387 wdc_cp->channel, cmdsizep, ctlsizep);
1388
1389 if (cp->hw_ok == 0)
1390 return;
1391 wdc_cp->data32iot = wdc_cp->cmd_iot;
1392 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1393 wdcattach(wdc_cp);
1394 }
1395
/*
 * Generic code to check whether a channel can be disabled. Returns 1
 * if the channel can be disabled, 0 if not.
 */
1400 int
1401 pciide_chan_candisable(cp)
1402 struct pciide_channel *cp;
1403 {
1404 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1405 struct channel_softc *wdc_cp = &cp->wdc_channel;
1406
1407 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1408 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1409 aprint_normal("%s: disabling %s channel (no drives)\n",
1410 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1411 cp->hw_ok = 0;
1412 return 1;
1413 }
1414 return 0;
1415 }
1416
1417 /*
1418 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1419 * Set hw_ok=0 on failure
1420 */
1421 void
1422 pciide_map_compat_intr(pa, cp, compatchan, interface)
1423 struct pci_attach_args *pa;
1424 struct pciide_channel *cp;
1425 int compatchan, interface;
1426 {
1427 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1428 struct channel_softc *wdc_cp = &cp->wdc_channel;
1429
1430 if (cp->hw_ok == 0)
1431 return;
1432 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1433 return;
1434
1435 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1436 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1437 pa, compatchan, pciide_compat_intr, cp);
1438 if (cp->ih == NULL) {
1439 #endif
1440 aprint_error("%s: no compatibility interrupt for use by %s "
1441 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1442 cp->hw_ok = 0;
1443 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1444 }
1445 #endif
1446 }
1447
1448 void
1449 pciide_print_modes(cp)
1450 struct pciide_channel *cp;
1451 {
1452 wdc_print_modes(&cp->wdc_channel);
1453 }
1454
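/*
 * Fallback chip_map used when there is no chip-specific support: map and
 * probe each channel, and for unrecognized controllers enable bus-master
 * DMA only when the config file explicitly requests it (PCIIDE_OPTIONS_DMA).
 */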
1455 void
1456 default_chip_map(sc, pa)
1457 struct pciide_softc *sc;
1458 struct pci_attach_args *pa;
1459 {
1460 struct pciide_channel *cp;
1461 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1462 pcireg_t csr;
1463 int channel, drive;
1464 struct ata_drive_datas *drvp;
1465 u_int8_t idedma_ctl;
1466 bus_size_t cmdsize, ctlsize;
1467 char *failreason;
1468
1469 if (pciide_chipen(sc, pa) == 0)
1470 return;
1471
1472 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1473 aprint_normal("%s: bus-master DMA support present",
1474 sc->sc_wdcdev.sc_dev.dv_xname);
1475 if (sc->sc_pp == &default_product_desc &&
1476 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1477 PCIIDE_OPTIONS_DMA) == 0) {
1478 aprint_normal(", but unused (no driver support)");
1479 sc->sc_dma_ok = 0;
1480 } else {
1481 pciide_mapreg_dma(sc, pa);
1482 if (sc->sc_dma_ok != 0)
1483 aprint_normal(", used without full driver "
1484 "support");
1485 }
1486 } else {
1487 aprint_normal("%s: hardware does not support DMA",
1488 sc->sc_wdcdev.sc_dev.dv_xname);
1489 sc->sc_dma_ok = 0;
1490 }
1491 aprint_normal("\n");
1492 if (sc->sc_dma_ok) {
1493 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1494 sc->sc_wdcdev.irqack = pciide_irqack;
1495 }
1496 sc->sc_wdcdev.PIO_cap = 0;
1497 sc->sc_wdcdev.DMA_cap = 0;
1498
1499 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1500 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1501 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1502
1503 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1504 cp = &sc->pciide_channels[channel];
1505 if (pciide_chansetup(sc, channel, interface) == 0)
1506 continue;
1507 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1508 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1509 &ctlsize, pciide_pci_intr);
1510 } else {
1511 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1512 channel, &cmdsize, &ctlsize);
1513 }
1514 if (cp->hw_ok == 0)
1515 continue;
1516 /*
1517 * Check to see if something appears to be there.
1518 */
1519 failreason = NULL;
1520 if (!wdcprobe(&cp->wdc_channel)) {
1521 failreason = "not responding; disabled or no drives?";
1522 goto next;
1523 }
1524 /*
1525 * Now, make sure it's actually attributable to this PCI IDE
1526 * channel by trying to access the channel again while the
1527 * PCI IDE controller's I/O space is disabled. (If the
1528 * channel no longer appears to be there, it belongs to
1529 * this controller.) YUCK!
1530 */
1531 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1532 PCI_COMMAND_STATUS_REG);
1533 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1534 csr & ~PCI_COMMAND_IO_ENABLE);
1535 if (wdcprobe(&cp->wdc_channel))
1536 failreason = "other hardware responding at addresses";
1537 pci_conf_write(sc->sc_pc, sc->sc_tag,
1538 PCI_COMMAND_STATUS_REG, csr);
1539 next:
1540 if (failreason) {
1541 aprint_error("%s: %s channel ignored (%s)\n",
1542 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1543 failreason);
1544 cp->hw_ok = 0;
1545 bus_space_unmap(cp->wdc_channel.cmd_iot,
1546 cp->wdc_channel.cmd_ioh, cmdsize);
1547 if (interface & PCIIDE_INTERFACE_PCI(channel))
1548 bus_space_unmap(cp->wdc_channel.ctl_iot,
1549 cp->ctl_baseioh, ctlsize);
1550 else
1551 bus_space_unmap(cp->wdc_channel.ctl_iot,
1552 cp->wdc_channel.ctl_ioh, ctlsize);
1553 } else {
1554 pciide_map_compat_intr(pa, cp, channel, interface);
1555 }
1556 if (cp->hw_ok) {
1557 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1558 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1559 wdcattach(&cp->wdc_channel);
1560 }
1561 }
1562
1563 if (sc->sc_dma_ok == 0)
1564 return;
1565
1566 /* Allocate DMA maps */
1567 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1568 idedma_ctl = 0;
1569 cp = &sc->pciide_channels[channel];
1570 for (drive = 0; drive < 2; drive++) {
1571 drvp = &cp->wdc_channel.ch_drive[drive];
1572 /* If no drive, skip */
1573 if ((drvp->drive_flags & DRIVE) == 0)
1574 continue;
1575 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1576 continue;
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup */
				aprint_error(
				    "%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive);
				drvp->drive_flags &= ~DRIVE_DMA;
				/* don't claim DMA for a drive forced to PIO */
				continue;
			}
1586 aprint_normal("%s:%d:%d: using DMA data transfers\n",
1587 sc->sc_wdcdev.sc_dev.dv_xname,
1588 channel, drive);
1589 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1590 }
1591 if (idedma_ctl != 0) {
1592 /* Add software bits in status register */
1593 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1594 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1595 idedma_ctl);
1596 }
1597 }
1598 }
1599
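/*
 * Common channel setup for the S-ATA controllers driven by this file:
 * there are no timing modes to program, only the bus-master DMA status
 * bits for drives that will use (U)DMA.
 */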
1600 void
1601 sata_setup_channel(chp)
1602 struct channel_softc *chp;
1603 {
1604 struct ata_drive_datas *drvp;
1605 int drive;
1606 u_int32_t idedma_ctl;
1607 struct pciide_channel *cp = (struct pciide_channel*)chp;
1608 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
1609
1610 /* setup DMA if needed */
1611 pciide_channel_dma_setup(cp);
1612
1613 idedma_ctl = 0;
1614
1615 for (drive = 0; drive < 2; drive++) {
1616 drvp = &chp->ch_drive[drive];
1617 /* If no drive, skip */
1618 if ((drvp->drive_flags & DRIVE) == 0)
1619 continue;
1620 if (drvp->drive_flags & DRIVE_UDMA) {
1621 /* use Ultra/DMA */
1622 drvp->drive_flags &= ~DRIVE_DMA;
1623 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1624 } else if (drvp->drive_flags & DRIVE_DMA) {
1625 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1626 }
1627 }
1628
	/*
	 * Nothing to do to set up modes; they are meaningless for S-ATA
	 * (but many S-ATA drives still want to receive the SET FEATURES
	 * command).
	 */
1634 if (idedma_ctl != 0) {
1635 /* Add software bits in status register */
1636 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1637 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1638 idedma_ctl);
1639 }
1640 pciide_print_modes(cp);
1641 }
1642
1643 void
1644 piix_chip_map(sc, pa)
1645 struct pciide_softc *sc;
1646 struct pci_attach_args *pa;
1647 {
1648 struct pciide_channel *cp;
1649 int channel;
1650 u_int32_t idetim;
1651 bus_size_t cmdsize, ctlsize;
1652
1653 if (pciide_chipen(sc, pa) == 0)
1654 return;
1655
1656 aprint_normal("%s: bus-master DMA support present",
1657 sc->sc_wdcdev.sc_dev.dv_xname);
1658 pciide_mapreg_dma(sc, pa);
1659 aprint_normal("\n");
1660 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1661 WDC_CAPABILITY_MODE;
1662 if (sc->sc_dma_ok) {
1663 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1664 sc->sc_wdcdev.irqack = pciide_irqack;
1665 switch(sc->sc_pp->ide_product) {
1666 case PCI_PRODUCT_INTEL_82371AB_IDE:
1667 case PCI_PRODUCT_INTEL_82440MX_IDE:
1668 case PCI_PRODUCT_INTEL_82801AA_IDE:
1669 case PCI_PRODUCT_INTEL_82801AB_IDE:
1670 case PCI_PRODUCT_INTEL_82801BA_IDE:
1671 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1672 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1673 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1674 case PCI_PRODUCT_INTEL_82801DB_IDE:
1675 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1676 case PCI_PRODUCT_INTEL_82801EB_IDE:
1677 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1678 }
1679 }
1680 sc->sc_wdcdev.PIO_cap = 4;
1681 sc->sc_wdcdev.DMA_cap = 2;
1682 switch(sc->sc_pp->ide_product) {
1683 case PCI_PRODUCT_INTEL_82801AA_IDE:
1684 sc->sc_wdcdev.UDMA_cap = 4;
1685 break;
1686 case PCI_PRODUCT_INTEL_82801BA_IDE:
1687 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1688 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1689 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1690 case PCI_PRODUCT_INTEL_82801DB_IDE:
1691 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1692 case PCI_PRODUCT_INTEL_82801EB_IDE:
1693 sc->sc_wdcdev.UDMA_cap = 5;
1694 break;
1695 default:
1696 sc->sc_wdcdev.UDMA_cap = 2;
1697 }
1698 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1699 sc->sc_wdcdev.set_modes = piix_setup_channel;
1700 else
1701 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1702 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1703 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1704
1705 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1706 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1707 DEBUG_PROBE);
1708 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1709 WDCDEBUG_PRINT((", sidetim=0x%x",
1710 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1711 DEBUG_PROBE);
1712 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1713 WDCDEBUG_PRINT((", udamreg 0x%x",
1714 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1715 DEBUG_PROBE);
1716 }
1717 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1718 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1719 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1720 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1721 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1722 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1723 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1724 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1725 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1726 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1727 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1728 DEBUG_PROBE);
1729 }
1730
1731 }
1732 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1733
1734 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1735 cp = &sc->pciide_channels[channel];
1736 /* PIIX is compat-only */
1737 if (pciide_chansetup(sc, channel, 0) == 0)
1738 continue;
1739 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1740 if ((PIIX_IDETIM_READ(idetim, channel) &
1741 PIIX_IDETIM_IDE) == 0) {
1742 aprint_normal("%s: %s channel ignored (disabled)\n",
1743 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1744 continue;
1745 }
1746 /* PIIX are compat-only pciide devices */
1747 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1748 if (cp->hw_ok == 0)
1749 continue;
1750 if (pciide_chan_candisable(cp)) {
1751 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1752 channel);
1753 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1754 idetim);
1755 }
1756 pciide_map_compat_intr(pa, cp, channel, 0);
1757 if (cp->hw_ok == 0)
1758 continue;
1759 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1760 }
1761
1762 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1763 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1764 DEBUG_PROBE);
1765 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1766 WDCDEBUG_PRINT((", sidetim=0x%x",
1767 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1768 DEBUG_PROBE);
1769 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1770 WDCDEBUG_PRINT((", udamreg 0x%x",
1771 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1772 DEBUG_PROBE);
1773 }
1774 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1775 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1776 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1777 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1778 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1779 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1780 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1781 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1782 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1783 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1784 DEBUG_PROBE);
1785 }
1786 }
1787 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1788 }
1789
1790 void
1791 piix_setup_channel(chp)
1792 struct channel_softc *chp;
1793 {
1794 u_int8_t mode[2], drive;
1795 u_int32_t oidetim, idetim, idedma_ctl;
1796 struct pciide_channel *cp = (struct pciide_channel*)chp;
1797 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1798 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1799
1800 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1801 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1802 idedma_ctl = 0;
1803
1804 /* set up new idetim: Enable IDE registers decode */
1805 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1806 chp->channel);
1807
1808 /* setup DMA */
1809 pciide_channel_dma_setup(cp);
1810
1811 /*
1812 	 * Here we have to mess with the drives' modes: the PIIX can't have
1813 	 * different timings for the master and slave drives.
1814 * We need to find the best combination.
1815 */
1816
1817 	/* If both drives support DMA, take the lower mode */
1818 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1819 (drvp[1].drive_flags & DRIVE_DMA)) {
1820 mode[0] = mode[1] =
1821 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1822 drvp[0].DMA_mode = mode[0];
1823 drvp[1].DMA_mode = mode[1];
1824 goto ok;
1825 }
1826 /*
1827 	 * If only one drive supports DMA, use its mode, and
1828 	 * put the other one in PIO mode 0 if its timings are not compatible
1829 */
1830 if (drvp[0].drive_flags & DRIVE_DMA) {
1831 mode[0] = drvp[0].DMA_mode;
1832 mode[1] = drvp[1].PIO_mode;
1833 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1834 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1835 mode[1] = drvp[1].PIO_mode = 0;
1836 goto ok;
1837 }
1838 if (drvp[1].drive_flags & DRIVE_DMA) {
1839 mode[1] = drvp[1].DMA_mode;
1840 mode[0] = drvp[0].PIO_mode;
1841 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1842 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1843 mode[0] = drvp[0].PIO_mode = 0;
1844 goto ok;
1845 }
1846 /*
1847 	 * If neither drive uses DMA, take the lower mode, unless
1848 	 * one of them is in PIO mode < 2
1849 */
1850 if (drvp[0].PIO_mode < 2) {
1851 mode[0] = drvp[0].PIO_mode = 0;
1852 mode[1] = drvp[1].PIO_mode;
1853 } else if (drvp[1].PIO_mode < 2) {
1854 mode[1] = drvp[1].PIO_mode = 0;
1855 mode[0] = drvp[0].PIO_mode;
1856 } else {
1857 mode[0] = mode[1] =
1858 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1859 drvp[0].PIO_mode = mode[0];
1860 drvp[1].PIO_mode = mode[1];
1861 }
1862 ok:	/* The modes are set up */
1863 for (drive = 0; drive < 2; drive++) {
1864 if (drvp[drive].drive_flags & DRIVE_DMA) {
1865 idetim |= piix_setup_idetim_timings(
1866 mode[drive], 1, chp->channel);
1867 goto end;
1868 }
1869 }
1870 	/* If we get here, neither drive uses DMA */
1871 if (mode[0] >= 2)
1872 idetim |= piix_setup_idetim_timings(
1873 mode[0], 0, chp->channel);
1874 else
1875 idetim |= piix_setup_idetim_timings(
1876 mode[1], 0, chp->channel);
1877 end: /*
1878 * timing mode is now set up in the controller. Enable
1879 * it per-drive
1880 */
1881 for (drive = 0; drive < 2; drive++) {
1882 /* If no drive, skip */
1883 if ((drvp[drive].drive_flags & DRIVE) == 0)
1884 continue;
1885 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1886 if (drvp[drive].drive_flags & DRIVE_DMA)
1887 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1888 }
1889 if (idedma_ctl != 0) {
1890 /* Add software bits in status register */
1891 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1892 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1893 idedma_ctl);
1894 }
1895 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1896 pciide_print_modes(cp);
1897 }
1898
1899 void
1900 piix3_4_setup_channel(chp)
1901 struct channel_softc *chp;
1902 {
1903 struct ata_drive_datas *drvp;
1904 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1905 struct pciide_channel *cp = (struct pciide_channel*)chp;
1906 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1907 int drive;
1908 int channel = chp->channel;
1909
1910 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1911 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1912 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1913 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1914 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1915 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1916 PIIX_SIDETIM_RTC_MASK(channel));
1917
1918 idedma_ctl = 0;
1919 /* If channel disabled, no need to go further */
1920 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1921 return;
1922 /* set up new idetim: Enable IDE registers decode */
1923 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1924
1925 /* setup DMA if needed */
1926 pciide_channel_dma_setup(cp);
1927
1928 for (drive = 0; drive < 2; drive++) {
1929 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1930 PIIX_UDMATIM_SET(0x3, channel, drive));
1931 drvp = &chp->ch_drive[drive];
1932 /* If no drive, skip */
1933 if ((drvp->drive_flags & DRIVE) == 0)
1934 continue;
1935 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1936 (drvp->drive_flags & DRIVE_UDMA) == 0))
1937 goto pio;
1938
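		/*
		 * On the ICH-family controllers listed below,
		 * PIIX_CONFIG_PINGPONG presumably enables the chip's
		 * ping-pong data buffer whenever a drive uses DMA.
		 */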
1939 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1940 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1941 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1942 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1943 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1944 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1945 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1946 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1947 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1948 ideconf |= PIIX_CONFIG_PINGPONG;
1949 }
1950 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1951 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1952 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1953 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1954 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1955 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1956 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1957 /* setup Ultra/100 */
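			/*
			 * PIIX_CONFIG_CR apparently reports an 80-conductor
			 * cable for this drive; without it, cap at UDMA2.
			 */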
1958 if (drvp->UDMA_mode > 2 &&
1959 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1960 drvp->UDMA_mode = 2;
1961 if (drvp->UDMA_mode > 4) {
1962 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1963 } else {
1964 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1965 if (drvp->UDMA_mode > 2) {
1966 ideconf |= PIIX_CONFIG_UDMA66(channel,
1967 drive);
1968 } else {
1969 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1970 drive);
1971 }
1972 }
1973 }
1974 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1975 /* setup Ultra/66 */
1976 if (drvp->UDMA_mode > 2 &&
1977 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1978 drvp->UDMA_mode = 2;
1979 if (drvp->UDMA_mode > 2)
1980 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1981 else
1982 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1983 }
1984 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1985 (drvp->drive_flags & DRIVE_UDMA)) {
1986 /* use Ultra/DMA */
1987 drvp->drive_flags &= ~DRIVE_DMA;
1988 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1989 udmareg |= PIIX_UDMATIM_SET(
1990 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1991 } else {
1992 /* use Multiword DMA */
1993 drvp->drive_flags &= ~DRIVE_UDMA;
1994 if (drive == 0) {
1995 idetim |= piix_setup_idetim_timings(
1996 drvp->DMA_mode, 1, channel);
1997 } else {
1998 sidetim |= piix_setup_sidetim_timings(
1999 drvp->DMA_mode, 1, channel);
2000 				idetim = PIIX_IDETIM_SET(idetim,
2001 PIIX_IDETIM_SITRE, channel);
2002 }
2003 }
2004 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2005
2006 pio: /* use PIO mode */
2007 idetim |= piix_setup_idetim_drvs(drvp);
2008 if (drive == 0) {
2009 idetim |= piix_setup_idetim_timings(
2010 drvp->PIO_mode, 0, channel);
2011 } else {
2012 sidetim |= piix_setup_sidetim_timings(
2013 drvp->PIO_mode, 0, channel);
2014 			idetim = PIIX_IDETIM_SET(idetim,
2015 PIIX_IDETIM_SITRE, channel);
2016 }
2017 }
2018 if (idedma_ctl != 0) {
2019 /* Add software bits in status register */
2020 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2021 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
2022 idedma_ctl);
2023 }
2024 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
2025 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
2026 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
2027 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
2028 pciide_print_modes(cp);
2029 }
2030
2031
2032 /* setup ISP and RTC fields, based on mode */
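/*
 * ISP and RTC are presumably the PIIX "IORDY sample point" and
 * "recovery time" fields; the tables are indexed by PIO/DMA mode.
 */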
2033 static u_int32_t
2034 piix_setup_idetim_timings(mode, dma, channel)
2035 u_int8_t mode;
2036 u_int8_t dma;
2037 u_int8_t channel;
2038 {
2039
2040 if (dma)
2041 return PIIX_IDETIM_SET(0,
2042 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
2043 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
2044 channel);
2045 else
2046 return PIIX_IDETIM_SET(0,
2047 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
2048 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
2049 channel);
2050 }
2051
2052 /* setup DTE, PPE, IE and TIME field based on PIO mode */
2053 static u_int32_t
2054 piix_setup_idetim_drvs(drvp)
2055 struct ata_drive_datas *drvp;
2056 {
2057 u_int32_t ret = 0;
2058 struct channel_softc *chp = drvp->chnl_softc;
2059 u_int8_t channel = chp->channel;
2060 u_int8_t drive = drvp->drive;
2061
2062 /*
2063 	 * If the drive is using UDMA, the timing setups are independent,
2064 	 * so just check DMA and PIO here.
2065 */
2066 if (drvp->drive_flags & DRIVE_DMA) {
2067 /* if mode = DMA mode 0, use compatible timings */
2068 if ((drvp->drive_flags & DRIVE_DMA) &&
2069 drvp->DMA_mode == 0) {
2070 drvp->PIO_mode = 0;
2071 return ret;
2072 }
2073 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2074 /*
2075 		 * If PIO and DMA timings are the same, use fast timings for
2076 		 * PIO too; otherwise use compat timings for PIO.
2077 */
2078 if ((piix_isp_pio[drvp->PIO_mode] !=
2079 piix_isp_dma[drvp->DMA_mode]) ||
2080 (piix_rtc_pio[drvp->PIO_mode] !=
2081 piix_rtc_dma[drvp->DMA_mode]))
2082 drvp->PIO_mode = 0;
2083 /* if PIO mode <= 2, use compat timings for PIO */
2084 if (drvp->PIO_mode <= 2) {
2085 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2086 channel);
2087 return ret;
2088 }
2089 }
2090
2091 /*
2092 	 * Now set up PIO modes. If mode < 2, use compat timings;
2093 	 * else enable fast timings. Enable IORDY and prefetch/post
2094 	 * if PIO mode >= 3.
2095 */
2096
2097 if (drvp->PIO_mode < 2)
2098 return ret;
2099
2100 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2101 if (drvp->PIO_mode >= 3) {
2102 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2103 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2104 }
2105 return ret;
2106 }
2107
2108 /* setup values in SIDETIM registers, based on mode */
2109 static u_int32_t
2110 piix_setup_sidetim_timings(mode, dma, channel)
2111 u_int8_t mode;
2112 u_int8_t dma;
2113 u_int8_t channel;
2114 {
2115 if (dma)
2116 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2117 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2118 else
2119 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2120 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2121 }
2122
2123 void
2124 amd7x6_chip_map(sc, pa)
2125 struct pciide_softc *sc;
2126 struct pci_attach_args *pa;
2127 {
2128 struct pciide_channel *cp;
2129 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2130 int channel;
2131 pcireg_t chanenable;
2132 bus_size_t cmdsize, ctlsize;
2133
2134 if (pciide_chipen(sc, pa) == 0)
2135 return;
2136 aprint_normal("%s: bus-master DMA support present",
2137 sc->sc_wdcdev.sc_dev.dv_xname);
2138 pciide_mapreg_dma(sc, pa);
2139 aprint_normal("\n");
2140 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2141 WDC_CAPABILITY_MODE;
2142 if (sc->sc_dma_ok) {
2143 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2144 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2145 sc->sc_wdcdev.irqack = pciide_irqack;
2146 }
2147 sc->sc_wdcdev.PIO_cap = 4;
2148 sc->sc_wdcdev.DMA_cap = 2;
2149
2150 switch (sc->sc_pci_vendor) {
2151 case PCI_VENDOR_AMD:
2152 switch (sc->sc_pp->ide_product) {
2153 case PCI_PRODUCT_AMD_PBC766_IDE:
2154 case PCI_PRODUCT_AMD_PBC768_IDE:
2155 case PCI_PRODUCT_AMD_PBC8111_IDE:
2156 sc->sc_wdcdev.UDMA_cap = 5;
2157 break;
2158 default:
2159 sc->sc_wdcdev.UDMA_cap = 4;
2160 }
2161 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2162 break;
2163
2164 case PCI_VENDOR_NVIDIA:
2165 switch (sc->sc_pp->ide_product) {
2166 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2167 sc->sc_wdcdev.UDMA_cap = 5;
2168 break;
2169 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2170 sc->sc_wdcdev.UDMA_cap = 6;
2171 break;
2172 }
2173 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2174 break;
2175
2176 default:
2177 panic("amd7x6_chip_map: unknown vendor");
2178 }
2179 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2180 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2181 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2182 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2183 AMD7X6_CHANSTATUS_EN(sc));
2184
2185 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2186 DEBUG_PROBE);
2187 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2188 cp = &sc->pciide_channels[channel];
2189 if (pciide_chansetup(sc, channel, interface) == 0)
2190 continue;
2191
2192 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2193 aprint_normal("%s: %s channel ignored (disabled)\n",
2194 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2195 continue;
2196 }
2197 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2198 pciide_pci_intr);
2199
2200 if (pciide_chan_candisable(cp))
2201 chanenable &= ~AMD7X6_CHAN_EN(channel);
2202 pciide_map_compat_intr(pa, cp, channel, interface);
2203 if (cp->hw_ok == 0)
2204 continue;
2205
2206 amd7x6_setup_channel(&cp->wdc_channel);
2207 }
2208 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2209 chanenable);
2210 return;
2211 }
2212
2213 void
2214 amd7x6_setup_channel(chp)
2215 struct channel_softc *chp;
2216 {
2217 u_int32_t udmatim_reg, datatim_reg;
2218 u_int8_t idedma_ctl;
2219 int mode, drive;
2220 struct ata_drive_datas *drvp;
2221 struct pciide_channel *cp = (struct pciide_channel*)chp;
2222 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2223 #ifndef PCIIDE_AMD756_ENABLEDMA
2224 int rev = PCI_REVISION(
2225 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2226 #endif
2227
2228 idedma_ctl = 0;
2229 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2230 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2231 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2232 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2233
2234 /* setup DMA if needed */
2235 pciide_channel_dma_setup(cp);
2236
2237 for (drive = 0; drive < 2; drive++) {
2238 drvp = &chp->ch_drive[drive];
2239 /* If no drive, skip */
2240 if ((drvp->drive_flags & DRIVE) == 0)
2241 continue;
2242 /* add timing values, setup DMA if needed */
2243 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2244 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2245 mode = drvp->PIO_mode;
2246 goto pio;
2247 }
2248 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2249 (drvp->drive_flags & DRIVE_UDMA)) {
2250 /* use Ultra/DMA */
2251 drvp->drive_flags &= ~DRIVE_DMA;
2252 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2253 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2254 AMD7X6_UDMA_TIME(chp->channel, drive,
2255 amd7x6_udma_tim[drvp->UDMA_mode]);
2256 /* can use PIO timings, MW DMA unused */
2257 mode = drvp->PIO_mode;
2258 } else {
2259 /* use Multiword DMA, but only if revision is OK */
2260 drvp->drive_flags &= ~DRIVE_UDMA;
2261 #ifndef PCIIDE_AMD756_ENABLEDMA
2262 /*
2263 			 * The workaround doesn't seem to be necessary
2264 			 * with all drives, so it can be disabled by
2265 			 * defining PCIIDE_AMD756_ENABLEDMA. The bug causes
2266 			 * a hard hang if triggered.
2267 */
2268 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2269 sc->sc_pp->ide_product ==
2270 PCI_PRODUCT_AMD_PBC756_IDE &&
2271 AMD756_CHIPREV_DISABLEDMA(rev)) {
2272 aprint_normal(
2273 "%s:%d:%d: multi-word DMA disabled due "
2274 "to chip revision\n",
2275 sc->sc_wdcdev.sc_dev.dv_xname,
2276 chp->channel, drive);
2277 mode = drvp->PIO_mode;
2278 drvp->drive_flags &= ~DRIVE_DMA;
2279 goto pio;
2280 }
2281 #endif
2282 /* mode = min(pio, dma+2) */
2283 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2284 mode = drvp->PIO_mode;
2285 else
2286 mode = drvp->DMA_mode + 2;
2287 }
2288 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2289
2290 pio: /* setup PIO mode */
2291 if (mode <= 2) {
2292 drvp->DMA_mode = 0;
2293 drvp->PIO_mode = 0;
2294 mode = 0;
2295 } else {
2296 drvp->PIO_mode = mode;
2297 drvp->DMA_mode = mode - 2;
2298 }
2299 datatim_reg |=
2300 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2301 amd7x6_pio_set[mode]) |
2302 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2303 amd7x6_pio_rec[mode]);
2304 }
2305 if (idedma_ctl != 0) {
2306 /* Add software bits in status register */
2307 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2308 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2309 idedma_ctl);
2310 }
2311 pciide_print_modes(cp);
2312 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2313 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2314 }
2315
2316 void
2317 apollo_chip_map(sc, pa)
2318 struct pciide_softc *sc;
2319 struct pci_attach_args *pa;
2320 {
2321 struct pciide_channel *cp;
2322 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2323 int channel;
2324 u_int32_t ideconf;
2325 bus_size_t cmdsize, ctlsize;
2326 pcitag_t pcib_tag;
2327 pcireg_t pcib_id, pcib_class;
2328
2329 if (pciide_chipen(sc, pa) == 0)
2330 return;
2331 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2332 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2333 /* and read ID and rev of the ISA bridge */
2334 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2335 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2336 aprint_normal(": VIA Technologies ");
2337 switch (PCI_PRODUCT(pcib_id)) {
2338 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2339 aprint_normal("VT82C586 (Apollo VP) ");
2340 if(PCI_REVISION(pcib_class) >= 0x02) {
2341 aprint_normal("ATA33 controller\n");
2342 sc->sc_wdcdev.UDMA_cap = 2;
2343 } else {
2344 aprint_normal("controller\n");
2345 sc->sc_wdcdev.UDMA_cap = 0;
2346 }
2347 break;
2348 case PCI_PRODUCT_VIATECH_VT82C596A:
2349 aprint_normal("VT82C596A (Apollo Pro) ");
2350 if (PCI_REVISION(pcib_class) >= 0x12) {
2351 aprint_normal("ATA66 controller\n");
2352 sc->sc_wdcdev.UDMA_cap = 4;
2353 } else {
2354 aprint_normal("ATA33 controller\n");
2355 sc->sc_wdcdev.UDMA_cap = 2;
2356 }
2357 break;
2358 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2359 aprint_normal("VT82C686A (Apollo KX133) ");
2360 if (PCI_REVISION(pcib_class) >= 0x40) {
2361 aprint_normal("ATA100 controller\n");
2362 sc->sc_wdcdev.UDMA_cap = 5;
2363 } else {
2364 aprint_normal("ATA66 controller\n");
2365 sc->sc_wdcdev.UDMA_cap = 4;
2366 }
2367 break;
2368 case PCI_PRODUCT_VIATECH_VT8231:
2369 aprint_normal("VT8231 ATA100 controller\n");
2370 sc->sc_wdcdev.UDMA_cap = 5;
2371 break;
2372 case PCI_PRODUCT_VIATECH_VT8233:
2373 aprint_normal("VT8233 ATA100 controller\n");
2374 sc->sc_wdcdev.UDMA_cap = 5;
2375 break;
2376 case PCI_PRODUCT_VIATECH_VT8233A:
2377 aprint_normal("VT8233A ATA133 controller\n");
2378 sc->sc_wdcdev.UDMA_cap = 6;
2379 break;
2380 case PCI_PRODUCT_VIATECH_VT8235:
2381 aprint_normal("VT8235 ATA133 controller\n");
2382 sc->sc_wdcdev.UDMA_cap = 6;
2383 break;
2384 case PCI_PRODUCT_VIATECH_VT8237_RAID:
2385 aprint_normal("VT8237 ATA133 controller\n");
2386 sc->sc_wdcdev.UDMA_cap = 6;
2387 break;
2388 default:
2389 aprint_normal("unknown ATA controller\n");
2390 sc->sc_wdcdev.UDMA_cap = 0;
2391 }
2392
2393 aprint_normal("%s: bus-master DMA support present",
2394 sc->sc_wdcdev.sc_dev.dv_xname);
2395 pciide_mapreg_dma(sc, pa);
2396 aprint_normal("\n");
2397 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2398 WDC_CAPABILITY_MODE;
2399 if (sc->sc_dma_ok) {
2400 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2401 sc->sc_wdcdev.irqack = pciide_irqack;
2402 if (sc->sc_wdcdev.UDMA_cap > 0)
2403 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2404 }
2405 sc->sc_wdcdev.PIO_cap = 4;
2406 sc->sc_wdcdev.DMA_cap = 2;
2407 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2408 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2409 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2410
2411 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2412 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2413 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2414 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2415 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2416 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2417 DEBUG_PROBE);
2418
2419 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2420 cp = &sc->pciide_channels[channel];
2421 if (pciide_chansetup(sc, channel, interface) == 0)
2422 continue;
2423
2424 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2425 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2426 aprint_normal("%s: %s channel ignored (disabled)\n",
2427 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2428 continue;
2429 }
2430 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2431 pciide_pci_intr);
2432 if (cp->hw_ok == 0)
2433 continue;
2434 if (pciide_chan_candisable(cp)) {
2435 ideconf &= ~APO_IDECONF_EN(channel);
2436 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2437 ideconf);
2438 }
2439 pciide_map_compat_intr(pa, cp, channel, interface);
2440
2441 if (cp->hw_ok == 0)
2442 continue;
2443 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2444 }
2445 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2446 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2447 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2448 }
2449
2450 void
2451 apollo_setup_channel(chp)
2452 struct channel_softc *chp;
2453 {
2454 u_int32_t udmatim_reg, datatim_reg;
2455 u_int8_t idedma_ctl;
2456 int mode, drive;
2457 struct ata_drive_datas *drvp;
2458 struct pciide_channel *cp = (struct pciide_channel*)chp;
2459 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2460
2461 idedma_ctl = 0;
2462 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2463 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2464 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2465 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2466
2467 /* setup DMA if needed */
2468 pciide_channel_dma_setup(cp);
2469
2470 for (drive = 0; drive < 2; drive++) {
2471 drvp = &chp->ch_drive[drive];
2472 /* If no drive, skip */
2473 if ((drvp->drive_flags & DRIVE) == 0)
2474 continue;
2475 /* add timing values, setup DMA if needed */
2476 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2477 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2478 mode = drvp->PIO_mode;
2479 goto pio;
2480 }
2481 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2482 (drvp->drive_flags & DRIVE_UDMA)) {
2483 /* use Ultra/DMA */
2484 drvp->drive_flags &= ~DRIVE_DMA;
2485 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2486 APO_UDMA_EN_MTH(chp->channel, drive);
2487 if (sc->sc_wdcdev.UDMA_cap == 6) {
2488 /* 8233a */
2489 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2490 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2491 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2492 /* 686b */
2493 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2494 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2495 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2496 /* 596b or 686a */
2497 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2498 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2499 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2500 } else {
2501 /* 596a or 586b */
2502 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2503 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2504 }
2505 /* can use PIO timings, MW DMA unused */
2506 mode = drvp->PIO_mode;
2507 } else {
2508 /* use Multiword DMA */
2509 drvp->drive_flags &= ~DRIVE_UDMA;
2510 /* mode = min(pio, dma+2) */
2511 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2512 mode = drvp->PIO_mode;
2513 else
2514 mode = drvp->DMA_mode + 2;
2515 }
2516 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2517
2518 pio: /* setup PIO mode */
2519 if (mode <= 2) {
2520 drvp->DMA_mode = 0;
2521 drvp->PIO_mode = 0;
2522 mode = 0;
2523 } else {
2524 drvp->PIO_mode = mode;
2525 drvp->DMA_mode = mode - 2;
2526 }
2527 datatim_reg |=
2528 APO_DATATIM_PULSE(chp->channel, drive,
2529 apollo_pio_set[mode]) |
2530 APO_DATATIM_RECOV(chp->channel, drive,
2531 apollo_pio_rec[mode]);
2532 }
2533 if (idedma_ctl != 0) {
2534 /* Add software bits in status register */
2535 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2536 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2537 idedma_ctl);
2538 }
2539 pciide_print_modes(cp);
2540 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2541 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2542 }
2543
2544 void
2545 cmd_channel_map(pa, sc, channel)
2546 struct pci_attach_args *pa;
2547 struct pciide_softc *sc;
2548 int channel;
2549 {
2550 struct pciide_channel *cp = &sc->pciide_channels[channel];
2551 bus_size_t cmdsize, ctlsize;
2552 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2553 int interface, one_channel;
2554
2555 /*
2556 * The 0648/0649 can be told to identify as a RAID controller.
2557 	 * In this case, we have to fake the interface.
2558 */
2559 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2560 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2561 PCIIDE_INTERFACE_SETTABLE(1);
2562 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2563 CMD_CONF_DSA1)
2564 interface |= PCIIDE_INTERFACE_PCI(0) |
2565 PCIIDE_INTERFACE_PCI(1);
2566 } else {
2567 interface = PCI_INTERFACE(pa->pa_class);
2568 }
2569
2570 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2571 cp->name = PCIIDE_CHANNEL_NAME(channel);
2572 cp->wdc_channel.channel = channel;
2573 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2574
2575 /*
2576 	 * Older CMD64X chips don't have independent channels
2577 */
2578 switch (sc->sc_pp->ide_product) {
2579 case PCI_PRODUCT_CMDTECH_649:
2580 one_channel = 0;
2581 break;
2582 default:
2583 one_channel = 1;
2584 break;
2585 }
2586
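	/*
	 * Chips without independent channels share channel 0's queue,
	 * presumably so that requests on both channels are serialized.
	 */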
2587 if (channel > 0 && one_channel) {
2588 cp->wdc_channel.ch_queue =
2589 sc->pciide_channels[0].wdc_channel.ch_queue;
2590 } else {
2591 cp->wdc_channel.ch_queue =
2592 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2593 }
2594 if (cp->wdc_channel.ch_queue == NULL) {
2595 aprint_error("%s %s channel: "
2596 		    "can't allocate memory for command queue\n",
2597 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2598 return;
2599 }
2600
2601 aprint_normal("%s: %s channel %s to %s mode\n",
2602 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2603 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2604 "configured" : "wired",
2605 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2606 "native-PCI" : "compatibility");
2607
2608 /*
2609 * with a CMD PCI64x, if we get here, the first channel is enabled:
2610 * there's no way to disable the first channel without disabling
2611 * the whole device
2612 */
2613 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2614 aprint_normal("%s: %s channel ignored (disabled)\n",
2615 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2616 return;
2617 }
2618
2619 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2620 if (cp->hw_ok == 0)
2621 return;
2622 if (channel == 1) {
2623 if (pciide_chan_candisable(cp)) {
2624 ctrl &= ~CMD_CTRL_2PORT;
2625 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2626 CMD_CTRL, ctrl);
2627 }
2628 }
2629 pciide_map_compat_intr(pa, cp, channel, interface);
2630 }
2631
2632 int
2633 cmd_pci_intr(arg)
2634 void *arg;
2635 {
2636 struct pciide_softc *sc = arg;
2637 struct pciide_channel *cp;
2638 struct channel_softc *wdc_cp;
2639 int i, rv, crv;
2640 u_int32_t priirq, secirq;
2641
2642 rv = 0;
2643 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2644 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2645 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2646 cp = &sc->pciide_channels[i];
2647 wdc_cp = &cp->wdc_channel;
2648 		/* If it's a compat channel, skip it. */
2649 if (cp->compat)
2650 continue;
2651 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2652 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2653 crv = wdcintr(wdc_cp);
2654 if (crv == 0)
2655 printf("%s:%d: bogus intr\n",
2656 sc->sc_wdcdev.sc_dev.dv_xname, i);
2657 else
2658 rv = 1;
2659 }
2660 }
2661 return rv;
2662 }
2663
2664 void
2665 cmd_chip_map(sc, pa)
2666 struct pciide_softc *sc;
2667 struct pci_attach_args *pa;
2668 {
2669 int channel;
2670
2671 /*
2672 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2673 	 * and of the base address registers can be disabled at
2674 	 * the hardware level. In this case, the device is wired
2675 	 * in compat mode and its first channel is always enabled,
2676 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2677 	 * In fact, it seems that the first channel of the CMD PCI0640
2678 	 * can't be disabled.
2679 */
2680
2681 #ifdef PCIIDE_CMD064x_DISABLE
2682 if (pciide_chipen(sc, pa) == 0)
2683 return;
2684 #endif
2685
2686 aprint_normal("%s: hardware does not support DMA\n",
2687 sc->sc_wdcdev.sc_dev.dv_xname);
2688 sc->sc_dma_ok = 0;
2689
2690 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2691 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2692 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2693
2694 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2695 cmd_channel_map(pa, sc, channel);
2696 }
2697 }
2698
2699 void
2700 cmd0643_9_chip_map(sc, pa)
2701 struct pciide_softc *sc;
2702 struct pci_attach_args *pa;
2703 {
2704 struct pciide_channel *cp;
2705 int channel;
2706 pcireg_t rev = PCI_REVISION(pa->pa_class);
2707
2708 /*
2709 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2710 	 * and of the base address registers can be disabled at
2711 	 * the hardware level. In this case, the device is wired
2712 	 * in compat mode and its first channel is always enabled,
2713 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2714 	 * In fact, it seems that the first channel of the CMD PCI0640
2715 	 * can't be disabled.
2716 */
2717
2718 #ifdef PCIIDE_CMD064x_DISABLE
2719 if (pciide_chipen(sc, pa) == 0)
2720 return;
2721 #endif
2722 aprint_normal("%s: bus-master DMA support present",
2723 sc->sc_wdcdev.sc_dev.dv_xname);
2724 pciide_mapreg_dma(sc, pa);
2725 aprint_normal("\n");
2726 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2727 WDC_CAPABILITY_MODE;
2728 if (sc->sc_dma_ok) {
2729 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2730 switch (sc->sc_pp->ide_product) {
2731 case PCI_PRODUCT_CMDTECH_649:
2732 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2733 sc->sc_wdcdev.UDMA_cap = 5;
2734 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2735 break;
2736 case PCI_PRODUCT_CMDTECH_648:
2737 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2738 sc->sc_wdcdev.UDMA_cap = 4;
2739 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2740 break;
2741 case PCI_PRODUCT_CMDTECH_646:
2742 if (rev >= CMD0646U2_REV) {
2743 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2744 sc->sc_wdcdev.UDMA_cap = 2;
2745 } else if (rev >= CMD0646U_REV) {
2746 /*
2747 * Linux's driver claims that the 646U is broken
2748 * with UDMA. Only enable it if we know what we're
2749 * doing
2750 */
2751 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2752 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2753 sc->sc_wdcdev.UDMA_cap = 2;
2754 #endif
2755 /* explicitly disable UDMA */
2756 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2757 CMD_UDMATIM(0), 0);
2758 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2759 CMD_UDMATIM(1), 0);
2760 }
2761 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2762 break;
2763 default:
2764 sc->sc_wdcdev.irqack = pciide_irqack;
2765 }
2766 }
2767
2768 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2769 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2770 sc->sc_wdcdev.PIO_cap = 4;
2771 sc->sc_wdcdev.DMA_cap = 2;
2772 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2773
2774 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2775 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2776 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2777 DEBUG_PROBE);
2778
2779 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2780 cp = &sc->pciide_channels[channel];
2781 cmd_channel_map(pa, sc, channel);
2782 if (cp->hw_ok == 0)
2783 continue;
2784 cmd0643_9_setup_channel(&cp->wdc_channel);
2785 }
2786 /*
2787 * note - this also makes sure we clear the irq disable and reset
2788 * bits
2789 */
2790 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2791 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2792 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2793 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2794 DEBUG_PROBE);
2795 }
2796
2797 void
2798 cmd0643_9_setup_channel(chp)
2799 struct channel_softc *chp;
2800 {
2801 struct ata_drive_datas *drvp;
2802 u_int8_t tim;
2803 u_int32_t idedma_ctl, udma_reg;
2804 int drive;
2805 struct pciide_channel *cp = (struct pciide_channel*)chp;
2806 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2807
2808 idedma_ctl = 0;
2809 /* setup DMA if needed */
2810 pciide_channel_dma_setup(cp);
2811
2812 for (drive = 0; drive < 2; drive++) {
2813 drvp = &chp->ch_drive[drive];
2814 /* If no drive, skip */
2815 if ((drvp->drive_flags & DRIVE) == 0)
2816 continue;
2817 /* add timing values, setup DMA if needed */
2818 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2819 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2820 if (drvp->drive_flags & DRIVE_UDMA) {
2821 /* UltraDMA on a 646U2, 0648 or 0649 */
2822 drvp->drive_flags &= ~DRIVE_DMA;
2823 udma_reg = pciide_pci_read(sc->sc_pc,
2824 sc->sc_tag, CMD_UDMATIM(chp->channel));
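				/*
				 * CMD_BICSR_80 presumably reports an 80-wire
				 * cable on this channel; without it, limit
				 * the drive to UDMA2.
				 */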
2825 if (drvp->UDMA_mode > 2 &&
2826 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2827 CMD_BICSR) &
2828 CMD_BICSR_80(chp->channel)) == 0)
2829 drvp->UDMA_mode = 2;
2830 if (drvp->UDMA_mode > 2)
2831 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2832 else if (sc->sc_wdcdev.UDMA_cap > 2)
2833 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2834 udma_reg |= CMD_UDMATIM_UDMA(drive);
2835 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2836 CMD_UDMATIM_TIM_OFF(drive));
2837 udma_reg |=
2838 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2839 CMD_UDMATIM_TIM_OFF(drive));
2840 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2841 CMD_UDMATIM(chp->channel), udma_reg);
2842 } else {
2843 /*
2844 				 * Use Multiword DMA.
2845 				 * Timings will be used for both PIO and DMA,
2846 				 * so adjust the DMA mode if needed.
2847 				 * If we have a 0646U2/8/9, turn off UDMA.
2848 */
2849 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2850 udma_reg = pciide_pci_read(sc->sc_pc,
2851 sc->sc_tag,
2852 CMD_UDMATIM(chp->channel));
2853 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2854 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2855 CMD_UDMATIM(chp->channel),
2856 udma_reg);
2857 }
2858 if (drvp->PIO_mode >= 3 &&
2859 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2860 drvp->DMA_mode = drvp->PIO_mode - 2;
2861 }
2862 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2863 }
2864 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2865 }
2866 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2867 CMD_DATA_TIM(chp->channel, drive), tim);
2868 }
2869 if (idedma_ctl != 0) {
2870 /* Add software bits in status register */
2871 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2872 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2873 idedma_ctl);
2874 }
2875 pciide_print_modes(cp);
2876 }
2877
2878 void
2879 cmd646_9_irqack(chp)
2880 struct channel_softc *chp;
2881 {
2882 u_int32_t priirq, secirq;
2883 struct pciide_channel *cp = (struct pciide_channel*)chp;
2884 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2885
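	/*
	 * The latched interrupt bit is apparently cleared by writing the
	 * register value back: CMD_CONF carries the channel 0 bit and
	 * CMD_ARTTIM23 the channel 1 bit (see cmd_pci_intr above).
	 */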
2886 if (chp->channel == 0) {
2887 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2888 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2889 } else {
2890 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2891 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2892 }
2893 pciide_irqack(chp);
2894 }
2895
2896 void
2897 cmd680_chip_map(sc, pa)
2898 struct pciide_softc *sc;
2899 struct pci_attach_args *pa;
2900 {
2901 struct pciide_channel *cp;
2902 int channel;
2903
2904 if (pciide_chipen(sc, pa) == 0)
2905 return;
2906 aprint_normal("%s: bus-master DMA support present",
2907 sc->sc_wdcdev.sc_dev.dv_xname);
2908 pciide_mapreg_dma(sc, pa);
2909 aprint_normal("\n");
2910 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2911 WDC_CAPABILITY_MODE;
2912 if (sc->sc_dma_ok) {
2913 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2914 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2915 sc->sc_wdcdev.UDMA_cap = 6;
2916 sc->sc_wdcdev.irqack = pciide_irqack;
2917 }
2918
2919 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2920 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2921 sc->sc_wdcdev.PIO_cap = 4;
2922 sc->sc_wdcdev.DMA_cap = 2;
2923 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2924
2925 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2926 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2927 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2928 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2929 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2930 cp = &sc->pciide_channels[channel];
2931 cmd680_channel_map(pa, sc, channel);
2932 if (cp->hw_ok == 0)
2933 continue;
2934 cmd680_setup_channel(&cp->wdc_channel);
2935 }
2936 }
2937
2938 void
2939 cmd680_channel_map(pa, sc, channel)
2940 struct pci_attach_args *pa;
2941 struct pciide_softc *sc;
2942 int channel;
2943 {
2944 struct pciide_channel *cp = &sc->pciide_channels[channel];
2945 bus_size_t cmdsize, ctlsize;
2946 int interface, i, reg;
2947 static const u_int8_t init_val[] =
2948 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2949 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2950
2951 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2952 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2953 PCIIDE_INTERFACE_SETTABLE(1);
2954 interface |= PCIIDE_INTERFACE_PCI(0) |
2955 PCIIDE_INTERFACE_PCI(1);
2956 } else {
2957 interface = PCI_INTERFACE(pa->pa_class);
2958 }
2959
2960 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2961 cp->name = PCIIDE_CHANNEL_NAME(channel);
2962 cp->wdc_channel.channel = channel;
2963 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2964
2965 cp->wdc_channel.ch_queue =
2966 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2967 if (cp->wdc_channel.ch_queue == NULL) {
2968 aprint_error("%s %s channel: "
2969 	    "can't allocate memory for command queue\n",
2970 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2971 return;
2972 }
2973
2974 /* XXX */
2975 reg = 0xa2 + channel * 16;
2976 for (i = 0; i < sizeof(init_val); i++)
2977 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2978
2979 aprint_normal("%s: %s channel %s to %s mode\n",
2980 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2981 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2982 "configured" : "wired",
2983 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2984 "native-PCI" : "compatibility");
2985
2986 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2987 if (cp->hw_ok == 0)
2988 return;
2989 pciide_map_compat_intr(pa, cp, channel, interface);
2990 }
2991
2992 void
2993 cmd680_setup_channel(chp)
2994 struct channel_softc *chp;
2995 {
2996 struct ata_drive_datas *drvp;
2997 u_int8_t mode, off, scsc;
2998 u_int16_t val;
2999 u_int32_t idedma_ctl;
3000 int drive;
3001 struct pciide_channel *cp = (struct pciide_channel*)chp;
3002 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3003 pci_chipset_tag_t pc = sc->sc_pc;
3004 pcitag_t pa = sc->sc_tag;
3005 static const u_int8_t udma2_tbl[] =
3006 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
3007 static const u_int8_t udma_tbl[] =
3008 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
3009 static const u_int16_t dma_tbl[] =
3010 { 0x2208, 0x10c2, 0x10c1 };
3011 static const u_int16_t pio_tbl[] =
3012 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
3013
3014 idedma_ctl = 0;
3015 pciide_channel_dma_setup(cp);
3016 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
3017
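	/*
	 * The register at 0x80 + channel * 4 appears to hold a 2-bit
	 * transfer-mode select per drive: 0x01 = PIO, 0x02 = MWDMA,
	 * 0x03 = UDMA (see the assignments below).
	 */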
3018 for (drive = 0; drive < 2; drive++) {
3019 drvp = &chp->ch_drive[drive];
3020 /* If no drive, skip */
3021 if ((drvp->drive_flags & DRIVE) == 0)
3022 continue;
3023 mode &= ~(0x03 << (drive * 4));
3024 if (drvp->drive_flags & DRIVE_UDMA) {
3025 drvp->drive_flags &= ~DRIVE_DMA;
3026 off = 0xa0 + chp->channel * 16;
3027 if (drvp->UDMA_mode > 2 &&
3028 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
3029 drvp->UDMA_mode = 2;
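			/*
			 * Register 0x8a apparently selects the UDMA base
			 * clock; if the 133MHz clock can't be enabled,
			 * fall back from UDMA6 to UDMA5.
			 */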
3030 scsc = pciide_pci_read(pc, pa, 0x8a);
3031 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
3032 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
3033 scsc = pciide_pci_read(pc, pa, 0x8a);
3034 if ((scsc & 0x30) == 0)
3035 drvp->UDMA_mode = 5;
3036 }
3037 mode |= 0x03 << (drive * 4);
3038 off = 0xac + chp->channel * 16 + drive * 2;
3039 val = pciide_pci_read(pc, pa, off) & ~0x3f;
3040 if (scsc & 0x30)
3041 val |= udma2_tbl[drvp->UDMA_mode];
3042 else
3043 val |= udma_tbl[drvp->UDMA_mode];
3044 pciide_pci_write(pc, pa, off, val);
3045 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3046 } else if (drvp->drive_flags & DRIVE_DMA) {
3047 mode |= 0x02 << (drive * 4);
3048 off = 0xa8 + chp->channel * 16 + drive * 2;
3049 val = dma_tbl[drvp->DMA_mode];
3050 pciide_pci_write(pc, pa, off, val & 0xff);
3051 pciide_pci_write(pc, pa, off, val >> 8);
3052 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3053 } else {
3054 mode |= 0x01 << (drive * 4);
3055 off = 0xa4 + chp->channel * 16 + drive * 2;
3056 val = pio_tbl[drvp->PIO_mode];
3057 pciide_pci_write(pc, pa, off, val & 0xff);
3058 pciide_pci_write(pc, pa, off, val >> 8);
3059 }
3060 }
3061
3062 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3063 if (idedma_ctl != 0) {
3064 /* Add software bits in status register */
3065 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3066 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3067 idedma_ctl);
3068 }
3069 pciide_print_modes(cp);
3070 }
3071
3072 void
3073 cmd3112_chip_map(sc, pa)
3074 struct pciide_softc *sc;
3075 struct pci_attach_args *pa;
3076 {
3077 struct pciide_channel *cp;
3078 bus_size_t cmdsize, ctlsize;
3079 pcireg_t interface;
3080 int channel;
3081
3082 if (pciide_chipen(sc, pa) == 0)
3083 return;
3084
3085 aprint_normal("%s: bus-master DMA support present",
3086 sc->sc_wdcdev.sc_dev.dv_xname);
3087 pciide_mapreg_dma(sc, pa);
3088 aprint_normal("\n");
3089
3090 /*
3091 	 * Revisions <= 0x01 of the 3112 have a bug that can cause data
3092 * corruption if DMA transfers cross an 8K boundary. This is
3093 * apparently hard to tickle, but we'll go ahead and play it
3094 * safe.
3095 */
3096 if (PCI_REVISION(pa->pa_class) <= 0x01) {
3097 sc->sc_dma_maxsegsz = 8192;
3098 sc->sc_dma_boundary = 8192;
3099 }
3100
3101 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3102 WDC_CAPABILITY_MODE;
3103 sc->sc_wdcdev.PIO_cap = 4;
3104 if (sc->sc_dma_ok) {
3105 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3106 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3107 sc->sc_wdcdev.irqack = pciide_irqack;
3108 sc->sc_wdcdev.DMA_cap = 2;
3109 sc->sc_wdcdev.UDMA_cap = 6;
3110 }
3111 sc->sc_wdcdev.set_modes = cmd3112_setup_channel;
3112
3113 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3114 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3115
3116 /*
3117 * The 3112 can be told to identify as a RAID controller.
3118 	 * In this case, we have to fake the interface.
3119 */
3120 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3121 interface = PCI_INTERFACE(pa->pa_class);
3122 } else {
3123 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3124 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3125 }
3126
3127 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3128 cp = &sc->pciide_channels[channel];
3129 if (pciide_chansetup(sc, channel, interface) == 0)
3130 continue;
3131 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3132 pciide_pci_intr);
3133 if (cp->hw_ok == 0)
3134 continue;
3135 pciide_map_compat_intr(pa, cp, channel, interface);
3136 cmd3112_setup_channel(&cp->wdc_channel);
3137 }
3138 }
3139
3140 void
3141 cmd3112_setup_channel(chp)
3142 struct channel_softc *chp;
3143 {
3144 struct ata_drive_datas *drvp;
3145 int drive;
3146 u_int32_t idedma_ctl, dtm;
3147 struct pciide_channel *cp = (struct pciide_channel*)chp;
3148 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
3149
3150 /* setup DMA if needed */
3151 pciide_channel_dma_setup(cp);
3152
3153 idedma_ctl = 0;
3154 dtm = 0;
3155
3156 for (drive = 0; drive < 2; drive++) {
3157 drvp = &chp->ch_drive[drive];
3158 /* If no drive, skip */
3159 if ((drvp->drive_flags & DRIVE) == 0)
3160 continue;
3161 if (drvp->drive_flags & DRIVE_UDMA) {
3162 /* use Ultra/DMA */
3163 drvp->drive_flags &= ~DRIVE_DMA;
3164 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3165 dtm |= DTM_IDEx_DMA;
3166 } else if (drvp->drive_flags & DRIVE_DMA) {
3167 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3168 dtm |= DTM_IDEx_DMA;
3169 } else {
3170 dtm |= DTM_IDEx_PIO;
3171 }
3172 }
3173
3174 /*
3175 	 * Nothing to do to set up modes; it is meaningless in S-ATA
3176 	 * (but many S-ATA drives still want to get the SET_FEATURES
3177 	 * command).
3178 */
3179 if (idedma_ctl != 0) {
3180 /* Add software bits in status register */
3181 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3182 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3183 idedma_ctl);
3184 }
3185 pci_conf_write(sc->sc_pc, sc->sc_tag,
3186 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
3187 pciide_print_modes(cp);
3188 }
3189
3190 void
3191 cy693_chip_map(sc, pa)
3192 struct pciide_softc *sc;
3193 struct pci_attach_args *pa;
3194 {
3195 struct pciide_channel *cp;
3196 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3197 bus_size_t cmdsize, ctlsize;
3198
3199 if (pciide_chipen(sc, pa) == 0)
3200 return;
3201 /*
3202 	 * This chip has 2 PCI IDE functions, one for the primary and one
3203 	 * for the secondary channel, so we need to call
3204 	 * pciide_mapregs_compat() with the real channel.
3205 */
3206 if (pa->pa_function == 1) {
3207 sc->sc_cy_compatchan = 0;
3208 } else if (pa->pa_function == 2) {
3209 sc->sc_cy_compatchan = 1;
3210 } else {
3211 aprint_error("%s: unexpected PCI function %d\n",
3212 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3213 return;
3214 }
3215 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
3216 aprint_normal("%s: bus-master DMA support present",
3217 sc->sc_wdcdev.sc_dev.dv_xname);
3218 pciide_mapreg_dma(sc, pa);
3219 } else {
3220 aprint_normal("%s: hardware does not support DMA",
3221 sc->sc_wdcdev.sc_dev.dv_xname);
3222 sc->sc_dma_ok = 0;
3223 }
3224 aprint_normal("\n");
3225
3226 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3227 if (sc->sc_cy_handle == NULL) {
3228 aprint_error("%s: unable to map hyperCache control registers\n",
3229 sc->sc_wdcdev.sc_dev.dv_xname);
3230 sc->sc_dma_ok = 0;
3231 }
3232
3233 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3234 WDC_CAPABILITY_MODE;
3235 if (sc->sc_dma_ok) {
3236 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3237 sc->sc_wdcdev.irqack = pciide_irqack;
3238 }
3239 sc->sc_wdcdev.PIO_cap = 4;
3240 sc->sc_wdcdev.DMA_cap = 2;
3241 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3242
3243 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3244 sc->sc_wdcdev.nchannels = 1;
3245
3246 /* Only one channel for this chip; if we are here it's enabled */
3247 cp = &sc->pciide_channels[0];
3248 sc->wdc_chanarray[0] = &cp->wdc_channel;
3249 cp->name = PCIIDE_CHANNEL_NAME(0);
3250 cp->wdc_channel.channel = 0;
3251 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3252 cp->wdc_channel.ch_queue =
3253 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3254 if (cp->wdc_channel.ch_queue == NULL) {
3255 aprint_error("%s primary channel: "
3256 		    "can't allocate memory for command queue\n",
3257 sc->sc_wdcdev.sc_dev.dv_xname);
3258 return;
3259 }
3260 aprint_normal("%s: primary channel %s to ",
3261 sc->sc_wdcdev.sc_dev.dv_xname,
3262 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3263 "configured" : "wired");
3264 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3265 aprint_normal("native-PCI");
3266 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3267 pciide_pci_intr);
3268 } else {
3269 aprint_normal("compatibility");
3270 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3271 &cmdsize, &ctlsize);
3272 }
3273 aprint_normal(" mode\n");
3274 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3275 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3276 wdcattach(&cp->wdc_channel);
3277 if (pciide_chan_candisable(cp)) {
3278 pci_conf_write(sc->sc_pc, sc->sc_tag,
3279 PCI_COMMAND_STATUS_REG, 0);
3280 }
3281 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3282 if (cp->hw_ok == 0)
3283 return;
3284 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3285 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3286 cy693_setup_channel(&cp->wdc_channel);
3287 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3288 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3289 }
3290
3291 void
3292 cy693_setup_channel(chp)
3293 struct channel_softc *chp;
3294 {
3295 struct ata_drive_datas *drvp;
3296 int drive;
3297 u_int32_t cy_cmd_ctrl;
3298 u_int32_t idedma_ctl;
3299 struct pciide_channel *cp = (struct pciide_channel*)chp;
3300 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3301 int dma_mode = -1;
3302
3303 cy_cmd_ctrl = idedma_ctl = 0;
3304
3305 /* setup DMA if needed */
3306 pciide_channel_dma_setup(cp);
3307
3308 for (drive = 0; drive < 2; drive++) {
3309 drvp = &chp->ch_drive[drive];
3310 /* If no drive, skip */
3311 if ((drvp->drive_flags & DRIVE) == 0)
3312 continue;
3313 /* add timing values, setup DMA if needed */
3314 if (drvp->drive_flags & DRIVE_DMA) {
3315 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3316 /* use Multiword DMA */
3317 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3318 dma_mode = drvp->DMA_mode;
3319 }
3320 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3321 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3322 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3323 CY_CMD_CTRL_IOW_REC_OFF(drive));
3324 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3325 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3326 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3327 CY_CMD_CTRL_IOR_REC_OFF(drive));
3328 }
3329 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
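	/*
	 * The 82c693 apparently has a single DMA timing setting per
	 * channel, so both drives get the lowest mode negotiated above.
	 */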
3330 chp->ch_drive[0].DMA_mode = dma_mode;
3331 chp->ch_drive[1].DMA_mode = dma_mode;
3332
3333 if (dma_mode == -1)
3334 dma_mode = 0;
3335
3336 if (sc->sc_cy_handle != NULL) {
3337 /* Note: `multiple' is implied. */
3338 cy82c693_write(sc->sc_cy_handle,
3339 (sc->sc_cy_compatchan == 0) ?
3340 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3341 }
3342
3343 pciide_print_modes(cp);
3344
3345 if (idedma_ctl != 0) {
3346 /* Add software bits in status register */
3347 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3348 IDEDMA_CTL, idedma_ctl);
3349 }
3350 }
3351
3352 static struct sis_hostbr_type {
3353 u_int16_t id;
3354 u_int8_t rev;
3355 u_int8_t udma_mode;
3356 char *name;
3357 u_int8_t type;
3358 #define SIS_TYPE_NOUDMA 0
3359 #define SIS_TYPE_66 1
3360 #define SIS_TYPE_100OLD 2
3361 #define SIS_TYPE_100NEW 3
3362 #define SIS_TYPE_133OLD 4
3363 #define SIS_TYPE_133NEW 5
3364 #define SIS_TYPE_SOUTH 6
3365 } sis_hostbr_type[] = {
3366 	/* Most of the info here is from sos (at) freebsd.org */
3367 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
3368 #if 0
3369 /*
3370 	 * Controllers associated with a rev 0x2 530 Host-to-PCI bridge
3371 	 * have problems with UDMA (info provided by Christos)
3372 */
3373 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
3374 #endif
3375 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
3376 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
3377 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
3378 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
3379 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
3380 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
3381 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
3382 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
3383 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
3384 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
3385 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
3386 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
3387 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
3388 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
3389 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
3390 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
3391 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
3392 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
3393 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
3394 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
3395 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
3396 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
3397 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
3398 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
3399 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
3400 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
3401 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
3402 /*
3403 	 * From sos (at) freebsd.org: the 0x961 ID will never be found in the real world
3404 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
3405 */
3406 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
3407 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
3408 };
3409
3410 static struct sis_hostbr_type *sis_hostbr_type_match;
3411
3412 static int
3413 sis_hostbr_match(pa)
3414 struct pci_attach_args *pa;
3415 {
3416 int i;
3417 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3418 return 0;
3419 sis_hostbr_type_match = NULL;
3420 for (i = 0;
3421 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3422 i++) {
3423 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3424 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3425 sis_hostbr_type_match = &sis_hostbr_type[i];
3426 }
3427 return (sis_hostbr_type_match != NULL);
3428 }
3429
3430 static int sis_south_match(pa)
3431 struct pci_attach_args *pa;
3432 {
3433 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3434 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3435 PCI_REVISION(pa->pa_class) >= 0x10);
3436 }
3437
3438 void
3439 sis_chip_map(sc, pa)
3440 struct pciide_softc *sc;
3441 struct pci_attach_args *pa;
3442 {
3443 struct pciide_channel *cp;
3444 int channel;
3445 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3446 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3447 pcireg_t rev = PCI_REVISION(pa->pa_class);
3448 bus_size_t cmdsize, ctlsize;
3449
3450 if (pciide_chipen(sc, pa) == 0)
3451 return;
3452 aprint_normal(": Silicon Integrated System ");
3453 pci_find_device(NULL, sis_hostbr_match);
3454 if (sis_hostbr_type_match) {
3455 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
3456 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3457 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3458 SIS_REG_57) & 0x7f);
3459 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3460 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3461 aprint_normal("96X UDMA%d",
3462 sis_hostbr_type_match->udma_mode);
3463 sc->sis_type = SIS_TYPE_133NEW;
3464 sc->sc_wdcdev.UDMA_cap =
3465 sis_hostbr_type_match->udma_mode;
3466 } else {
3467 if (pci_find_device(NULL, sis_south_match)) {
3468 sc->sis_type = SIS_TYPE_133OLD;
3469 sc->sc_wdcdev.UDMA_cap =
3470 sis_hostbr_type_match->udma_mode;
3471 } else {
3472 sc->sis_type = SIS_TYPE_100NEW;
3473 sc->sc_wdcdev.UDMA_cap =
3474 sis_hostbr_type_match->udma_mode;
3475 }
3476 }
3477 } else {
3478 sc->sis_type = sis_hostbr_type_match->type;
3479 sc->sc_wdcdev.UDMA_cap =
3480 sis_hostbr_type_match->udma_mode;
3481 }
3482 		aprint_normal("%s", sis_hostbr_type_match->name);
3483 } else {
3484 aprint_normal("5597/5598");
3485 if (rev >= 0xd0) {
3486 sc->sc_wdcdev.UDMA_cap = 2;
3487 sc->sis_type = SIS_TYPE_66;
3488 } else {
3489 sc->sc_wdcdev.UDMA_cap = 0;
3490 sc->sis_type = SIS_TYPE_NOUDMA;
3491 }
3492 }
3493 aprint_normal(" IDE controller (rev. 0x%02x)\n",
3494 PCI_REVISION(pa->pa_class));
3495 aprint_normal("%s: bus-master DMA support present",
3496 sc->sc_wdcdev.sc_dev.dv_xname);
3497 pciide_mapreg_dma(sc, pa);
3498 aprint_normal("\n");
3499
3500 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3501 WDC_CAPABILITY_MODE;
3502 if (sc->sc_dma_ok) {
3503 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3504 sc->sc_wdcdev.irqack = pciide_irqack;
3505 if (sc->sis_type >= SIS_TYPE_66)
3506 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3507 }
3508
3509 sc->sc_wdcdev.PIO_cap = 4;
3510 sc->sc_wdcdev.DMA_cap = 2;
3511
3512 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3513 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3514 switch(sc->sis_type) {
3515 case SIS_TYPE_NOUDMA:
3516 case SIS_TYPE_66:
3517 case SIS_TYPE_100OLD:
3518 sc->sc_wdcdev.set_modes = sis_setup_channel;
3519 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3520 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3521 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3522 break;
3523 case SIS_TYPE_100NEW:
3524 case SIS_TYPE_133OLD:
3525 sc->sc_wdcdev.set_modes = sis_setup_channel;
3526 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3527 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3528 break;
3529 case SIS_TYPE_133NEW:
3530 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3531 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3532 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3533 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3534 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3535 break;
3536 }
3537
3538
3539 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3540 cp = &sc->pciide_channels[channel];
3541 if (pciide_chansetup(sc, channel, interface) == 0)
3542 continue;
3543 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3544 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3545 aprint_normal("%s: %s channel ignored (disabled)\n",
3546 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3547 continue;
3548 }
3549 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3550 pciide_pci_intr);
3551 if (cp->hw_ok == 0)
3552 continue;
3553 if (pciide_chan_candisable(cp)) {
3554 if (channel == 0)
3555 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3556 else
3557 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3558 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3559 sis_ctr0);
3560 }
3561 pciide_map_compat_intr(pa, cp, channel, interface);
3562 if (cp->hw_ok == 0)
3563 continue;
3564 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3565 }
3566 }
3567
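/*
 * Timing setup for the 96x family (SIS_TYPE_133NEW): each drive has its
 * own timing register, located via SIS_TIM133(), and the 40/80-wire cable
 * state is read from SIS96x_REG_CBL to decide whether UDMA modes above 2
 * may be used.
 */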
3568 void
3569 sis96x_setup_channel(chp)
3570 struct channel_softc *chp;
3571 {
3572 struct ata_drive_datas *drvp;
3573 int drive;
3574 u_int32_t sis_tim;
3575 u_int32_t idedma_ctl;
3576 int regtim;
3577 struct pciide_channel *cp = (struct pciide_channel*)chp;
3578 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3579
3580 sis_tim = 0;
3581 idedma_ctl = 0;
3582 /* setup DMA if needed */
3583 pciide_channel_dma_setup(cp);
3584
3585 for (drive = 0; drive < 2; drive++) {
3586 regtim = SIS_TIM133(
3587 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3588 chp->channel, drive);
3589 drvp = &chp->ch_drive[drive];
3590 /* If no drive, skip */
3591 if ((drvp->drive_flags & DRIVE) == 0)
3592 continue;
3593 /* add timing values, setup DMA if needed */
3594 if (drvp->drive_flags & DRIVE_UDMA) {
3595 /* use Ultra/DMA */
3596 drvp->drive_flags &= ~DRIVE_DMA;
3597 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3598 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3599 if (drvp->UDMA_mode > 2)
3600 drvp->UDMA_mode = 2;
3601 }
3602 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3603 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3604 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3605 } else if (drvp->drive_flags & DRIVE_DMA) {
3606 /*
3607 * use Multiword DMA
3608 * Timings will be used for both PIO and DMA,
3609 * so adjust DMA mode if needed
3610 */
3611 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3612 drvp->PIO_mode = drvp->DMA_mode + 2;
3613 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3614 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3615 drvp->PIO_mode - 2 : 0;
3616 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3617 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3618 } else {
3619 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3620 }
3621 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3622 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3623 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3624 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3625 }
3626 if (idedma_ctl != 0) {
3627 /* Add software bits in status register */
3628 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3629 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3630 idedma_ctl);
3631 }
3632 pciide_print_modes(cp);
3633 }
3634
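/*
 * Timing setup for the pre-96x flavours: both drives of a channel share
 * the SIS_TIM(channel) configuration register; the layout of the UDMA
 * and PIO fields depends on sc->sis_type.
 */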
3635 void
3636 sis_setup_channel(chp)
3637 struct channel_softc *chp;
3638 {
3639 struct ata_drive_datas *drvp;
3640 int drive;
3641 u_int32_t sis_tim;
3642 u_int32_t idedma_ctl;
3643 struct pciide_channel *cp = (struct pciide_channel*)chp;
3644 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3645
3646 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3647 "channel %d 0x%x\n", chp->channel,
3648 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3649 DEBUG_PROBE);
3650 sis_tim = 0;
3651 idedma_ctl = 0;
3652 /* setup DMA if needed */
3653 pciide_channel_dma_setup(cp);
3654
3655 for (drive = 0; drive < 2; drive++) {
3656 drvp = &chp->ch_drive[drive];
3657 /* If no drive, skip */
3658 if ((drvp->drive_flags & DRIVE) == 0)
3659 continue;
3660 /* add timing values, setup DMA if needed */
3661 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3662 (drvp->drive_flags & DRIVE_UDMA) == 0)
3663 goto pio;
3664
3665 if (drvp->drive_flags & DRIVE_UDMA) {
3666 /* use Ultra/DMA */
3667 drvp->drive_flags &= ~DRIVE_DMA;
3668 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3669 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3670 if (drvp->UDMA_mode > 2)
3671 drvp->UDMA_mode = 2;
3672 }
3673 switch (sc->sis_type) {
3674 case SIS_TYPE_66:
3675 case SIS_TYPE_100OLD:
3676 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3677 SIS_TIM66_UDMA_TIME_OFF(drive);
3678 break;
3679 case SIS_TYPE_100NEW:
3680 sis_tim |=
3681 sis_udma100new_tim[drvp->UDMA_mode] <<
3682 				    SIS_TIM100_UDMA_TIME_OFF(drive);
				break;
3683 case SIS_TYPE_133OLD:
3684 sis_tim |=
3685 sis_udma133old_tim[drvp->UDMA_mode] <<
3686 SIS_TIM100_UDMA_TIME_OFF(drive);
3687 break;
3688 default:
3689 aprint_error("unknown SiS IDE type %d\n",
3690 sc->sis_type);
3691 }
3692 } else {
3693 /*
3694 * use Multiword DMA
3695 * Timings will be used for both PIO and DMA,
3696 * so adjust DMA mode if needed
3697 */
3698 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3699 drvp->PIO_mode = drvp->DMA_mode + 2;
3700 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3701 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3702 drvp->PIO_mode - 2 : 0;
3703 if (drvp->DMA_mode == 0)
3704 drvp->PIO_mode = 0;
3705 }
3706 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3707 pio: switch (sc->sis_type) {
3708 case SIS_TYPE_NOUDMA:
3709 case SIS_TYPE_66:
3710 case SIS_TYPE_100OLD:
3711 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3712 SIS_TIM66_ACT_OFF(drive);
3713 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3714 SIS_TIM66_REC_OFF(drive);
3715 break;
3716 case SIS_TYPE_100NEW:
3717 case SIS_TYPE_133OLD:
3718 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3719 SIS_TIM100_ACT_OFF(drive);
3720 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3721 SIS_TIM100_REC_OFF(drive);
3722 break;
3723 default:
3724 aprint_error("unknown SiS IDE type %d\n",
3725 sc->sis_type);
3726 }
3727 }
3728 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3729 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3730 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3731 if (idedma_ctl != 0) {
3732 /* Add software bits in status register */
3733 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3734 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3735 idedma_ctl);
3736 }
3737 pciide_print_modes(cp);
3738 }
3739
3740 void
3741 acer_chip_map(sc, pa)
3742 struct pciide_softc *sc;
3743 struct pci_attach_args *pa;
3744 {
3745 struct pciide_channel *cp;
3746 int channel;
3747 pcireg_t cr, interface;
3748 bus_size_t cmdsize, ctlsize;
3749 pcireg_t rev = PCI_REVISION(pa->pa_class);
3750
3751 if (pciide_chipen(sc, pa) == 0)
3752 return;
3753 aprint_normal("%s: bus-master DMA support present",
3754 sc->sc_wdcdev.sc_dev.dv_xname);
3755 pciide_mapreg_dma(sc, pa);
3756 aprint_normal("\n");
3757 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3758 WDC_CAPABILITY_MODE;
3759 if (sc->sc_dma_ok) {
3760 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3761 if (rev >= 0x20) {
3762 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3763 if (rev >= 0xC4)
3764 sc->sc_wdcdev.UDMA_cap = 5;
3765 else if (rev >= 0xC2)
3766 sc->sc_wdcdev.UDMA_cap = 4;
3767 else
3768 sc->sc_wdcdev.UDMA_cap = 2;
3769 }
3770 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3771 sc->sc_wdcdev.irqack = pciide_irqack;
3772 }
3773
3774 sc->sc_wdcdev.PIO_cap = 4;
3775 sc->sc_wdcdev.DMA_cap = 2;
3776 sc->sc_wdcdev.set_modes = acer_setup_channel;
3777 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3778 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3779
3780 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3781 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3782 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3783
3784 /* Enable "microsoft register bits" R/W. */
3785 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3786 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3787 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3788 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3789 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3790 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3791 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3792 ~ACER_CHANSTATUSREGS_RO);
3793 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3794 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3795 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3796 /* Don't use cr, re-read the real register content instead */
3797 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3798 PCI_CLASS_REG));
3799
3800 /* From linux: enable "Cable Detection" */
3801 if (rev >= 0xC2) {
3802 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3803 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3804 | ACER_0x4B_CDETECT);
3805 }
3806
3807 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3808 cp = &sc->pciide_channels[channel];
3809 if (pciide_chansetup(sc, channel, interface) == 0)
3810 continue;
3811 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3812 aprint_normal("%s: %s channel ignored (disabled)\n",
3813 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3814 continue;
3815 }
3816 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3817 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3818 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3819 if (cp->hw_ok == 0)
3820 continue;
3821 if (pciide_chan_candisable(cp)) {
3822 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3823 pci_conf_write(sc->sc_pc, sc->sc_tag,
3824 PCI_CLASS_REG, cr);
3825 }
3826 pciide_map_compat_intr(pa, cp, channel, interface);
3827 acer_setup_channel(&cp->wdc_channel);
3828 }
3829 }
3830
3831 void
3832 acer_setup_channel(chp)
3833 struct channel_softc *chp;
3834 {
3835 struct ata_drive_datas *drvp;
3836 int drive;
3837 u_int32_t acer_fifo_udma;
3838 u_int32_t idedma_ctl;
3839 struct pciide_channel *cp = (struct pciide_channel*)chp;
3840 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3841
3842 idedma_ctl = 0;
3843 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3844 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3845 acer_fifo_udma), DEBUG_PROBE);
3846 /* setup DMA if needed */
3847 pciide_channel_dma_setup(cp);
3848
3849 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3850 	    DRIVE_UDMA) { /* check for 80-pin cable */
3851 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3852 ACER_0x4A_80PIN(chp->channel)) {
3853 if (chp->ch_drive[0].UDMA_mode > 2)
3854 chp->ch_drive[0].UDMA_mode = 2;
3855 if (chp->ch_drive[1].UDMA_mode > 2)
3856 chp->ch_drive[1].UDMA_mode = 2;
3857 }
3858 }
3859
3860 for (drive = 0; drive < 2; drive++) {
3861 drvp = &chp->ch_drive[drive];
3862 /* If no drive, skip */
3863 if ((drvp->drive_flags & DRIVE) == 0)
3864 continue;
3865 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3866 "channel %d drive %d 0x%x\n", chp->channel, drive,
3867 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3868 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3869 /* clear FIFO/DMA mode */
3870 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3871 ACER_UDMA_EN(chp->channel, drive) |
3872 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3873
3874 /* add timing values, setup DMA if needed */
3875 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3876 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3877 acer_fifo_udma |=
3878 ACER_FTH_OPL(chp->channel, drive, 0x1);
3879 goto pio;
3880 }
3881
3882 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3883 if (drvp->drive_flags & DRIVE_UDMA) {
3884 /* use Ultra/DMA */
3885 drvp->drive_flags &= ~DRIVE_DMA;
3886 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3887 acer_fifo_udma |=
3888 ACER_UDMA_TIM(chp->channel, drive,
3889 acer_udma[drvp->UDMA_mode]);
3890 /* XXX disable if one drive < UDMA3 ? */
3891 if (drvp->UDMA_mode >= 3) {
3892 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3893 ACER_0x4B,
3894 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3895 ACER_0x4B) | ACER_0x4B_UDMA66);
3896 }
3897 } else {
3898 /*
3899 * use Multiword DMA
3900 * Timings will be used for both PIO and DMA,
3901 * so adjust DMA mode if needed
3902 */
3903 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3904 drvp->PIO_mode = drvp->DMA_mode + 2;
3905 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3906 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3907 drvp->PIO_mode - 2 : 0;
3908 if (drvp->DMA_mode == 0)
3909 drvp->PIO_mode = 0;
3910 }
3911 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3912 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3913 ACER_IDETIM(chp->channel, drive),
3914 acer_pio[drvp->PIO_mode]);
3915 }
3916 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3917 acer_fifo_udma), DEBUG_PROBE);
3918 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3919 if (idedma_ctl != 0) {
3920 /* Add software bits in status register */
3921 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3922 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3923 idedma_ctl);
3924 }
3925 pciide_print_modes(cp);
3926 }
3927
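/*
 * Interrupt handler: the ACER_CHIDS register tells us which channel(s)
 * actually raised the interrupt, so only those are handed to wdcintr().
 */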
3928 int
3929 acer_pci_intr(arg)
3930 void *arg;
3931 {
3932 struct pciide_softc *sc = arg;
3933 struct pciide_channel *cp;
3934 struct channel_softc *wdc_cp;
3935 int i, rv, crv;
3936 u_int32_t chids;
3937
3938 rv = 0;
3939 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3940 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3941 cp = &sc->pciide_channels[i];
3942 wdc_cp = &cp->wdc_channel;
3943 /* If a compat channel skip. */
3944 if (cp->compat)
3945 continue;
3946 if (chids & ACER_CHIDS_INT(i)) {
3947 crv = wdcintr(wdc_cp);
3948 if (crv == 0)
3949 printf("%s:%d: bogus intr\n",
3950 sc->sc_wdcdev.sc_dev.dv_xname, i);
3951 else
3952 rv = 1;
3953 }
3954 }
3955 return rv;
3956 }
3957
3958 void
3959 hpt_chip_map(sc, pa)
3960 struct pciide_softc *sc;
3961 struct pci_attach_args *pa;
3962 {
3963 struct pciide_channel *cp;
3964 int i, compatchan, revision;
3965 pcireg_t interface;
3966 bus_size_t cmdsize, ctlsize;
3967
3968 if (pciide_chipen(sc, pa) == 0)
3969 return;
3970 revision = PCI_REVISION(pa->pa_class);
3971 aprint_normal(": Triones/Highpoint ");
3972 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3973 aprint_normal("HPT374 IDE Controller\n");
3974 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3975 aprint_normal("HPT372 IDE Controller\n");
3976 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3977 if (revision == HPT372_REV)
3978 aprint_normal("HPT372 IDE Controller\n");
3979 else if (revision == HPT370_REV)
3980 aprint_normal("HPT370 IDE Controller\n");
3981 else if (revision == HPT370A_REV)
3982 aprint_normal("HPT370A IDE Controller\n");
3983 else if (revision == HPT366_REV)
3984 aprint_normal("HPT366 IDE Controller\n");
3985 else
3986 aprint_normal("unknown HPT IDE controller rev %d\n",
3987 revision);
3988 } else
3989 aprint_normal("unknown HPT IDE controller 0x%x\n",
3990 sc->sc_pp->ide_product);
3991
3992 /*
3993 	 * When the chip is in native mode it identifies itself as a
3994 	 * 'misc mass storage' device. Fake the interface in this case.
3995 */
3996 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3997 interface = PCI_INTERFACE(pa->pa_class);
3998 } else {
3999 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4000 PCIIDE_INTERFACE_PCI(0);
4001 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4002 (revision == HPT370_REV || revision == HPT370A_REV ||
4003 revision == HPT372_REV)) ||
4004 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4005 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4006 interface |= PCIIDE_INTERFACE_PCI(1);
4007 }
4008
4009 aprint_normal("%s: bus-master DMA support present",
4010 sc->sc_wdcdev.sc_dev.dv_xname);
4011 pciide_mapreg_dma(sc, pa);
4012 aprint_normal("\n");
4013 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4014 WDC_CAPABILITY_MODE;
4015 if (sc->sc_dma_ok) {
4016 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4017 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4018 sc->sc_wdcdev.irqack = pciide_irqack;
4019 }
4020 sc->sc_wdcdev.PIO_cap = 4;
4021 sc->sc_wdcdev.DMA_cap = 2;
4022
4023 sc->sc_wdcdev.set_modes = hpt_setup_channel;
4024 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4025 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4026 revision == HPT366_REV) {
4027 sc->sc_wdcdev.UDMA_cap = 4;
4028 /*
4029 * The 366 has 2 PCI IDE functions, one for primary and one
4030 * for secondary. So we need to call pciide_mapregs_compat()
4031 * with the real channel
4032 */
4033 if (pa->pa_function == 0) {
4034 compatchan = 0;
4035 } else if (pa->pa_function == 1) {
4036 compatchan = 1;
4037 } else {
4038 aprint_error("%s: unexpected PCI function %d\n",
4039 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
4040 return;
4041 }
4042 sc->sc_wdcdev.nchannels = 1;
4043 } else {
4044 sc->sc_wdcdev.nchannels = 2;
4045 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
4046 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4047 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4048 revision == HPT372_REV))
4049 sc->sc_wdcdev.UDMA_cap = 6;
4050 else
4051 sc->sc_wdcdev.UDMA_cap = 5;
4052 }
4053 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4054 cp = &sc->pciide_channels[i];
4055 if (sc->sc_wdcdev.nchannels > 1) {
4056 compatchan = i;
4057 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
4058 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
4059 aprint_normal(
4060 "%s: %s channel ignored (disabled)\n",
4061 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4062 continue;
4063 }
4064 }
4065 if (pciide_chansetup(sc, i, interface) == 0)
4066 continue;
4067 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4068 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4069 &ctlsize, hpt_pci_intr);
4070 } else {
4071 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
4072 &cmdsize, &ctlsize);
4073 }
4074 if (cp->hw_ok == 0)
4075 return;
4076 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4077 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4078 wdcattach(&cp->wdc_channel);
4079 hpt_setup_channel(&cp->wdc_channel);
4080 }
4081 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4082 (revision == HPT370_REV || revision == HPT370A_REV ||
4083 revision == HPT372_REV)) ||
4084 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4085 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
4086 /*
4087 		 * HPT370_REV and higher have a bit to disable interrupts;
4088 		 * make sure to clear it.
4089 */
4090 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
4091 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
4092 ~HPT_CSEL_IRQDIS);
4093 }
4094 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
4095 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4096 revision == HPT372_REV ) ||
4097 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4098 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4099 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
4100 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
4101 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
4102 return;
4103 }
4104
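/*
 * Per-channel timing setup: the raw timing words come from per-chip
 * lookup tables (hpt366/370/372/374_{pio,dma,udma}[]) indexed by the
 * negotiated mode, and HPT_CSEL is consulted for the 40/80-wire cable
 * state before allowing UDMA modes above 2.
 */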
4105 void
4106 hpt_setup_channel(chp)
4107 struct channel_softc *chp;
4108 {
4109 struct ata_drive_datas *drvp;
4110 int drive;
4111 int cable;
4112 u_int32_t before, after;
4113 u_int32_t idedma_ctl;
4114 struct pciide_channel *cp = (struct pciide_channel*)chp;
4115 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4116 int revision =
4117 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
4118
4119 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
4120
4121 /* setup DMA if needed */
4122 pciide_channel_dma_setup(cp);
4123
4124 idedma_ctl = 0;
4125
4126 /* Per drive settings */
4127 for (drive = 0; drive < 2; drive++) {
4128 drvp = &chp->ch_drive[drive];
4129 /* If no drive, skip */
4130 if ((drvp->drive_flags & DRIVE) == 0)
4131 continue;
4132 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
4133 HPT_IDETIM(chp->channel, drive));
4134
4135 /* add timing values, setup DMA if needed */
4136 if (drvp->drive_flags & DRIVE_UDMA) {
4137 /* use Ultra/DMA */
4138 drvp->drive_flags &= ~DRIVE_DMA;
4139 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
4140 drvp->UDMA_mode > 2)
4141 drvp->UDMA_mode = 2;
4142 switch (sc->sc_pp->ide_product) {
4143 case PCI_PRODUCT_TRIONES_HPT374:
4144 after = hpt374_udma[drvp->UDMA_mode];
4145 break;
4146 case PCI_PRODUCT_TRIONES_HPT372:
4147 after = hpt372_udma[drvp->UDMA_mode];
4148 break;
4149 case PCI_PRODUCT_TRIONES_HPT366:
4150 default:
4151 switch(revision) {
4152 case HPT372_REV:
4153 after = hpt372_udma[drvp->UDMA_mode];
4154 break;
4155 case HPT370_REV:
4156 case HPT370A_REV:
4157 after = hpt370_udma[drvp->UDMA_mode];
4158 break;
4159 case HPT366_REV:
4160 default:
4161 after = hpt366_udma[drvp->UDMA_mode];
4162 break;
4163 }
4164 }
4165 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4166 } else if (drvp->drive_flags & DRIVE_DMA) {
4167 /*
4168 * use Multiword DMA.
4169 * Timings will be used for both PIO and DMA, so adjust
4170 * DMA mode if needed
4171 */
4172 if (drvp->PIO_mode >= 3 &&
4173 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
4174 drvp->DMA_mode = drvp->PIO_mode - 2;
4175 }
4176 switch (sc->sc_pp->ide_product) {
4177 case PCI_PRODUCT_TRIONES_HPT374:
4178 after = hpt374_dma[drvp->DMA_mode];
4179 break;
4180 case PCI_PRODUCT_TRIONES_HPT372:
4181 after = hpt372_dma[drvp->DMA_mode];
4182 break;
4183 case PCI_PRODUCT_TRIONES_HPT366:
4184 default:
4185 switch(revision) {
4186 case HPT372_REV:
4187 after = hpt372_dma[drvp->DMA_mode];
4188 break;
4189 case HPT370_REV:
4190 case HPT370A_REV:
4191 after = hpt370_dma[drvp->DMA_mode];
4192 break;
4193 case HPT366_REV:
4194 default:
4195 after = hpt366_dma[drvp->DMA_mode];
4196 break;
4197 }
4198 }
4199 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4200 } else {
4201 /* PIO only */
4202 switch (sc->sc_pp->ide_product) {
4203 case PCI_PRODUCT_TRIONES_HPT374:
4204 after = hpt374_pio[drvp->PIO_mode];
4205 break;
4206 case PCI_PRODUCT_TRIONES_HPT372:
4207 after = hpt372_pio[drvp->PIO_mode];
4208 break;
4209 case PCI_PRODUCT_TRIONES_HPT366:
4210 default:
4211 switch(revision) {
4212 case HPT372_REV:
4213 after = hpt372_pio[drvp->PIO_mode];
4214 break;
4215 case HPT370_REV:
4216 case HPT370A_REV:
4217 after = hpt370_pio[drvp->PIO_mode];
4218 break;
4219 case HPT366_REV:
4220 default:
4221 after = hpt366_pio[drvp->PIO_mode];
4222 break;
4223 }
4224 }
4225 }
4226 pci_conf_write(sc->sc_pc, sc->sc_tag,
4227 HPT_IDETIM(chp->channel, drive), after);
4228 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
4229 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
4230 after, before), DEBUG_PROBE);
4231 }
4232 if (idedma_ctl != 0) {
4233 /* Add software bits in status register */
4234 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4235 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4236 idedma_ctl);
4237 }
4238 pciide_print_modes(cp);
4239 }
4240
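/*
 * Interrupt handler: no per-channel interrupt status register is used
 * here; instead the bus-master DMA status (IDEDMA_CTL) of each channel
 * is read, and only channels showing an interrupt without an active
 * transfer are dispatched to wdcintr().
 */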
4241 int
4242 hpt_pci_intr(arg)
4243 void *arg;
4244 {
4245 struct pciide_softc *sc = arg;
4246 struct pciide_channel *cp;
4247 struct channel_softc *wdc_cp;
4248 int rv = 0;
4249 int dmastat, i, crv;
4250
4251 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4252 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4253 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4254 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4255 IDEDMA_CTL_INTR)
4256 continue;
4257 cp = &sc->pciide_channels[i];
4258 wdc_cp = &cp->wdc_channel;
4259 crv = wdcintr(wdc_cp);
4260 if (crv == 0) {
4261 printf("%s:%d: bogus intr\n",
4262 sc->sc_wdcdev.sc_dev.dv_xname, i);
4263 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4264 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4265 } else
4266 rv = 1;
4267 }
4268 return rv;
4269 }
4270
4271
4272 /* Macros to test which Promise product family we are dealing with */
4273 #define PDC_IS_262(sc) \
4274 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
4275 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4276 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4277 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4278 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4279 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4280 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4281 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4282 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4283 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4284 #define PDC_IS_265(sc) \
4285 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4286 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4287 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4288 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4289 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4290 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4291 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4292 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4293 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4294 #define PDC_IS_268(sc) \
4295 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4296 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4297 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4298 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4299 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4300 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4301 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4302 #define PDC_IS_276(sc) \
4303 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4304 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4305 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4306 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4307 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4308
4309 void
4310 pdc202xx_chip_map(sc, pa)
4311 struct pciide_softc *sc;
4312 struct pci_attach_args *pa;
4313 {
4314 struct pciide_channel *cp;
4315 int channel;
4316 pcireg_t interface, st, mode;
4317 bus_size_t cmdsize, ctlsize;
4318
4319 if (!PDC_IS_268(sc)) {
4320 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4321 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
4322 st), DEBUG_PROBE);
4323 }
4324 if (pciide_chipen(sc, pa) == 0)
4325 return;
4326
4327 /* turn off RAID mode */
4328 if (!PDC_IS_268(sc))
4329 st &= ~PDC2xx_STATE_IDERAID;
4330
4331 /*
4332 	 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
4333 	 * mode; we have to fake the interface.
4334 */
4335 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
4336 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
4337 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4338
4339 aprint_normal("%s: bus-master DMA support present",
4340 sc->sc_wdcdev.sc_dev.dv_xname);
4341 pciide_mapreg_dma(sc, pa);
4342 aprint_normal("\n");
4343 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4344 WDC_CAPABILITY_MODE;
4345 if (sc->sc_dma_ok) {
4346 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4347 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4348 sc->sc_wdcdev.irqack = pciide_irqack;
4349 }
4350 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
4351 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
4352 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
4353 sc->sc_wdcdev.PIO_cap = 4;
4354 sc->sc_wdcdev.DMA_cap = 2;
4355 if (PDC_IS_276(sc))
4356 sc->sc_wdcdev.UDMA_cap = 6;
4357 else if (PDC_IS_265(sc))
4358 sc->sc_wdcdev.UDMA_cap = 5;
4359 else if (PDC_IS_262(sc))
4360 sc->sc_wdcdev.UDMA_cap = 4;
4361 else
4362 sc->sc_wdcdev.UDMA_cap = 2;
4363 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
4364 pdc20268_setup_channel : pdc202xx_setup_channel;
4365 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4366 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4367
4368 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
4369 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
4370 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
4371 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
4372 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
4373 }
4374
4375 if (!PDC_IS_268(sc)) {
4376 /* setup failsafe defaults */
4377 mode = 0;
4378 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
4379 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
4380 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
4381 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
4382 for (channel = 0;
4383 channel < sc->sc_wdcdev.nchannels;
4384 channel++) {
4385 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4386 "drive 0 initial timings 0x%x, now 0x%x\n",
4387 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4388 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
4389 DEBUG_PROBE);
4390 pci_conf_write(sc->sc_pc, sc->sc_tag,
4391 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
4392 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4393 "drive 1 initial timings 0x%x, now 0x%x\n",
4394 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4395 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
4396 pci_conf_write(sc->sc_pc, sc->sc_tag,
4397 PDC2xx_TIM(channel, 1), mode);
4398 }
4399
4400 mode = PDC2xx_SCR_DMA;
4401 if (PDC_IS_265(sc)) {
4402 mode = PDC2xx_SCR_SET_GEN(mode, PDC265_SCR_GEN_LAT);
4403 } else if (PDC_IS_262(sc)) {
4404 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
4405 } else {
4406 /* the BIOS set it up this way */
4407 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
4408 }
4409 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
4410 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
4411 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
4412 "now 0x%x\n",
4413 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4414 PDC2xx_SCR),
4415 mode), DEBUG_PROBE);
4416 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4417 PDC2xx_SCR, mode);
4418
4419 /* controller initial state register is OK even without BIOS */
4420 /* Set DMA mode to IDE DMA compatibility */
4421 mode =
4422 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
4423 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
4424 DEBUG_PROBE);
4425 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
4426 mode | 0x1);
4427 mode =
4428 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
4429 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
4430 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
4431 mode | 0x1);
4432 }
4433
4434 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4435 cp = &sc->pciide_channels[channel];
4436 if (pciide_chansetup(sc, channel, interface) == 0)
4437 continue;
4438 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
4439 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
4440 aprint_normal("%s: %s channel ignored (disabled)\n",
4441 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4442 continue;
4443 }
4444 if (PDC_IS_265(sc))
4445 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4446 pdc20265_pci_intr);
4447 else
4448 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4449 pdc202xx_pci_intr);
4450 if (cp->hw_ok == 0)
4451 continue;
4452 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
4453 st &= ~(PDC_IS_262(sc) ?
4454 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
4455 pciide_map_compat_intr(pa, cp, channel, interface);
4456 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
4457 }
4458 if (!PDC_IS_268(sc)) {
4459 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4460 "0x%x\n", st), DEBUG_PROBE);
4461 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4462 }
4463 return;
4464 }
4465
4466 void
4467 pdc202xx_setup_channel(chp)
4468 struct channel_softc *chp;
4469 {
4470 struct ata_drive_datas *drvp;
4471 int drive;
4472 pcireg_t mode, st;
4473 u_int32_t idedma_ctl, scr, atapi;
4474 struct pciide_channel *cp = (struct pciide_channel*)chp;
4475 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4476 int channel = chp->channel;
4477
4478 /* setup DMA if needed */
4479 pciide_channel_dma_setup(cp);
4480
4481 idedma_ctl = 0;
4482 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4483 sc->sc_wdcdev.sc_dev.dv_xname,
4484 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4485 DEBUG_PROBE);
4486
4487 /* Per channel settings */
4488 if (PDC_IS_262(sc)) {
4489 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4490 PDC262_U66);
4491 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4492 /* Trim UDMA mode */
4493 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4494 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4495 chp->ch_drive[0].UDMA_mode <= 2) ||
4496 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4497 chp->ch_drive[1].UDMA_mode <= 2)) {
4498 if (chp->ch_drive[0].UDMA_mode > 2)
4499 chp->ch_drive[0].UDMA_mode = 2;
4500 if (chp->ch_drive[1].UDMA_mode > 2)
4501 chp->ch_drive[1].UDMA_mode = 2;
4502 }
4503 /* Set U66 if needed */
4504 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4505 chp->ch_drive[0].UDMA_mode > 2) ||
4506 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4507 chp->ch_drive[1].UDMA_mode > 2))
4508 scr |= PDC262_U66_EN(channel);
4509 else
4510 scr &= ~PDC262_U66_EN(channel);
4511 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4512 PDC262_U66, scr);
4513 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4514 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4515 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4516 PDC262_ATAPI(channel))), DEBUG_PROBE);
4517 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4518 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4519 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4520 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4521 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4522 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4523 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4524 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4525 atapi = 0;
4526 else
4527 atapi = PDC262_ATAPI_UDMA;
4528 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4529 PDC262_ATAPI(channel), atapi);
4530 }
4531 }
4532 for (drive = 0; drive < 2; drive++) {
4533 drvp = &chp->ch_drive[drive];
4534 /* If no drive, skip */
4535 if ((drvp->drive_flags & DRIVE) == 0)
4536 continue;
4537 mode = 0;
4538 if (drvp->drive_flags & DRIVE_UDMA) {
4539 /* use Ultra/DMA */
4540 drvp->drive_flags &= ~DRIVE_DMA;
4541 mode = PDC2xx_TIM_SET_MB(mode,
4542 pdc2xx_udma_mb[drvp->UDMA_mode]);
4543 mode = PDC2xx_TIM_SET_MC(mode,
4544 pdc2xx_udma_mc[drvp->UDMA_mode]);
4545 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4546 } else if (drvp->drive_flags & DRIVE_DMA) {
4547 mode = PDC2xx_TIM_SET_MB(mode,
4548 pdc2xx_dma_mb[drvp->DMA_mode]);
4549 mode = PDC2xx_TIM_SET_MC(mode,
4550 pdc2xx_dma_mc[drvp->DMA_mode]);
4551 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4552 } else {
4553 mode = PDC2xx_TIM_SET_MB(mode,
4554 pdc2xx_dma_mb[0]);
4555 mode = PDC2xx_TIM_SET_MC(mode,
4556 pdc2xx_dma_mc[0]);
4557 }
4558 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4559 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4560 if (drvp->drive_flags & DRIVE_ATA)
4561 mode |= PDC2xx_TIM_PRE;
4562 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4563 if (drvp->PIO_mode >= 3) {
4564 mode |= PDC2xx_TIM_IORDY;
4565 if (drive == 0)
4566 mode |= PDC2xx_TIM_IORDYp;
4567 }
4568 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4569 "timings 0x%x\n",
4570 sc->sc_wdcdev.sc_dev.dv_xname,
4571 chp->channel, drive, mode), DEBUG_PROBE);
4572 pci_conf_write(sc->sc_pc, sc->sc_tag,
4573 PDC2xx_TIM(chp->channel, drive), mode);
4574 }
4575 if (idedma_ctl != 0) {
4576 /* Add software bits in status register */
4577 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4578 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4579 idedma_ctl);
4580 }
4581 pciide_print_modes(cp);
4582 }
4583
4584 void
4585 pdc20268_setup_channel(chp)
4586 struct channel_softc *chp;
4587 {
4588 struct ata_drive_datas *drvp;
4589 int drive;
4590 u_int32_t idedma_ctl;
4591 struct pciide_channel *cp = (struct pciide_channel*)chp;
4592 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4593 int u100;
4594
4595 /* setup DMA if needed */
4596 pciide_channel_dma_setup(cp);
4597
4598 idedma_ctl = 0;
4599
4600 /* I don't know what this is for, FreeBSD does it ... */
4601 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4602 IDEDMA_CMD + 0x1 + IDEDMA_SCH_OFFSET * chp->channel, 0x0b);
4603
4604 /*
4605 * cable type detect, from FreeBSD
4606 */
4607 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4608 IDEDMA_CMD + 0x3 + IDEDMA_SCH_OFFSET * chp->channel) & 0x04) ?
4609 0 : 1;
4610
4611 for (drive = 0; drive < 2; drive++) {
4612 drvp = &chp->ch_drive[drive];
4613 /* If no drive, skip */
4614 if ((drvp->drive_flags & DRIVE) == 0)
4615 continue;
4616 if (drvp->drive_flags & DRIVE_UDMA) {
4617 /* use Ultra/DMA */
4618 drvp->drive_flags &= ~DRIVE_DMA;
4619 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4620 if (drvp->UDMA_mode > 2 && u100 == 0)
4621 drvp->UDMA_mode = 2;
4622 } else if (drvp->drive_flags & DRIVE_DMA) {
4623 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4624 }
4625 }
4626 	/* nothing to do to set up modes; the controller snoops the SET_FEATURES cmd */
4627 if (idedma_ctl != 0) {
4628 /* Add software bits in status register */
4629 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4630 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4631 idedma_ctl);
4632 }
4633 pciide_print_modes(cp);
4634 }
4635
4636 int
4637 pdc202xx_pci_intr(arg)
4638 void *arg;
4639 {
4640 struct pciide_softc *sc = arg;
4641 struct pciide_channel *cp;
4642 struct channel_softc *wdc_cp;
4643 int i, rv, crv;
4644 u_int32_t scr;
4645
4646 rv = 0;
4647 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4648 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4649 cp = &sc->pciide_channels[i];
4650 wdc_cp = &cp->wdc_channel;
4651 /* If a compat channel skip. */
4652 if (cp->compat)
4653 continue;
4654 if (scr & PDC2xx_SCR_INT(i)) {
4655 crv = wdcintr(wdc_cp);
4656 if (crv == 0)
4657 printf("%s:%d: bogus intr (reg 0x%x)\n",
4658 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4659 else
4660 rv = 1;
4661 }
4662 }
4663 return rv;
4664 }
4665
4666 int
4667 pdc20265_pci_intr(arg)
4668 void *arg;
4669 {
4670 struct pciide_softc *sc = arg;
4671 struct pciide_channel *cp;
4672 struct channel_softc *wdc_cp;
4673 int i, rv, crv;
4674 u_int32_t dmastat;
4675
4676 rv = 0;
4677 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4678 cp = &sc->pciide_channels[i];
4679 wdc_cp = &cp->wdc_channel;
4680 /* If a compat channel skip. */
4681 if (cp->compat)
4682 continue;
4683 /*
4684 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
4685 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
4686 * So use it instead (requires 2 reg reads instead of 1,
4687 * but we can't do it another way).
4688 */
4689 dmastat = bus_space_read_1(sc->sc_dma_iot,
4690 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4691 if((dmastat & IDEDMA_CTL_INTR) == 0)
4692 continue;
4693 crv = wdcintr(wdc_cp);
4694 if (crv == 0)
4695 printf("%s:%d: bogus intr\n",
4696 sc->sc_wdcdev.sc_dev.dv_xname, i);
4697 else
4698 rv = 1;
4699 }
4700 return rv;
4701 }
4702
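/*
 * LBA48 transfers on these Promise chips need the transfer length
 * (dm_mapsize >> 1) OR'ed into the PDC262_ATAPI register, together with
 * an LBA48 read/write flag, before the DMA is started; the register is
 * restored once the transfer completes (see pdc20262_dma_finish() below).
 */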
4703 static void
4704 pdc20262_dma_start(v, channel, drive)
4705 void *v;
4706 int channel, drive;
4707 {
4708 struct pciide_softc *sc = v;
4709 struct pciide_dma_maps *dma_maps =
4710 &sc->pciide_channels[channel].dma_maps[drive];
4711 int atapi;
4712
4713 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4714 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4715 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4716 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4717 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4718 PDC262_ATAPI(channel), atapi);
4719 }
4720
4721 pciide_dma_start(v, channel, drive);
4722 }
4723
4724 int
4725 pdc20262_dma_finish(v, channel, drive, force)
4726 void *v;
4727 int channel, drive;
4728 int force;
4729 {
4730 struct pciide_softc *sc = v;
4731 struct pciide_dma_maps *dma_maps =
4732 &sc->pciide_channels[channel].dma_maps[drive];
4733 struct channel_softc *chp;
4734 int atapi, error;
4735
4736 error = pciide_dma_finish(v, channel, drive, force);
4737
4738 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4739 chp = sc->wdc_chanarray[channel];
4740 atapi = 0;
4741 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4742 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4743 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4744 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4745 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4746 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4747 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4748 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4749 atapi = PDC262_ATAPI_UDMA;
4750 }
4751 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4752 PDC262_ATAPI(channel), atapi);
4753 }
4754
4755 return error;
4756 }
4757
4758 void
4759 opti_chip_map(sc, pa)
4760 struct pciide_softc *sc;
4761 struct pci_attach_args *pa;
4762 {
4763 struct pciide_channel *cp;
4764 bus_size_t cmdsize, ctlsize;
4765 pcireg_t interface;
4766 u_int8_t init_ctrl;
4767 int channel;
4768
4769 if (pciide_chipen(sc, pa) == 0)
4770 return;
4771 aprint_normal("%s: bus-master DMA support present",
4772 sc->sc_wdcdev.sc_dev.dv_xname);
4773
4774 /*
4775 * XXXSCW:
4776 * There seem to be a couple of buggy revisions/implementations
4777 * of the OPTi pciide chipset. This kludge seems to fix one of
4778 * the reported problems (PR/11644) but still fails for the
4779 * other (PR/13151), although the latter may be due to other
4780 * issues too...
4781 */
4782 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4783 aprint_normal(" but disabled due to chip rev. <= 0x12");
4784 sc->sc_dma_ok = 0;
4785 } else
4786 pciide_mapreg_dma(sc, pa);
4787
4788 aprint_normal("\n");
4789
4790 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4791 WDC_CAPABILITY_MODE;
4792 sc->sc_wdcdev.PIO_cap = 4;
4793 if (sc->sc_dma_ok) {
4794 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4795 sc->sc_wdcdev.irqack = pciide_irqack;
4796 sc->sc_wdcdev.DMA_cap = 2;
4797 }
4798 sc->sc_wdcdev.set_modes = opti_setup_channel;
4799
4800 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4801 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4802
4803 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4804 OPTI_REG_INIT_CONTROL);
4805
4806 interface = PCI_INTERFACE(pa->pa_class);
4807
4808 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4809 cp = &sc->pciide_channels[channel];
4810 if (pciide_chansetup(sc, channel, interface) == 0)
4811 continue;
4812 if (channel == 1 &&
4813 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4814 aprint_normal("%s: %s channel ignored (disabled)\n",
4815 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4816 continue;
4817 }
4818 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4819 pciide_pci_intr);
4820 if (cp->hw_ok == 0)
4821 continue;
4822 pciide_map_compat_intr(pa, cp, channel, interface);
4823 if (cp->hw_ok == 0)
4824 continue;
4825 opti_setup_channel(&cp->wdc_channel);
4826 }
4827 }
4828
4829 void
4830 opti_setup_channel(chp)
4831 struct channel_softc *chp;
4832 {
4833 struct ata_drive_datas *drvp;
4834 struct pciide_channel *cp = (struct pciide_channel*)chp;
4835 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4836 int drive, spd;
4837 int mode[2];
4838 u_int8_t rv, mr;
4839
4840 /*
4841 * The `Delay' and `Address Setup Time' fields of the
4842 * Miscellaneous Register are always zero initially.
4843 */
4844 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4845 mr &= ~(OPTI_MISC_DELAY_MASK |
4846 OPTI_MISC_ADDR_SETUP_MASK |
4847 OPTI_MISC_INDEX_MASK);
4848
4849 /* Prime the control register before setting timing values */
4850 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4851
4852 	/* Determine the clock rate of the PCI bus the chip is attached to */
4853 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4854 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4855
4856 /* setup DMA if needed */
4857 pciide_channel_dma_setup(cp);
4858
4859 for (drive = 0; drive < 2; drive++) {
4860 drvp = &chp->ch_drive[drive];
4861 /* If no drive, skip */
4862 if ((drvp->drive_flags & DRIVE) == 0) {
4863 mode[drive] = -1;
4864 continue;
4865 }
4866
4867 if ((drvp->drive_flags & DRIVE_DMA)) {
4868 /*
4869 * Timings will be used for both PIO and DMA,
4870 * so adjust DMA mode if needed
4871 */
4872 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4873 drvp->PIO_mode = drvp->DMA_mode + 2;
4874 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4875 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4876 drvp->PIO_mode - 2 : 0;
4877 if (drvp->DMA_mode == 0)
4878 drvp->PIO_mode = 0;
4879
4880 mode[drive] = drvp->DMA_mode + 5;
4881 } else
4882 mode[drive] = drvp->PIO_mode;
4883
4884 if (drive && mode[0] >= 0 &&
4885 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4886 /*
4887 * Can't have two drives using different values
4888 * for `Address Setup Time'.
4889 * Slow down the faster drive to compensate.
4890 */
4891 int d = (opti_tim_as[spd][mode[0]] >
4892 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4893
4894 mode[d] = mode[1-d];
4895 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4896 chp->ch_drive[d].DMA_mode = 0;
4897 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4898 }
4899 }
4900
4901 for (drive = 0; drive < 2; drive++) {
4902 int m;
4903 if ((m = mode[drive]) < 0)
4904 continue;
4905
4906 /* Set the Address Setup Time and select appropriate index */
4907 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4908 rv |= OPTI_MISC_INDEX(drive);
4909 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4910
4911 /* Set the pulse width and recovery timing parameters */
4912 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4913 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4914 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4915 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4916
4917 /* Set the Enhanced Mode register appropriately */
4918 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4919 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4920 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4921 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4922 }
4923
4924 /* Finally, enable the timings */
4925 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4926
4927 pciide_print_modes(cp);
4928 }
4929
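/*
 * The ATP850 and the later ATP86x parts use different register layouts:
 * the 850 has an IDETIME register per channel and packs both channels'
 * UDMA bits into ATP850_UDMA, while the 86x parts use shared
 * ATP860_IDETIME/ATP860_UDMA registers plus a cable-detect bit in
 * ATP8x0_CTRL.  The macro below selects between the two cases.
 */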
4930 #define ACARD_IS_850(sc) \
4931 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4932
4933 void
4934 acard_chip_map(sc, pa)
4935 struct pciide_softc *sc;
4936 struct pci_attach_args *pa;
4937 {
4938 struct pciide_channel *cp;
4939 int i;
4940 pcireg_t interface;
4941 bus_size_t cmdsize, ctlsize;
4942
4943 if (pciide_chipen(sc, pa) == 0)
4944 return;
4945
4946 /*
4947 	 * When the chip is in native mode it identifies itself as a
4948 	 * 'misc mass storage' device. Fake the interface in this case.
4949 */
4950 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4951 interface = PCI_INTERFACE(pa->pa_class);
4952 } else {
4953 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4954 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4955 }
4956
4957 aprint_normal("%s: bus-master DMA support present",
4958 sc->sc_wdcdev.sc_dev.dv_xname);
4959 pciide_mapreg_dma(sc, pa);
4960 aprint_normal("\n");
4961 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4962 WDC_CAPABILITY_MODE;
4963
4964 if (sc->sc_dma_ok) {
4965 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4966 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4967 sc->sc_wdcdev.irqack = pciide_irqack;
4968 }
4969 sc->sc_wdcdev.PIO_cap = 4;
4970 sc->sc_wdcdev.DMA_cap = 2;
4971 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4972
4973 sc->sc_wdcdev.set_modes = acard_setup_channel;
4974 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4975 sc->sc_wdcdev.nchannels = 2;
4976
4977 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4978 cp = &sc->pciide_channels[i];
4979 if (pciide_chansetup(sc, i, interface) == 0)
4980 continue;
4981 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4982 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4983 &ctlsize, pciide_pci_intr);
4984 } else {
4985 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4986 &cmdsize, &ctlsize);
4987 }
4988 if (cp->hw_ok == 0)
4989 return;
4990 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4991 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4992 wdcattach(&cp->wdc_channel);
4993 acard_setup_channel(&cp->wdc_channel);
4994 }
4995 if (!ACARD_IS_850(sc)) {
4996 u_int32_t reg;
4997 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4998 reg &= ~ATP860_CTRL_INT;
4999 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
5000 }
5001 }
5002
5003 void
5004 acard_setup_channel(chp)
5005 struct channel_softc *chp;
5006 {
5007 struct ata_drive_datas *drvp;
5008 struct pciide_channel *cp = (struct pciide_channel*)chp;
5009 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5010 int channel = chp->channel;
5011 int drive;
5012 u_int32_t idetime, udma_mode;
5013 u_int32_t idedma_ctl;
5014
5015 /* setup DMA if needed */
5016 pciide_channel_dma_setup(cp);
5017
5018 if (ACARD_IS_850(sc)) {
5019 idetime = 0;
5020 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
5021 udma_mode &= ~ATP850_UDMA_MASK(channel);
5022 } else {
5023 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
5024 idetime &= ~ATP860_SETTIME_MASK(channel);
5025 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
5026 udma_mode &= ~ATP860_UDMA_MASK(channel);
5027
5028 		/* check for 80-pin cable */
5029 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
5030 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
5031 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5032 & ATP860_CTRL_80P(chp->channel)) {
5033 if (chp->ch_drive[0].UDMA_mode > 2)
5034 chp->ch_drive[0].UDMA_mode = 2;
5035 if (chp->ch_drive[1].UDMA_mode > 2)
5036 chp->ch_drive[1].UDMA_mode = 2;
5037 }
5038 }
5039 }
5040
5041 idedma_ctl = 0;
5042
5043 /* Per drive settings */
5044 for (drive = 0; drive < 2; drive++) {
5045 drvp = &chp->ch_drive[drive];
5046 /* If no drive, skip */
5047 if ((drvp->drive_flags & DRIVE) == 0)
5048 continue;
5049 /* add timing values, setup DMA if needed */
5050 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5051 (drvp->drive_flags & DRIVE_UDMA)) {
5052 /* use Ultra/DMA */
5053 if (ACARD_IS_850(sc)) {
5054 idetime |= ATP850_SETTIME(drive,
5055 acard_act_udma[drvp->UDMA_mode],
5056 acard_rec_udma[drvp->UDMA_mode]);
5057 udma_mode |= ATP850_UDMA_MODE(channel, drive,
5058 acard_udma_conf[drvp->UDMA_mode]);
5059 } else {
5060 idetime |= ATP860_SETTIME(channel, drive,
5061 acard_act_udma[drvp->UDMA_mode],
5062 acard_rec_udma[drvp->UDMA_mode]);
5063 udma_mode |= ATP860_UDMA_MODE(channel, drive,
5064 acard_udma_conf[drvp->UDMA_mode]);
5065 }
5066 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5067 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5068 (drvp->drive_flags & DRIVE_DMA)) {
5069 /* use Multiword DMA */
5070 drvp->drive_flags &= ~DRIVE_UDMA;
5071 if (ACARD_IS_850(sc)) {
5072 idetime |= ATP850_SETTIME(drive,
5073 acard_act_dma[drvp->DMA_mode],
5074 acard_rec_dma[drvp->DMA_mode]);
5075 } else {
5076 idetime |= ATP860_SETTIME(channel, drive,
5077 acard_act_dma[drvp->DMA_mode],
5078 acard_rec_dma[drvp->DMA_mode]);
5079 }
5080 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5081 } else {
5082 /* PIO only */
5083 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5084 if (ACARD_IS_850(sc)) {
5085 idetime |= ATP850_SETTIME(drive,
5086 acard_act_pio[drvp->PIO_mode],
5087 acard_rec_pio[drvp->PIO_mode]);
5088 } else {
5089 idetime |= ATP860_SETTIME(channel, drive,
5090 acard_act_pio[drvp->PIO_mode],
5091 acard_rec_pio[drvp->PIO_mode]);
5092 }
5093 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
5094 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5095 | ATP8x0_CTRL_EN(channel));
5096 }
5097 }
5098
5099 if (idedma_ctl != 0) {
5100 /* Add software bits in status register */
5101 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5102 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5103 }
5104 pciide_print_modes(cp);
5105
5106 if (ACARD_IS_850(sc)) {
5107 pci_conf_write(sc->sc_pc, sc->sc_tag,
5108 ATP850_IDETIME(channel), idetime);
5109 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
5110 } else {
5111 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
5112 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
5113 }
5114 }
5115
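/*
 * Interrupt handler: the bus-master DMA status is used to find the
 * interrupting channel.  If the channel isn't expecting an interrupt
 * (WDCF_IRQ_WAIT clear) the condition is simply acknowledged and
 * cleared, otherwise it is handed to wdcintr().
 */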
5116 int
5117 acard_pci_intr(arg)
5118 void *arg;
5119 {
5120 struct pciide_softc *sc = arg;
5121 struct pciide_channel *cp;
5122 struct channel_softc *wdc_cp;
5123 int rv = 0;
5124 int dmastat, i, crv;
5125
5126 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5127 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5128 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5129 if ((dmastat & IDEDMA_CTL_INTR) == 0)
5130 continue;
5131 cp = &sc->pciide_channels[i];
5132 wdc_cp = &cp->wdc_channel;
5133 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
5134 (void)wdcintr(wdc_cp);
5135 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5136 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5137 continue;
5138 }
5139 crv = wdcintr(wdc_cp);
5140 if (crv == 0)
5141 printf("%s:%d: bogus intr\n",
5142 sc->sc_wdcdev.sc_dev.dv_xname, i);
5143 else if (crv == 1)
5144 rv = 1;
5145 else if (rv == 0)
5146 rv = crv;
5147 }
5148 return rv;
5149 }
5150
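/*
 * Match callback for pci_find_device(): true if the machine has a
 * Winbond W83C553F southbridge at rev <= 0x05, in which case
 * sl82c105_chip_map() disables DMA (see below).
 */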
5151 static int
5152 sl82c105_bugchk(struct pci_attach_args *pa)
5153 {
5154
5155 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
5156 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
5157 return (0);
5158
5159 if (PCI_REVISION(pa->pa_class) <= 0x05)
5160 return (1);
5161
5162 return (0);
5163 }
5164
5165 void
5166 sl82c105_chip_map(sc, pa)
5167 struct pciide_softc *sc;
5168 struct pci_attach_args *pa;
5169 {
5170 struct pciide_channel *cp;
5171 bus_size_t cmdsize, ctlsize;
5172 pcireg_t interface, idecr;
5173 int channel;
5174
5175 if (pciide_chipen(sc, pa) == 0)
5176 return;
5177
5178 aprint_normal("%s: bus-master DMA support present",
5179 sc->sc_wdcdev.sc_dev.dv_xname);
5180
5181 /*
5182 * Check to see if we're part of the Winbond 83c553 Southbridge.
5183 * If so, we need to disable DMA on rev. <= 5 of that chip.
5184 */
5185 if (pci_find_device(pa, sl82c105_bugchk)) {
5186 aprint_normal(" but disabled due to 83c553 rev. <= 0x05");
5187 sc->sc_dma_ok = 0;
5188 } else
5189 pciide_mapreg_dma(sc, pa);
5190 aprint_normal("\n");
5191
5192 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
5193 WDC_CAPABILITY_MODE;
5194 sc->sc_wdcdev.PIO_cap = 4;
5195 if (sc->sc_dma_ok) {
5196 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
5197 sc->sc_wdcdev.irqack = pciide_irqack;
5198 sc->sc_wdcdev.DMA_cap = 2;
5199 }
5200 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
5201
5202 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5203 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5204
5205 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
5206
5207 interface = PCI_INTERFACE(pa->pa_class);
5208
5209 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5210 cp = &sc->pciide_channels[channel];
5211 if (pciide_chansetup(sc, channel, interface) == 0)
5212 continue;
5213 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
5214 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
5215 aprint_normal("%s: %s channel ignored (disabled)\n",
5216 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
5217 continue;
5218 }
5219 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5220 pciide_pci_intr);
5221 if (cp->hw_ok == 0)
5222 continue;
5223 pciide_map_compat_intr(pa, cp, channel, interface);
5224 if (cp->hw_ok == 0)
5225 continue;
5226 sl82c105_setup_channel(&cp->wdc_channel);
5227 }
5228 }
5229
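/*
 * Per-channel mode setup.  As noted below, the same command on/off timings
 * are used for both PIO and multiword DMA on this chip, so the DMA mode is
 * clamped to (PIO mode - 2); when no usable DMA mode remains, DMA is turned
 * off for that drive and plain PIO timings are programmed instead.
 */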
5230 void
5231 sl82c105_setup_channel(chp)
5232 struct channel_softc *chp;
5233 {
5234 struct ata_drive_datas *drvp;
5235 struct pciide_channel *cp = (struct pciide_channel*)chp;
5236 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5237 int pxdx_reg, drive;
5238 pcireg_t pxdx;
5239
5240 /* Set up DMA if needed. */
5241 pciide_channel_dma_setup(cp);
5242
5243 for (drive = 0; drive < 2; drive++) {
5244 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
5245 : SYMPH_P1D0CR) + (drive * 4);
5246
5247 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
5248
5249 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
5250 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
5251
5252 drvp = &chp->ch_drive[drive];
5253 /* If no drive, skip. */
5254 if ((drvp->drive_flags & DRIVE) == 0) {
5255 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5256 continue;
5257 }
5258
5259 if (drvp->drive_flags & DRIVE_DMA) {
5260 /*
5261 * Timings will be used for both PIO and DMA,
5262 * so adjust DMA mode if needed.
5263 */
5264 if (drvp->PIO_mode >= 3) {
5265 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
5266 drvp->DMA_mode = drvp->PIO_mode - 2;
5267 if (drvp->DMA_mode < 1) {
5268 /*
5269 * Can't mix both PIO and DMA.
5270 * Disable DMA.
5271 */
5272 drvp->drive_flags &= ~DRIVE_DMA;
5273 }
5274 } else {
5275 /*
5276 * Can't mix both PIO and DMA. Disable
5277 * DMA.
5278 */
5279 drvp->drive_flags &= ~DRIVE_DMA;
5280 }
5281 }
5282
5283 if (drvp->drive_flags & DRIVE_DMA) {
5284 /* Use multi-word DMA. */
5285 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
5286 PxDx_CMD_ON_SHIFT;
5287 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
5288 } else {
5289 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
5290 PxDx_CMD_ON_SHIFT;
5291 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
5292 }
5293
5294 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
5295
5296 /* ...and set the mode for this drive. */
5297 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5298 }
5299
5300 pciide_print_modes(cp);
5301 }
5302
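/*
 * ServerWorks OSB4/CSB5/CSB6 IDE.  The UDMA ceiling depends on the part:
 * the OSB4 is limited to mode 2 (Ultra/33), a CSB5 below revision 0x92 to
 * mode 4 (Ultra/66), later CSB5 revisions and the CSB6 to mode 5
 * (Ultra/100).
 */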
5303 void
5304 serverworks_chip_map(sc, pa)
5305 struct pciide_softc *sc;
5306 struct pci_attach_args *pa;
5307 {
5308 struct pciide_channel *cp;
5309 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
5310 pcitag_t pcib_tag;
5311 int channel;
5312 bus_size_t cmdsize, ctlsize;
5313
5314 if (pciide_chipen(sc, pa) == 0)
5315 return;
5316
5317 aprint_normal("%s: bus-master DMA support present",
5318 sc->sc_wdcdev.sc_dev.dv_xname);
5319 pciide_mapreg_dma(sc, pa);
5320 aprint_normal("\n");
5321 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5322 WDC_CAPABILITY_MODE;
5323
5324 if (sc->sc_dma_ok) {
5325 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5326 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5327 sc->sc_wdcdev.irqack = pciide_irqack;
5328 }
5329 sc->sc_wdcdev.PIO_cap = 4;
5330 sc->sc_wdcdev.DMA_cap = 2;
5331 switch (sc->sc_pp->ide_product) {
5332 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
5333 sc->sc_wdcdev.UDMA_cap = 2;
5334 break;
5335 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
5336 if (PCI_REVISION(pa->pa_class) < 0x92)
5337 sc->sc_wdcdev.UDMA_cap = 4;
5338 else
5339 sc->sc_wdcdev.UDMA_cap = 5;
5340 break;
5341 case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
5342 sc->sc_wdcdev.UDMA_cap = 5;
5343 break;
5344 }
5345
5346 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
5347 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5348 sc->sc_wdcdev.nchannels = 2;
5349
5350 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5351 cp = &sc->pciide_channels[channel];
5352 if (pciide_chansetup(sc, channel, interface) == 0)
5353 continue;
5354 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5355 serverworks_pci_intr);
5356 if (cp->hw_ok == 0)
5357 return;
5358 pciide_map_compat_intr(pa, cp, channel, interface);
5359 if (cp->hw_ok == 0)
5360 return;
5361 serverworks_setup_channel(&cp->wdc_channel);
5362 }
5363
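	/*
	 * Finally, touch config register 0x64 of function 0 of this device
	 * (the companion bridge): clear bit 13 and set bit 14.  The meaning
	 * of these bits is not documented here; the write is applied
	 * unconditionally for all supported parts.
	 */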
5364 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
5365 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
5366 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
5367 }
5368
5369 void
5370 serverworks_setup_channel(chp)
5371 struct channel_softc *chp;
5372 {
5373 struct ata_drive_datas *drvp;
5374 struct pciide_channel *cp = (struct pciide_channel*)chp;
5375 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5376 int channel = chp->channel;
5377 int drive, unit;
5378 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
5379 u_int32_t idedma_ctl;
5380 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
5381 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
5382
5383 /* setup DMA if needed */
5384 pciide_channel_dma_setup(cp);
5385
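	/*
	 * Timing state as used below: config register 0x40 holds the
	 * per-drive PIO command timing bytes, 0x44 the multiword DMA timing
	 * bytes, 0x48 the PIO mode numbers and 0x54 the UDMA mode numbers
	 * plus the per-drive UDMA enable bits.  Each channel owns 16 bits
	 * of the timing registers; drives are indexed as
	 * unit = drive + 2 * channel, and the two timing bytes of a channel
	 * are swapped (hence the "unit ^ 1" shifts).
	 */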
5386 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
5387 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
5388 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
5389 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
5390
5391 pio_time &= ~(0xffff << (16 * channel));
5392 dma_time &= ~(0xffff << (16 * channel));
5393 pio_mode &= ~(0xff << (8 * channel + 16));
5394 udma_mode &= ~(0xff << (8 * channel + 16));
5395 udma_mode &= ~(3 << (2 * channel));
5396
5397 idedma_ctl = 0;
5398
5399 /* Per drive settings */
5400 for (drive = 0; drive < 2; drive++) {
5401 drvp = &chp->ch_drive[drive];
5402 /* If no drive, skip */
5403 if ((drvp->drive_flags & DRIVE) == 0)
5404 continue;
5405 unit = drive + 2 * channel;
5406 /* add timing values, setup DMA if needed */
5407 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
5408 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
5409 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5410 (drvp->drive_flags & DRIVE_UDMA)) {
5411 /* use Ultra/DMA, check for 80-pin cable */
5412 if (drvp->UDMA_mode > 2 &&
5413 			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
5414 drvp->UDMA_mode = 2;
5415 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5416 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
5417 udma_mode |= 1 << unit;
5418 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5419 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5420 (drvp->drive_flags & DRIVE_DMA)) {
5421 /* use Multiword DMA */
5422 drvp->drive_flags &= ~DRIVE_UDMA;
5423 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5424 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5425 } else {
5426 /* PIO only */
5427 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5428 }
5429 }
5430
5431 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
5432 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
5433 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
5434 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
5435 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
5436
5437 if (idedma_ctl != 0) {
5438 /* Add software bits in status register */
5439 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5440 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5441 }
5442 pciide_print_modes(cp);
5443 }
5444
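/*
 * Interrupt handler: a channel is only serviced when its bus-master DMA
 * status shows IDEDMA_CTL_INTR set with IDEDMA_CTL_ACT clear.  If wdcintr()
 * claims nothing, the interrupt is reported as bogus and the status is
 * written back to acknowledge it.
 */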
5445 int
5446 serverworks_pci_intr(arg)
5447 void *arg;
5448 {
5449 struct pciide_softc *sc = arg;
5450 struct pciide_channel *cp;
5451 struct channel_softc *wdc_cp;
5452 int rv = 0;
5453 int dmastat, i, crv;
5454
5455 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5456 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5457 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5458 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
5459 IDEDMA_CTL_INTR)
5460 continue;
5461 cp = &sc->pciide_channels[i];
5462 wdc_cp = &cp->wdc_channel;
5463 crv = wdcintr(wdc_cp);
5464 if (crv == 0) {
5465 printf("%s:%d: bogus intr\n",
5466 sc->sc_wdcdev.sc_dev.dv_xname, i);
5467 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5468 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5469 } else
5470 rv = 1;
5471 }
5472 return rv;
5473 }
5474
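/*
 * Chip map for the i31244 ("Artisea") serial ATA controller.  The device
 * is driven like a conventional PCI IDE controller, with the channels
 * handed to the generic sata_setup_channel() routine.
 */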
5475 void
5476 artisea_chip_map(sc, pa)
5477 struct pciide_softc *sc;
5478 struct pci_attach_args *pa;
5479 {
5480 struct pciide_channel *cp;
5481 bus_size_t cmdsize, ctlsize;
5482 pcireg_t interface;
5483 int channel;
5484
5485 if (pciide_chipen(sc, pa) == 0)
5486 return;
5487
5488 	aprint_normal("%s: bus-master DMA support present",
5489 sc->sc_wdcdev.sc_dev.dv_xname);
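	/*
	 * Unless PCIIDE_I31244_ENABLEDMA is defined at build time,
	 * bus-master DMA is left disabled on revision 0 parts.
	 */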
5490 #ifndef PCIIDE_I31244_ENABLEDMA
5491 if (PCI_REVISION(pa->pa_class) == 0) {
5492 aprint_normal(" but disabled due to rev. 0");
5493 sc->sc_dma_ok = 0;
5494 } else
5495 #endif
5496 pciide_mapreg_dma(sc, pa);
5497 aprint_normal("\n");
5498
5499 /*
5500 * XXX Configure LEDs to show activity.
5501 */
5502
5503 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5504 WDC_CAPABILITY_MODE;
5505 sc->sc_wdcdev.PIO_cap = 4;
5506 if (sc->sc_dma_ok) {
5507 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5508 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5509 sc->sc_wdcdev.irqack = pciide_irqack;
5510 sc->sc_wdcdev.DMA_cap = 2;
5511 sc->sc_wdcdev.UDMA_cap = 6;
5512 }
5513 sc->sc_wdcdev.set_modes = sata_setup_channel;
5514
5515 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5516 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5517
5518 interface = PCI_INTERFACE(pa->pa_class);
5519
5520 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5521 cp = &sc->pciide_channels[channel];
5522 if (pciide_chansetup(sc, channel, interface) == 0)
5523 continue;
5524 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5525 pciide_pci_intr);
5526 if (cp->hw_ok == 0)
5527 continue;
5528 pciide_map_compat_intr(pa, cp, channel, interface);
5529 sata_setup_channel(&cp->wdc_channel);
5530 }
5531 }
5532