     1 /*	$NetBSD: pciide.c,v 1.195 2003/08/10 14:51:55 bouyer Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.195 2003/08/10 14:51:55 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/pciide_i31244_reg.h>
123 #include <dev/pci/pciide_sii3112_reg.h>
124 #include <dev/pci/cy82c693var.h>
125
126 #include "opt_pciide.h"
127
128 static const char dmaerrfmt[] =
129 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
130
131 /* inlines for reading/writing 8-bit PCI registers */
132 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
133 int));
134 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
135 int, u_int8_t));
136
137 static __inline u_int8_t
138 pciide_pci_read(pc, pa, reg)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 {
143
144 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
145 ((reg & 0x03) * 8) & 0xff);
146 }
147
148 static __inline void
149 pciide_pci_write(pc, pa, reg, val)
150 pci_chipset_tag_t pc;
151 pcitag_t pa;
152 int reg;
153 u_int8_t val;
154 {
155 pcireg_t pcival;
156
157 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
158 pcival &= ~(0xff << ((reg & 0x03) * 8));
159 pcival |= (val << ((reg & 0x03) * 8));
160 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
161 }
162
163 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164
165 void sata_setup_channel __P((struct channel_softc*));
166
167 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void piix_setup_channel __P((struct channel_softc*));
169 void piix3_4_setup_channel __P((struct channel_softc*));
170 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
171 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
172 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
173
174 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void amd7x6_setup_channel __P((struct channel_softc*));
176
177 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void apollo_setup_channel __P((struct channel_softc*));
179
180 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cmd0643_9_setup_channel __P((struct channel_softc*));
183 void cmd_channel_map __P((struct pci_attach_args *,
184 struct pciide_softc *, int));
185 int cmd_pci_intr __P((void *));
186 void cmd646_9_irqack __P((struct channel_softc *));
187 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
188 void cmd680_setup_channel __P((struct channel_softc*));
189 void cmd680_channel_map __P((struct pci_attach_args *,
190 struct pciide_softc *, int));
191
192 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void cmd3112_setup_channel __P((struct channel_softc*));
194
195 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void cy693_setup_channel __P((struct channel_softc*));
197
198 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void sis_setup_channel __P((struct channel_softc*));
200 void sis96x_setup_channel __P((struct channel_softc*));
201 static int sis_hostbr_match __P(( struct pci_attach_args *));
202 static int sis_south_match __P(( struct pci_attach_args *));
203
204 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
205 void acer_setup_channel __P((struct channel_softc*));
206 int acer_pci_intr __P((void *));
207
208 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
209 void pdc202xx_setup_channel __P((struct channel_softc*));
210 void pdc20268_setup_channel __P((struct channel_softc*));
211 int pdc202xx_pci_intr __P((void *));
212 int pdc20265_pci_intr __P((void *));
213 static void pdc20262_dma_start __P((void*, int, int));
214 static int pdc20262_dma_finish __P((void*, int, int, int));
215
216 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
217 void opti_setup_channel __P((struct channel_softc*));
218
219 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
220 void hpt_setup_channel __P((struct channel_softc*));
221 int hpt_pci_intr __P((void *));
222
223 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
224 void acard_setup_channel __P((struct channel_softc*));
225 int acard_pci_intr __P((void *));
226
227 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
228 void serverworks_setup_channel __P((struct channel_softc*));
229 int serverworks_pci_intr __P((void *));
230
231 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
232 void sl82c105_setup_channel __P((struct channel_softc*));
233
234 void pciide_channel_dma_setup __P((struct pciide_channel *));
235 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
236 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
237 void pciide_dma_start __P((void*, int, int));
238 int pciide_dma_finish __P((void*, int, int, int));
239 void pciide_irqack __P((struct channel_softc *));
240 void pciide_print_modes __P((struct pciide_channel *));
241
242 void artisea_chip_map __P((struct pciide_softc*, struct pci_attach_args *));
243
244 struct pciide_product_desc {
245 u_int32_t ide_product;
246 int ide_flags;
247 const char *ide_name;
248 /* map and setup chip, probe drives */
249 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
250 };
251
252 /* Flags for ide_flags */
253 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
254 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
255
   256 /* Default product description for devices not known to this driver */
257 const struct pciide_product_desc default_product_desc = {
258 0,
259 0,
260 "Generic PCI IDE controller",
261 default_chip_map,
262 };
263
264 const struct pciide_product_desc pciide_intel_products[] = {
265 { PCI_PRODUCT_INTEL_82092AA,
266 0,
267 "Intel 82092AA IDE controller",
268 default_chip_map,
269 },
270 { PCI_PRODUCT_INTEL_82371FB_IDE,
271 0,
272 "Intel 82371FB IDE controller (PIIX)",
273 piix_chip_map,
274 },
275 { PCI_PRODUCT_INTEL_82371SB_IDE,
276 0,
277 "Intel 82371SB IDE Interface (PIIX3)",
278 piix_chip_map,
279 },
280 { PCI_PRODUCT_INTEL_82371AB_IDE,
281 0,
282 "Intel 82371AB IDE controller (PIIX4)",
283 piix_chip_map,
284 },
285 { PCI_PRODUCT_INTEL_82440MX_IDE,
286 0,
287 "Intel 82440MX IDE controller",
288 piix_chip_map
289 },
290 { PCI_PRODUCT_INTEL_82801AA_IDE,
291 0,
292 "Intel 82801AA IDE Controller (ICH)",
293 piix_chip_map,
294 },
295 { PCI_PRODUCT_INTEL_82801AB_IDE,
296 0,
297 "Intel 82801AB IDE Controller (ICH0)",
298 piix_chip_map,
299 },
300 { PCI_PRODUCT_INTEL_82801BA_IDE,
301 0,
302 "Intel 82801BA IDE Controller (ICH2)",
303 piix_chip_map,
304 },
305 { PCI_PRODUCT_INTEL_82801BAM_IDE,
306 0,
307 "Intel 82801BAM IDE Controller (ICH2-M)",
308 piix_chip_map,
309 },
310 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
311 0,
312 "Intel 82801CA IDE Controller (ICH3)",
313 piix_chip_map,
314 },
315 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
316 0,
317 "Intel 82801CA IDE Controller (ICH3)",
318 piix_chip_map,
319 },
320 { PCI_PRODUCT_INTEL_82801DB_IDE,
321 0,
322 "Intel 82801DB IDE Controller (ICH4)",
323 piix_chip_map,
324 },
325 { PCI_PRODUCT_INTEL_82801DBM_IDE,
326 0,
327 "Intel 82801DBM IDE Controller (ICH4-M)",
328 piix_chip_map,
329 },
330 { PCI_PRODUCT_INTEL_82801EB_IDE,
331 0,
332 "Intel 82801EB IDE Controller (ICH5)",
333 piix_chip_map,
334 },
335 { PCI_PRODUCT_INTEL_31244,
336 0,
337 "Intel 31244 Serial ATA Controller",
338 artisea_chip_map,
339 },
340 { 0,
341 0,
342 NULL,
343 NULL
344 }
345 };
346
347 const struct pciide_product_desc pciide_amd_products[] = {
348 { PCI_PRODUCT_AMD_PBC756_IDE,
349 0,
350 "Advanced Micro Devices AMD756 IDE Controller",
351 amd7x6_chip_map
352 },
353 { PCI_PRODUCT_AMD_PBC766_IDE,
354 0,
355 "Advanced Micro Devices AMD766 IDE Controller",
356 amd7x6_chip_map
357 },
358 { PCI_PRODUCT_AMD_PBC768_IDE,
359 0,
360 "Advanced Micro Devices AMD768 IDE Controller",
361 amd7x6_chip_map
362 },
363 { PCI_PRODUCT_AMD_PBC8111_IDE,
364 0,
365 "Advanced Micro Devices AMD8111 IDE Controller",
366 amd7x6_chip_map
367 },
368 { 0,
369 0,
370 NULL,
371 NULL
372 }
373 };
374
375 const struct pciide_product_desc pciide_nvidia_products[] = {
376 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
377 0,
378 "NVIDIA nForce IDE Controller",
379 amd7x6_chip_map
380 },
381 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
382 0,
383 "NVIDIA nForce2 IDE Controller",
384 amd7x6_chip_map
385 },
386 { 0,
387 0,
388 NULL,
389 NULL
390 }
391 };
392
393 const struct pciide_product_desc pciide_cmd_products[] = {
394 { PCI_PRODUCT_CMDTECH_640,
395 0,
396 "CMD Technology PCI0640",
397 cmd_chip_map
398 },
399 { PCI_PRODUCT_CMDTECH_643,
400 0,
401 "CMD Technology PCI0643",
402 cmd0643_9_chip_map,
403 },
404 { PCI_PRODUCT_CMDTECH_646,
405 0,
406 "CMD Technology PCI0646",
407 cmd0643_9_chip_map,
408 },
409 { PCI_PRODUCT_CMDTECH_648,
410 IDE_PCI_CLASS_OVERRIDE,
411 "CMD Technology PCI0648",
412 cmd0643_9_chip_map,
413 },
414 { PCI_PRODUCT_CMDTECH_649,
415 IDE_PCI_CLASS_OVERRIDE,
416 "CMD Technology PCI0649",
417 cmd0643_9_chip_map,
418 },
419 { PCI_PRODUCT_CMDTECH_680,
420 IDE_PCI_CLASS_OVERRIDE,
421 "Silicon Image 0680",
422 cmd680_chip_map,
423 },
424 { PCI_PRODUCT_CMDTECH_3112,
425 IDE_PCI_CLASS_OVERRIDE,
426 "Silicon Image SATALink 3112",
427 cmd3112_chip_map,
428 },
429 { 0,
430 0,
431 NULL,
432 NULL
433 }
434 };
435
436 const struct pciide_product_desc pciide_via_products[] = {
437 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
438 0,
439 NULL,
440 apollo_chip_map,
441 },
442 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
443 0,
444 NULL,
445 apollo_chip_map,
446 },
447 { 0,
448 0,
449 NULL,
450 NULL
451 }
452 };
453
454 const struct pciide_product_desc pciide_cypress_products[] = {
455 { PCI_PRODUCT_CONTAQ_82C693,
456 IDE_16BIT_IOSPACE,
457 "Cypress 82C693 IDE Controller",
458 cy693_chip_map,
459 },
460 { 0,
461 0,
462 NULL,
463 NULL
464 }
465 };
466
467 const struct pciide_product_desc pciide_sis_products[] = {
468 { PCI_PRODUCT_SIS_5597_IDE,
469 0,
470 NULL,
471 sis_chip_map,
472 },
473 { 0,
474 0,
475 NULL,
476 NULL
477 }
478 };
479
480 const struct pciide_product_desc pciide_acer_products[] = {
481 { PCI_PRODUCT_ALI_M5229,
482 0,
483 "Acer Labs M5229 UDMA IDE Controller",
484 acer_chip_map,
485 },
486 { 0,
487 0,
488 NULL,
489 NULL
490 }
491 };
492
493 const struct pciide_product_desc pciide_promise_products[] = {
494 { PCI_PRODUCT_PROMISE_ULTRA33,
495 IDE_PCI_CLASS_OVERRIDE,
496 "Promise Ultra33/ATA Bus Master IDE Accelerator",
497 pdc202xx_chip_map,
498 },
499 { PCI_PRODUCT_PROMISE_ULTRA66,
500 IDE_PCI_CLASS_OVERRIDE,
501 "Promise Ultra66/ATA Bus Master IDE Accelerator",
502 pdc202xx_chip_map,
503 },
504 { PCI_PRODUCT_PROMISE_ULTRA100,
505 IDE_PCI_CLASS_OVERRIDE,
506 "Promise Ultra100/ATA Bus Master IDE Accelerator",
507 pdc202xx_chip_map,
508 },
509 { PCI_PRODUCT_PROMISE_ULTRA100X,
510 IDE_PCI_CLASS_OVERRIDE,
511 "Promise Ultra100/ATA Bus Master IDE Accelerator",
512 pdc202xx_chip_map,
513 },
514 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
515 IDE_PCI_CLASS_OVERRIDE,
516 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
517 pdc202xx_chip_map,
518 },
519 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
520 IDE_PCI_CLASS_OVERRIDE,
521 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
522 pdc202xx_chip_map,
523 },
524 { PCI_PRODUCT_PROMISE_ULTRA133,
525 IDE_PCI_CLASS_OVERRIDE,
526 "Promise Ultra133/ATA Bus Master IDE Accelerator",
527 pdc202xx_chip_map,
528 },
529 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
530 IDE_PCI_CLASS_OVERRIDE,
531 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
532 pdc202xx_chip_map,
533 },
534 { PCI_PRODUCT_PROMISE_MBULTRA133,
535 IDE_PCI_CLASS_OVERRIDE,
536 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
537 pdc202xx_chip_map,
538 },
539 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
540 IDE_PCI_CLASS_OVERRIDE,
541 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
542 pdc202xx_chip_map,
543 },
544 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
545 IDE_PCI_CLASS_OVERRIDE,
546 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
547 pdc202xx_chip_map,
548 },
549 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
550 IDE_PCI_CLASS_OVERRIDE,
551 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
552 pdc202xx_chip_map,
553 },
554 { 0,
555 0,
556 NULL,
557 NULL
558 }
559 };
560
561 const struct pciide_product_desc pciide_opti_products[] = {
562 { PCI_PRODUCT_OPTI_82C621,
563 0,
564 "OPTi 82c621 PCI IDE controller",
565 opti_chip_map,
566 },
567 { PCI_PRODUCT_OPTI_82C568,
568 0,
569 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
570 opti_chip_map,
571 },
572 { PCI_PRODUCT_OPTI_82D568,
573 0,
574 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
575 opti_chip_map,
576 },
577 { 0,
578 0,
579 NULL,
580 NULL
581 }
582 };
583
584 const struct pciide_product_desc pciide_triones_products[] = {
585 { PCI_PRODUCT_TRIONES_HPT366,
586 IDE_PCI_CLASS_OVERRIDE,
587 NULL,
588 hpt_chip_map,
589 },
590 { PCI_PRODUCT_TRIONES_HPT372,
591 IDE_PCI_CLASS_OVERRIDE,
592 NULL,
593 hpt_chip_map
594 },
595 { PCI_PRODUCT_TRIONES_HPT374,
596 IDE_PCI_CLASS_OVERRIDE,
597 NULL,
598 hpt_chip_map
599 },
600 { 0,
601 0,
602 NULL,
603 NULL
604 }
605 };
606
607 const struct pciide_product_desc pciide_acard_products[] = {
608 { PCI_PRODUCT_ACARD_ATP850U,
609 IDE_PCI_CLASS_OVERRIDE,
610 "Acard ATP850U Ultra33 IDE Controller",
611 acard_chip_map,
612 },
613 { PCI_PRODUCT_ACARD_ATP860,
614 IDE_PCI_CLASS_OVERRIDE,
615 "Acard ATP860 Ultra66 IDE Controller",
616 acard_chip_map,
617 },
618 { PCI_PRODUCT_ACARD_ATP860A,
619 IDE_PCI_CLASS_OVERRIDE,
620 "Acard ATP860-A Ultra66 IDE Controller",
621 acard_chip_map,
622 },
623 { 0,
624 0,
625 NULL,
626 NULL
627 }
628 };
629
630 const struct pciide_product_desc pciide_serverworks_products[] = {
631 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
632 0,
633 "ServerWorks OSB4 IDE Controller",
634 serverworks_chip_map,
635 },
636 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
637 0,
638 "ServerWorks CSB5 IDE Controller",
639 serverworks_chip_map,
640 },
641 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
642 0,
643 "ServerWorks CSB6 RAID/IDE Controller",
644 serverworks_chip_map,
645 },
646 { 0,
647 0,
648 NULL,
649 }
650 };
651
652 const struct pciide_product_desc pciide_symphony_products[] = {
653 { PCI_PRODUCT_SYMPHONY_82C105,
654 0,
655 "Symphony Labs 82C105 IDE controller",
656 sl82c105_chip_map,
657 },
658 { 0,
659 0,
660 NULL,
661 }
662 };
663
664 const struct pciide_product_desc pciide_winbond_products[] = {
665 { PCI_PRODUCT_WINBOND_W83C553F_1,
666 0,
667 "Winbond W83C553F IDE controller",
668 sl82c105_chip_map,
669 },
670 { 0,
671 0,
672 NULL,
673 }
674 };
675
676 struct pciide_vendor_desc {
677 u_int32_t ide_vendor;
678 const struct pciide_product_desc *ide_products;
679 };
680
681 const struct pciide_vendor_desc pciide_vendors[] = {
682 { PCI_VENDOR_INTEL, pciide_intel_products },
683 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
684 { PCI_VENDOR_VIATECH, pciide_via_products },
685 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
686 { PCI_VENDOR_SIS, pciide_sis_products },
687 { PCI_VENDOR_ALI, pciide_acer_products },
688 { PCI_VENDOR_PROMISE, pciide_promise_products },
689 { PCI_VENDOR_AMD, pciide_amd_products },
690 { PCI_VENDOR_OPTI, pciide_opti_products },
691 { PCI_VENDOR_TRIONES, pciide_triones_products },
692 { PCI_VENDOR_ACARD, pciide_acard_products },
693 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
694 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
695 { PCI_VENDOR_WINBOND, pciide_winbond_products },
696 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
697 { 0, NULL }
698 };
699
700 /* options passed via the 'flags' config keyword */
701 #define PCIIDE_OPTIONS_DMA 0x01
702 #define PCIIDE_OPTIONS_NODMA 0x02
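/*
 * These bits are set with the "flags" keyword on the pciide line of the
 * kernel config file, e.g. (illustrative syntax)
 *	pciide* at pci? dev ? function ? flags 0x0002
 * to force bus-master DMA off; see the cf_flags checks below.
 */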
703
704 int pciide_match __P((struct device *, struct cfdata *, void *));
705 void pciide_attach __P((struct device *, struct device *, void *));
706
707 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
708 pciide_match, pciide_attach, NULL, NULL);
709
710 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
711 int pciide_mapregs_compat __P(( struct pci_attach_args *,
712 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
713 int pciide_mapregs_native __P((struct pci_attach_args *,
714 struct pciide_channel *, bus_size_t *, bus_size_t *,
715 int (*pci_intr) __P((void *))));
716 void pciide_mapreg_dma __P((struct pciide_softc *,
717 struct pci_attach_args *));
718 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
719 void pciide_mapchan __P((struct pci_attach_args *,
720 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
721 int (*pci_intr) __P((void *))));
722 int pciide_chan_candisable __P((struct pciide_channel *));
723 void pciide_map_compat_intr __P(( struct pci_attach_args *,
724 struct pciide_channel *, int, int));
725 int pciide_compat_intr __P((void *));
726 int pciide_pci_intr __P((void *));
727 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
728
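/*
 * Look up a PCI ID in the vendor/product tables above.  Returns the
 * matching product descriptor, or NULL if the vendor or product is not
 * known to this driver.
 */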
729 const struct pciide_product_desc *
730 pciide_lookup_product(id)
731 u_int32_t id;
732 {
733 const struct pciide_product_desc *pp;
734 const struct pciide_vendor_desc *vp;
735
736 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
737 if (PCI_VENDOR(id) == vp->ide_vendor)
738 break;
739
740 if ((pp = vp->ide_products) == NULL)
741 return NULL;
742
743 for (; pp->chip_map != NULL; pp++)
744 if (PCI_PRODUCT(id) == pp->ide_product)
745 break;
746
747 if (pp->chip_map == NULL)
748 return NULL;
749 return pp;
750 }
751
752 int
753 pciide_match(parent, match, aux)
754 struct device *parent;
755 struct cfdata *match;
756 void *aux;
757 {
758 struct pci_attach_args *pa = aux;
759 const struct pciide_product_desc *pp;
760
761 /*
762 * Check the ID register to see that it's a PCI IDE controller.
763 * If it is, we assume that we can deal with it; it _should_
764 * work in a standardized way...
765 */
766 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
767 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
768 return (1);
769 }
770
771 /*
   772 	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
   773 	 * controllers. Let's see if we can deal with them anyway.
774 */
775 pp = pciide_lookup_product(pa->pa_id);
776 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
777 return (1);
778 }
779
780 return (0);
781 }
782
783 void
784 pciide_attach(parent, self, aux)
785 struct device *parent, *self;
786 void *aux;
787 {
788 struct pci_attach_args *pa = aux;
789 pci_chipset_tag_t pc = pa->pa_pc;
790 pcitag_t tag = pa->pa_tag;
791 struct pciide_softc *sc = (struct pciide_softc *)self;
792 pcireg_t csr;
793 char devinfo[256];
794 const char *displaydev;
795
796 aprint_naive(": disk controller\n");
797
798 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
799 sc->sc_pp = pciide_lookup_product(pa->pa_id);
800 if (sc->sc_pp == NULL) {
801 sc->sc_pp = &default_product_desc;
802 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
803 displaydev = devinfo;
804 } else
805 displaydev = sc->sc_pp->ide_name;
806
807 /* if displaydev == NULL, printf is done in chip-specific map */
808 if (displaydev)
809 aprint_normal(": %s (rev. 0x%02x)\n", displaydev,
810 PCI_REVISION(pa->pa_class));
811
812 sc->sc_pc = pa->pa_pc;
813 sc->sc_tag = pa->pa_tag;
814
815 /* Set up DMA defaults; these might be adjusted by chip_map. */
816 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
817 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
818
819 #ifdef WDCDEBUG
820 if (wdcdebug_pciide_mask & DEBUG_PROBE)
821 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
822 #endif
823 sc->sc_pp->chip_map(sc, pa);
824
825 if (sc->sc_dma_ok) {
826 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
827 csr |= PCI_COMMAND_MASTER_ENABLE;
828 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
829 }
830 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
831 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
832 }
833
834 /* tell whether the chip is enabled or not */
835 int
836 pciide_chipen(sc, pa)
837 struct pciide_softc *sc;
838 struct pci_attach_args *pa;
839 {
840 pcireg_t csr;
841 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
842 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
843 PCI_COMMAND_STATUS_REG);
844 aprint_normal("%s: device disabled (at %s)\n",
845 sc->sc_wdcdev.sc_dev.dv_xname,
846 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
847 "device" : "bridge");
848 return 0;
849 }
850 return 1;
851 }
852
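/*
 * Map the command and control registers of a compatibility-mode channel
 * at the legacy ISA addresses.  Returns 1 on success, 0 on failure.
 */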
853 int
854 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
855 struct pci_attach_args *pa;
856 struct pciide_channel *cp;
857 int compatchan;
858 bus_size_t *cmdsizep, *ctlsizep;
859 {
860 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
861 struct channel_softc *wdc_cp = &cp->wdc_channel;
862
863 cp->compat = 1;
864 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
865 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
866
867 wdc_cp->cmd_iot = pa->pa_iot;
868 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
869 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
870 aprint_error("%s: couldn't map %s channel cmd regs\n",
871 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
872 return (0);
873 }
874
875 wdc_cp->ctl_iot = pa->pa_iot;
876 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
877 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
878 aprint_error("%s: couldn't map %s channel ctl regs\n",
879 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
880 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
881 PCIIDE_COMPAT_CMD_SIZE);
882 return (0);
883 }
884
885 return (1);
886 }
887
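/*
 * Map the command and control registers of a native-PCI channel from the
 * channel's PCI BARs, and establish the shared native-PCI interrupt on
 * first use.  Returns 1 on success, 0 on failure.
 */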
888 int
889 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
890 struct pci_attach_args * pa;
891 struct pciide_channel *cp;
892 bus_size_t *cmdsizep, *ctlsizep;
893 int (*pci_intr) __P((void *));
894 {
895 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
896 struct channel_softc *wdc_cp = &cp->wdc_channel;
897 const char *intrstr;
898 pci_intr_handle_t intrhandle;
899
900 cp->compat = 0;
901
902 if (sc->sc_pci_ih == NULL) {
903 if (pci_intr_map(pa, &intrhandle) != 0) {
904 aprint_error("%s: couldn't map native-PCI interrupt\n",
905 sc->sc_wdcdev.sc_dev.dv_xname);
906 return 0;
907 }
908 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
909 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
910 intrhandle, IPL_BIO, pci_intr, sc);
911 if (sc->sc_pci_ih != NULL) {
912 aprint_normal("%s: using %s for native-PCI interrupt\n",
913 sc->sc_wdcdev.sc_dev.dv_xname,
914 intrstr ? intrstr : "unknown interrupt");
915 } else {
916 aprint_error(
917 "%s: couldn't establish native-PCI interrupt",
918 sc->sc_wdcdev.sc_dev.dv_xname);
919 if (intrstr != NULL)
920 aprint_normal(" at %s", intrstr);
921 aprint_normal("\n");
922 return 0;
923 }
924 }
925 cp->ih = sc->sc_pci_ih;
926 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
927 PCI_MAPREG_TYPE_IO, 0,
928 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
929 aprint_error("%s: couldn't map %s channel cmd regs\n",
930 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
931 return 0;
932 }
933
934 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
935 PCI_MAPREG_TYPE_IO, 0,
936 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
937 aprint_error("%s: couldn't map %s channel ctl regs\n",
938 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
939 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
940 return 0;
941 }
942 /*
   943 	 * In native mode, 4 bytes of I/O space are mapped for the control
   944 	 * register; the control register itself is at offset 2. Pass the generic
   945 	 * code a handle for only one byte at the right offset.
946 */
947 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
948 &wdc_cp->ctl_ioh) != 0) {
949 aprint_error("%s: unable to subregion %s channel ctl regs\n",
950 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
951 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
   952 		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
953 return 0;
954 }
955 return (1);
956 }
957
958 void
959 pciide_mapreg_dma(sc, pa)
960 struct pciide_softc *sc;
961 struct pci_attach_args *pa;
962 {
963 pcireg_t maptype;
964 bus_addr_t addr;
965
966 /*
967 * Map DMA registers
968 *
969 * Note that sc_dma_ok is the right variable to test to see if
970 * DMA can be done. If the interface doesn't support DMA,
971 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
972 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
973 * non-zero if the interface supports DMA and the registers
974 * could be mapped.
975 *
976 * XXX Note that despite the fact that the Bus Master IDE specs
977 * XXX say that "The bus master IDE function uses 16 bytes of IO
978 * XXX space," some controllers (at least the United
979 * XXX Microelectronics UM8886BF) place it in memory space.
980 */
981 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
982 PCIIDE_REG_BUS_MASTER_DMA);
983
984 switch (maptype) {
985 case PCI_MAPREG_TYPE_IO:
986 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
987 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
988 &addr, NULL, NULL) == 0);
989 if (sc->sc_dma_ok == 0) {
990 aprint_normal(
991 ", but unused (couldn't query registers)");
992 break;
993 }
994 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
995 && addr >= 0x10000) {
996 sc->sc_dma_ok = 0;
997 aprint_normal(
998 ", but unused (registers at unsafe address "
999 "%#lx)", (unsigned long)addr);
1000 break;
1001 }
1002 /* FALLTHROUGH */
1003
1004 case PCI_MAPREG_MEM_TYPE_32BIT:
1005 sc->sc_dma_ok = (pci_mapreg_map(pa,
1006 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
1007 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
1008 sc->sc_dmat = pa->pa_dmat;
1009 if (sc->sc_dma_ok == 0) {
1010 aprint_normal(", but unused (couldn't map registers)");
1011 } else {
1012 sc->sc_wdcdev.dma_arg = sc;
1013 sc->sc_wdcdev.dma_init = pciide_dma_init;
1014 sc->sc_wdcdev.dma_start = pciide_dma_start;
1015 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
1016 }
1017
1018 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1019 PCIIDE_OPTIONS_NODMA) {
1020 aprint_normal(
1021 ", but unused (forced off by config file)");
1022 sc->sc_dma_ok = 0;
1023 }
1024 break;
1025
1026 default:
1027 sc->sc_dma_ok = 0;
1028 aprint_normal(
1029 ", but unsupported register maptype (0x%x)", maptype);
1030 }
1031 }
1032
1033 int
1034 pciide_compat_intr(arg)
1035 void *arg;
1036 {
1037 struct pciide_channel *cp = arg;
1038
1039 #ifdef DIAGNOSTIC
1040 /* should only be called for a compat channel */
1041 if (cp->compat == 0)
1042 panic("pciide compat intr called for non-compat chan %p", cp);
1043 #endif
1044 return (wdcintr(&cp->wdc_channel));
1045 }
1046
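/*
 * Native-PCI interrupt handler, shared by all channels of a controller:
 * dispatch to wdcintr() for each native channel that is waiting for an
 * interrupt, and report whether any of them claimed it.
 */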
1047 int
1048 pciide_pci_intr(arg)
1049 void *arg;
1050 {
1051 struct pciide_softc *sc = arg;
1052 struct pciide_channel *cp;
1053 struct channel_softc *wdc_cp;
1054 int i, rv, crv;
1055
1056 rv = 0;
1057 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1058 cp = &sc->pciide_channels[i];
1059 wdc_cp = &cp->wdc_channel;
1060
  1061 		/* If it's a compat channel, skip it. */
1062 if (cp->compat)
1063 continue;
  1064 		/* if this channel isn't waiting for an interrupt, skip it */
1065 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1066 continue;
1067
1068 crv = wdcintr(wdc_cp);
1069 if (crv == 0)
1070 ; /* leave rv alone */
1071 else if (crv == 1)
1072 rv = 1; /* claim the intr */
1073 else if (rv == 0) /* crv should be -1 in this case */
1074 rv = crv; /* if we've done no better, take it */
1075 }
1076 return (rv);
1077 }
1078
1079 void
1080 pciide_channel_dma_setup(cp)
1081 struct pciide_channel *cp;
1082 {
1083 int drive;
1084 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1085 struct ata_drive_datas *drvp;
1086
1087 for (drive = 0; drive < 2; drive++) {
1088 drvp = &cp->wdc_channel.ch_drive[drive];
1089 /* If no drive, skip */
1090 if ((drvp->drive_flags & DRIVE) == 0)
1091 continue;
1092 /* setup DMA if needed */
1093 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1094 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1095 sc->sc_dma_ok == 0) {
1096 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1097 continue;
1098 }
1099 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1100 != 0) {
1101 /* Abort DMA setup */
1102 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1103 continue;
1104 }
1105 }
1106 }
1107
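/*
 * Allocate, map and load the descriptor table for one drive, and create
 * the DMA map later used for the data transfers themselves.  Called once
 * per drive; subsequent calls return immediately.
 */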
1108 int
1109 pciide_dma_table_setup(sc, channel, drive)
1110 struct pciide_softc *sc;
1111 int channel, drive;
1112 {
1113 bus_dma_segment_t seg;
1114 int error, rseg;
1115 const bus_size_t dma_table_size =
1116 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1117 struct pciide_dma_maps *dma_maps =
1118 &sc->pciide_channels[channel].dma_maps[drive];
1119
1120 /* If table was already allocated, just return */
1121 if (dma_maps->dma_table)
1122 return 0;
1123
1124 /* Allocate memory for the DMA tables and map it */
1125 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1126 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1127 BUS_DMA_NOWAIT)) != 0) {
1128 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1129 "allocate", drive, error);
1130 return error;
1131 }
1132 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1133 dma_table_size,
1134 (caddr_t *)&dma_maps->dma_table,
1135 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1136 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1137 "map", drive, error);
1138 return error;
1139 }
1140 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1141 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1142 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1143 /* Create and load table DMA map for this disk */
1144 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1145 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1146 &dma_maps->dmamap_table)) != 0) {
1147 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1148 "create", drive, error);
1149 return error;
1150 }
1151 if ((error = bus_dmamap_load(sc->sc_dmat,
1152 dma_maps->dmamap_table,
1153 dma_maps->dma_table,
1154 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1155 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1156 "load", drive, error);
1157 return error;
1158 }
1159 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1160 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1161 DEBUG_PROBE);
  1162 	/* Create an xfer DMA map for this drive */
1163 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1164 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1165 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1166 &dma_maps->dmamap_xfer)) != 0) {
1167 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1168 "create xfer", drive, error);
1169 return error;
1170 }
1171 return 0;
1172 }
1173
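/*
 * Prepare a DMA transfer: load the xfer map for the data buffer, fill in
 * the physical region descriptor table (marking the last entry with EOT),
 * and program the bus-master DMA registers with the table address and
 * transfer direction.  The transfer itself is started by pciide_dma_start().
 */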
1174 int
1175 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1176 void *v;
1177 int channel, drive;
1178 void *databuf;
1179 size_t datalen;
1180 int flags;
1181 {
1182 struct pciide_softc *sc = v;
1183 int error, seg;
1184 struct pciide_dma_maps *dma_maps =
1185 &sc->pciide_channels[channel].dma_maps[drive];
1186
1187 error = bus_dmamap_load(sc->sc_dmat,
1188 dma_maps->dmamap_xfer,
1189 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1190 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1191 if (error) {
1192 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1193 "load xfer", drive, error);
1194 return error;
1195 }
1196
1197 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1198 dma_maps->dmamap_xfer->dm_mapsize,
1199 (flags & WDC_DMA_READ) ?
1200 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1201
1202 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1203 #ifdef DIAGNOSTIC
1204 /* A segment must not cross a 64k boundary */
1205 {
1206 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1207 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1208 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1209 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1210 printf("pciide_dma: segment %d physical addr 0x%lx"
1211 " len 0x%lx not properly aligned\n",
1212 seg, phys, len);
1213 panic("pciide_dma: buf align");
1214 }
1215 }
1216 #endif
1217 dma_maps->dma_table[seg].base_addr =
1218 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1219 dma_maps->dma_table[seg].byte_count =
1220 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1221 IDEDMA_BYTE_COUNT_MASK);
1222 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1223 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1224 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1225
1226 }
1227 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1228 htole32(IDEDMA_BYTE_COUNT_EOT);
1229
1230 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1231 dma_maps->dmamap_table->dm_mapsize,
1232 BUS_DMASYNC_PREWRITE);
1233
  1234 	/* Maps are ready; set up the bus-master DMA registers. */
1235 #ifdef DIAGNOSTIC
1236 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1237 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1238 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1239 panic("pciide_dma_init: table align");
1240 }
1241 #endif
1242
1243 /* Clear status bits */
1244 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1245 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1246 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1247 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1248 /* Write table addr */
1249 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1250 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1251 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1252 /* set read/write */
1253 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1254 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1255 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1256 /* remember flags */
1257 dma_maps->dma_flags = flags;
1258 return 0;
1259 }
1260
1261 void
1262 pciide_dma_start(v, channel, drive)
1263 void *v;
1264 int channel, drive;
1265 {
1266 struct pciide_softc *sc = v;
1267
1268 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1269 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1270 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1271 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1272 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1273 }
1274
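/*
 * Complete (or abort, if 'force' is set) a DMA transfer: stop the DMA
 * engine, unload the xfer map, and translate the controller status into
 * WDC_DMAST_* flags for the caller.
 */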
1275 int
1276 pciide_dma_finish(v, channel, drive, force)
1277 void *v;
1278 int channel, drive;
1279 int force;
1280 {
1281 struct pciide_softc *sc = v;
1282 u_int8_t status;
1283 int error = 0;
1284 struct pciide_dma_maps *dma_maps =
1285 &sc->pciide_channels[channel].dma_maps[drive];
1286
1287 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1288 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1289 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1290 DEBUG_XFERS);
1291
1292 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1293 return WDC_DMAST_NOIRQ;
1294
1295 /* stop DMA channel */
1296 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1297 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1298 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1299 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1300
1301 /* Unload the map of the data buffer */
1302 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1303 dma_maps->dmamap_xfer->dm_mapsize,
1304 (dma_maps->dma_flags & WDC_DMA_READ) ?
1305 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1306 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1307
1308 if ((status & IDEDMA_CTL_ERR) != 0) {
1309 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1310 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1311 error |= WDC_DMAST_ERR;
1312 }
1313
1314 if ((status & IDEDMA_CTL_INTR) == 0) {
1315 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1316 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1317 drive, status);
1318 error |= WDC_DMAST_NOIRQ;
1319 }
1320
1321 if ((status & IDEDMA_CTL_ACT) != 0) {
1322 /* data underrun, may be a valid condition for ATAPI */
1323 error |= WDC_DMAST_UNDER;
1324 }
1325 return error;
1326 }
1327
1328 void
1329 pciide_irqack(chp)
1330 struct channel_softc *chp;
1331 {
1332 struct pciide_channel *cp = (struct pciide_channel*)chp;
1333 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1334
1335 /* clear status bits in IDE DMA registers */
1336 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1337 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1338 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1339 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1340 }
1341
1342 /* some common code used by several chip_map */
1343 int
1344 pciide_chansetup(sc, channel, interface)
1345 struct pciide_softc *sc;
1346 int channel;
1347 pcireg_t interface;
1348 {
1349 struct pciide_channel *cp = &sc->pciide_channels[channel];
1350 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1351 cp->name = PCIIDE_CHANNEL_NAME(channel);
1352 cp->wdc_channel.channel = channel;
1353 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1354 cp->wdc_channel.ch_queue =
1355 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1356 if (cp->wdc_channel.ch_queue == NULL) {
1357 aprint_error("%s %s channel: "
1358 "can't allocate memory for command queue",
1359 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1360 return 0;
1361 }
1362 aprint_normal("%s: %s channel %s to %s mode\n",
1363 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1364 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1365 "configured" : "wired",
1366 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1367 "native-PCI" : "compatibility");
1368 return 1;
1369 }
1370
1371 /* some common code used by several chip channel_map */
1372 void
1373 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1374 struct pci_attach_args *pa;
1375 struct pciide_channel *cp;
1376 pcireg_t interface;
1377 bus_size_t *cmdsizep, *ctlsizep;
1378 int (*pci_intr) __P((void *));
1379 {
1380 struct channel_softc *wdc_cp = &cp->wdc_channel;
1381
1382 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1383 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1384 pci_intr);
1385 else
1386 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1387 wdc_cp->channel, cmdsizep, ctlsizep);
1388
1389 if (cp->hw_ok == 0)
1390 return;
1391 wdc_cp->data32iot = wdc_cp->cmd_iot;
1392 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1393 wdcattach(wdc_cp);
1394 }
1395
1396 /*
  1397  * Generic code to determine whether a channel can be disabled. Returns 1
  1398  * if the channel can be disabled, 0 if not.
1399 */
1400 int
1401 pciide_chan_candisable(cp)
1402 struct pciide_channel *cp;
1403 {
1404 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1405 struct channel_softc *wdc_cp = &cp->wdc_channel;
1406
1407 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1408 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1409 aprint_normal("%s: disabling %s channel (no drives)\n",
1410 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1411 cp->hw_ok = 0;
1412 return 1;
1413 }
1414 return 0;
1415 }
1416
1417 /*
  1418  * Generic code to map the compat interrupt if hw_ok=1 and it is a compat
  1419  * channel. Sets hw_ok=0 on failure.
1420 */
1421 void
1422 pciide_map_compat_intr(pa, cp, compatchan, interface)
1423 struct pci_attach_args *pa;
1424 struct pciide_channel *cp;
1425 int compatchan, interface;
1426 {
1427 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1428 struct channel_softc *wdc_cp = &cp->wdc_channel;
1429
1430 if (cp->hw_ok == 0)
1431 return;
1432 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1433 return;
1434
1435 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1436 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1437 pa, compatchan, pciide_compat_intr, cp);
1438 if (cp->ih == NULL) {
1439 #endif
1440 aprint_error("%s: no compatibility interrupt for use by %s "
1441 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1442 cp->hw_ok = 0;
1443 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1444 }
1445 #endif
1446 }
1447
1448 void
1449 pciide_print_modes(cp)
1450 struct pciide_channel *cp;
1451 {
1452 wdc_print_modes(&cp->wdc_channel);
1453 }
1454
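/*
 * Chip mapping for controllers without chip-specific support: map the
 * channels, probe for drives, and use bus-master DMA only if explicitly
 * enabled in the config file.
 */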
1455 void
1456 default_chip_map(sc, pa)
1457 struct pciide_softc *sc;
1458 struct pci_attach_args *pa;
1459 {
1460 struct pciide_channel *cp;
1461 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1462 pcireg_t csr;
1463 int channel, drive;
1464 struct ata_drive_datas *drvp;
1465 u_int8_t idedma_ctl;
1466 bus_size_t cmdsize, ctlsize;
1467 char *failreason;
1468
1469 if (pciide_chipen(sc, pa) == 0)
1470 return;
1471
1472 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1473 aprint_normal("%s: bus-master DMA support present",
1474 sc->sc_wdcdev.sc_dev.dv_xname);
1475 if (sc->sc_pp == &default_product_desc &&
1476 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1477 PCIIDE_OPTIONS_DMA) == 0) {
1478 aprint_normal(", but unused (no driver support)");
1479 sc->sc_dma_ok = 0;
1480 } else {
1481 pciide_mapreg_dma(sc, pa);
1482 if (sc->sc_dma_ok != 0)
1483 aprint_normal(", used without full driver "
1484 "support");
1485 }
1486 } else {
1487 aprint_normal("%s: hardware does not support DMA",
1488 sc->sc_wdcdev.sc_dev.dv_xname);
1489 sc->sc_dma_ok = 0;
1490 }
1491 aprint_normal("\n");
1492 if (sc->sc_dma_ok) {
1493 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1494 sc->sc_wdcdev.irqack = pciide_irqack;
1495 }
1496 sc->sc_wdcdev.PIO_cap = 0;
1497 sc->sc_wdcdev.DMA_cap = 0;
1498
1499 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1500 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1501 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1502
1503 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1504 cp = &sc->pciide_channels[channel];
1505 if (pciide_chansetup(sc, channel, interface) == 0)
1506 continue;
1507 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1508 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1509 &ctlsize, pciide_pci_intr);
1510 } else {
1511 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1512 channel, &cmdsize, &ctlsize);
1513 }
1514 if (cp->hw_ok == 0)
1515 continue;
1516 /*
1517 * Check to see if something appears to be there.
1518 */
1519 failreason = NULL;
1520 if (!wdcprobe(&cp->wdc_channel)) {
1521 failreason = "not responding; disabled or no drives?";
1522 goto next;
1523 }
1524 /*
1525 * Now, make sure it's actually attributable to this PCI IDE
1526 * channel by trying to access the channel again while the
1527 * PCI IDE controller's I/O space is disabled. (If the
1528 * channel no longer appears to be there, it belongs to
1529 * this controller.) YUCK!
1530 */
1531 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1532 PCI_COMMAND_STATUS_REG);
1533 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1534 csr & ~PCI_COMMAND_IO_ENABLE);
1535 if (wdcprobe(&cp->wdc_channel))
1536 failreason = "other hardware responding at addresses";
1537 pci_conf_write(sc->sc_pc, sc->sc_tag,
1538 PCI_COMMAND_STATUS_REG, csr);
1539 next:
1540 if (failreason) {
1541 aprint_error("%s: %s channel ignored (%s)\n",
1542 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1543 failreason);
1544 cp->hw_ok = 0;
1545 bus_space_unmap(cp->wdc_channel.cmd_iot,
1546 cp->wdc_channel.cmd_ioh, cmdsize);
1547 if (interface & PCIIDE_INTERFACE_PCI(channel))
1548 bus_space_unmap(cp->wdc_channel.ctl_iot,
1549 cp->ctl_baseioh, ctlsize);
1550 else
1551 bus_space_unmap(cp->wdc_channel.ctl_iot,
1552 cp->wdc_channel.ctl_ioh, ctlsize);
1553 } else {
1554 pciide_map_compat_intr(pa, cp, channel, interface);
1555 }
1556 if (cp->hw_ok) {
1557 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1558 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1559 wdcattach(&cp->wdc_channel);
1560 }
1561 }
1562
1563 if (sc->sc_dma_ok == 0)
1564 return;
1565
1566 /* Allocate DMA maps */
1567 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1568 idedma_ctl = 0;
1569 cp = &sc->pciide_channels[channel];
1570 for (drive = 0; drive < 2; drive++) {
1571 drvp = &cp->wdc_channel.ch_drive[drive];
1572 /* If no drive, skip */
1573 if ((drvp->drive_flags & DRIVE) == 0)
1574 continue;
1575 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1576 continue;
1577 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1578 /* Abort DMA setup */
1579 aprint_error(
1580 "%s:%d:%d: can't allocate DMA maps, "
1581 "using PIO transfers\n",
1582 sc->sc_wdcdev.sc_dev.dv_xname,
1583 channel, drive);
1584 drvp->drive_flags &= ~DRIVE_DMA;
1585 }
1586 aprint_normal("%s:%d:%d: using DMA data transfers\n",
1587 sc->sc_wdcdev.sc_dev.dv_xname,
1588 channel, drive);
1589 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1590 }
1591 if (idedma_ctl != 0) {
1592 /* Add software bits in status register */
1593 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1594 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1595 idedma_ctl);
1596 }
1597 }
1598 }
1599
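/*
 * Per-channel setup shared by the S-ATA chip maps: no timing registers to
 * program, just enable bus-master DMA for drives that negotiated (U)DMA.
 */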
1600 void
1601 sata_setup_channel(chp)
1602 struct channel_softc *chp;
1603 {
1604 struct ata_drive_datas *drvp;
1605 int drive;
1606 u_int32_t idedma_ctl;
1607 struct pciide_channel *cp = (struct pciide_channel*)chp;
1608 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
1609
1610 /* setup DMA if needed */
1611 pciide_channel_dma_setup(cp);
1612
1613 idedma_ctl = 0;
1614
1615 for (drive = 0; drive < 2; drive++) {
1616 drvp = &chp->ch_drive[drive];
1617 /* If no drive, skip */
1618 if ((drvp->drive_flags & DRIVE) == 0)
1619 continue;
1620 if (drvp->drive_flags & DRIVE_UDMA) {
1621 /* use Ultra/DMA */
1622 drvp->drive_flags &= ~DRIVE_DMA;
1623 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1624 } else if (drvp->drive_flags & DRIVE_DMA) {
1625 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1626 }
1627 }
1628
1629 /*
  1630 	 * Nothing to do to set up modes; they are meaningless in S-ATA
  1631 	 * (but many S-ATA drives still want to receive the SET_FEATURES
  1632 	 * command).
1633 */
1634 if (idedma_ctl != 0) {
1635 /* Add software bits in status register */
1636 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1637 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1638 idedma_ctl);
1639 }
1640 pciide_print_modes(cp);
1641 }
1642
1643 void
1644 piix_chip_map(sc, pa)
1645 struct pciide_softc *sc;
1646 struct pci_attach_args *pa;
1647 {
1648 struct pciide_channel *cp;
1649 int channel;
1650 u_int32_t idetim;
1651 bus_size_t cmdsize, ctlsize;
1652
1653 if (pciide_chipen(sc, pa) == 0)
1654 return;
1655
1656 aprint_normal("%s: bus-master DMA support present",
1657 sc->sc_wdcdev.sc_dev.dv_xname);
1658 pciide_mapreg_dma(sc, pa);
1659 aprint_normal("\n");
1660 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1661 WDC_CAPABILITY_MODE;
1662 if (sc->sc_dma_ok) {
1663 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1664 sc->sc_wdcdev.irqack = pciide_irqack;
1665 switch(sc->sc_pp->ide_product) {
1666 case PCI_PRODUCT_INTEL_82371AB_IDE:
1667 case PCI_PRODUCT_INTEL_82440MX_IDE:
1668 case PCI_PRODUCT_INTEL_82801AA_IDE:
1669 case PCI_PRODUCT_INTEL_82801AB_IDE:
1670 case PCI_PRODUCT_INTEL_82801BA_IDE:
1671 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1672 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1673 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1674 case PCI_PRODUCT_INTEL_82801DB_IDE:
1675 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1676 case PCI_PRODUCT_INTEL_82801EB_IDE:
1677 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1678 }
1679 }
1680 sc->sc_wdcdev.PIO_cap = 4;
1681 sc->sc_wdcdev.DMA_cap = 2;
1682 switch(sc->sc_pp->ide_product) {
1683 case PCI_PRODUCT_INTEL_82801AA_IDE:
1684 sc->sc_wdcdev.UDMA_cap = 4;
1685 break;
1686 case PCI_PRODUCT_INTEL_82801BA_IDE:
1687 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1688 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1689 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1690 case PCI_PRODUCT_INTEL_82801DB_IDE:
1691 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1692 case PCI_PRODUCT_INTEL_82801EB_IDE:
1693 sc->sc_wdcdev.UDMA_cap = 5;
1694 break;
1695 default:
1696 sc->sc_wdcdev.UDMA_cap = 2;
1697 }
1698 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1699 sc->sc_wdcdev.set_modes = piix_setup_channel;
1700 else
1701 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1702 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1703 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1704
1705 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1706 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1707 DEBUG_PROBE);
1708 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1709 WDCDEBUG_PRINT((", sidetim=0x%x",
1710 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1711 DEBUG_PROBE);
1712 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
  1713 			WDCDEBUG_PRINT((", udmareg 0x%x",
1714 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1715 DEBUG_PROBE);
1716 }
1717 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1718 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1719 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1720 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1721 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1722 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1723 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1724 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1725 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1726 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1727 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1728 DEBUG_PROBE);
1729 }
1730
1731 }
1732 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1733
1734 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1735 cp = &sc->pciide_channels[channel];
1736 /* PIIX is compat-only */
1737 if (pciide_chansetup(sc, channel, 0) == 0)
1738 continue;
1739 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1740 if ((PIIX_IDETIM_READ(idetim, channel) &
1741 PIIX_IDETIM_IDE) == 0) {
1742 aprint_normal("%s: %s channel ignored (disabled)\n",
1743 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1744 continue;
1745 }
  1746 		/* PIIX controllers are compat-only pciide devices */
1747 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1748 if (cp->hw_ok == 0)
1749 continue;
1750 if (pciide_chan_candisable(cp)) {
1751 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1752 channel);
1753 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1754 idetim);
1755 }
1756 pciide_map_compat_intr(pa, cp, channel, 0);
1757 if (cp->hw_ok == 0)
1758 continue;
1759 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1760 }
1761
1762 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1763 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1764 DEBUG_PROBE);
1765 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1766 WDCDEBUG_PRINT((", sidetim=0x%x",
1767 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1768 DEBUG_PROBE);
1769 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
  1770 			WDCDEBUG_PRINT((", udmareg 0x%x",
1771 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1772 DEBUG_PROBE);
1773 }
1774 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1775 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1776 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1777 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1778 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1779 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1780 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1781 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1782 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1783 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1784 DEBUG_PROBE);
1785 }
1786 }
1787 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1788 }
1789
1790 void
1791 piix_setup_channel(chp)
1792 struct channel_softc *chp;
1793 {
1794 u_int8_t mode[2], drive;
1795 u_int32_t oidetim, idetim, idedma_ctl;
1796 struct pciide_channel *cp = (struct pciide_channel*)chp;
1797 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1798 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1799
1800 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1801 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1802 idedma_ctl = 0;
1803
1804 /* set up new idetim: Enable IDE registers decode */
1805 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1806 chp->channel);
1807
1808 /* setup DMA */
1809 pciide_channel_dma_setup(cp);
1810
1811 /*
1812 * Here we have to mess with the drives' modes: the PIIX can't have
1813 * different timings for the master and slave drives on a channel.
1814 * We need to find the best combination.
1815 */
1816
1817 /* If both drives support DMA, take the lower mode */
1818 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1819 (drvp[1].drive_flags & DRIVE_DMA)) {
1820 mode[0] = mode[1] =
1821 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1822 drvp[0].DMA_mode = mode[0];
1823 drvp[1].DMA_mode = mode[1];
1824 goto ok;
1825 }
1826 /*
1827 * If only one drive supports DMA, use its mode, and
1828 * put the other one in PIO mode 0 if its mode is not compatible
1829 */
1830 if (drvp[0].drive_flags & DRIVE_DMA) {
1831 mode[0] = drvp[0].DMA_mode;
1832 mode[1] = drvp[1].PIO_mode;
1833 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1834 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1835 mode[1] = drvp[1].PIO_mode = 0;
1836 goto ok;
1837 }
1838 if (drvp[1].drive_flags & DRIVE_DMA) {
1839 mode[1] = drvp[1].DMA_mode;
1840 mode[0] = drvp[0].PIO_mode;
1841 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1842 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1843 mode[0] = drvp[0].PIO_mode = 0;
1844 goto ok;
1845 }
1846 /*
1847 * If neither drive uses DMA, take the lower mode, unless
1848 * one of them is below PIO mode 2
1849 */
1850 if (drvp[0].PIO_mode < 2) {
1851 mode[0] = drvp[0].PIO_mode = 0;
1852 mode[1] = drvp[1].PIO_mode;
1853 } else if (drvp[1].PIO_mode < 2) {
1854 mode[1] = drvp[1].PIO_mode = 0;
1855 mode[0] = drvp[0].PIO_mode;
1856 } else {
1857 mode[0] = mode[1] =
1858 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1859 drvp[0].PIO_mode = mode[0];
1860 drvp[1].PIO_mode = mode[1];
1861 }
1862 ok: /* The modes are set up */
1863 for (drive = 0; drive < 2; drive++) {
1864 if (drvp[drive].drive_flags & DRIVE_DMA) {
1865 idetim |= piix_setup_idetim_timings(
1866 mode[drive], 1, chp->channel);
1867 goto end;
1868 }
1869 }
1870 /* If we get here, neither drive is using DMA */
1871 if (mode[0] >= 2)
1872 idetim |= piix_setup_idetim_timings(
1873 mode[0], 0, chp->channel);
1874 else
1875 idetim |= piix_setup_idetim_timings(
1876 mode[1], 0, chp->channel);
1877 end: /*
1878 * The timing mode is now set up in the controller. Enable
1879 * it per-drive.
1880 */
1881 for (drive = 0; drive < 2; drive++) {
1882 /* If no drive, skip */
1883 if ((drvp[drive].drive_flags & DRIVE) == 0)
1884 continue;
1885 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1886 if (drvp[drive].drive_flags & DRIVE_DMA)
1887 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1888 }
1889 if (idedma_ctl != 0) {
1890 /* Add software bits in status register */
1891 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1892 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1893 idedma_ctl);
1894 }
1895 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1896 pciide_print_modes(cp);
1897 }
1898
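/*
 * Per-channel mode setup for the PIIX3/PIIX4 and ICH variants.  These
 * parts add a slave timing register (SIDETIM, enabled per channel via
 * the SITRE bit of IDETIM) and, on the UDMA-capable chips, the UDMAREG
 * and IDE_CONFIG registers, so master and slave timings can be
 * programmed independently.
 */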
1899 void
1900 piix3_4_setup_channel(chp)
1901 struct channel_softc *chp;
1902 {
1903 struct ata_drive_datas *drvp;
1904 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1905 struct pciide_channel *cp = (struct pciide_channel*)chp;
1906 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1907 int drive;
1908 int channel = chp->channel;
1909
1910 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1911 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1912 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1913 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1914 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1915 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1916 PIIX_SIDETIM_RTC_MASK(channel));
1917
1918 idedma_ctl = 0;
1919 /* If channel disabled, no need to go further */
1920 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1921 return;
1922 /* set up new idetim: Enable IDE registers decode */
1923 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1924
1925 /* setup DMA if needed */
1926 pciide_channel_dma_setup(cp);
1927
1928 for (drive = 0; drive < 2; drive++) {
1929 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1930 PIIX_UDMATIM_SET(0x3, channel, drive));
1931 drvp = &chp->ch_drive[drive];
1932 /* If no drive, skip */
1933 if ((drvp->drive_flags & DRIVE) == 0)
1934 continue;
1935 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1936 (drvp->drive_flags & DRIVE_UDMA) == 0))
1937 goto pio;
1938
1939 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1940 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1941 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1942 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1943 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1944 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1945 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1946 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1947 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1948 ideconf |= PIIX_CONFIG_PINGPONG;
1949 }
1950 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1951 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1952 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1953 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1954 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1955 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1956 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1957 /* setup Ultra/100 */
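/*
 * The PIIX_CONFIG_CR bits appear to report the cable type; cap at
 * UDMA2 (Ultra/33) when an 80-conductor cable is not reported.
 */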
1958 if (drvp->UDMA_mode > 2 &&
1959 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1960 drvp->UDMA_mode = 2;
1961 if (drvp->UDMA_mode > 4) {
1962 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1963 } else {
1964 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1965 if (drvp->UDMA_mode > 2) {
1966 ideconf |= PIIX_CONFIG_UDMA66(channel,
1967 drive);
1968 } else {
1969 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1970 drive);
1971 }
1972 }
1973 }
1974 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1975 /* setup Ultra/66 */
1976 if (drvp->UDMA_mode > 2 &&
1977 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1978 drvp->UDMA_mode = 2;
1979 if (drvp->UDMA_mode > 2)
1980 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1981 else
1982 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1983 }
1984 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1985 (drvp->drive_flags & DRIVE_UDMA)) {
1986 /* use Ultra/DMA */
1987 drvp->drive_flags &= ~DRIVE_DMA;
1988 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1989 udmareg |= PIIX_UDMATIM_SET(
1990 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1991 } else {
1992 /* use Multiword DMA */
1993 drvp->drive_flags &= ~DRIVE_UDMA;
1994 if (drive == 0) {
1995 idetim |= piix_setup_idetim_timings(
1996 drvp->DMA_mode, 1, channel);
1997 } else {
1998 sidetim |= piix_setup_sidetim_timings(
1999 drvp->DMA_mode, 1, channel);
2000 idetim = PIIX_IDETIM_SET(idetim,
2001 PIIX_IDETIM_SITRE, channel);
2002 }
2003 }
2004 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2005
2006 pio: /* use PIO mode */
2007 idetim |= piix_setup_idetim_drvs(drvp);
2008 if (drive == 0) {
2009 idetim |= piix_setup_idetim_timings(
2010 drvp->PIO_mode, 0, channel);
2011 } else {
2012 sidetim |= piix_setup_sidetim_timings(
2013 drvp->PIO_mode, 0, channel);
2014 idetim = PIIX_IDETIM_SET(idetim,
2015 PIIX_IDETIM_SITRE, channel);
2016 }
2017 }
2018 if (idedma_ctl != 0) {
2019 /* Add software bits in status register */
2020 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2021 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
2022 idedma_ctl);
2023 }
2024 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
2025 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
2026 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
2027 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
2028 pciide_print_modes(cp);
2029 }
2030
2031
2032 /* setup the ISP (IORDY sample point) and RTC (recovery time) fields, based on mode */
2033 static u_int32_t
2034 piix_setup_idetim_timings(mode, dma, channel)
2035 u_int8_t mode;
2036 u_int8_t dma;
2037 u_int8_t channel;
2038 {
2039
2040 if (dma)
2041 return PIIX_IDETIM_SET(0,
2042 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
2043 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
2044 channel);
2045 else
2046 return PIIX_IDETIM_SET(0,
2047 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
2048 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
2049 channel);
2050 }
2051
2052 /* setup the DTE (DMA timing enable), PPE (prefetch/posting enable), IE (IORDY enable) and TIME (fast timing enable) fields based on PIO mode */
2053 static u_int32_t
2054 piix_setup_idetim_drvs(drvp)
2055 struct ata_drive_datas *drvp;
2056 {
2057 u_int32_t ret = 0;
2058 struct channel_softc *chp = drvp->chnl_softc;
2059 u_int8_t channel = chp->channel;
2060 u_int8_t drive = drvp->drive;
2061
2062 /*
2063 * If the drive is using UDMA, the timing setup is independent,
2064 * so just check DMA and PIO here.
2065 */
2066 if (drvp->drive_flags & DRIVE_DMA) {
2067 /* if mode = DMA mode 0, use compatible timings */
2068 if ((drvp->drive_flags & DRIVE_DMA) &&
2069 drvp->DMA_mode == 0) {
2070 drvp->PIO_mode = 0;
2071 return ret;
2072 }
2073 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2074 /*
2075 * If the PIO and DMA timings are the same, use fast timings
2076 * for PIO too; otherwise fall back to compat timings for PIO.
2077 */
2078 if ((piix_isp_pio[drvp->PIO_mode] !=
2079 piix_isp_dma[drvp->DMA_mode]) ||
2080 (piix_rtc_pio[drvp->PIO_mode] !=
2081 piix_rtc_dma[drvp->DMA_mode]))
2082 drvp->PIO_mode = 0;
2083 /* if PIO mode <= 2, use compat timings for PIO */
2084 if (drvp->PIO_mode <= 2) {
2085 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2086 channel);
2087 return ret;
2088 }
2089 }
2090
2091 /*
2092 * Now setup PIO modes. If mode < 2, use compat timings.
2093 * Else enable fast timings. Enable IORDY and prefetch/post
2094 * if PIO mode >= 3.
2095 */
2096
2097 if (drvp->PIO_mode < 2)
2098 return ret;
2099
2100 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2101 if (drvp->PIO_mode >= 3) {
2102 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2103 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2104 }
2105 return ret;
2106 }
2107
2108 /* setup values in SIDETIM registers, based on mode */
2109 static u_int32_t
2110 piix_setup_sidetim_timings(mode, dma, channel)
2111 u_int8_t mode;
2112 u_int8_t dma;
2113 u_int8_t channel;
2114 {
2115 if (dma)
2116 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2117 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2118 else
2119 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2120 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2121 }
2122
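/*
 * Common chip map for the AMD 756/766/768/8111 and NVIDIA nForce IDE
 * controllers.  These chips share the same timing register layout and
 * appear to differ only in where the registers sit in PCI config
 * space, which is what sc_amd_regbase accounts for.
 */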
2123 void
2124 amd7x6_chip_map(sc, pa)
2125 struct pciide_softc *sc;
2126 struct pci_attach_args *pa;
2127 {
2128 struct pciide_channel *cp;
2129 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2130 int channel;
2131 pcireg_t chanenable;
2132 bus_size_t cmdsize, ctlsize;
2133
2134 if (pciide_chipen(sc, pa) == 0)
2135 return;
2136 aprint_normal("%s: bus-master DMA support present",
2137 sc->sc_wdcdev.sc_dev.dv_xname);
2138 pciide_mapreg_dma(sc, pa);
2139 aprint_normal("\n");
2140 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2141 WDC_CAPABILITY_MODE;
2142 if (sc->sc_dma_ok) {
2143 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2144 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2145 sc->sc_wdcdev.irqack = pciide_irqack;
2146 }
2147 sc->sc_wdcdev.PIO_cap = 4;
2148 sc->sc_wdcdev.DMA_cap = 2;
2149
2150 switch (sc->sc_pci_vendor) {
2151 case PCI_VENDOR_AMD:
2152 switch (sc->sc_pp->ide_product) {
2153 case PCI_PRODUCT_AMD_PBC766_IDE:
2154 case PCI_PRODUCT_AMD_PBC768_IDE:
2155 case PCI_PRODUCT_AMD_PBC8111_IDE:
2156 sc->sc_wdcdev.UDMA_cap = 5;
2157 break;
2158 default:
2159 sc->sc_wdcdev.UDMA_cap = 4;
2160 }
2161 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2162 break;
2163
2164 case PCI_VENDOR_NVIDIA:
2165 switch (sc->sc_pp->ide_product) {
2166 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2167 sc->sc_wdcdev.UDMA_cap = 5;
2168 break;
2169 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2170 sc->sc_wdcdev.UDMA_cap = 6;
2171 break;
2172 }
2173 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2174 break;
2175
2176 default:
2177 panic("amd7x6_chip_map: unknown vendor");
2178 }
2179 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2180 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2181 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2182 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2183 AMD7X6_CHANSTATUS_EN(sc));
2184
2185 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2186 DEBUG_PROBE);
2187 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2188 cp = &sc->pciide_channels[channel];
2189 if (pciide_chansetup(sc, channel, interface) == 0)
2190 continue;
2191
2192 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2193 aprint_normal("%s: %s channel ignored (disabled)\n",
2194 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2195 continue;
2196 }
2197 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2198 pciide_pci_intr);
2199
2200 if (pciide_chan_candisable(cp))
2201 chanenable &= ~AMD7X6_CHAN_EN(channel);
2202 pciide_map_compat_intr(pa, cp, channel, interface);
2203 if (cp->hw_ok == 0)
2204 continue;
2205
2206 amd7x6_setup_channel(&cp->wdc_channel);
2207 }
2208 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2209 chanenable);
2210 return;
2211 }
2212
2213 void
2214 amd7x6_setup_channel(chp)
2215 struct channel_softc *chp;
2216 {
2217 u_int32_t udmatim_reg, datatim_reg;
2218 u_int8_t idedma_ctl;
2219 int mode, drive;
2220 struct ata_drive_datas *drvp;
2221 struct pciide_channel *cp = (struct pciide_channel*)chp;
2222 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2223 #ifndef PCIIDE_AMD756_ENABLEDMA
2224 int rev = PCI_REVISION(
2225 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2226 #endif
2227
2228 idedma_ctl = 0;
2229 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2230 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2231 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2232 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2233
2234 /* setup DMA if needed */
2235 pciide_channel_dma_setup(cp);
2236
2237 for (drive = 0; drive < 2; drive++) {
2238 drvp = &chp->ch_drive[drive];
2239 /* If no drive, skip */
2240 if ((drvp->drive_flags & DRIVE) == 0)
2241 continue;
2242 /* add timing values, setup DMA if needed */
2243 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2244 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2245 mode = drvp->PIO_mode;
2246 goto pio;
2247 }
2248 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2249 (drvp->drive_flags & DRIVE_UDMA)) {
2250 /* use Ultra/DMA */
2251 drvp->drive_flags &= ~DRIVE_DMA;
2252 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2253 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2254 AMD7X6_UDMA_TIME(chp->channel, drive,
2255 amd7x6_udma_tim[drvp->UDMA_mode]);
2256 /* can use PIO timings, MW DMA unused */
2257 mode = drvp->PIO_mode;
2258 } else {
2259 /* use Multiword DMA, but only if revision is OK */
2260 drvp->drive_flags &= ~DRIVE_UDMA;
2261 #ifndef PCIIDE_AMD756_ENABLEDMA
2262 /*
2263 * The workaround doesn't seem to be necessary
2264 * with all drives, so it can be disabled by
2265 * defining PCIIDE_AMD756_ENABLEDMA. The underlying bug causes
2266 * a hard hang if triggered.
2267 */
2268 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2269 sc->sc_pp->ide_product ==
2270 PCI_PRODUCT_AMD_PBC756_IDE &&
2271 AMD756_CHIPREV_DISABLEDMA(rev)) {
2272 aprint_normal(
2273 "%s:%d:%d: multi-word DMA disabled due "
2274 "to chip revision\n",
2275 sc->sc_wdcdev.sc_dev.dv_xname,
2276 chp->channel, drive);
2277 mode = drvp->PIO_mode;
2278 drvp->drive_flags &= ~DRIVE_DMA;
2279 goto pio;
2280 }
2281 #endif
2282 /* mode = min(pio, dma+2) */
2283 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2284 mode = drvp->PIO_mode;
2285 else
2286 mode = drvp->DMA_mode + 2;
2287 }
2288 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2289
2290 pio: /* setup PIO mode */
2291 if (mode <= 2) {
2292 drvp->DMA_mode = 0;
2293 drvp->PIO_mode = 0;
2294 mode = 0;
2295 } else {
2296 drvp->PIO_mode = mode;
2297 drvp->DMA_mode = mode - 2;
2298 }
2299 datatim_reg |=
2300 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2301 amd7x6_pio_set[mode]) |
2302 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2303 amd7x6_pio_rec[mode]);
2304 }
2305 if (idedma_ctl != 0) {
2306 /* Add software bits in status register */
2307 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2308 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2309 idedma_ctl);
2310 }
2311 pciide_print_modes(cp);
2312 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2313 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2314 }
2315
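/*
 * VIA Apollo chip map.  The UDMA capability is derived from the ID and
 * revision of the ISA bridge (function 0 of the same device) rather
 * than from the IDE function itself, since the IDE function apparently
 * reports the same product ID across chip generations.
 */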
2316 void
2317 apollo_chip_map(sc, pa)
2318 struct pciide_softc *sc;
2319 struct pci_attach_args *pa;
2320 {
2321 struct pciide_channel *cp;
2322 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2323 int channel;
2324 u_int32_t ideconf;
2325 bus_size_t cmdsize, ctlsize;
2326 pcitag_t pcib_tag;
2327 pcireg_t pcib_id, pcib_class;
2328
2329 if (pciide_chipen(sc, pa) == 0)
2330 return;
2331 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2332 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2333 /* and read ID and rev of the ISA bridge */
2334 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2335 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2336 aprint_normal(": VIA Technologies ");
2337 switch (PCI_PRODUCT(pcib_id)) {
2338 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2339 aprint_normal("VT82C586 (Apollo VP) ");
2340 if(PCI_REVISION(pcib_class) >= 0x02) {
2341 aprint_normal("ATA33 controller\n");
2342 sc->sc_wdcdev.UDMA_cap = 2;
2343 } else {
2344 aprint_normal("controller\n");
2345 sc->sc_wdcdev.UDMA_cap = 0;
2346 }
2347 break;
2348 case PCI_PRODUCT_VIATECH_VT82C596A:
2349 aprint_normal("VT82C596A (Apollo Pro) ");
2350 if (PCI_REVISION(pcib_class) >= 0x12) {
2351 aprint_normal("ATA66 controller\n");
2352 sc->sc_wdcdev.UDMA_cap = 4;
2353 } else {
2354 aprint_normal("ATA33 controller\n");
2355 sc->sc_wdcdev.UDMA_cap = 2;
2356 }
2357 break;
2358 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2359 aprint_normal("VT82C686A (Apollo KX133) ");
2360 if (PCI_REVISION(pcib_class) >= 0x40) {
2361 aprint_normal("ATA100 controller\n");
2362 sc->sc_wdcdev.UDMA_cap = 5;
2363 } else {
2364 aprint_normal("ATA66 controller\n");
2365 sc->sc_wdcdev.UDMA_cap = 4;
2366 }
2367 break;
2368 case PCI_PRODUCT_VIATECH_VT8231:
2369 aprint_normal("VT8231 ATA100 controller\n");
2370 sc->sc_wdcdev.UDMA_cap = 5;
2371 break;
2372 case PCI_PRODUCT_VIATECH_VT8233:
2373 aprint_normal("VT8233 ATA100 controller\n");
2374 sc->sc_wdcdev.UDMA_cap = 5;
2375 break;
2376 case PCI_PRODUCT_VIATECH_VT8233A:
2377 aprint_normal("VT8233A ATA133 controller\n");
2378 sc->sc_wdcdev.UDMA_cap = 6;
2379 break;
2380 case PCI_PRODUCT_VIATECH_VT8235:
2381 aprint_normal("VT8235 ATA133 controller\n");
2382 sc->sc_wdcdev.UDMA_cap = 6;
2383 break;
2384 default:
2385 aprint_normal("unknown ATA controller\n");
2386 sc->sc_wdcdev.UDMA_cap = 0;
2387 }
2388
2389 aprint_normal("%s: bus-master DMA support present",
2390 sc->sc_wdcdev.sc_dev.dv_xname);
2391 pciide_mapreg_dma(sc, pa);
2392 aprint_normal("\n");
2393 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2394 WDC_CAPABILITY_MODE;
2395 if (sc->sc_dma_ok) {
2396 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2397 sc->sc_wdcdev.irqack = pciide_irqack;
2398 if (sc->sc_wdcdev.UDMA_cap > 0)
2399 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2400 }
2401 sc->sc_wdcdev.PIO_cap = 4;
2402 sc->sc_wdcdev.DMA_cap = 2;
2403 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2404 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2405 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2406
2407 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2408 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2409 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2410 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2411 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2412 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2413 DEBUG_PROBE);
2414
2415 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2416 cp = &sc->pciide_channels[channel];
2417 if (pciide_chansetup(sc, channel, interface) == 0)
2418 continue;
2419
2420 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2421 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2422 aprint_normal("%s: %s channel ignored (disabled)\n",
2423 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2424 continue;
2425 }
2426 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2427 pciide_pci_intr);
2428 if (cp->hw_ok == 0)
2429 continue;
2430 if (pciide_chan_candisable(cp)) {
2431 ideconf &= ~APO_IDECONF_EN(channel);
2432 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2433 ideconf);
2434 }
2435 pciide_map_compat_intr(pa, cp, channel, interface);
2436
2437 if (cp->hw_ok == 0)
2438 continue;
2439 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2440 }
2441 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2442 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2443 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2444 }
2445
2446 void
2447 apollo_setup_channel(chp)
2448 struct channel_softc *chp;
2449 {
2450 u_int32_t udmatim_reg, datatim_reg;
2451 u_int8_t idedma_ctl;
2452 int mode, drive;
2453 struct ata_drive_datas *drvp;
2454 struct pciide_channel *cp = (struct pciide_channel*)chp;
2455 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2456
2457 idedma_ctl = 0;
2458 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2459 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2460 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2461 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2462
2463 /* setup DMA if needed */
2464 pciide_channel_dma_setup(cp);
2465
2466 for (drive = 0; drive < 2; drive++) {
2467 drvp = &chp->ch_drive[drive];
2468 /* If no drive, skip */
2469 if ((drvp->drive_flags & DRIVE) == 0)
2470 continue;
2471 /* add timing values, setup DMA if needed */
2472 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2473 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2474 mode = drvp->PIO_mode;
2475 goto pio;
2476 }
2477 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2478 (drvp->drive_flags & DRIVE_UDMA)) {
2479 /* use Ultra/DMA */
2480 drvp->drive_flags &= ~DRIVE_DMA;
2481 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2482 APO_UDMA_EN_MTH(chp->channel, drive);
2483 if (sc->sc_wdcdev.UDMA_cap == 6) {
2484 /* 8233a */
2485 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2486 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2487 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2488 /* 686b */
2489 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2490 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2491 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2492 /* 596b or 686a */
2493 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2494 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2495 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2496 } else {
2497 /* 596a or 586b */
2498 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2499 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2500 }
2501 /* can use PIO timings, MW DMA unused */
2502 mode = drvp->PIO_mode;
2503 } else {
2504 /* use Multiword DMA */
2505 drvp->drive_flags &= ~DRIVE_UDMA;
2506 /* mode = min(pio, dma+2) */
2507 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2508 mode = drvp->PIO_mode;
2509 else
2510 mode = drvp->DMA_mode + 2;
2511 }
2512 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2513
2514 pio: /* setup PIO mode */
2515 if (mode <= 2) {
2516 drvp->DMA_mode = 0;
2517 drvp->PIO_mode = 0;
2518 mode = 0;
2519 } else {
2520 drvp->PIO_mode = mode;
2521 drvp->DMA_mode = mode - 2;
2522 }
2523 datatim_reg |=
2524 APO_DATATIM_PULSE(chp->channel, drive,
2525 apollo_pio_set[mode]) |
2526 APO_DATATIM_RECOV(chp->channel, drive,
2527 apollo_pio_rec[mode]);
2528 }
2529 if (idedma_ctl != 0) {
2530 /* Add software bits in status register */
2531 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2532 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2533 idedma_ctl);
2534 }
2535 pciide_print_modes(cp);
2536 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2537 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2538 }
2539
2540 void
2541 cmd_channel_map(pa, sc, channel)
2542 struct pci_attach_args *pa;
2543 struct pciide_softc *sc;
2544 int channel;
2545 {
2546 struct pciide_channel *cp = &sc->pciide_channels[channel];
2547 bus_size_t cmdsize, ctlsize;
2548 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2549 int interface, one_channel;
2550
2551 /*
2552 * The 0648/0649 can be told to identify as a RAID controller.
2553 * In this case, we have to fake the interface value.
2554 */
2555 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2556 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2557 PCIIDE_INTERFACE_SETTABLE(1);
2558 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2559 CMD_CONF_DSA1)
2560 interface |= PCIIDE_INTERFACE_PCI(0) |
2561 PCIIDE_INTERFACE_PCI(1);
2562 } else {
2563 interface = PCI_INTERFACE(pa->pa_class);
2564 }
2565
2566 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2567 cp->name = PCIIDE_CHANNEL_NAME(channel);
2568 cp->wdc_channel.channel = channel;
2569 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2570
2571 /*
2572 * Older CMD64x chips don't have independent channels
2573 */
2574 switch (sc->sc_pp->ide_product) {
2575 case PCI_PRODUCT_CMDTECH_649:
2576 one_channel = 0;
2577 break;
2578 default:
2579 one_channel = 1;
2580 break;
2581 }
2582
2583 if (channel > 0 && one_channel) {
2584 cp->wdc_channel.ch_queue =
2585 sc->pciide_channels[0].wdc_channel.ch_queue;
2586 } else {
2587 cp->wdc_channel.ch_queue =
2588 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2589 }
2590 if (cp->wdc_channel.ch_queue == NULL) {
2591 aprint_error("%s %s channel: "
2592 "can't allocate memory for command queue",
2593 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2594 return;
2595 }
2596
2597 aprint_normal("%s: %s channel %s to %s mode\n",
2598 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2599 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2600 "configured" : "wired",
2601 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2602 "native-PCI" : "compatibility");
2603
2604 /*
2605 * with a CMD PCI64x, if we get here, the first channel is enabled:
2606 * there's no way to disable the first channel without disabling
2607 * the whole device
2608 */
2609 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2610 aprint_normal("%s: %s channel ignored (disabled)\n",
2611 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2612 return;
2613 }
2614
2615 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2616 if (cp->hw_ok == 0)
2617 return;
2618 if (channel == 1) {
2619 if (pciide_chan_candisable(cp)) {
2620 ctrl &= ~CMD_CTRL_2PORT;
2621 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2622 CMD_CTRL, ctrl);
2623 }
2624 }
2625 pciide_map_compat_intr(pa, cp, channel, interface);
2626 }
2627
2628 int
2629 cmd_pci_intr(arg)
2630 void *arg;
2631 {
2632 struct pciide_softc *sc = arg;
2633 struct pciide_channel *cp;
2634 struct channel_softc *wdc_cp;
2635 int i, rv, crv;
2636 u_int32_t priirq, secirq;
2637
2638 rv = 0;
2639 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2640 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2641 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2642 cp = &sc->pciide_channels[i];
2643 wdc_cp = &cp->wdc_channel;
2644 /* If it's a compat channel, skip. */
2645 if (cp->compat)
2646 continue;
2647 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2648 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2649 crv = wdcintr(wdc_cp);
2650 if (crv == 0)
2651 printf("%s:%d: bogus intr\n",
2652 sc->sc_wdcdev.sc_dev.dv_xname, i);
2653 else
2654 rv = 1;
2655 }
2656 }
2657 return rv;
2658 }
2659
2660 void
2661 cmd_chip_map(sc, pa)
2662 struct pciide_softc *sc;
2663 struct pci_attach_args *pa;
2664 {
2665 int channel;
2666
2667 /*
2668 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2669 * and the base address registers can be disabled at
2670 * the hardware level. In this case, the device is wired
2671 * in compat mode and its first channel is always enabled,
2672 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2673 * In fact, it seems that the first channel of the CMD PCI0640
2674 * can't be disabled.
2675 */
2676
2677 #ifdef PCIIDE_CMD064x_DISABLE
2678 if (pciide_chipen(sc, pa) == 0)
2679 return;
2680 #endif
2681
2682 aprint_normal("%s: hardware does not support DMA\n",
2683 sc->sc_wdcdev.sc_dev.dv_xname);
2684 sc->sc_dma_ok = 0;
2685
2686 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2687 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2688 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2689
2690 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2691 cmd_channel_map(pa, sc, channel);
2692 }
2693 }
2694
2695 void
2696 cmd0643_9_chip_map(sc, pa)
2697 struct pciide_softc *sc;
2698 struct pci_attach_args *pa;
2699 {
2700 struct pciide_channel *cp;
2701 int channel;
2702 pcireg_t rev = PCI_REVISION(pa->pa_class);
2703
2704 /*
2705 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2706 * and the base address registers can be disabled at
2707 * the hardware level. In this case, the device is wired
2708 * in compat mode and its first channel is always enabled,
2709 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2710 * In fact, it seems that the first channel of the CMD PCI0640
2711 * can't be disabled.
2712 */
2713
2714 #ifdef PCIIDE_CMD064x_DISABLE
2715 if (pciide_chipen(sc, pa) == 0)
2716 return;
2717 #endif
2718 aprint_normal("%s: bus-master DMA support present",
2719 sc->sc_wdcdev.sc_dev.dv_xname);
2720 pciide_mapreg_dma(sc, pa);
2721 aprint_normal("\n");
2722 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2723 WDC_CAPABILITY_MODE;
2724 if (sc->sc_dma_ok) {
2725 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2726 switch (sc->sc_pp->ide_product) {
2727 case PCI_PRODUCT_CMDTECH_649:
2728 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2729 sc->sc_wdcdev.UDMA_cap = 5;
2730 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2731 break;
2732 case PCI_PRODUCT_CMDTECH_648:
2733 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2734 sc->sc_wdcdev.UDMA_cap = 4;
2735 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2736 break;
2737 case PCI_PRODUCT_CMDTECH_646:
2738 if (rev >= CMD0646U2_REV) {
2739 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2740 sc->sc_wdcdev.UDMA_cap = 2;
2741 } else if (rev >= CMD0646U_REV) {
2742 /*
2743 * Linux's driver claims that the 646U is broken
2744 * with UDMA. Only enable it if we know what we're
2745 * doing
2746 */
2747 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2748 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2749 sc->sc_wdcdev.UDMA_cap = 2;
2750 #endif
2751 /* explicitly disable UDMA */
2752 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2753 CMD_UDMATIM(0), 0);
2754 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2755 CMD_UDMATIM(1), 0);
2756 }
2757 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2758 break;
2759 default:
2760 sc->sc_wdcdev.irqack = pciide_irqack;
2761 }
2762 }
2763
2764 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2765 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2766 sc->sc_wdcdev.PIO_cap = 4;
2767 sc->sc_wdcdev.DMA_cap = 2;
2768 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2769
2770 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2771 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2772 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2773 DEBUG_PROBE);
2774
2775 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2776 cp = &sc->pciide_channels[channel];
2777 cmd_channel_map(pa, sc, channel);
2778 if (cp->hw_ok == 0)
2779 continue;
2780 cmd0643_9_setup_channel(&cp->wdc_channel);
2781 }
2782 /*
2783 * note - this also makes sure we clear the irq disable and reset
2784 * bits
2785 */
2786 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2787 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2788 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2789 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2790 DEBUG_PROBE);
2791 }
2792
2793 void
2794 cmd0643_9_setup_channel(chp)
2795 struct channel_softc *chp;
2796 {
2797 struct ata_drive_datas *drvp;
2798 u_int8_t tim;
2799 u_int32_t idedma_ctl, udma_reg;
2800 int drive;
2801 struct pciide_channel *cp = (struct pciide_channel*)chp;
2802 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2803
2804 idedma_ctl = 0;
2805 /* setup DMA if needed */
2806 pciide_channel_dma_setup(cp);
2807
2808 for (drive = 0; drive < 2; drive++) {
2809 drvp = &chp->ch_drive[drive];
2810 /* If no drive, skip */
2811 if ((drvp->drive_flags & DRIVE) == 0)
2812 continue;
2813 /* add timing values, setup DMA if needed */
2814 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2815 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2816 if (drvp->drive_flags & DRIVE_UDMA) {
2817 /* UltraDMA on a 646U2, 0648 or 0649 */
2818 drvp->drive_flags &= ~DRIVE_DMA;
2819 udma_reg = pciide_pci_read(sc->sc_pc,
2820 sc->sc_tag, CMD_UDMATIM(chp->channel));
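/*
 * CMD_BICSR reports the cable type per channel; cap at UDMA2
 * (Ultra/33) when the 80-conductor bit for this channel is not set.
 */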
2821 if (drvp->UDMA_mode > 2 &&
2822 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2823 CMD_BICSR) &
2824 CMD_BICSR_80(chp->channel)) == 0)
2825 drvp->UDMA_mode = 2;
2826 if (drvp->UDMA_mode > 2)
2827 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2828 else if (sc->sc_wdcdev.UDMA_cap > 2)
2829 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2830 udma_reg |= CMD_UDMATIM_UDMA(drive);
2831 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2832 CMD_UDMATIM_TIM_OFF(drive));
2833 udma_reg |=
2834 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2835 CMD_UDMATIM_TIM_OFF(drive));
2836 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2837 CMD_UDMATIM(chp->channel), udma_reg);
2838 } else {
2839 /*
2840 * use Multiword DMA.
2841 * Timings will be used for both PIO and DMA,
2842 * so adjust the DMA mode if needed.
2843 * If we have a 0646U2/8/9, turn off UDMA.
2844 */
2845 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2846 udma_reg = pciide_pci_read(sc->sc_pc,
2847 sc->sc_tag,
2848 CMD_UDMATIM(chp->channel));
2849 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2850 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2851 CMD_UDMATIM(chp->channel),
2852 udma_reg);
2853 }
2854 if (drvp->PIO_mode >= 3 &&
2855 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2856 drvp->DMA_mode = drvp->PIO_mode - 2;
2857 }
2858 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2859 }
2860 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2861 }
2862 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2863 CMD_DATA_TIM(chp->channel, drive), tim);
2864 }
2865 if (idedma_ctl != 0) {
2866 /* Add software bits in status register */
2867 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2868 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2869 idedma_ctl);
2870 }
2871 pciide_print_modes(cp);
2872 }
2873
2874 void
2875 cmd646_9_irqack(chp)
2876 struct channel_softc *chp;
2877 {
2878 u_int32_t priirq, secirq;
2879 struct pciide_channel *cp = (struct pciide_channel*)chp;
2880 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2881
2882 if (chp->channel == 0) {
2883 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2884 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2885 } else {
2886 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2887 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2888 }
2889 pciide_irqack(chp);
2890 }
2891
2892 void
2893 cmd680_chip_map(sc, pa)
2894 struct pciide_softc *sc;
2895 struct pci_attach_args *pa;
2896 {
2897 struct pciide_channel *cp;
2898 int channel;
2899
2900 if (pciide_chipen(sc, pa) == 0)
2901 return;
2902 aprint_normal("%s: bus-master DMA support present",
2903 sc->sc_wdcdev.sc_dev.dv_xname);
2904 pciide_mapreg_dma(sc, pa);
2905 aprint_normal("\n");
2906 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2907 WDC_CAPABILITY_MODE;
2908 if (sc->sc_dma_ok) {
2909 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2910 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2911 sc->sc_wdcdev.UDMA_cap = 6;
2912 sc->sc_wdcdev.irqack = pciide_irqack;
2913 }
2914
2915 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2916 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2917 sc->sc_wdcdev.PIO_cap = 4;
2918 sc->sc_wdcdev.DMA_cap = 2;
2919 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2920
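/*
 * XXX magic: clear the per-channel drive mode select registers
 * (0x80/0x84) and set bit 0 of the clock/system configuration
 * register at 0x8a; the per-drive timing registers themselves are
 * initialized in cmd680_channel_map() below.  The register meanings
 * here are inferred from cmd680_setup_channel().
 */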
2921 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2922 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2923 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2924 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2925 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2926 cp = &sc->pciide_channels[channel];
2927 cmd680_channel_map(pa, sc, channel);
2928 if (cp->hw_ok == 0)
2929 continue;
2930 cmd680_setup_channel(&cp->wdc_channel);
2931 }
2932 }
2933
2934 void
2935 cmd680_channel_map(pa, sc, channel)
2936 struct pci_attach_args *pa;
2937 struct pciide_softc *sc;
2938 int channel;
2939 {
2940 struct pciide_channel *cp = &sc->pciide_channels[channel];
2941 bus_size_t cmdsize, ctlsize;
2942 int interface, i, reg;
2943 static const u_int8_t init_val[] =
2944 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2945 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2946
2947 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2948 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2949 PCIIDE_INTERFACE_SETTABLE(1);
2950 interface |= PCIIDE_INTERFACE_PCI(0) |
2951 PCIIDE_INTERFACE_PCI(1);
2952 } else {
2953 interface = PCI_INTERFACE(pa->pa_class);
2954 }
2955
2956 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2957 cp->name = PCIIDE_CHANNEL_NAME(channel);
2958 cp->wdc_channel.channel = channel;
2959 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2960
2961 cp->wdc_channel.ch_queue =
2962 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2963 if (cp->wdc_channel.ch_queue == NULL) {
2964 aprint_error("%s %s channel: "
2965 "can't allocate memory for command queue",
2966 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2967 return;
2968 }
2969
2970 /* XXX: appears to initialize the per-channel timing registers (0xa2-0xaf) to defaults */
2971 reg = 0xa2 + channel * 16;
2972 for (i = 0; i < sizeof(init_val); i++)
2973 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2974
2975 aprint_normal("%s: %s channel %s to %s mode\n",
2976 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2977 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2978 "configured" : "wired",
2979 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2980 "native-PCI" : "compatibility");
2981
2982 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2983 if (cp->hw_ok == 0)
2984 return;
2985 pciide_map_compat_intr(pa, cp, channel, interface);
2986 }
2987
2988 void
2989 cmd680_setup_channel(chp)
2990 struct channel_softc *chp;
2991 {
2992 struct ata_drive_datas *drvp;
2993 u_int8_t mode, off, scsc;
2994 u_int16_t val;
2995 u_int32_t idedma_ctl;
2996 int drive;
2997 struct pciide_channel *cp = (struct pciide_channel*)chp;
2998 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2999 pci_chipset_tag_t pc = sc->sc_pc;
3000 pcitag_t pa = sc->sc_tag;
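/*
 * Raw timing values for the per-drive registers, indexed by mode.
 * The two UDMA tables appear to correspond to the two base-clock
 * settings reported via register 0x8a (`scsc' below); dma_tbl and
 * pio_tbl hold 16-bit values written out as two bytes.
 */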
3001 static const u_int8_t udma2_tbl[] =
3002 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
3003 static const u_int8_t udma_tbl[] =
3004 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
3005 static const u_int16_t dma_tbl[] =
3006 { 0x2208, 0x10c2, 0x10c1 };
3007 static const u_int16_t pio_tbl[] =
3008 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
3009
3010 idedma_ctl = 0;
3011 pciide_channel_dma_setup(cp);
3012 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
3013
3014 for (drive = 0; drive < 2; drive++) {
3015 drvp = &chp->ch_drive[drive];
3016 /* If no drive, skip */
3017 if ((drvp->drive_flags & DRIVE) == 0)
3018 continue;
3019 mode &= ~(0x03 << (drive * 4));
3020 if (drvp->drive_flags & DRIVE_UDMA) {
3021 drvp->drive_flags &= ~DRIVE_DMA;
3022 off = 0xa0 + chp->channel * 16;
3023 if (drvp->UDMA_mode > 2 &&
3024 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
3025 drvp->UDMA_mode = 2;
3026 scsc = pciide_pci_read(pc, pa, 0x8a);
3027 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
3028 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
3029 scsc = pciide_pci_read(pc, pa, 0x8a);
3030 if ((scsc & 0x30) == 0)
3031 drvp->UDMA_mode = 5;
3032 }
3033 mode |= 0x03 << (drive * 4);
3034 off = 0xac + chp->channel * 16 + drive * 2;
3035 val = pciide_pci_read(pc, pa, off) & ~0x3f;
3036 if (scsc & 0x30)
3037 val |= udma2_tbl[drvp->UDMA_mode];
3038 else
3039 val |= udma_tbl[drvp->UDMA_mode];
3040 pciide_pci_write(pc, pa, off, val);
3041 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3042 } else if (drvp->drive_flags & DRIVE_DMA) {
3043 mode |= 0x02 << (drive * 4);
3044 off = 0xa8 + chp->channel * 16 + drive * 2;
3045 val = dma_tbl[drvp->DMA_mode];
3046 pciide_pci_write(pc, pa, off, val & 0xff);
3047 pciide_pci_write(pc, pa, off + 1, val >> 8);
3048 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3049 } else {
3050 mode |= 0x01 << (drive * 4);
3051 off = 0xa4 + chp->channel * 16 + drive * 2;
3052 val = pio_tbl[drvp->PIO_mode];
3053 pciide_pci_write(pc, pa, off, val & 0xff);
3054 pciide_pci_write(pc, pa, off + 1, val >> 8);
3055 }
3056 }
3057
3058 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3059 if (idedma_ctl != 0) {
3060 /* Add software bits in status register */
3061 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3062 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3063 idedma_ctl);
3064 }
3065 pciide_print_modes(cp);
3066 }
3067
3068 void
3069 cmd3112_chip_map(sc, pa)
3070 struct pciide_softc *sc;
3071 struct pci_attach_args *pa;
3072 {
3073 struct pciide_channel *cp;
3074 bus_size_t cmdsize, ctlsize;
3075 pcireg_t interface;
3076 int channel;
3077
3078 if (pciide_chipen(sc, pa) == 0)
3079 return;
3080
3081 aprint_normal("%s: bus-master DMA support present",
3082 sc->sc_wdcdev.sc_dev.dv_xname);
3083 pciide_mapreg_dma(sc, pa);
3084 aprint_normal("\n");
3085
3086 /*
3087 * Rev. <= 0x01 of the 3112 have a bug that can cause data
3088 * corruption if DMA transfers cross an 8K boundary. This is
3089 * apparently hard to tickle, but we'll go ahead and play it
3090 * safe.
3091 */
3092 if (PCI_REVISION(pa->pa_class) <= 0x01) {
3093 sc->sc_dma_maxsegsz = 8192;
3094 sc->sc_dma_boundary = 8192;
3095 }
3096
3097 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3098 WDC_CAPABILITY_MODE;
3099 sc->sc_wdcdev.PIO_cap = 4;
3100 if (sc->sc_dma_ok) {
3101 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3102 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3103 sc->sc_wdcdev.irqack = pciide_irqack;
3104 sc->sc_wdcdev.DMA_cap = 2;
3105 sc->sc_wdcdev.UDMA_cap = 6;
3106 }
3107 sc->sc_wdcdev.set_modes = cmd3112_setup_channel;
3108
3109 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3110 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3111
3112 /*
3113 * The 3112 can be told to identify as a RAID controller.
3114 * In this case, we have to fake the interface value.
3115 */
3116 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3117 interface = PCI_INTERFACE(pa->pa_class);
3118 } else {
3119 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3120 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3121 }
3122
3123 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3124 cp = &sc->pciide_channels[channel];
3125 if (pciide_chansetup(sc, channel, interface) == 0)
3126 continue;
3127 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3128 pciide_pci_intr);
3129 if (cp->hw_ok == 0)
3130 continue;
3131 pciide_map_compat_intr(pa, cp, channel, interface);
3132 cmd3112_setup_channel(&cp->wdc_channel);
3133 }
3134 }
3135
3136 void
3137 cmd3112_setup_channel(chp)
3138 struct channel_softc *chp;
3139 {
3140 struct ata_drive_datas *drvp;
3141 int drive;
3142 u_int32_t idedma_ctl, dtm;
3143 struct pciide_channel *cp = (struct pciide_channel*)chp;
3144 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
3145
3146 /* setup DMA if needed */
3147 pciide_channel_dma_setup(cp);
3148
3149 idedma_ctl = 0;
3150 dtm = 0;
3151
3152 for (drive = 0; drive < 2; drive++) {
3153 drvp = &chp->ch_drive[drive];
3154 /* If no drive, skip */
3155 if ((drvp->drive_flags & DRIVE) == 0)
3156 continue;
3157 if (drvp->drive_flags & DRIVE_UDMA) {
3158 /* use Ultra/DMA */
3159 drvp->drive_flags &= ~DRIVE_DMA;
3160 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3161 dtm |= DTM_IDEx_DMA;
3162 } else if (drvp->drive_flags & DRIVE_DMA) {
3163 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3164 dtm |= DTM_IDEx_DMA;
3165 } else {
3166 dtm |= DTM_IDEx_PIO;
3167 }
3168 }
3169
3170 /*
3171 * Nothing to do to set up modes; it is meaningless for S-ATA
3172 * (but many S-ATA drives still want to get the SET_FEATURES
3173 * command).
3174 */
3175 if (idedma_ctl != 0) {
3176 /* Add software bits in status register */
3177 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3178 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3179 idedma_ctl);
3180 }
3181 pci_conf_write(sc->sc_pc, sc->sc_tag,
3182 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
3183 pciide_print_modes(cp);
3184 }
3185
3186 void
3187 cy693_chip_map(sc, pa)
3188 struct pciide_softc *sc;
3189 struct pci_attach_args *pa;
3190 {
3191 struct pciide_channel *cp;
3192 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3193 bus_size_t cmdsize, ctlsize;
3194
3195 if (pciide_chipen(sc, pa) == 0)
3196 return;
3197 /*
3198 * This chip has 2 PCI IDE functions, one for the primary and one
3199 * for the secondary channel, so we need to call
3200 * pciide_mapregs_compat() with the real channel number.
3201 */
3202 if (pa->pa_function == 1) {
3203 sc->sc_cy_compatchan = 0;
3204 } else if (pa->pa_function == 2) {
3205 sc->sc_cy_compatchan = 1;
3206 } else {
3207 aprint_error("%s: unexpected PCI function %d\n",
3208 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3209 return;
3210 }
3211 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
3212 aprint_normal("%s: bus-master DMA support present",
3213 sc->sc_wdcdev.sc_dev.dv_xname);
3214 pciide_mapreg_dma(sc, pa);
3215 } else {
3216 aprint_normal("%s: hardware does not support DMA",
3217 sc->sc_wdcdev.sc_dev.dv_xname);
3218 sc->sc_dma_ok = 0;
3219 }
3220 aprint_normal("\n");
3221
3222 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3223 if (sc->sc_cy_handle == NULL) {
3224 aprint_error("%s: unable to map hyperCache control registers\n",
3225 sc->sc_wdcdev.sc_dev.dv_xname);
3226 sc->sc_dma_ok = 0;
3227 }
3228
3229 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3230 WDC_CAPABILITY_MODE;
3231 if (sc->sc_dma_ok) {
3232 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3233 sc->sc_wdcdev.irqack = pciide_irqack;
3234 }
3235 sc->sc_wdcdev.PIO_cap = 4;
3236 sc->sc_wdcdev.DMA_cap = 2;
3237 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3238
3239 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3240 sc->sc_wdcdev.nchannels = 1;
3241
3242 /* Only one channel for this chip; if we are here it's enabled */
3243 cp = &sc->pciide_channels[0];
3244 sc->wdc_chanarray[0] = &cp->wdc_channel;
3245 cp->name = PCIIDE_CHANNEL_NAME(0);
3246 cp->wdc_channel.channel = 0;
3247 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3248 cp->wdc_channel.ch_queue =
3249 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3250 if (cp->wdc_channel.ch_queue == NULL) {
3251 aprint_error("%s primary channel: "
3252 "can't allocate memory for command queue",
3253 sc->sc_wdcdev.sc_dev.dv_xname);
3254 return;
3255 }
3256 aprint_normal("%s: primary channel %s to ",
3257 sc->sc_wdcdev.sc_dev.dv_xname,
3258 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3259 "configured" : "wired");
3260 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3261 aprint_normal("native-PCI");
3262 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3263 pciide_pci_intr);
3264 } else {
3265 aprint_normal("compatibility");
3266 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3267 &cmdsize, &ctlsize);
3268 }
3269 aprint_normal(" mode\n");
3270 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3271 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3272 wdcattach(&cp->wdc_channel);
3273 if (pciide_chan_candisable(cp)) {
3274 pci_conf_write(sc->sc_pc, sc->sc_tag,
3275 PCI_COMMAND_STATUS_REG, 0);
3276 }
3277 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3278 if (cp->hw_ok == 0)
3279 return;
3280 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3281 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3282 cy693_setup_channel(&cp->wdc_channel);
3283 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3284 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3285 }
3286
3287 void
3288 cy693_setup_channel(chp)
3289 struct channel_softc *chp;
3290 {
3291 struct ata_drive_datas *drvp;
3292 int drive;
3293 u_int32_t cy_cmd_ctrl;
3294 u_int32_t idedma_ctl;
3295 struct pciide_channel *cp = (struct pciide_channel*)chp;
3296 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3297 int dma_mode = -1;
3298
3299 cy_cmd_ctrl = idedma_ctl = 0;
3300
3301 /* setup DMA if needed */
3302 pciide_channel_dma_setup(cp);
3303
3304 for (drive = 0; drive < 2; drive++) {
3305 drvp = &chp->ch_drive[drive];
3306 /* If no drive, skip */
3307 if ((drvp->drive_flags & DRIVE) == 0)
3308 continue;
3309 /* add timing values, setup DMA if needed */
3310 if (drvp->drive_flags & DRIVE_DMA) {
3311 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3312 /* use Multiword DMA */
3313 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3314 dma_mode = drvp->DMA_mode;
3315 }
3316 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3317 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3318 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3319 CY_CMD_CTRL_IOW_REC_OFF(drive));
3320 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3321 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3322 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3323 CY_CMD_CTRL_IOR_REC_OFF(drive));
3324 }
3325 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
3326 chp->ch_drive[0].DMA_mode = dma_mode;
3327 chp->ch_drive[1].DMA_mode = dma_mode;
3328
3329 if (dma_mode == -1)
3330 dma_mode = 0;
3331
3332 if (sc->sc_cy_handle != NULL) {
3333 /* Note: `multiple' is implied. */
3334 cy82c693_write(sc->sc_cy_handle,
3335 (sc->sc_cy_compatchan == 0) ?
3336 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3337 }
3338
3339 pciide_print_modes(cp);
3340
3341 if (idedma_ctl != 0) {
3342 /* Add software bits in status register */
3343 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3344 IDEDMA_CTL, idedma_ctl);
3345 }
3346 }
3347
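/*
 * SiS IDE controllers generally report the same device ID regardless
 * of the chipset generation, so the actual capabilities (UDMA mode and
 * timing register layout) are identified by looking up the host bridge,
 * and for the "south" types the southbridge, in the table below.
 */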
3348 static struct sis_hostbr_type {
3349 u_int16_t id;
3350 u_int8_t rev;
3351 u_int8_t udma_mode;
3352 char *name;
3353 u_int8_t type;
3354 #define SIS_TYPE_NOUDMA 0
3355 #define SIS_TYPE_66 1
3356 #define SIS_TYPE_100OLD 2
3357 #define SIS_TYPE_100NEW 3
3358 #define SIS_TYPE_133OLD 4
3359 #define SIS_TYPE_133NEW 5
3360 #define SIS_TYPE_SOUTH 6
3361 } sis_hostbr_type[] = {
3362 /* Most of the info here is from sos (at) freebsd.org */
3363 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
3364 #if 0
3365 /*
3366 * Controllers associated with a rev 0x2 530 host-to-PCI bridge
3367 * have problems with UDMA (info provided by Christos)
3368 */
3369 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
3370 #endif
3371 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
3372 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
3373 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
3374 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
3375 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
3376 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
3377 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
3378 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
3379 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
3380 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
3381 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
3382 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
3383 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
3384 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
3385 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
3386 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
3387 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
3388 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
3389 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
3390 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
3391 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
3392 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
3393 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
3394 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
3395 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
3396 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
3397 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
3398 /*
3399 * From sos (at) freebsd.org: the 0x961 ID will never be found in the real world
3400 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
3401 */
3402 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
3403 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
3404 };
3405
3406 static struct sis_hostbr_type *sis_hostbr_type_match;
3407
3408 static int
3409 sis_hostbr_match(pa)
3410 struct pci_attach_args *pa;
3411 {
3412 int i;
3413 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3414 return 0;
3415 sis_hostbr_type_match = NULL;
3416 for (i = 0;
3417 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3418 i++) {
3419 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3420 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3421 sis_hostbr_type_match = &sis_hostbr_type[i];
3422 }
3423 return (sis_hostbr_type_match != NULL);
3424 }
3425
3426 static int sis_south_match(pa)
3427 struct pci_attach_args *pa;
3428 {
3429 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3430 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3431 PCI_REVISION(pa->pa_class) >= 0x10);
3432 }
3433
3434 void
3435 sis_chip_map(sc, pa)
3436 struct pciide_softc *sc;
3437 struct pci_attach_args *pa;
3438 {
3439 struct pciide_channel *cp;
3440 int channel;
3441 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3442 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3443 pcireg_t rev = PCI_REVISION(pa->pa_class);
3444 bus_size_t cmdsize, ctlsize;
3445
3446 if (pciide_chipen(sc, pa) == 0)
3447 return;
3448 aprint_normal(": Silicon Integrated System ");
3449 pci_find_device(NULL, sis_hostbr_match);
3450 if (sis_hostbr_type_match) {
3451 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
3452 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3453 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3454 SIS_REG_57) & 0x7f);
3455 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3456 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3457 aprint_normal("96X UDMA%d",
3458 sis_hostbr_type_match->udma_mode);
3459 sc->sis_type = SIS_TYPE_133NEW;
3460 sc->sc_wdcdev.UDMA_cap =
3461 sis_hostbr_type_match->udma_mode;
3462 } else {
3463 if (pci_find_device(NULL, sis_south_match)) {
3464 sc->sis_type = SIS_TYPE_133OLD;
3465 sc->sc_wdcdev.UDMA_cap =
3466 sis_hostbr_type_match->udma_mode;
3467 } else {
3468 sc->sis_type = SIS_TYPE_100NEW;
3469 sc->sc_wdcdev.UDMA_cap =
3470 sis_hostbr_type_match->udma_mode;
3471 }
3472 }
3473 } else {
3474 sc->sis_type = sis_hostbr_type_match->type;
3475 sc->sc_wdcdev.UDMA_cap =
3476 sis_hostbr_type_match->udma_mode;
3477 }
3478 aprint_normal("%s", sis_hostbr_type_match->name);
3479 } else {
3480 aprint_normal("5597/5598");
3481 if (rev >= 0xd0) {
3482 sc->sc_wdcdev.UDMA_cap = 2;
3483 sc->sis_type = SIS_TYPE_66;
3484 } else {
3485 sc->sc_wdcdev.UDMA_cap = 0;
3486 sc->sis_type = SIS_TYPE_NOUDMA;
3487 }
3488 }
3489 aprint_normal(" IDE controller (rev. 0x%02x)\n",
3490 PCI_REVISION(pa->pa_class));
3491 aprint_normal("%s: bus-master DMA support present",
3492 sc->sc_wdcdev.sc_dev.dv_xname);
3493 pciide_mapreg_dma(sc, pa);
3494 aprint_normal("\n");
3495
3496 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3497 WDC_CAPABILITY_MODE;
3498 if (sc->sc_dma_ok) {
3499 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3500 sc->sc_wdcdev.irqack = pciide_irqack;
3501 if (sc->sis_type >= SIS_TYPE_66)
3502 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3503 }
3504
3505 sc->sc_wdcdev.PIO_cap = 4;
3506 sc->sc_wdcdev.DMA_cap = 2;
3507
3508 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3509 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3510 switch(sc->sis_type) {
3511 case SIS_TYPE_NOUDMA:
3512 case SIS_TYPE_66:
3513 case SIS_TYPE_100OLD:
3514 sc->sc_wdcdev.set_modes = sis_setup_channel;
3515 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3516 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3517 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3518 break;
3519 case SIS_TYPE_100NEW:
3520 case SIS_TYPE_133OLD:
3521 sc->sc_wdcdev.set_modes = sis_setup_channel;
3522 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3523 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3524 break;
3525 case SIS_TYPE_133NEW:
3526 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3527 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3528 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3529 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3530 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3531 break;
3532 }
3533
3534
3535 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3536 cp = &sc->pciide_channels[channel];
3537 if (pciide_chansetup(sc, channel, interface) == 0)
3538 continue;
3539 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3540 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3541 aprint_normal("%s: %s channel ignored (disabled)\n",
3542 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3543 continue;
3544 }
3545 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3546 pciide_pci_intr);
3547 if (cp->hw_ok == 0)
3548 continue;
3549 if (pciide_chan_candisable(cp)) {
3550 if (channel == 0)
3551 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3552 else
3553 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3554 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3555 sis_ctr0);
3556 }
3557 pciide_map_compat_intr(pa, cp, channel, interface);
3558 if (cp->hw_ok == 0)
3559 continue;
3560 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3561 }
3562 }
3563
3564 void
3565 sis96x_setup_channel(chp)
3566 struct channel_softc *chp;
3567 {
3568 struct ata_drive_datas *drvp;
3569 int drive;
3570 u_int32_t sis_tim;
3571 u_int32_t idedma_ctl;
3572 int regtim;
3573 struct pciide_channel *cp = (struct pciide_channel*)chp;
3574 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3575
3576 sis_tim = 0;
3577 idedma_ctl = 0;
3578 /* setup DMA if needed */
3579 pciide_channel_dma_setup(cp);
3580
3581 for (drive = 0; drive < 2; drive++) {
3582 regtim = SIS_TIM133(
3583 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3584 chp->channel, drive);
3585 drvp = &chp->ch_drive[drive];
3586 /* If no drive, skip */
3587 if ((drvp->drive_flags & DRIVE) == 0)
3588 continue;
3589 /* add timing values, setup DMA if needed */
3590 if (drvp->drive_flags & DRIVE_UDMA) {
3591 /* use Ultra/DMA */
3592 drvp->drive_flags &= ~DRIVE_DMA;
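			/*
			 * If the cable detect bit reports a 40-conductor
			 * cable, cap the mode at UDMA2 (Ultra/33); higher
			 * UDMA modes need an 80-conductor cable.
			 */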
3593 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3594 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3595 if (drvp->UDMA_mode > 2)
3596 drvp->UDMA_mode = 2;
3597 }
3598 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3599 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3600 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3601 } else if (drvp->drive_flags & DRIVE_DMA) {
3602 /*
3603 * use Multiword DMA
3604 * Timings will be used for both PIO and DMA,
3605 * so adjust DMA mode if needed
3606 */
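			/*
			 * (PIO mode n and multiword DMA mode n - 2 have
			 * comparable cycle times, hence the +/-2 adjustment
			 * below.)
			 */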
3607 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3608 drvp->PIO_mode = drvp->DMA_mode + 2;
3609 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3610 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3611 drvp->PIO_mode - 2 : 0;
3612 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3613 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3614 } else {
3615 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3616 }
3617 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3618 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3619 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3620 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3621 }
3622 if (idedma_ctl != 0) {
3623 /* Add software bits in status register */
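		/*
		 * (These are the software-controlled per-drive "DMA capable"
		 * bits of the bus-master status register.)
		 */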
3624 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3625 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3626 idedma_ctl);
3627 }
3628 pciide_print_modes(cp);
3629 }
3630
3631 void
3632 sis_setup_channel(chp)
3633 struct channel_softc *chp;
3634 {
3635 struct ata_drive_datas *drvp;
3636 int drive;
3637 u_int32_t sis_tim;
3638 u_int32_t idedma_ctl;
3639 struct pciide_channel *cp = (struct pciide_channel*)chp;
3640 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3641
3642 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3643 "channel %d 0x%x\n", chp->channel,
3644 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3645 DEBUG_PROBE);
3646 sis_tim = 0;
3647 idedma_ctl = 0;
3648 /* setup DMA if needed */
3649 pciide_channel_dma_setup(cp);
3650
3651 for (drive = 0; drive < 2; drive++) {
3652 drvp = &chp->ch_drive[drive];
3653 /* If no drive, skip */
3654 if ((drvp->drive_flags & DRIVE) == 0)
3655 continue;
3656 /* add timing values, setup DMA if needed */
3657 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3658 (drvp->drive_flags & DRIVE_UDMA) == 0)
3659 goto pio;
3660
3661 if (drvp->drive_flags & DRIVE_UDMA) {
3662 /* use Ultra/DMA */
3663 drvp->drive_flags &= ~DRIVE_DMA;
3664 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3665 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3666 if (drvp->UDMA_mode > 2)
3667 drvp->UDMA_mode = 2;
3668 }
3669 switch (sc->sis_type) {
3670 case SIS_TYPE_66:
3671 case SIS_TYPE_100OLD:
3672 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3673 SIS_TIM66_UDMA_TIME_OFF(drive);
3674 break;
3675 			case SIS_TYPE_100NEW:
3676 				sis_tim |=
3677 				    sis_udma100new_tim[drvp->UDMA_mode] <<
3678 				    SIS_TIM100_UDMA_TIME_OFF(drive);
				break;
3679 case SIS_TYPE_133OLD:
3680 sis_tim |=
3681 sis_udma133old_tim[drvp->UDMA_mode] <<
3682 SIS_TIM100_UDMA_TIME_OFF(drive);
3683 break;
3684 default:
3685 aprint_error("unknown SiS IDE type %d\n",
3686 sc->sis_type);
3687 }
3688 } else {
3689 /*
3690 * use Multiword DMA
3691 * Timings will be used for both PIO and DMA,
3692 * so adjust DMA mode if needed
3693 */
3694 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3695 drvp->PIO_mode = drvp->DMA_mode + 2;
3696 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3697 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3698 drvp->PIO_mode - 2 : 0;
3699 if (drvp->DMA_mode == 0)
3700 drvp->PIO_mode = 0;
3701 }
3702 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3703 pio: switch (sc->sis_type) {
3704 case SIS_TYPE_NOUDMA:
3705 case SIS_TYPE_66:
3706 case SIS_TYPE_100OLD:
3707 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3708 SIS_TIM66_ACT_OFF(drive);
3709 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3710 SIS_TIM66_REC_OFF(drive);
3711 break;
3712 case SIS_TYPE_100NEW:
3713 case SIS_TYPE_133OLD:
3714 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3715 SIS_TIM100_ACT_OFF(drive);
3716 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3717 SIS_TIM100_REC_OFF(drive);
3718 break;
3719 default:
3720 aprint_error("unknown SiS IDE type %d\n",
3721 sc->sis_type);
3722 }
3723 }
3724 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3725 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3726 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3727 if (idedma_ctl != 0) {
3728 /* Add software bits in status register */
3729 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3730 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3731 idedma_ctl);
3732 }
3733 pciide_print_modes(cp);
3734 }
3735
3736 void
3737 acer_chip_map(sc, pa)
3738 struct pciide_softc *sc;
3739 struct pci_attach_args *pa;
3740 {
3741 struct pciide_channel *cp;
3742 int channel;
3743 pcireg_t cr, interface;
3744 bus_size_t cmdsize, ctlsize;
3745 pcireg_t rev = PCI_REVISION(pa->pa_class);
3746
3747 if (pciide_chipen(sc, pa) == 0)
3748 return;
3749 aprint_normal("%s: bus-master DMA support present",
3750 sc->sc_wdcdev.sc_dev.dv_xname);
3751 pciide_mapreg_dma(sc, pa);
3752 aprint_normal("\n");
3753 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3754 WDC_CAPABILITY_MODE;
3755 if (sc->sc_dma_ok) {
3756 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3757 if (rev >= 0x20) {
3758 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3759 if (rev >= 0xC4)
3760 sc->sc_wdcdev.UDMA_cap = 5;
3761 else if (rev >= 0xC2)
3762 sc->sc_wdcdev.UDMA_cap = 4;
3763 else
3764 sc->sc_wdcdev.UDMA_cap = 2;
3765 }
3766 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3767 sc->sc_wdcdev.irqack = pciide_irqack;
3768 }
3769
3770 sc->sc_wdcdev.PIO_cap = 4;
3771 sc->sc_wdcdev.DMA_cap = 2;
3772 sc->sc_wdcdev.set_modes = acer_setup_channel;
3773 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3774 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3775
3776 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3777 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3778 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3779
3780 /* Enable "microsoft register bits" R/W. */
3781 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3782 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3783 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3784 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3785 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3786 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3787 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3788 ~ACER_CHANSTATUSREGS_RO);
3789 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3790 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3791 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3792 /* Don't use cr, re-read the real register content instead */
3793 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3794 PCI_CLASS_REG));
3795
3796 /* From linux: enable "Cable Detection" */
3797 if (rev >= 0xC2) {
3798 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3799 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3800 | ACER_0x4B_CDETECT);
3801 }
3802
3803 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3804 cp = &sc->pciide_channels[channel];
3805 if (pciide_chansetup(sc, channel, interface) == 0)
3806 continue;
3807 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3808 aprint_normal("%s: %s channel ignored (disabled)\n",
3809 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3810 continue;
3811 }
3812 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3813 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3814 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3815 if (cp->hw_ok == 0)
3816 continue;
3817 if (pciide_chan_candisable(cp)) {
3818 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3819 pci_conf_write(sc->sc_pc, sc->sc_tag,
3820 PCI_CLASS_REG, cr);
3821 }
3822 pciide_map_compat_intr(pa, cp, channel, interface);
3823 acer_setup_channel(&cp->wdc_channel);
3824 }
3825 }
3826
3827 void
3828 acer_setup_channel(chp)
3829 struct channel_softc *chp;
3830 {
3831 struct ata_drive_datas *drvp;
3832 int drive;
3833 u_int32_t acer_fifo_udma;
3834 u_int32_t idedma_ctl;
3835 struct pciide_channel *cp = (struct pciide_channel*)chp;
3836 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3837
3838 idedma_ctl = 0;
3839 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3840 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3841 acer_fifo_udma), DEBUG_PROBE);
3842 /* setup DMA if needed */
3843 pciide_channel_dma_setup(cp);
3844
3845 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3846 	    DRIVE_UDMA) { /* check for an 80-pin cable */
3847 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3848 ACER_0x4A_80PIN(chp->channel)) {
3849 if (chp->ch_drive[0].UDMA_mode > 2)
3850 chp->ch_drive[0].UDMA_mode = 2;
3851 if (chp->ch_drive[1].UDMA_mode > 2)
3852 chp->ch_drive[1].UDMA_mode = 2;
3853 }
3854 }
3855
3856 for (drive = 0; drive < 2; drive++) {
3857 drvp = &chp->ch_drive[drive];
3858 /* If no drive, skip */
3859 if ((drvp->drive_flags & DRIVE) == 0)
3860 continue;
3861 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3862 "channel %d drive %d 0x%x\n", chp->channel, drive,
3863 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3864 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3865 /* clear FIFO/DMA mode */
3866 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3867 ACER_UDMA_EN(chp->channel, drive) |
3868 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3869
3870 /* add timing values, setup DMA if needed */
3871 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3872 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3873 acer_fifo_udma |=
3874 ACER_FTH_OPL(chp->channel, drive, 0x1);
3875 goto pio;
3876 }
3877
3878 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3879 if (drvp->drive_flags & DRIVE_UDMA) {
3880 /* use Ultra/DMA */
3881 drvp->drive_flags &= ~DRIVE_DMA;
3882 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3883 acer_fifo_udma |=
3884 ACER_UDMA_TIM(chp->channel, drive,
3885 acer_udma[drvp->UDMA_mode]);
3886 /* XXX disable if one drive < UDMA3 ? */
3887 if (drvp->UDMA_mode >= 3) {
3888 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3889 ACER_0x4B,
3890 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3891 ACER_0x4B) | ACER_0x4B_UDMA66);
3892 }
3893 } else {
3894 /*
3895 * use Multiword DMA
3896 * Timings will be used for both PIO and DMA,
3897 * so adjust DMA mode if needed
3898 */
3899 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3900 drvp->PIO_mode = drvp->DMA_mode + 2;
3901 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3902 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3903 drvp->PIO_mode - 2 : 0;
3904 if (drvp->DMA_mode == 0)
3905 drvp->PIO_mode = 0;
3906 }
3907 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3908 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3909 ACER_IDETIM(chp->channel, drive),
3910 acer_pio[drvp->PIO_mode]);
3911 }
3912 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3913 acer_fifo_udma), DEBUG_PROBE);
3914 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3915 if (idedma_ctl != 0) {
3916 /* Add software bits in status register */
3917 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3918 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3919 idedma_ctl);
3920 }
3921 pciide_print_modes(cp);
3922 }
3923
3924 int
3925 acer_pci_intr(arg)
3926 void *arg;
3927 {
3928 struct pciide_softc *sc = arg;
3929 struct pciide_channel *cp;
3930 struct channel_softc *wdc_cp;
3931 int i, rv, crv;
3932 u_int32_t chids;
3933
3934 rv = 0;
3935 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3936 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3937 cp = &sc->pciide_channels[i];
3938 wdc_cp = &cp->wdc_channel;
3939 /* If a compat channel skip. */
3940 if (cp->compat)
3941 continue;
3942 if (chids & ACER_CHIDS_INT(i)) {
3943 crv = wdcintr(wdc_cp);
3944 if (crv == 0)
3945 printf("%s:%d: bogus intr\n",
3946 sc->sc_wdcdev.sc_dev.dv_xname, i);
3947 else
3948 rv = 1;
3949 }
3950 }
3951 return rv;
3952 }
3953
3954 void
3955 hpt_chip_map(sc, pa)
3956 struct pciide_softc *sc;
3957 struct pci_attach_args *pa;
3958 {
3959 struct pciide_channel *cp;
3960 int i, compatchan, revision;
3961 pcireg_t interface;
3962 bus_size_t cmdsize, ctlsize;
3963
3964 if (pciide_chipen(sc, pa) == 0)
3965 return;
3966 revision = PCI_REVISION(pa->pa_class);
3967 aprint_normal(": Triones/Highpoint ");
3968 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3969 aprint_normal("HPT374 IDE Controller\n");
3970 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3971 aprint_normal("HPT372 IDE Controller\n");
3972 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3973 if (revision == HPT372_REV)
3974 aprint_normal("HPT372 IDE Controller\n");
3975 else if (revision == HPT370_REV)
3976 aprint_normal("HPT370 IDE Controller\n");
3977 else if (revision == HPT370A_REV)
3978 aprint_normal("HPT370A IDE Controller\n");
3979 else if (revision == HPT366_REV)
3980 aprint_normal("HPT366 IDE Controller\n");
3981 else
3982 aprint_normal("unknown HPT IDE controller rev %d\n",
3983 revision);
3984 } else
3985 aprint_normal("unknown HPT IDE controller 0x%x\n",
3986 sc->sc_pp->ide_product);
3987
3988 /*
3989 	 * When the chip is in native mode it identifies itself as a
3990 	 * 'misc mass storage' device. Fake the interface in this case.
3991 */
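	/*
	 * (If the subclass still reads as IDE we can trust the
	 * programming-interface byte; otherwise assume native-PCI,
	 * bus-master DMA operation, claiming the second channel as
	 * native only on the chips/revisions that have one.)
	 */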
3992 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3993 interface = PCI_INTERFACE(pa->pa_class);
3994 } else {
3995 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3996 PCIIDE_INTERFACE_PCI(0);
3997 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3998 (revision == HPT370_REV || revision == HPT370A_REV ||
3999 revision == HPT372_REV)) ||
4000 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4001 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4002 interface |= PCIIDE_INTERFACE_PCI(1);
4003 }
4004
4005 aprint_normal("%s: bus-master DMA support present",
4006 sc->sc_wdcdev.sc_dev.dv_xname);
4007 pciide_mapreg_dma(sc, pa);
4008 aprint_normal("\n");
4009 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4010 WDC_CAPABILITY_MODE;
4011 if (sc->sc_dma_ok) {
4012 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4013 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4014 sc->sc_wdcdev.irqack = pciide_irqack;
4015 }
4016 sc->sc_wdcdev.PIO_cap = 4;
4017 sc->sc_wdcdev.DMA_cap = 2;
4018
4019 sc->sc_wdcdev.set_modes = hpt_setup_channel;
4020 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4021 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4022 revision == HPT366_REV) {
4023 sc->sc_wdcdev.UDMA_cap = 4;
4024 /*
4025 * The 366 has 2 PCI IDE functions, one for primary and one
4026 * for secondary. So we need to call pciide_mapregs_compat()
4027 * with the real channel
4028 */
4029 if (pa->pa_function == 0) {
4030 compatchan = 0;
4031 } else if (pa->pa_function == 1) {
4032 compatchan = 1;
4033 } else {
4034 aprint_error("%s: unexpected PCI function %d\n",
4035 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
4036 return;
4037 }
4038 sc->sc_wdcdev.nchannels = 1;
4039 } else {
4040 sc->sc_wdcdev.nchannels = 2;
4041 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
4042 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4043 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4044 revision == HPT372_REV))
4045 sc->sc_wdcdev.UDMA_cap = 6;
4046 else
4047 sc->sc_wdcdev.UDMA_cap = 5;
4048 }
4049 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4050 cp = &sc->pciide_channels[i];
4051 if (sc->sc_wdcdev.nchannels > 1) {
4052 compatchan = i;
4053 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
4054 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
4055 aprint_normal(
4056 "%s: %s channel ignored (disabled)\n",
4057 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4058 continue;
4059 }
4060 }
4061 if (pciide_chansetup(sc, i, interface) == 0)
4062 continue;
4063 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4064 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4065 &ctlsize, hpt_pci_intr);
4066 } else {
4067 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
4068 &cmdsize, &ctlsize);
4069 }
4070 if (cp->hw_ok == 0)
4071 return;
4072 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4073 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4074 wdcattach(&cp->wdc_channel);
4075 hpt_setup_channel(&cp->wdc_channel);
4076 }
4077 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4078 (revision == HPT370_REV || revision == HPT370A_REV ||
4079 revision == HPT372_REV)) ||
4080 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4081 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
4082 /*
4083 		 * HPT370_REV and higher have a bit to disable interrupts;
4084 		 * make sure to clear it.
4085 */
4086 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
4087 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
4088 ~HPT_CSEL_IRQDIS);
4089 }
4090 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
4091 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4092 revision == HPT372_REV ) ||
4093 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4094 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4095 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
4096 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
4097 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
4098 return;
4099 }
4100
4101 void
4102 hpt_setup_channel(chp)
4103 struct channel_softc *chp;
4104 {
4105 struct ata_drive_datas *drvp;
4106 int drive;
4107 int cable;
4108 u_int32_t before, after;
4109 u_int32_t idedma_ctl;
4110 struct pciide_channel *cp = (struct pciide_channel*)chp;
4111 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4112 int revision =
4113 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
4114
4115 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
4116
4117 /* setup DMA if needed */
4118 pciide_channel_dma_setup(cp);
4119
4120 idedma_ctl = 0;
4121
4122 /* Per drive settings */
4123 for (drive = 0; drive < 2; drive++) {
4124 drvp = &chp->ch_drive[drive];
4125 /* If no drive, skip */
4126 if ((drvp->drive_flags & DRIVE) == 0)
4127 continue;
4128 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
4129 HPT_IDETIM(chp->channel, drive));
4130
4131 /* add timing values, setup DMA if needed */
4132 if (drvp->drive_flags & DRIVE_UDMA) {
4133 /* use Ultra/DMA */
4134 drvp->drive_flags &= ~DRIVE_DMA;
4135 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
4136 drvp->UDMA_mode > 2)
4137 drvp->UDMA_mode = 2;
4138 switch (sc->sc_pp->ide_product) {
4139 case PCI_PRODUCT_TRIONES_HPT374:
4140 after = hpt374_udma[drvp->UDMA_mode];
4141 break;
4142 case PCI_PRODUCT_TRIONES_HPT372:
4143 after = hpt372_udma[drvp->UDMA_mode];
4144 break;
4145 case PCI_PRODUCT_TRIONES_HPT366:
4146 default:
4147 switch(revision) {
4148 case HPT372_REV:
4149 after = hpt372_udma[drvp->UDMA_mode];
4150 break;
4151 case HPT370_REV:
4152 case HPT370A_REV:
4153 after = hpt370_udma[drvp->UDMA_mode];
4154 break;
4155 case HPT366_REV:
4156 default:
4157 after = hpt366_udma[drvp->UDMA_mode];
4158 break;
4159 }
4160 }
4161 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4162 } else if (drvp->drive_flags & DRIVE_DMA) {
4163 /*
4164 * use Multiword DMA.
4165 * Timings will be used for both PIO and DMA, so adjust
4166 * DMA mode if needed
4167 */
4168 if (drvp->PIO_mode >= 3 &&
4169 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
4170 drvp->DMA_mode = drvp->PIO_mode - 2;
4171 }
4172 switch (sc->sc_pp->ide_product) {
4173 case PCI_PRODUCT_TRIONES_HPT374:
4174 after = hpt374_dma[drvp->DMA_mode];
4175 break;
4176 case PCI_PRODUCT_TRIONES_HPT372:
4177 after = hpt372_dma[drvp->DMA_mode];
4178 break;
4179 case PCI_PRODUCT_TRIONES_HPT366:
4180 default:
4181 switch(revision) {
4182 case HPT372_REV:
4183 after = hpt372_dma[drvp->DMA_mode];
4184 break;
4185 case HPT370_REV:
4186 case HPT370A_REV:
4187 after = hpt370_dma[drvp->DMA_mode];
4188 break;
4189 case HPT366_REV:
4190 default:
4191 after = hpt366_dma[drvp->DMA_mode];
4192 break;
4193 }
4194 }
4195 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4196 } else {
4197 /* PIO only */
4198 switch (sc->sc_pp->ide_product) {
4199 case PCI_PRODUCT_TRIONES_HPT374:
4200 after = hpt374_pio[drvp->PIO_mode];
4201 break;
4202 case PCI_PRODUCT_TRIONES_HPT372:
4203 after = hpt372_pio[drvp->PIO_mode];
4204 break;
4205 case PCI_PRODUCT_TRIONES_HPT366:
4206 default:
4207 switch(revision) {
4208 case HPT372_REV:
4209 after = hpt372_pio[drvp->PIO_mode];
4210 break;
4211 case HPT370_REV:
4212 case HPT370A_REV:
4213 after = hpt370_pio[drvp->PIO_mode];
4214 break;
4215 case HPT366_REV:
4216 default:
4217 after = hpt366_pio[drvp->PIO_mode];
4218 break;
4219 }
4220 }
4221 }
4222 pci_conf_write(sc->sc_pc, sc->sc_tag,
4223 HPT_IDETIM(chp->channel, drive), after);
4224 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
4225 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
4226 after, before), DEBUG_PROBE);
4227 }
4228 if (idedma_ctl != 0) {
4229 /* Add software bits in status register */
4230 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4231 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4232 idedma_ctl);
4233 }
4234 pciide_print_modes(cp);
4235 }
4236
4237 int
4238 hpt_pci_intr(arg)
4239 void *arg;
4240 {
4241 struct pciide_softc *sc = arg;
4242 struct pciide_channel *cp;
4243 struct channel_softc *wdc_cp;
4244 int rv = 0;
4245 int dmastat, i, crv;
4246
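	/*
	 * Claim the interrupt only for channels whose bus-master status
	 * shows INTR set while the DMA engine is no longer active.
	 */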
4247 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4248 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4249 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4250 		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4251 IDEDMA_CTL_INTR)
4252 continue;
4253 cp = &sc->pciide_channels[i];
4254 wdc_cp = &cp->wdc_channel;
4255 crv = wdcintr(wdc_cp);
4256 if (crv == 0) {
4257 printf("%s:%d: bogus intr\n",
4258 sc->sc_wdcdev.sc_dev.dv_xname, i);
4259 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4260 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4261 } else
4262 rv = 1;
4263 }
4264 return rv;
4265 }
4266
4267
4268 /* Macros to test product */
4269 #define PDC_IS_262(sc) \
4270 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
4271 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4272 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4273 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4274 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4275 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4276 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4277 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4278 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4279 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4280 #define PDC_IS_265(sc) \
4281 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4282 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4283 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4284 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4285 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4286 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4287 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4288 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4289 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4290 #define PDC_IS_268(sc) \
4291 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4292 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4293 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4294 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4295 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4296 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4297 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4298 #define PDC_IS_276(sc) \
4299 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4300 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4301 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4302 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4303 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4304
4305 void
4306 pdc202xx_chip_map(sc, pa)
4307 struct pciide_softc *sc;
4308 struct pci_attach_args *pa;
4309 {
4310 struct pciide_channel *cp;
4311 int channel;
4312 pcireg_t interface, st, mode;
4313 bus_size_t cmdsize, ctlsize;
4314
4315 if (!PDC_IS_268(sc)) {
4316 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4317 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
4318 st), DEBUG_PROBE);
4319 }
4320 if (pciide_chipen(sc, pa) == 0)
4321 return;
4322
4323 /* turn off RAID mode */
4324 if (!PDC_IS_268(sc))
4325 st &= ~PDC2xx_STATE_IDERAID;
4326
4327 /*
4328 	 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
4329 	 * mode. We have to fake the interface.
4330 */
4331 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
4332 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
4333 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4334
4335 aprint_normal("%s: bus-master DMA support present",
4336 sc->sc_wdcdev.sc_dev.dv_xname);
4337 pciide_mapreg_dma(sc, pa);
4338 aprint_normal("\n");
4339 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4340 WDC_CAPABILITY_MODE;
4341 if (sc->sc_dma_ok) {
4342 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4343 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4344 sc->sc_wdcdev.irqack = pciide_irqack;
4345 }
4346 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
4347 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
4348 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
4349 sc->sc_wdcdev.PIO_cap = 4;
4350 sc->sc_wdcdev.DMA_cap = 2;
4351 if (PDC_IS_276(sc))
4352 sc->sc_wdcdev.UDMA_cap = 6;
4353 else if (PDC_IS_265(sc))
4354 sc->sc_wdcdev.UDMA_cap = 5;
4355 else if (PDC_IS_262(sc))
4356 sc->sc_wdcdev.UDMA_cap = 4;
4357 else
4358 sc->sc_wdcdev.UDMA_cap = 2;
4359 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
4360 pdc20268_setup_channel : pdc202xx_setup_channel;
4361 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4362 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4363
4364 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
4365 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
4366 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
4367 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
4368 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
4369 }
4370
4371 if (!PDC_IS_268(sc)) {
4372 /* setup failsafe defaults */
4373 mode = 0;
4374 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
4375 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
4376 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
4377 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
4378 for (channel = 0;
4379 channel < sc->sc_wdcdev.nchannels;
4380 channel++) {
4381 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4382 "drive 0 initial timings 0x%x, now 0x%x\n",
4383 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4384 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
4385 DEBUG_PROBE);
4386 pci_conf_write(sc->sc_pc, sc->sc_tag,
4387 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
4388 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4389 "drive 1 initial timings 0x%x, now 0x%x\n",
4390 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4391 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
4392 pci_conf_write(sc->sc_pc, sc->sc_tag,
4393 PDC2xx_TIM(channel, 1), mode);
4394 }
4395
4396 mode = PDC2xx_SCR_DMA;
4397 if (PDC_IS_265(sc)) {
4398 mode = PDC2xx_SCR_SET_GEN(mode, PDC265_SCR_GEN_LAT);
4399 } else if (PDC_IS_262(sc)) {
4400 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
4401 } else {
4402 /* the BIOS set it up this way */
4403 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
4404 }
4405 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
4406 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
4407 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
4408 "now 0x%x\n",
4409 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4410 PDC2xx_SCR),
4411 mode), DEBUG_PROBE);
4412 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4413 PDC2xx_SCR, mode);
4414
4415 /* controller initial state register is OK even without BIOS */
4416 /* Set DMA mode to IDE DMA compatibility */
4417 mode =
4418 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
4419 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
4420 DEBUG_PROBE);
4421 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
4422 mode | 0x1);
4423 mode =
4424 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
4425 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
4426 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
4427 mode | 0x1);
4428 }
4429
4430 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4431 cp = &sc->pciide_channels[channel];
4432 if (pciide_chansetup(sc, channel, interface) == 0)
4433 continue;
4434 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
4435 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
4436 aprint_normal("%s: %s channel ignored (disabled)\n",
4437 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4438 continue;
4439 }
4440 if (PDC_IS_265(sc))
4441 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4442 pdc20265_pci_intr);
4443 else
4444 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4445 pdc202xx_pci_intr);
4446 if (cp->hw_ok == 0)
4447 continue;
4448 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
4449 st &= ~(PDC_IS_262(sc) ?
4450 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
4451 pciide_map_compat_intr(pa, cp, channel, interface);
4452 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
4453 }
4454 if (!PDC_IS_268(sc)) {
4455 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4456 "0x%x\n", st), DEBUG_PROBE);
4457 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4458 }
4459 return;
4460 }
4461
4462 void
4463 pdc202xx_setup_channel(chp)
4464 struct channel_softc *chp;
4465 {
4466 struct ata_drive_datas *drvp;
4467 int drive;
4468 pcireg_t mode, st;
4469 u_int32_t idedma_ctl, scr, atapi;
4470 struct pciide_channel *cp = (struct pciide_channel*)chp;
4471 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4472 int channel = chp->channel;
4473
4474 /* setup DMA if needed */
4475 pciide_channel_dma_setup(cp);
4476
4477 idedma_ctl = 0;
4478 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4479 sc->sc_wdcdev.sc_dev.dv_xname,
4480 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4481 DEBUG_PROBE);
4482
4483 /* Per channel settings */
4484 if (PDC_IS_262(sc)) {
4485 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4486 PDC262_U66);
4487 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4488 /* Trim UDMA mode */
4489 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4490 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4491 chp->ch_drive[0].UDMA_mode <= 2) ||
4492 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4493 chp->ch_drive[1].UDMA_mode <= 2)) {
4494 if (chp->ch_drive[0].UDMA_mode > 2)
4495 chp->ch_drive[0].UDMA_mode = 2;
4496 if (chp->ch_drive[1].UDMA_mode > 2)
4497 chp->ch_drive[1].UDMA_mode = 2;
4498 }
4499 /* Set U66 if needed */
4500 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4501 chp->ch_drive[0].UDMA_mode > 2) ||
4502 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4503 chp->ch_drive[1].UDMA_mode > 2))
4504 scr |= PDC262_U66_EN(channel);
4505 else
4506 scr &= ~PDC262_U66_EN(channel);
4507 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4508 PDC262_U66, scr);
4509 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4510 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4511 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4512 PDC262_ATAPI(channel))), DEBUG_PROBE);
4513 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4514 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4515 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4516 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4517 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4518 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4519 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4520 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4521 atapi = 0;
4522 else
4523 atapi = PDC262_ATAPI_UDMA;
4524 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4525 PDC262_ATAPI(channel), atapi);
4526 }
4527 }
4528 for (drive = 0; drive < 2; drive++) {
4529 drvp = &chp->ch_drive[drive];
4530 /* If no drive, skip */
4531 if ((drvp->drive_flags & DRIVE) == 0)
4532 continue;
4533 mode = 0;
4534 if (drvp->drive_flags & DRIVE_UDMA) {
4535 /* use Ultra/DMA */
4536 drvp->drive_flags &= ~DRIVE_DMA;
4537 mode = PDC2xx_TIM_SET_MB(mode,
4538 pdc2xx_udma_mb[drvp->UDMA_mode]);
4539 mode = PDC2xx_TIM_SET_MC(mode,
4540 pdc2xx_udma_mc[drvp->UDMA_mode]);
4541 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4542 } else if (drvp->drive_flags & DRIVE_DMA) {
4543 mode = PDC2xx_TIM_SET_MB(mode,
4544 pdc2xx_dma_mb[drvp->DMA_mode]);
4545 mode = PDC2xx_TIM_SET_MC(mode,
4546 pdc2xx_dma_mc[drvp->DMA_mode]);
4547 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4548 } else {
4549 mode = PDC2xx_TIM_SET_MB(mode,
4550 pdc2xx_dma_mb[0]);
4551 mode = PDC2xx_TIM_SET_MC(mode,
4552 pdc2xx_dma_mc[0]);
4553 }
4554 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4555 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4556 if (drvp->drive_flags & DRIVE_ATA)
4557 mode |= PDC2xx_TIM_PRE;
4558 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4559 if (drvp->PIO_mode >= 3) {
4560 mode |= PDC2xx_TIM_IORDY;
4561 if (drive == 0)
4562 mode |= PDC2xx_TIM_IORDYp;
4563 }
4564 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4565 "timings 0x%x\n",
4566 sc->sc_wdcdev.sc_dev.dv_xname,
4567 chp->channel, drive, mode), DEBUG_PROBE);
4568 pci_conf_write(sc->sc_pc, sc->sc_tag,
4569 PDC2xx_TIM(chp->channel, drive), mode);
4570 }
4571 if (idedma_ctl != 0) {
4572 /* Add software bits in status register */
4573 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4574 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4575 idedma_ctl);
4576 }
4577 pciide_print_modes(cp);
4578 }
4579
4580 void
4581 pdc20268_setup_channel(chp)
4582 struct channel_softc *chp;
4583 {
4584 struct ata_drive_datas *drvp;
4585 int drive;
4586 u_int32_t idedma_ctl;
4587 struct pciide_channel *cp = (struct pciide_channel*)chp;
4588 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4589 int u100;
4590
4591 /* setup DMA if needed */
4592 pciide_channel_dma_setup(cp);
4593
4594 idedma_ctl = 0;
4595
4596 	/* I don't know what this is for; FreeBSD does it ... */
4597 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4598 IDEDMA_CMD + 0x1 + IDEDMA_SCH_OFFSET * chp->channel, 0x0b);
4599
4600 /*
4601 * cable type detect, from FreeBSD
4602 */
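	/*
	 * (The bit tested below apparently reports a 40-conductor cable;
	 * u100 == 0 then restricts the drives to UDMA2/33.)
	 */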
4603 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4604 IDEDMA_CMD + 0x3 + IDEDMA_SCH_OFFSET * chp->channel) & 0x04) ?
4605 0 : 1;
4606
4607 for (drive = 0; drive < 2; drive++) {
4608 drvp = &chp->ch_drive[drive];
4609 /* If no drive, skip */
4610 if ((drvp->drive_flags & DRIVE) == 0)
4611 continue;
4612 if (drvp->drive_flags & DRIVE_UDMA) {
4613 /* use Ultra/DMA */
4614 drvp->drive_flags &= ~DRIVE_DMA;
4615 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4616 if (drvp->UDMA_mode > 2 && u100 == 0)
4617 drvp->UDMA_mode = 2;
4618 } else if (drvp->drive_flags & DRIVE_DMA) {
4619 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4620 }
4621 }
4622 	/* nothing to do to set up modes; the controller snoops SET_FEATURES */
4623 if (idedma_ctl != 0) {
4624 /* Add software bits in status register */
4625 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4626 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4627 idedma_ctl);
4628 }
4629 pciide_print_modes(cp);
4630 }
4631
4632 int
4633 pdc202xx_pci_intr(arg)
4634 void *arg;
4635 {
4636 struct pciide_softc *sc = arg;
4637 struct pciide_channel *cp;
4638 struct channel_softc *wdc_cp;
4639 int i, rv, crv;
4640 u_int32_t scr;
4641
4642 rv = 0;
4643 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4644 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4645 cp = &sc->pciide_channels[i];
4646 wdc_cp = &cp->wdc_channel;
4647 /* If a compat channel skip. */
4648 if (cp->compat)
4649 continue;
4650 if (scr & PDC2xx_SCR_INT(i)) {
4651 crv = wdcintr(wdc_cp);
4652 if (crv == 0)
4653 printf("%s:%d: bogus intr (reg 0x%x)\n",
4654 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4655 else
4656 rv = 1;
4657 }
4658 }
4659 return rv;
4660 }
4661
4662 int
4663 pdc20265_pci_intr(arg)
4664 void *arg;
4665 {
4666 struct pciide_softc *sc = arg;
4667 struct pciide_channel *cp;
4668 struct channel_softc *wdc_cp;
4669 int i, rv, crv;
4670 u_int32_t dmastat;
4671
4672 rv = 0;
4673 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4674 cp = &sc->pciide_channels[i];
4675 wdc_cp = &cp->wdc_channel;
4676 /* If a compat channel skip. */
4677 if (cp->compat)
4678 continue;
4679 /*
4680 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
4681 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
4682 * So use it instead (requires 2 reg reads instead of 1,
4683 * but we can't do it another way).
4684 */
4685 dmastat = bus_space_read_1(sc->sc_dma_iot,
4686 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4687 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
4688 continue;
4689 crv = wdcintr(wdc_cp);
4690 if (crv == 0)
4691 printf("%s:%d: bogus intr\n",
4692 sc->sc_wdcdev.sc_dev.dv_xname, i);
4693 else
4694 rv = 1;
4695 }
4696 return rv;
4697 }
4698
4699 static void
4700 pdc20262_dma_start(v, channel, drive)
4701 void *v;
4702 int channel, drive;
4703 {
4704 struct pciide_softc *sc = v;
4705 struct pciide_dma_maps *dma_maps =
4706 &sc->pciide_channels[channel].dma_maps[drive];
4707 int atapi;
4708
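	/*
	 * For LBA48 transfers the controller wants the transfer size (in
	 * 16-bit words) and a read/write opcode programmed into the ATAPI
	 * register before the bus-master engine is started.
	 */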
4709 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4710 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4711 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4712 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4713 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4714 PDC262_ATAPI(channel), atapi);
4715 }
4716
4717 pciide_dma_start(v, channel, drive);
4718 }
4719
4720 int
4721 pdc20262_dma_finish(v, channel, drive, force)
4722 void *v;
4723 int channel, drive;
4724 int force;
4725 {
4726 struct pciide_softc *sc = v;
4727 struct pciide_dma_maps *dma_maps =
4728 &sc->pciide_channels[channel].dma_maps[drive];
4729 struct channel_softc *chp;
4730 int atapi, error;
4731
4732 error = pciide_dma_finish(v, channel, drive, force);
4733
4734 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4735 chp = sc->wdc_chanarray[channel];
4736 atapi = 0;
4737 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4738 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4739 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4740 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4741 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4742 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4743 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4744 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4745 atapi = PDC262_ATAPI_UDMA;
4746 }
4747 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4748 PDC262_ATAPI(channel), atapi);
4749 }
4750
4751 return error;
4752 }
4753
4754 void
4755 opti_chip_map(sc, pa)
4756 struct pciide_softc *sc;
4757 struct pci_attach_args *pa;
4758 {
4759 struct pciide_channel *cp;
4760 bus_size_t cmdsize, ctlsize;
4761 pcireg_t interface;
4762 u_int8_t init_ctrl;
4763 int channel;
4764
4765 if (pciide_chipen(sc, pa) == 0)
4766 return;
4767 aprint_normal("%s: bus-master DMA support present",
4768 sc->sc_wdcdev.sc_dev.dv_xname);
4769
4770 /*
4771 * XXXSCW:
4772 * There seem to be a couple of buggy revisions/implementations
4773 * of the OPTi pciide chipset. This kludge seems to fix one of
4774 * the reported problems (PR/11644) but still fails for the
4775 * other (PR/13151), although the latter may be due to other
4776 * issues too...
4777 */
4778 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4779 aprint_normal(" but disabled due to chip rev. <= 0x12");
4780 sc->sc_dma_ok = 0;
4781 } else
4782 pciide_mapreg_dma(sc, pa);
4783
4784 aprint_normal("\n");
4785
4786 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4787 WDC_CAPABILITY_MODE;
4788 sc->sc_wdcdev.PIO_cap = 4;
4789 if (sc->sc_dma_ok) {
4790 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4791 sc->sc_wdcdev.irqack = pciide_irqack;
4792 sc->sc_wdcdev.DMA_cap = 2;
4793 }
4794 sc->sc_wdcdev.set_modes = opti_setup_channel;
4795
4796 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4797 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4798
4799 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4800 OPTI_REG_INIT_CONTROL);
4801
4802 interface = PCI_INTERFACE(pa->pa_class);
4803
4804 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4805 cp = &sc->pciide_channels[channel];
4806 if (pciide_chansetup(sc, channel, interface) == 0)
4807 continue;
4808 if (channel == 1 &&
4809 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4810 aprint_normal("%s: %s channel ignored (disabled)\n",
4811 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4812 continue;
4813 }
4814 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4815 pciide_pci_intr);
4816 if (cp->hw_ok == 0)
4817 continue;
4818 pciide_map_compat_intr(pa, cp, channel, interface);
4819 if (cp->hw_ok == 0)
4820 continue;
4821 opti_setup_channel(&cp->wdc_channel);
4822 }
4823 }
4824
4825 void
4826 opti_setup_channel(chp)
4827 struct channel_softc *chp;
4828 {
4829 struct ata_drive_datas *drvp;
4830 struct pciide_channel *cp = (struct pciide_channel*)chp;
4831 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4832 int drive, spd;
4833 int mode[2];
4834 u_int8_t rv, mr;
4835
4836 /*
4837 * The `Delay' and `Address Setup Time' fields of the
4838 * Miscellaneous Register are always zero initially.
4839 */
4840 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4841 mr &= ~(OPTI_MISC_DELAY_MASK |
4842 OPTI_MISC_ADDR_SETUP_MASK |
4843 OPTI_MISC_INDEX_MASK);
4844
4845 /* Prime the control register before setting timing values */
4846 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4847
4848 	/* Determine the clock rate of the PCI bus the chip is attached to */
4849 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4850 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4851
4852 /* setup DMA if needed */
4853 pciide_channel_dma_setup(cp);
4854
4855 for (drive = 0; drive < 2; drive++) {
4856 drvp = &chp->ch_drive[drive];
4857 /* If no drive, skip */
4858 if ((drvp->drive_flags & DRIVE) == 0) {
4859 mode[drive] = -1;
4860 continue;
4861 }
4862
4863 if ((drvp->drive_flags & DRIVE_DMA)) {
4864 /*
4865 * Timings will be used for both PIO and DMA,
4866 * so adjust DMA mode if needed
4867 */
4868 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4869 drvp->PIO_mode = drvp->DMA_mode + 2;
4870 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4871 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4872 drvp->PIO_mode - 2 : 0;
4873 if (drvp->DMA_mode == 0)
4874 drvp->PIO_mode = 0;
4875
4876 mode[drive] = drvp->DMA_mode + 5;
4877 } else
4878 mode[drive] = drvp->PIO_mode;
4879
4880 if (drive && mode[0] >= 0 &&
4881 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4882 /*
4883 * Can't have two drives using different values
4884 * for `Address Setup Time'.
4885 * Slow down the faster drive to compensate.
4886 */
4887 int d = (opti_tim_as[spd][mode[0]] >
4888 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4889
4890 mode[d] = mode[1-d];
4891 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4892 chp->ch_drive[d].DMA_mode = 0;
4893 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4894 }
4895 }
4896
4897 for (drive = 0; drive < 2; drive++) {
4898 int m;
4899 if ((m = mode[drive]) < 0)
4900 continue;
4901
4902 /* Set the Address Setup Time and select appropriate index */
4903 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4904 rv |= OPTI_MISC_INDEX(drive);
4905 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4906
4907 /* Set the pulse width and recovery timing parameters */
4908 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4909 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4910 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4911 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4912
4913 /* Set the Enhanced Mode register appropriately */
4914 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4915 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4916 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4917 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4918 }
4919
4920 /* Finally, enable the timings */
4921 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4922
4923 pciide_print_modes(cp);
4924 }
4925
4926 #define ACARD_IS_850(sc) \
4927 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4928
4929 void
4930 acard_chip_map(sc, pa)
4931 struct pciide_softc *sc;
4932 struct pci_attach_args *pa;
4933 {
4934 struct pciide_channel *cp;
4935 int i;
4936 pcireg_t interface;
4937 bus_size_t cmdsize, ctlsize;
4938
4939 if (pciide_chipen(sc, pa) == 0)
4940 return;
4941
4942 /*
4943 	 * When the chip is in native mode it identifies itself as a
4944 	 * 'misc mass storage' device. Fake the interface in this case.
4945 */
4946 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4947 interface = PCI_INTERFACE(pa->pa_class);
4948 } else {
4949 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4950 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4951 }
4952
4953 aprint_normal("%s: bus-master DMA support present",
4954 sc->sc_wdcdev.sc_dev.dv_xname);
4955 pciide_mapreg_dma(sc, pa);
4956 aprint_normal("\n");
4957 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4958 WDC_CAPABILITY_MODE;
4959
4960 if (sc->sc_dma_ok) {
4961 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4962 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4963 sc->sc_wdcdev.irqack = pciide_irqack;
4964 }
4965 sc->sc_wdcdev.PIO_cap = 4;
4966 sc->sc_wdcdev.DMA_cap = 2;
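	/*
	 * (The ATP850U is an Ultra/33 part, so cap it at UDMA2; the newer
	 * ATP86x parts are driven at up to UDMA4 here.)
	 */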
4967 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4968
4969 sc->sc_wdcdev.set_modes = acard_setup_channel;
4970 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4971 sc->sc_wdcdev.nchannels = 2;
4972
4973 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4974 cp = &sc->pciide_channels[i];
4975 if (pciide_chansetup(sc, i, interface) == 0)
4976 continue;
4977 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4978 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4979 &ctlsize, pciide_pci_intr);
4980 } else {
4981 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4982 &cmdsize, &ctlsize);
4983 }
4984 if (cp->hw_ok == 0)
4985 return;
4986 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4987 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4988 wdcattach(&cp->wdc_channel);
4989 acard_setup_channel(&cp->wdc_channel);
4990 }
4991 if (!ACARD_IS_850(sc)) {
4992 u_int32_t reg;
4993 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4994 reg &= ~ATP860_CTRL_INT;
4995 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4996 }
4997 }
4998
4999 void
5000 acard_setup_channel(chp)
5001 struct channel_softc *chp;
5002 {
5003 struct ata_drive_datas *drvp;
5004 struct pciide_channel *cp = (struct pciide_channel*)chp;
5005 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5006 int channel = chp->channel;
5007 int drive;
5008 u_int32_t idetime, udma_mode;
5009 u_int32_t idedma_ctl;
5010
5011 /* setup DMA if needed */
5012 pciide_channel_dma_setup(cp);
5013
5014 if (ACARD_IS_850(sc)) {
5015 idetime = 0;
5016 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
5017 udma_mode &= ~ATP850_UDMA_MASK(channel);
5018 } else {
5019 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
5020 idetime &= ~ATP860_SETTIME_MASK(channel);
5021 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
5022 udma_mode &= ~ATP860_UDMA_MASK(channel);
5023
5024 		/* check for an 80-pin cable */
5025 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
5026 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
5027 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5028 & ATP860_CTRL_80P(chp->channel)) {
5029 if (chp->ch_drive[0].UDMA_mode > 2)
5030 chp->ch_drive[0].UDMA_mode = 2;
5031 if (chp->ch_drive[1].UDMA_mode > 2)
5032 chp->ch_drive[1].UDMA_mode = 2;
5033 }
5034 }
5035 }
5036
5037 idedma_ctl = 0;
5038
5039 /* Per drive settings */
5040 for (drive = 0; drive < 2; drive++) {
5041 drvp = &chp->ch_drive[drive];
5042 /* If no drive, skip */
5043 if ((drvp->drive_flags & DRIVE) == 0)
5044 continue;
5045 /* add timing values, setup DMA if needed */
5046 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5047 (drvp->drive_flags & DRIVE_UDMA)) {
5048 /* use Ultra/DMA */
5049 if (ACARD_IS_850(sc)) {
5050 idetime |= ATP850_SETTIME(drive,
5051 acard_act_udma[drvp->UDMA_mode],
5052 acard_rec_udma[drvp->UDMA_mode]);
5053 udma_mode |= ATP850_UDMA_MODE(channel, drive,
5054 acard_udma_conf[drvp->UDMA_mode]);
5055 } else {
5056 idetime |= ATP860_SETTIME(channel, drive,
5057 acard_act_udma[drvp->UDMA_mode],
5058 acard_rec_udma[drvp->UDMA_mode]);
5059 udma_mode |= ATP860_UDMA_MODE(channel, drive,
5060 acard_udma_conf[drvp->UDMA_mode]);
5061 }
5062 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5063 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5064 (drvp->drive_flags & DRIVE_DMA)) {
5065 /* use Multiword DMA */
5066 drvp->drive_flags &= ~DRIVE_UDMA;
5067 if (ACARD_IS_850(sc)) {
5068 idetime |= ATP850_SETTIME(drive,
5069 acard_act_dma[drvp->DMA_mode],
5070 acard_rec_dma[drvp->DMA_mode]);
5071 } else {
5072 idetime |= ATP860_SETTIME(channel, drive,
5073 acard_act_dma[drvp->DMA_mode],
5074 acard_rec_dma[drvp->DMA_mode]);
5075 }
5076 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5077 } else {
5078 /* PIO only */
5079 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5080 if (ACARD_IS_850(sc)) {
5081 idetime |= ATP850_SETTIME(drive,
5082 acard_act_pio[drvp->PIO_mode],
5083 acard_rec_pio[drvp->PIO_mode]);
5084 } else {
5085 idetime |= ATP860_SETTIME(channel, drive,
5086 acard_act_pio[drvp->PIO_mode],
5087 acard_rec_pio[drvp->PIO_mode]);
5088 }
5089 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
5090 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5091 | ATP8x0_CTRL_EN(channel));
5092 }
5093 }
5094
5095 if (idedma_ctl != 0) {
5096 /* Add software bits in status register */
5097 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5098 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5099 }
5100 pciide_print_modes(cp);
5101
5102 if (ACARD_IS_850(sc)) {
5103 pci_conf_write(sc->sc_pc, sc->sc_tag,
5104 ATP850_IDETIME(channel), idetime);
5105 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
5106 } else {
5107 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
5108 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
5109 }
5110 }
5111
5112 int
5113 acard_pci_intr(arg)
5114 void *arg;
5115 {
5116 struct pciide_softc *sc = arg;
5117 struct pciide_channel *cp;
5118 struct channel_softc *wdc_cp;
5119 int rv = 0;
5120 int dmastat, i, crv;
5121
5122 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5123 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5124 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5125 if ((dmastat & IDEDMA_CTL_INTR) == 0)
5126 continue;
5127 cp = &sc->pciide_channels[i];
5128 wdc_cp = &cp->wdc_channel;
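		/*
		 * If no command is pending on this channel, just consume the
		 * (presumably spurious or shared) interrupt and clear the
		 * status bits by writing them back.
		 */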
5129 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
5130 (void)wdcintr(wdc_cp);
5131 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5132 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5133 continue;
5134 }
5135 crv = wdcintr(wdc_cp);
5136 if (crv == 0)
5137 printf("%s:%d: bogus intr\n",
5138 sc->sc_wdcdev.sc_dev.dv_xname, i);
5139 else if (crv == 1)
5140 rv = 1;
5141 else if (rv == 0)
5142 rv = crv;
5143 }
5144 return rv;
5145 }
5146
5147 static int
5148 sl82c105_bugchk(struct pci_attach_args *pa)
5149 {
5150
5151 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
5152 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
5153 return (0);
5154
5155 if (PCI_REVISION(pa->pa_class) <= 0x05)
5156 return (1);
5157
5158 return (0);
5159 }
5160
5161 void
5162 sl82c105_chip_map(sc, pa)
5163 struct pciide_softc *sc;
5164 struct pci_attach_args *pa;
5165 {
5166 struct pciide_channel *cp;
5167 bus_size_t cmdsize, ctlsize;
5168 pcireg_t interface, idecr;
5169 int channel;
5170
5171 if (pciide_chipen(sc, pa) == 0)
5172 return;
5173
5174 aprint_normal("%s: bus-master DMA support present",
5175 sc->sc_wdcdev.sc_dev.dv_xname);
5176
5177 /*
5178 * Check to see if we're part of the Winbond 83c553 Southbridge.
5179 * If so, we need to disable DMA on rev. <= 5 of that chip.
5180 */
5181 if (pci_find_device(pa, sl82c105_bugchk)) {
5182 aprint_normal(" but disabled due to 83c553 rev. <= 0x05");
5183 sc->sc_dma_ok = 0;
5184 } else
5185 pciide_mapreg_dma(sc, pa);
5186 aprint_normal("\n");
5187
5188 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
5189 WDC_CAPABILITY_MODE;
5190 sc->sc_wdcdev.PIO_cap = 4;
5191 if (sc->sc_dma_ok) {
5192 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
5193 sc->sc_wdcdev.irqack = pciide_irqack;
5194 sc->sc_wdcdev.DMA_cap = 2;
5195 }
5196 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
5197
5198 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5199 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5200
        idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);

        interface = PCI_INTERFACE(pa->pa_class);

        for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
                cp = &sc->pciide_channels[channel];
                if (pciide_chansetup(sc, channel, interface) == 0)
                        continue;
                if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
                    (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
                        aprint_normal("%s: %s channel ignored (disabled)\n",
                            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
                        continue;
                }
                pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
                    pciide_pci_intr);
                if (cp->hw_ok == 0)
                        continue;
                pciide_map_compat_intr(pa, cp, channel, interface);
                if (cp->hw_ok == 0)
                        continue;
                sl82c105_setup_channel(&cp->wdc_channel);
        }
}

void
sl82c105_setup_channel(chp)
        struct channel_softc *chp;
{
        struct ata_drive_datas *drvp;
        struct pciide_channel *cp = (struct pciide_channel*)chp;
        struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
        int pxdx_reg, drive;
        pcireg_t pxdx;

        /* Set up DMA if needed. */
        pciide_channel_dma_setup(cp);

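        /*
         * Each drive has its own PxDx timing/control register; the base
         * depends on the channel and consecutive drives are 4 bytes apart.
         */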
        for (drive = 0; drive < 2; drive++) {
                pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
                    : SYMPH_P1D0CR) + (drive * 4);

                pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);

                pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
                pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);

                drvp = &chp->ch_drive[drive];
                /* If no drive, skip. */
                if ((drvp->drive_flags & DRIVE) == 0) {
                        pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
                        continue;
                }

                if (drvp->drive_flags & DRIVE_DMA) {
                        /*
                         * Timings will be used for both PIO and DMA,
                         * so adjust DMA mode if needed.
                         */
                        if (drvp->PIO_mode >= 3) {
                                if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
                                        drvp->DMA_mode = drvp->PIO_mode - 2;
                                if (drvp->DMA_mode < 1) {
                                        /*
                                         * Can't mix both PIO and DMA.
                                         * Disable DMA.
                                         */
                                        drvp->drive_flags &= ~DRIVE_DMA;
                                }
                        } else {
                                /*
                                 * Can't mix both PIO and DMA. Disable
                                 * DMA.
                                 */
                                drvp->drive_flags &= ~DRIVE_DMA;
                        }
                }

                if (drvp->drive_flags & DRIVE_DMA) {
                        /* Use multi-word DMA. */
                        pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
                            PxDx_CMD_ON_SHIFT;
                        pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
                } else {
                        pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
                            PxDx_CMD_ON_SHIFT;
                        pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
                }

                /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */

                /* ...and set the mode for this drive. */
                pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
        }

        pciide_print_modes(cp);
}

void
serverworks_chip_map(sc, pa)
        struct pciide_softc *sc;
        struct pci_attach_args *pa;
{
        struct pciide_channel *cp;
        pcireg_t interface = PCI_INTERFACE(pa->pa_class);
        pcitag_t pcib_tag;
        int channel;
        bus_size_t cmdsize, ctlsize;

        if (pciide_chipen(sc, pa) == 0)
                return;

        aprint_normal("%s: bus-master DMA support present",
            sc->sc_wdcdev.sc_dev.dv_xname);
        pciide_mapreg_dma(sc, pa);
        aprint_normal("\n");
        sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
            WDC_CAPABILITY_MODE;

        if (sc->sc_dma_ok) {
                sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
                sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
                sc->sc_wdcdev.irqack = pciide_irqack;
        }
        sc->sc_wdcdev.PIO_cap = 4;
        sc->sc_wdcdev.DMA_cap = 2;
        switch (sc->sc_pp->ide_product) {
        case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
                sc->sc_wdcdev.UDMA_cap = 2;
                break;
        case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
                if (PCI_REVISION(pa->pa_class) < 0x92)
                        sc->sc_wdcdev.UDMA_cap = 4;
                else
                        sc->sc_wdcdev.UDMA_cap = 5;
                break;
        case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
                sc->sc_wdcdev.UDMA_cap = 5;
                break;
        }

        sc->sc_wdcdev.set_modes = serverworks_setup_channel;
        sc->sc_wdcdev.channels = sc->wdc_chanarray;
        sc->sc_wdcdev.nchannels = 2;

        for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
                cp = &sc->pciide_channels[channel];
                if (pciide_chansetup(sc, channel, interface) == 0)
                        continue;
                pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
                    serverworks_pci_intr);
                if (cp->hw_ok == 0)
                        return;
                pciide_map_compat_intr(pa, cp, channel, interface);
                if (cp->hw_ok == 0)
                        return;
                serverworks_setup_channel(&cp->wdc_channel);
        }

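        /*
         * Tweak config register 0x64 of function 0 of this device
         * (presumably the bridge function of the southbridge): clear
         * bit 13 and set bit 14.  XXX the exact meaning of these bits
         * is not documented here.
         */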
        pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
        pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
            (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
}

void
serverworks_setup_channel(chp)
        struct channel_softc *chp;
{
        struct ata_drive_datas *drvp;
        struct pciide_channel *cp = (struct pciide_channel*)chp;
        struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
        int channel = chp->channel;
        int drive, unit;
        u_int32_t pio_time, dma_time, pio_mode, udma_mode;
        u_int32_t idedma_ctl;
        static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
        static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};

        /* setup DMA if needed */
        pciide_channel_dma_setup(cp);

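        /*
         * Register 0x40 holds the PIO timings and 0x44 the MW DMA timings
         * (one 16-bit field per channel); 0x48 holds the PIO modes and
         * 0x54 the UDMA modes plus the per-drive UDMA enable bits.  This
         * channel's fields are cleared here and filled in below for each
         * drive present.
         */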
        pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
        dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
        pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
        udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);

        pio_time &= ~(0xffff << (16 * channel));
        dma_time &= ~(0xffff << (16 * channel));
        pio_mode &= ~(0xff << (8 * channel + 16));
        udma_mode &= ~(0xff << (8 * channel + 16));
        udma_mode &= ~(3 << (2 * channel));

        idedma_ctl = 0;

        /* Per drive settings */
        for (drive = 0; drive < 2; drive++) {
                drvp = &chp->ch_drive[drive];
                /* If no drive, skip */
                if ((drvp->drive_flags & DRIVE) == 0)
                        continue;
                unit = drive + 2 * channel;
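                /*
                 * Each drive's timing byte sits at the opposite end of
                 * its channel's 16-bit field, hence the (unit ^ 1) shift
                 * below.
                 */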
                /* add timing values, setup DMA if needed */
                pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
                pio_mode |= drvp->PIO_mode << (4 * unit + 16);
                if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
                    (drvp->drive_flags & DRIVE_UDMA)) {
                        /* use Ultra/DMA, check for 80-pin cable */
                        if (drvp->UDMA_mode > 2 &&
                            (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
                            PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
                                drvp->UDMA_mode = 2;
                        dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
                        udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
                        udma_mode |= 1 << unit;
                        idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
                } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
                    (drvp->drive_flags & DRIVE_DMA)) {
                        /* use Multiword DMA */
                        drvp->drive_flags &= ~DRIVE_UDMA;
                        dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
                        idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
                } else {
                        /* PIO only */
                        drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
                }
        }

        pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
        pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
        if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
                pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
        pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);

        if (idedma_ctl != 0) {
                /* Add software bits in status register */
                bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
                    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
        }
        pciide_print_modes(cp);
}

int
serverworks_pci_intr(arg)
        void *arg;
{
        struct pciide_softc *sc = arg;
        struct pciide_channel *cp;
        struct channel_softc *wdc_cp;
        int rv = 0;
        int dmastat, i, crv;

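        /*
         * Only service channels whose bus-master status shows an
         * interrupt pending (IDEDMA_CTL_INTR set) with no transfer
         * still active (IDEDMA_CTL_ACT clear).
         */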
        for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
                dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
                    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
                if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
                    IDEDMA_CTL_INTR)
                        continue;
                cp = &sc->pciide_channels[i];
                wdc_cp = &cp->wdc_channel;
                crv = wdcintr(wdc_cp);
                if (crv == 0) {
                        printf("%s:%d: bogus intr\n",
                            sc->sc_wdcdev.sc_dev.dv_xname, i);
                        bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
                            IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
                } else
                        rv = 1;
        }
        return rv;
}

void
artisea_chip_map(sc, pa)
        struct pciide_softc *sc;
        struct pci_attach_args *pa;
{
        struct pciide_channel *cp;
        bus_size_t cmdsize, ctlsize;
        pcireg_t interface;
        int channel;

        if (pciide_chipen(sc, pa) == 0)
                return;

        aprint_normal("%s: bus-master DMA support present",
            sc->sc_wdcdev.sc_dev.dv_xname);
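        /*
         * Unless the PCIIDE_I31244_ENABLEDMA option is given, DMA is
         * disabled on revision 0 parts (presumably because it is not
         * reliable there).
         */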
#ifndef PCIIDE_I31244_ENABLEDMA
        if (PCI_REVISION(pa->pa_class) == 0) {
                aprint_normal(" but disabled due to rev. 0");
                sc->sc_dma_ok = 0;
        } else
#endif
                pciide_mapreg_dma(sc, pa);
        aprint_normal("\n");

        /*
         * XXX Configure LEDs to show activity.
         */

        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
            WDC_CAPABILITY_MODE;
        sc->sc_wdcdev.PIO_cap = 4;
        if (sc->sc_dma_ok) {
                sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
                sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
                sc->sc_wdcdev.irqack = pciide_irqack;
                sc->sc_wdcdev.DMA_cap = 2;
                sc->sc_wdcdev.UDMA_cap = 6;
        }
        sc->sc_wdcdev.set_modes = sata_setup_channel;

        sc->sc_wdcdev.channels = sc->wdc_chanarray;
        sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

        interface = PCI_INTERFACE(pa->pa_class);

        for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
                cp = &sc->pciide_channels[channel];
                if (pciide_chansetup(sc, channel, interface) == 0)
                        continue;
                pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
                    pciide_pci_intr);
                if (cp->hw_ok == 0)
                        continue;
                pciide_map_compat_intr(pa, cp, channel, interface);
                sata_setup_channel(&cp->wdc_channel);
        }
}
