1	/* $NetBSD: pciide.c,v 1.197 2003/09/15 20:15:44 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.197 2003/09/15 20:15:44 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
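/*
 * For example, to get debug output at probe time, set the mask to the
 * wanted DEBUG_* bits, either by changing the initializer above or from
 * the kernel debugger:
 *
 *	wdcdebug_pciide_mask = DEBUG_PROBE | DEBUG_DMA;
 *
 * WDCDEBUG_PRINT() then expands to a printf guarded by that mask.
 */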
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/pciide_i31244_reg.h>
123 #include <dev/pci/pciide_sii3112_reg.h>
124 #include <dev/pci/cy82c693var.h>
125
126 #include "opt_pciide.h"
127
128 static const char dmaerrfmt[] =
129 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
130
131 /* inlines for reading/writing 8-bit PCI registers */
132 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
133 int));
134 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
135 int, u_int8_t));
136
137 static __inline u_int8_t
138 pciide_pci_read(pc, pa, reg)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 {
143
144 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
145 ((reg & 0x03) * 8) & 0xff);
146 }
147
148 static __inline void
149 pciide_pci_write(pc, pa, reg, val)
150 pci_chipset_tag_t pc;
151 pcitag_t pa;
152 int reg;
153 u_int8_t val;
154 {
155 pcireg_t pcival;
156
157 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
158 pcival &= ~(0xff << ((reg & 0x03) * 8));
159 pcival |= (val << ((reg & 0x03) * 8));
160 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
161 }
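/*
 * pci_conf_read()/pci_conf_write() operate on aligned 32-bit words, so the
 * helpers above shift the requested byte lane in and out of the containing
 * word.  For example (the register offset here is purely illustrative):
 *
 *	reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x41);
 *	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x41, reg | 0x80);
 */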
162
163 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164
165 void sata_setup_channel __P((struct channel_softc*));
166
167 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void piix_setup_channel __P((struct channel_softc*));
169 void piix3_4_setup_channel __P((struct channel_softc*));
170 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
171 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
172 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
173
174 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void amd7x6_setup_channel __P((struct channel_softc*));
176
177 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void apollo_sata_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void apollo_setup_channel __P((struct channel_softc*));
180
181 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void cmd0643_9_setup_channel __P((struct channel_softc*));
184 void cmd_channel_map __P((struct pci_attach_args *,
185 struct pciide_softc *, int));
186 int cmd_pci_intr __P((void *));
187 void cmd646_9_irqack __P((struct channel_softc *));
188 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void cmd680_setup_channel __P((struct channel_softc*));
190 void cmd680_channel_map __P((struct pci_attach_args *,
191 struct pciide_softc *, int));
192
193 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void cmd3112_setup_channel __P((struct channel_softc*));
195
196 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void cy693_setup_channel __P((struct channel_softc*));
198
199 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
200 void sis_setup_channel __P((struct channel_softc*));
201 void sis96x_setup_channel __P((struct channel_softc*));
202 static int sis_hostbr_match __P(( struct pci_attach_args *));
203 static int sis_south_match __P(( struct pci_attach_args *));
204
205 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void acer_setup_channel __P((struct channel_softc*));
207 int acer_pci_intr __P((void *));
208
209 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void pdc202xx_setup_channel __P((struct channel_softc*));
211 void pdc20268_setup_channel __P((struct channel_softc*));
212 int pdc202xx_pci_intr __P((void *));
213 int pdc20265_pci_intr __P((void *));
214 static void pdc20262_dma_start __P((void*, int, int));
215 static int pdc20262_dma_finish __P((void*, int, int, int));
216
217 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void opti_setup_channel __P((struct channel_softc*));
219
220 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
221 void hpt_setup_channel __P((struct channel_softc*));
222 int hpt_pci_intr __P((void *));
223
224 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
225 void acard_setup_channel __P((struct channel_softc*));
226 int acard_pci_intr __P((void *));
227
228 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
229 void serverworks_setup_channel __P((struct channel_softc*));
230 int serverworks_pci_intr __P((void *));
231
232 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
233 void sl82c105_setup_channel __P((struct channel_softc*));
234
235 void pciide_channel_dma_setup __P((struct pciide_channel *));
236 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
237 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
238 void pciide_dma_start __P((void*, int, int));
239 int pciide_dma_finish __P((void*, int, int, int));
240 void pciide_irqack __P((struct channel_softc *));
241 void pciide_print_modes __P((struct pciide_channel *));
242
243 void artisea_chip_map __P((struct pciide_softc*, struct pci_attach_args *));
244
245 struct pciide_product_desc {
246 u_int32_t ide_product;
247 int ide_flags;
248 const char *ide_name;
249 /* map and setup chip, probe drives */
250 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
251 };
252
253 /* Flags for ide_flags */
254 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
255 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
256
257	/* Default product description for devices not known to this driver */
258 const struct pciide_product_desc default_product_desc = {
259 0,
260 0,
261 "Generic PCI IDE controller",
262 default_chip_map,
263 };
264
265 const struct pciide_product_desc pciide_intel_products[] = {
266 { PCI_PRODUCT_INTEL_82092AA,
267 0,
268 "Intel 82092AA IDE controller",
269 default_chip_map,
270 },
271 { PCI_PRODUCT_INTEL_82371FB_IDE,
272 0,
273 "Intel 82371FB IDE controller (PIIX)",
274 piix_chip_map,
275 },
276 { PCI_PRODUCT_INTEL_82371SB_IDE,
277 0,
278 "Intel 82371SB IDE Interface (PIIX3)",
279 piix_chip_map,
280 },
281 { PCI_PRODUCT_INTEL_82371AB_IDE,
282 0,
283 "Intel 82371AB IDE controller (PIIX4)",
284 piix_chip_map,
285 },
286 { PCI_PRODUCT_INTEL_82440MX_IDE,
287 0,
288 "Intel 82440MX IDE controller",
289 piix_chip_map
290 },
291 { PCI_PRODUCT_INTEL_82801AA_IDE,
292 0,
293 "Intel 82801AA IDE Controller (ICH)",
294 piix_chip_map,
295 },
296 { PCI_PRODUCT_INTEL_82801AB_IDE,
297 0,
298 "Intel 82801AB IDE Controller (ICH0)",
299 piix_chip_map,
300 },
301 { PCI_PRODUCT_INTEL_82801BA_IDE,
302 0,
303 "Intel 82801BA IDE Controller (ICH2)",
304 piix_chip_map,
305 },
306 { PCI_PRODUCT_INTEL_82801BAM_IDE,
307 0,
308 "Intel 82801BAM IDE Controller (ICH2-M)",
309 piix_chip_map,
310 },
311 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
312 0,
313 "Intel 82801CA IDE Controller (ICH3)",
314 piix_chip_map,
315 },
316 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
317 0,
318 "Intel 82801CA IDE Controller (ICH3)",
319 piix_chip_map,
320 },
321 { PCI_PRODUCT_INTEL_82801DB_IDE,
322 0,
323 "Intel 82801DB IDE Controller (ICH4)",
324 piix_chip_map,
325 },
326 { PCI_PRODUCT_INTEL_82801DBM_IDE,
327 0,
328 "Intel 82801DBM IDE Controller (ICH4-M)",
329 piix_chip_map,
330 },
331 { PCI_PRODUCT_INTEL_82801EB_IDE,
332 0,
333 "Intel 82801EB IDE Controller (ICH5)",
334 piix_chip_map,
335 },
336 { PCI_PRODUCT_INTEL_31244,
337 0,
338 "Intel 31244 Serial ATA Controller",
339 artisea_chip_map,
340 },
341 { 0,
342 0,
343 NULL,
344 NULL
345 }
346 };
347
348 const struct pciide_product_desc pciide_amd_products[] = {
349 { PCI_PRODUCT_AMD_PBC756_IDE,
350 0,
351 "Advanced Micro Devices AMD756 IDE Controller",
352 amd7x6_chip_map
353 },
354 { PCI_PRODUCT_AMD_PBC766_IDE,
355 0,
356 "Advanced Micro Devices AMD766 IDE Controller",
357 amd7x6_chip_map
358 },
359 { PCI_PRODUCT_AMD_PBC768_IDE,
360 0,
361 "Advanced Micro Devices AMD768 IDE Controller",
362 amd7x6_chip_map
363 },
364 { PCI_PRODUCT_AMD_PBC8111_IDE,
365 0,
366 "Advanced Micro Devices AMD8111 IDE Controller",
367 amd7x6_chip_map
368 },
369 { 0,
370 0,
371 NULL,
372 NULL
373 }
374 };
375
376 const struct pciide_product_desc pciide_nvidia_products[] = {
377 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
378 0,
379 "NVIDIA nForce IDE Controller",
380 amd7x6_chip_map
381 },
382 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
383 0,
384 "NVIDIA nForce2 IDE Controller",
385 amd7x6_chip_map
386 },
387 { 0,
388 0,
389 NULL,
390 NULL
391 }
392 };
393
394 const struct pciide_product_desc pciide_cmd_products[] = {
395 { PCI_PRODUCT_CMDTECH_640,
396 0,
397 "CMD Technology PCI0640",
398 cmd_chip_map
399 },
400 { PCI_PRODUCT_CMDTECH_643,
401 0,
402 "CMD Technology PCI0643",
403 cmd0643_9_chip_map,
404 },
405 { PCI_PRODUCT_CMDTECH_646,
406 0,
407 "CMD Technology PCI0646",
408 cmd0643_9_chip_map,
409 },
410 { PCI_PRODUCT_CMDTECH_648,
411 IDE_PCI_CLASS_OVERRIDE,
412 "CMD Technology PCI0648",
413 cmd0643_9_chip_map,
414 },
415 { PCI_PRODUCT_CMDTECH_649,
416 IDE_PCI_CLASS_OVERRIDE,
417 "CMD Technology PCI0649",
418 cmd0643_9_chip_map,
419 },
420 { PCI_PRODUCT_CMDTECH_680,
421 IDE_PCI_CLASS_OVERRIDE,
422 "Silicon Image 0680",
423 cmd680_chip_map,
424 },
425 { PCI_PRODUCT_CMDTECH_3112,
426 IDE_PCI_CLASS_OVERRIDE,
427 "Silicon Image SATALink 3112",
428 cmd3112_chip_map,
429 },
430 { 0,
431 0,
432 NULL,
433 NULL
434 }
435 };
436
437 const struct pciide_product_desc pciide_via_products[] = {
438 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
439 0,
440 NULL,
441 apollo_chip_map,
442 },
443 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
444 0,
445 NULL,
446 apollo_chip_map,
447 },
448 { PCI_PRODUCT_VIATECH_VT8237_SATA,
449 IDE_PCI_CLASS_OVERRIDE,
450 "VIA Technologies VT8237 SATA Controller",
451 apollo_sata_chip_map,
452 },
453 { 0,
454 0,
455 NULL,
456 NULL
457 }
458 };
459
460 const struct pciide_product_desc pciide_cypress_products[] = {
461 { PCI_PRODUCT_CONTAQ_82C693,
462 IDE_16BIT_IOSPACE,
463 "Cypress 82C693 IDE Controller",
464 cy693_chip_map,
465 },
466 { 0,
467 0,
468 NULL,
469 NULL
470 }
471 };
472
473 const struct pciide_product_desc pciide_sis_products[] = {
474 { PCI_PRODUCT_SIS_5597_IDE,
475 0,
476 NULL,
477 sis_chip_map,
478 },
479 { 0,
480 0,
481 NULL,
482 NULL
483 }
484 };
485
486 const struct pciide_product_desc pciide_acer_products[] = {
487 { PCI_PRODUCT_ALI_M5229,
488 0,
489 "Acer Labs M5229 UDMA IDE Controller",
490 acer_chip_map,
491 },
492 { 0,
493 0,
494 NULL,
495 NULL
496 }
497 };
498
499 const struct pciide_product_desc pciide_promise_products[] = {
500 { PCI_PRODUCT_PROMISE_ULTRA33,
501 IDE_PCI_CLASS_OVERRIDE,
502 "Promise Ultra33/ATA Bus Master IDE Accelerator",
503 pdc202xx_chip_map,
504 },
505 { PCI_PRODUCT_PROMISE_ULTRA66,
506 IDE_PCI_CLASS_OVERRIDE,
507 "Promise Ultra66/ATA Bus Master IDE Accelerator",
508 pdc202xx_chip_map,
509 },
510 { PCI_PRODUCT_PROMISE_ULTRA100,
511 IDE_PCI_CLASS_OVERRIDE,
512 "Promise Ultra100/ATA Bus Master IDE Accelerator",
513 pdc202xx_chip_map,
514 },
515 { PCI_PRODUCT_PROMISE_ULTRA100X,
516 IDE_PCI_CLASS_OVERRIDE,
517 "Promise Ultra100/ATA Bus Master IDE Accelerator",
518 pdc202xx_chip_map,
519 },
520 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
521 IDE_PCI_CLASS_OVERRIDE,
522 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
523 pdc202xx_chip_map,
524 },
525 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
526 IDE_PCI_CLASS_OVERRIDE,
527 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
528 pdc202xx_chip_map,
529 },
530 { PCI_PRODUCT_PROMISE_ULTRA133,
531 IDE_PCI_CLASS_OVERRIDE,
532 "Promise Ultra133/ATA Bus Master IDE Accelerator",
533 pdc202xx_chip_map,
534 },
535 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
536 IDE_PCI_CLASS_OVERRIDE,
537 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
538 pdc202xx_chip_map,
539 },
540 { PCI_PRODUCT_PROMISE_MBULTRA133,
541 IDE_PCI_CLASS_OVERRIDE,
542 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
543 pdc202xx_chip_map,
544 },
545 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
546 IDE_PCI_CLASS_OVERRIDE,
547 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
548 pdc202xx_chip_map,
549 },
550 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
551 IDE_PCI_CLASS_OVERRIDE,
552 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
553 pdc202xx_chip_map,
554 },
555 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
556 IDE_PCI_CLASS_OVERRIDE,
557 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
558 pdc202xx_chip_map,
559 },
560 { 0,
561 0,
562 NULL,
563 NULL
564 }
565 };
566
567 const struct pciide_product_desc pciide_opti_products[] = {
568 { PCI_PRODUCT_OPTI_82C621,
569 0,
570 "OPTi 82c621 PCI IDE controller",
571 opti_chip_map,
572 },
573 { PCI_PRODUCT_OPTI_82C568,
574 0,
575 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
576 opti_chip_map,
577 },
578 { PCI_PRODUCT_OPTI_82D568,
579 0,
580 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
581 opti_chip_map,
582 },
583 { 0,
584 0,
585 NULL,
586 NULL
587 }
588 };
589
590 const struct pciide_product_desc pciide_triones_products[] = {
591 { PCI_PRODUCT_TRIONES_HPT366,
592 IDE_PCI_CLASS_OVERRIDE,
593 NULL,
594 hpt_chip_map,
595 },
596 { PCI_PRODUCT_TRIONES_HPT372,
597 IDE_PCI_CLASS_OVERRIDE,
598 NULL,
599 hpt_chip_map
600 },
601 { PCI_PRODUCT_TRIONES_HPT374,
602 IDE_PCI_CLASS_OVERRIDE,
603 NULL,
604 hpt_chip_map
605 },
606 { 0,
607 0,
608 NULL,
609 NULL
610 }
611 };
612
613 const struct pciide_product_desc pciide_acard_products[] = {
614 { PCI_PRODUCT_ACARD_ATP850U,
615 IDE_PCI_CLASS_OVERRIDE,
616 "Acard ATP850U Ultra33 IDE Controller",
617 acard_chip_map,
618 },
619 { PCI_PRODUCT_ACARD_ATP860,
620 IDE_PCI_CLASS_OVERRIDE,
621 "Acard ATP860 Ultra66 IDE Controller",
622 acard_chip_map,
623 },
624 { PCI_PRODUCT_ACARD_ATP860A,
625 IDE_PCI_CLASS_OVERRIDE,
626 "Acard ATP860-A Ultra66 IDE Controller",
627 acard_chip_map,
628 },
629 { 0,
630 0,
631 NULL,
632 NULL
633 }
634 };
635
636 const struct pciide_product_desc pciide_serverworks_products[] = {
637 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
638 0,
639 "ServerWorks OSB4 IDE Controller",
640 serverworks_chip_map,
641 },
642 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
643 0,
644 "ServerWorks CSB5 IDE Controller",
645 serverworks_chip_map,
646 },
647 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
648 0,
649 "ServerWorks CSB6 RAID/IDE Controller",
650 serverworks_chip_map,
651 },
652 { 0,
653 0,
654 NULL,
655 }
656 };
657
658 const struct pciide_product_desc pciide_symphony_products[] = {
659 { PCI_PRODUCT_SYMPHONY_82C105,
660 0,
661 "Symphony Labs 82C105 IDE controller",
662 sl82c105_chip_map,
663 },
664 { 0,
665 0,
666 NULL,
667 }
668 };
669
670 const struct pciide_product_desc pciide_winbond_products[] = {
671 { PCI_PRODUCT_WINBOND_W83C553F_1,
672 0,
673 "Winbond W83C553F IDE controller",
674 sl82c105_chip_map,
675 },
676 { 0,
677 0,
678 NULL,
679 }
680 };
681
682 struct pciide_vendor_desc {
683 u_int32_t ide_vendor;
684 const struct pciide_product_desc *ide_products;
685 };
686
687 const struct pciide_vendor_desc pciide_vendors[] = {
688 { PCI_VENDOR_INTEL, pciide_intel_products },
689 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
690 { PCI_VENDOR_VIATECH, pciide_via_products },
691 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
692 { PCI_VENDOR_SIS, pciide_sis_products },
693 { PCI_VENDOR_ALI, pciide_acer_products },
694 { PCI_VENDOR_PROMISE, pciide_promise_products },
695 { PCI_VENDOR_AMD, pciide_amd_products },
696 { PCI_VENDOR_OPTI, pciide_opti_products },
697 { PCI_VENDOR_TRIONES, pciide_triones_products },
698 { PCI_VENDOR_ACARD, pciide_acard_products },
699 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
700 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
701 { PCI_VENDOR_WINBOND, pciide_winbond_products },
702 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
703 { 0, NULL }
704 };
705
706 /* options passed via the 'flags' config keyword */
707 #define PCIIDE_OPTIONS_DMA 0x01
708 #define PCIIDE_OPTIONS_NODMA 0x02
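/*
 * For example, a kernel config entry such as
 *
 *	pciide* at pci? dev ? function ? flags 0x0002
 *
 * sets PCIIDE_OPTIONS_NODMA and forces PIO-only operation, while flags 0x0001
 * (PCIIDE_OPTIONS_DMA) lets an otherwise unsupported generic controller try
 * bus-master DMA.
 */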
709
710 int pciide_match __P((struct device *, struct cfdata *, void *));
711 void pciide_attach __P((struct device *, struct device *, void *));
712
713 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
714 pciide_match, pciide_attach, NULL, NULL);
715
716 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
717 int pciide_mapregs_compat __P(( struct pci_attach_args *,
718 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
719 int pciide_mapregs_native __P((struct pci_attach_args *,
720 struct pciide_channel *, bus_size_t *, bus_size_t *,
721 int (*pci_intr) __P((void *))));
722 void pciide_mapreg_dma __P((struct pciide_softc *,
723 struct pci_attach_args *));
724 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
725 void pciide_mapchan __P((struct pci_attach_args *,
726 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
727 int (*pci_intr) __P((void *))));
728 int pciide_chan_candisable __P((struct pciide_channel *));
729 void pciide_map_compat_intr __P(( struct pci_attach_args *,
730 struct pciide_channel *, int, int));
731 int pciide_compat_intr __P((void *));
732 int pciide_pci_intr __P((void *));
733 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
734
735 const struct pciide_product_desc *
736 pciide_lookup_product(id)
737 u_int32_t id;
738 {
739 const struct pciide_product_desc *pp;
740 const struct pciide_vendor_desc *vp;
741
742 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
743 if (PCI_VENDOR(id) == vp->ide_vendor)
744 break;
745
746 if ((pp = vp->ide_products) == NULL)
747 return NULL;
748
749 for (; pp->chip_map != NULL; pp++)
750 if (PCI_PRODUCT(id) == pp->ide_product)
751 break;
752
753 if (pp->chip_map == NULL)
754 return NULL;
755 return pp;
756 }
757
758 int
759 pciide_match(parent, match, aux)
760 struct device *parent;
761 struct cfdata *match;
762 void *aux;
763 {
764 struct pci_attach_args *pa = aux;
765 const struct pciide_product_desc *pp;
766
767 /*
768	 * Check the class register to see that it's a PCI IDE controller.
769 * If it is, we assume that we can deal with it; it _should_
770 * work in a standardized way...
771 */
772 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
773 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
774 return (1);
775 }
776
777 /*
778	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
779	 * controllers. Let's see if we can deal with them anyway.
780 */
781 pp = pciide_lookup_product(pa->pa_id);
782 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
783 return (1);
784 }
785
786 return (0);
787 }
788
789 void
790 pciide_attach(parent, self, aux)
791 struct device *parent, *self;
792 void *aux;
793 {
794 struct pci_attach_args *pa = aux;
795 pci_chipset_tag_t pc = pa->pa_pc;
796 pcitag_t tag = pa->pa_tag;
797 struct pciide_softc *sc = (struct pciide_softc *)self;
798 pcireg_t csr;
799 char devinfo[256];
800 const char *displaydev;
801
802 aprint_naive(": disk controller\n");
803
804 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
805 sc->sc_pp = pciide_lookup_product(pa->pa_id);
806 if (sc->sc_pp == NULL) {
807 sc->sc_pp = &default_product_desc;
808 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
809 displaydev = devinfo;
810 } else
811 displaydev = sc->sc_pp->ide_name;
812
813 /* if displaydev == NULL, printf is done in chip-specific map */
814 if (displaydev)
815 aprint_normal(": %s (rev. 0x%02x)\n", displaydev,
816 PCI_REVISION(pa->pa_class));
817
818 sc->sc_pc = pa->pa_pc;
819 sc->sc_tag = pa->pa_tag;
820
821 /* Set up DMA defaults; these might be adjusted by chip_map. */
822 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
823 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
824
825 #ifdef WDCDEBUG
826 if (wdcdebug_pciide_mask & DEBUG_PROBE)
827 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
828 #endif
829 sc->sc_pp->chip_map(sc, pa);
830
831 if (sc->sc_dma_ok) {
832 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
833 csr |= PCI_COMMAND_MASTER_ENABLE;
834 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
835 }
836 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
837 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
838 }
839
840 /* tell whether the chip is enabled or not */
841 int
842 pciide_chipen(sc, pa)
843 struct pciide_softc *sc;
844 struct pci_attach_args *pa;
845 {
846 pcireg_t csr;
847 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
848 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
849 PCI_COMMAND_STATUS_REG);
850 aprint_normal("%s: device disabled (at %s)\n",
851 sc->sc_wdcdev.sc_dev.dv_xname,
852 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
853 "device" : "bridge");
854 return 0;
855 }
856 return 1;
857 }
858
859 int
860 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
861 struct pci_attach_args *pa;
862 struct pciide_channel *cp;
863 int compatchan;
864 bus_size_t *cmdsizep, *ctlsizep;
865 {
866 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
867 struct channel_softc *wdc_cp = &cp->wdc_channel;
868
869 cp->compat = 1;
870 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
871 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
872
873 wdc_cp->cmd_iot = pa->pa_iot;
874 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
875 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
876 aprint_error("%s: couldn't map %s channel cmd regs\n",
877 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
878 return (0);
879 }
880
881 wdc_cp->ctl_iot = pa->pa_iot;
882 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
883 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
884 aprint_error("%s: couldn't map %s channel ctl regs\n",
885 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
886 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
887 PCIIDE_COMPAT_CMD_SIZE);
888 return (0);
889 }
890
891 return (1);
892 }
893
894 int
895 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
896 struct pci_attach_args * pa;
897 struct pciide_channel *cp;
898 bus_size_t *cmdsizep, *ctlsizep;
899 int (*pci_intr) __P((void *));
900 {
901 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
902 struct channel_softc *wdc_cp = &cp->wdc_channel;
903 const char *intrstr;
904 pci_intr_handle_t intrhandle;
905
906 cp->compat = 0;
907
908 if (sc->sc_pci_ih == NULL) {
909 if (pci_intr_map(pa, &intrhandle) != 0) {
910 aprint_error("%s: couldn't map native-PCI interrupt\n",
911 sc->sc_wdcdev.sc_dev.dv_xname);
912 return 0;
913 }
914 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
915 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
916 intrhandle, IPL_BIO, pci_intr, sc);
917 if (sc->sc_pci_ih != NULL) {
918 aprint_normal("%s: using %s for native-PCI interrupt\n",
919 sc->sc_wdcdev.sc_dev.dv_xname,
920 intrstr ? intrstr : "unknown interrupt");
921 } else {
922 aprint_error(
923 "%s: couldn't establish native-PCI interrupt",
924 sc->sc_wdcdev.sc_dev.dv_xname);
925 if (intrstr != NULL)
926 aprint_normal(" at %s", intrstr);
927 aprint_normal("\n");
928 return 0;
929 }
930 }
931 cp->ih = sc->sc_pci_ih;
932 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
933 PCI_MAPREG_TYPE_IO, 0,
934 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
935 aprint_error("%s: couldn't map %s channel cmd regs\n",
936 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
937 return 0;
938 }
939
940 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
941 PCI_MAPREG_TYPE_IO, 0,
942 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
943 aprint_error("%s: couldn't map %s channel ctl regs\n",
944 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
945 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
946 return 0;
947 }
948 /*
949	 * In native mode, 4 bytes of I/O space are mapped for the control
950	 * register; the control register itself is at offset 2. Pass the
951	 * generic code a handle for only one byte at the right offset.
952 */
953 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
954 &wdc_cp->ctl_ioh) != 0) {
955 aprint_error("%s: unable to subregion %s channel ctl regs\n",
956 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
957 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
958		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
959 return 0;
960 }
961 return (1);
962 }
963
964 void
965 pciide_mapreg_dma(sc, pa)
966 struct pciide_softc *sc;
967 struct pci_attach_args *pa;
968 {
969 pcireg_t maptype;
970 bus_addr_t addr;
971
972 /*
973 * Map DMA registers
974 *
975 * Note that sc_dma_ok is the right variable to test to see if
976 * DMA can be done. If the interface doesn't support DMA,
977 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
978 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
979 * non-zero if the interface supports DMA and the registers
980 * could be mapped.
981 *
982 * XXX Note that despite the fact that the Bus Master IDE specs
983 * XXX say that "The bus master IDE function uses 16 bytes of IO
984 * XXX space," some controllers (at least the United
985 * XXX Microelectronics UM8886BF) place it in memory space.
986 */
987 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
988 PCIIDE_REG_BUS_MASTER_DMA);
989
990 switch (maptype) {
991 case PCI_MAPREG_TYPE_IO:
992 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
993 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
994 &addr, NULL, NULL) == 0);
995 if (sc->sc_dma_ok == 0) {
996 aprint_normal(
997 ", but unused (couldn't query registers)");
998 break;
999 }
1000 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
1001 && addr >= 0x10000) {
1002 sc->sc_dma_ok = 0;
1003 aprint_normal(
1004 ", but unused (registers at unsafe address "
1005 "%#lx)", (unsigned long)addr);
1006 break;
1007 }
1008 /* FALLTHROUGH */
1009
1010 case PCI_MAPREG_MEM_TYPE_32BIT:
1011 sc->sc_dma_ok = (pci_mapreg_map(pa,
1012 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
1013 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
1014 sc->sc_dmat = pa->pa_dmat;
1015 if (sc->sc_dma_ok == 0) {
1016 aprint_normal(", but unused (couldn't map registers)");
1017 } else {
1018 sc->sc_wdcdev.dma_arg = sc;
1019 sc->sc_wdcdev.dma_init = pciide_dma_init;
1020 sc->sc_wdcdev.dma_start = pciide_dma_start;
1021 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
1022 }
1023
1024 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1025 PCIIDE_OPTIONS_NODMA) {
1026 aprint_normal(
1027 ", but unused (forced off by config file)");
1028 sc->sc_dma_ok = 0;
1029 }
1030 break;
1031
1032 default:
1033 sc->sc_dma_ok = 0;
1034 aprint_normal(
1035 ", but unsupported register maptype (0x%x)", maptype);
1036 }
1037 }
1038
1039 int
1040 pciide_compat_intr(arg)
1041 void *arg;
1042 {
1043 struct pciide_channel *cp = arg;
1044
1045 #ifdef DIAGNOSTIC
1046 /* should only be called for a compat channel */
1047 if (cp->compat == 0)
1048 panic("pciide compat intr called for non-compat chan %p", cp);
1049 #endif
1050 return (wdcintr(&cp->wdc_channel));
1051 }
1052
1053 int
1054 pciide_pci_intr(arg)
1055 void *arg;
1056 {
1057 struct pciide_softc *sc = arg;
1058 struct pciide_channel *cp;
1059 struct channel_softc *wdc_cp;
1060 int i, rv, crv;
1061
1062 rv = 0;
1063 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1064 cp = &sc->pciide_channels[i];
1065 wdc_cp = &cp->wdc_channel;
1066
1067		/* If a compat channel, skip. */
1068		if (cp->compat)
1069			continue;
1070		/* If this channel is not waiting for an intr, skip. */
1071 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1072 continue;
1073
1074 crv = wdcintr(wdc_cp);
1075 if (crv == 0)
1076 ; /* leave rv alone */
1077 else if (crv == 1)
1078 rv = 1; /* claim the intr */
1079 else if (rv == 0) /* crv should be -1 in this case */
1080 rv = crv; /* if we've done no better, take it */
1081 }
1082 return (rv);
1083 }
1084
1085 void
1086 pciide_channel_dma_setup(cp)
1087 struct pciide_channel *cp;
1088 {
1089 int drive;
1090 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1091 struct ata_drive_datas *drvp;
1092
1093 for (drive = 0; drive < 2; drive++) {
1094 drvp = &cp->wdc_channel.ch_drive[drive];
1095 /* If no drive, skip */
1096 if ((drvp->drive_flags & DRIVE) == 0)
1097 continue;
1098 /* setup DMA if needed */
1099 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1100 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1101 sc->sc_dma_ok == 0) {
1102 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1103 continue;
1104 }
1105 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1106 != 0) {
1107 /* Abort DMA setup */
1108 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1109 continue;
1110 }
1111 }
1112 }
1113
1114 int
1115 pciide_dma_table_setup(sc, channel, drive)
1116 struct pciide_softc *sc;
1117 int channel, drive;
1118 {
1119 bus_dma_segment_t seg;
1120 int error, rseg;
1121 const bus_size_t dma_table_size =
1122 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1123 struct pciide_dma_maps *dma_maps =
1124 &sc->pciide_channels[channel].dma_maps[drive];
1125
1126 /* If table was already allocated, just return */
1127 if (dma_maps->dma_table)
1128 return 0;
1129
1130 /* Allocate memory for the DMA tables and map it */
1131 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1132 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1133 BUS_DMA_NOWAIT)) != 0) {
1134 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1135 "allocate", drive, error);
1136 return error;
1137 }
1138 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1139 dma_table_size,
1140 (caddr_t *)&dma_maps->dma_table,
1141 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1142 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1143 "map", drive, error);
1144 return error;
1145 }
1146 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1147 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1148 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1149 /* Create and load table DMA map for this disk */
1150 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1151 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1152 &dma_maps->dmamap_table)) != 0) {
1153 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1154 "create", drive, error);
1155 return error;
1156 }
1157 if ((error = bus_dmamap_load(sc->sc_dmat,
1158 dma_maps->dmamap_table,
1159 dma_maps->dma_table,
1160 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1161 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1162 "load", drive, error);
1163 return error;
1164 }
1165 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1166 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1167 DEBUG_PROBE);
1168 /* Create a xfer DMA map for this drive */
1169 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1170 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1171 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1172 &dma_maps->dmamap_xfer)) != 0) {
1173 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1174 "create xfer", drive, error);
1175 return error;
1176 }
1177 return 0;
1178 }
1179
1180 int
1181 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1182 void *v;
1183 int channel, drive;
1184 void *databuf;
1185 size_t datalen;
1186 int flags;
1187 {
1188 struct pciide_softc *sc = v;
1189 int error, seg;
1190 struct pciide_dma_maps *dma_maps =
1191 &sc->pciide_channels[channel].dma_maps[drive];
1192
1193 error = bus_dmamap_load(sc->sc_dmat,
1194 dma_maps->dmamap_xfer,
1195 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1196 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1197 if (error) {
1198 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1199 "load xfer", drive, error);
1200 return error;
1201 }
1202
1203 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1204 dma_maps->dmamap_xfer->dm_mapsize,
1205 (flags & WDC_DMA_READ) ?
1206 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1207
1208 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1209 #ifdef DIAGNOSTIC
1210 /* A segment must not cross a 64k boundary */
1211 {
1212 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1213 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1214 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1215 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1216 printf("pciide_dma: segment %d physical addr 0x%lx"
1217 " len 0x%lx not properly aligned\n",
1218 seg, phys, len);
1219 panic("pciide_dma: buf align");
1220 }
1221 }
1222 #endif
1223 dma_maps->dma_table[seg].base_addr =
1224 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1225 dma_maps->dma_table[seg].byte_count =
1226 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1227 IDEDMA_BYTE_COUNT_MASK);
1228 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1229 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1230 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1231
1232 }
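	/*
	 * Mark the last entry in the table: the EOT bit tells the bus-master
	 * engine to stop after this physical region descriptor.
	 */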
1233 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1234 htole32(IDEDMA_BYTE_COUNT_EOT);
1235
1236 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1237 dma_maps->dmamap_table->dm_mapsize,
1238 BUS_DMASYNC_PREWRITE);
1239
1240	/* Maps are ready. Program the DMA engine. */
1241 #ifdef DIAGNOSTIC
1242 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1243 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1244 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1245 panic("pciide_dma_init: table align");
1246 }
1247 #endif
1248
1249 /* Clear status bits */
1250 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1251 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1252 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1253 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1254 /* Write table addr */
1255 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1256 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1257 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1258 /* set read/write */
1259 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1260 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1261 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1262 /* remember flags */
1263 dma_maps->dma_flags = flags;
1264 return 0;
1265 }
1266
1267 void
1268 pciide_dma_start(v, channel, drive)
1269 void *v;
1270 int channel, drive;
1271 {
1272 struct pciide_softc *sc = v;
1273
1274 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1275 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1276 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1277 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1278 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1279 }
1280
1281 int
1282 pciide_dma_finish(v, channel, drive, force)
1283 void *v;
1284 int channel, drive;
1285 int force;
1286 {
1287 struct pciide_softc *sc = v;
1288 u_int8_t status;
1289 int error = 0;
1290 struct pciide_dma_maps *dma_maps =
1291 &sc->pciide_channels[channel].dma_maps[drive];
1292
1293 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1294 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1295 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1296 DEBUG_XFERS);
1297
1298 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1299 return WDC_DMAST_NOIRQ;
1300
1301 /* stop DMA channel */
1302 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1303 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1304 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1305 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1306
1307 /* Unload the map of the data buffer */
1308 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1309 dma_maps->dmamap_xfer->dm_mapsize,
1310 (dma_maps->dma_flags & WDC_DMA_READ) ?
1311 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1312 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1313
1314 if ((status & IDEDMA_CTL_ERR) != 0) {
1315 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1316 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1317 error |= WDC_DMAST_ERR;
1318 }
1319
1320 if ((status & IDEDMA_CTL_INTR) == 0) {
1321 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1322 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1323 drive, status);
1324 error |= WDC_DMAST_NOIRQ;
1325 }
1326
1327 if ((status & IDEDMA_CTL_ACT) != 0) {
1328 /* data underrun, may be a valid condition for ATAPI */
1329 error |= WDC_DMAST_UNDER;
1330 }
1331 return error;
1332 }
1333
1334 void
1335 pciide_irqack(chp)
1336 struct channel_softc *chp;
1337 {
1338 struct pciide_channel *cp = (struct pciide_channel*)chp;
1339 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1340
1341 /* clear status bits in IDE DMA registers */
1342 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1343 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1344 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1345 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1346 }
1347
1348 /* some common code used by several chip_map functions */
1349 int
1350 pciide_chansetup(sc, channel, interface)
1351 struct pciide_softc *sc;
1352 int channel;
1353 pcireg_t interface;
1354 {
1355 struct pciide_channel *cp = &sc->pciide_channels[channel];
1356 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1357 cp->name = PCIIDE_CHANNEL_NAME(channel);
1358 cp->wdc_channel.channel = channel;
1359 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1360 cp->wdc_channel.ch_queue =
1361 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1362 if (cp->wdc_channel.ch_queue == NULL) {
1363 aprint_error("%s %s channel: "
1364 "can't allocate memory for command queue",
1365 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1366 return 0;
1367 }
1368 aprint_normal("%s: %s channel %s to %s mode\n",
1369 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1370 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1371 "configured" : "wired",
1372 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1373 "native-PCI" : "compatibility");
1374 return 1;
1375 }
1376
1377 /* some common channel-mapping code used by several chip_map functions */
1378 void
1379 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1380 struct pci_attach_args *pa;
1381 struct pciide_channel *cp;
1382 pcireg_t interface;
1383 bus_size_t *cmdsizep, *ctlsizep;
1384 int (*pci_intr) __P((void *));
1385 {
1386 struct channel_softc *wdc_cp = &cp->wdc_channel;
1387
1388 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1389 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1390 pci_intr);
1391 else
1392 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1393 wdc_cp->channel, cmdsizep, ctlsizep);
1394
1395 if (cp->hw_ok == 0)
1396 return;
1397 wdc_cp->data32iot = wdc_cp->cmd_iot;
1398 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1399 wdcattach(wdc_cp);
1400 }
1401
1402 /*
1403	 * Generic code to check whether a channel can be disabled. Returns 1
1404	 * if the channel can be disabled, 0 if not.
1405 */
1406 int
1407 pciide_chan_candisable(cp)
1408 struct pciide_channel *cp;
1409 {
1410 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1411 struct channel_softc *wdc_cp = &cp->wdc_channel;
1412
1413 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1414 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1415 aprint_normal("%s: disabling %s channel (no drives)\n",
1416 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1417 cp->hw_ok = 0;
1418 return 1;
1419 }
1420 return 0;
1421 }
1422
1423 /*
1424	 * Generic code to map the compat intr if hw_ok=1 and this is a compat
1425	 * channel. Sets hw_ok=0 on failure.
1426 */
1427 void
1428 pciide_map_compat_intr(pa, cp, compatchan, interface)
1429 struct pci_attach_args *pa;
1430 struct pciide_channel *cp;
1431 int compatchan, interface;
1432 {
1433 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1434 struct channel_softc *wdc_cp = &cp->wdc_channel;
1435
1436 if (cp->hw_ok == 0)
1437 return;
1438 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1439 return;
1440
1441 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1442 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1443 pa, compatchan, pciide_compat_intr, cp);
1444 if (cp->ih == NULL) {
1445 #endif
1446 aprint_error("%s: no compatibility interrupt for use by %s "
1447 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1448 cp->hw_ok = 0;
1449 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1450 }
1451 #endif
1452 }
1453
1454 void
1455 pciide_print_modes(cp)
1456 struct pciide_channel *cp;
1457 {
1458 wdc_print_modes(&cp->wdc_channel);
1459 }
1460
1461 void
1462 default_chip_map(sc, pa)
1463 struct pciide_softc *sc;
1464 struct pci_attach_args *pa;
1465 {
1466 struct pciide_channel *cp;
1467 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1468 pcireg_t csr;
1469 int channel, drive;
1470 struct ata_drive_datas *drvp;
1471 u_int8_t idedma_ctl;
1472 bus_size_t cmdsize, ctlsize;
1473 char *failreason;
1474
1475 if (pciide_chipen(sc, pa) == 0)
1476 return;
1477
1478 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1479 aprint_normal("%s: bus-master DMA support present",
1480 sc->sc_wdcdev.sc_dev.dv_xname);
1481 if (sc->sc_pp == &default_product_desc &&
1482 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1483 PCIIDE_OPTIONS_DMA) == 0) {
1484 aprint_normal(", but unused (no driver support)");
1485 sc->sc_dma_ok = 0;
1486 } else {
1487 pciide_mapreg_dma(sc, pa);
1488 if (sc->sc_dma_ok != 0)
1489 aprint_normal(", used without full driver "
1490 "support");
1491 }
1492 } else {
1493 aprint_normal("%s: hardware does not support DMA",
1494 sc->sc_wdcdev.sc_dev.dv_xname);
1495 sc->sc_dma_ok = 0;
1496 }
1497 aprint_normal("\n");
1498 if (sc->sc_dma_ok) {
1499 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1500 sc->sc_wdcdev.irqack = pciide_irqack;
1501 }
1502 sc->sc_wdcdev.PIO_cap = 0;
1503 sc->sc_wdcdev.DMA_cap = 0;
1504
1505 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1506 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1507 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1508
1509 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1510 cp = &sc->pciide_channels[channel];
1511 if (pciide_chansetup(sc, channel, interface) == 0)
1512 continue;
1513 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1514 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1515 &ctlsize, pciide_pci_intr);
1516 } else {
1517 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1518 channel, &cmdsize, &ctlsize);
1519 }
1520 if (cp->hw_ok == 0)
1521 continue;
1522 /*
1523 * Check to see if something appears to be there.
1524 */
1525 failreason = NULL;
1526 if (!wdcprobe(&cp->wdc_channel)) {
1527 failreason = "not responding; disabled or no drives?";
1528 goto next;
1529 }
1530 /*
1531 * Now, make sure it's actually attributable to this PCI IDE
1532 * channel by trying to access the channel again while the
1533 * PCI IDE controller's I/O space is disabled. (If the
1534 * channel no longer appears to be there, it belongs to
1535 * this controller.) YUCK!
1536 */
1537 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1538 PCI_COMMAND_STATUS_REG);
1539 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1540 csr & ~PCI_COMMAND_IO_ENABLE);
1541 if (wdcprobe(&cp->wdc_channel))
1542 failreason = "other hardware responding at addresses";
1543 pci_conf_write(sc->sc_pc, sc->sc_tag,
1544 PCI_COMMAND_STATUS_REG, csr);
1545 next:
1546 if (failreason) {
1547 aprint_error("%s: %s channel ignored (%s)\n",
1548 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1549 failreason);
1550 cp->hw_ok = 0;
1551 bus_space_unmap(cp->wdc_channel.cmd_iot,
1552 cp->wdc_channel.cmd_ioh, cmdsize);
1553 if (interface & PCIIDE_INTERFACE_PCI(channel))
1554 bus_space_unmap(cp->wdc_channel.ctl_iot,
1555 cp->ctl_baseioh, ctlsize);
1556 else
1557 bus_space_unmap(cp->wdc_channel.ctl_iot,
1558 cp->wdc_channel.ctl_ioh, ctlsize);
1559 } else {
1560 pciide_map_compat_intr(pa, cp, channel, interface);
1561 }
1562 if (cp->hw_ok) {
1563 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1564 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1565 wdcattach(&cp->wdc_channel);
1566 }
1567 }
1568
1569 if (sc->sc_dma_ok == 0)
1570 return;
1571
1572 /* Allocate DMA maps */
1573 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1574 idedma_ctl = 0;
1575 cp = &sc->pciide_channels[channel];
1576 for (drive = 0; drive < 2; drive++) {
1577 drvp = &cp->wdc_channel.ch_drive[drive];
1578 /* If no drive, skip */
1579 if ((drvp->drive_flags & DRIVE) == 0)
1580 continue;
1581 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1582 continue;
1583			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1584				/* Abort DMA setup */
1585				aprint_error(
1586				    "%s:%d:%d: can't allocate DMA maps, "
1587				    "using PIO transfers\n",
1588				    sc->sc_wdcdev.sc_dev.dv_xname,
1589				    channel, drive);
1590				drvp->drive_flags &= ~DRIVE_DMA;
				/* don't claim DMA for this drive below */
				continue;
1591			}
1592 aprint_normal("%s:%d:%d: using DMA data transfers\n",
1593 sc->sc_wdcdev.sc_dev.dv_xname,
1594 channel, drive);
1595 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1596 }
1597 if (idedma_ctl != 0) {
1598 /* Add software bits in status register */
1599 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1600 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1601 idedma_ctl);
1602 }
1603 }
1604 }
1605
1606 void
1607 sata_setup_channel(chp)
1608 struct channel_softc *chp;
1609 {
1610 struct ata_drive_datas *drvp;
1611 int drive;
1612 u_int32_t idedma_ctl;
1613 struct pciide_channel *cp = (struct pciide_channel*)chp;
1614 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
1615
1616 /* setup DMA if needed */
1617 pciide_channel_dma_setup(cp);
1618
1619 idedma_ctl = 0;
1620
1621 for (drive = 0; drive < 2; drive++) {
1622 drvp = &chp->ch_drive[drive];
1623 /* If no drive, skip */
1624 if ((drvp->drive_flags & DRIVE) == 0)
1625 continue;
1626 if (drvp->drive_flags & DRIVE_UDMA) {
1627 /* use Ultra/DMA */
1628 drvp->drive_flags &= ~DRIVE_DMA;
1629 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1630 } else if (drvp->drive_flags & DRIVE_DMA) {
1631 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1632 }
1633 }
1634
1635 /*
1636 * Nothing to do to setup modes; it is meaningless in S-ATA
1637 * (but many S-ATA drives still want to get the SET_FEATURE
1638 * command).
1639 */
1640 if (idedma_ctl != 0) {
1641 /* Add software bits in status register */
1642 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1643 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1644 idedma_ctl);
1645 }
1646 pciide_print_modes(cp);
1647 }
1648
1649 void
1650 piix_chip_map(sc, pa)
1651 struct pciide_softc *sc;
1652 struct pci_attach_args *pa;
1653 {
1654 struct pciide_channel *cp;
1655 int channel;
1656 u_int32_t idetim;
1657 bus_size_t cmdsize, ctlsize;
1658
1659 if (pciide_chipen(sc, pa) == 0)
1660 return;
1661
1662 aprint_normal("%s: bus-master DMA support present",
1663 sc->sc_wdcdev.sc_dev.dv_xname);
1664 pciide_mapreg_dma(sc, pa);
1665 aprint_normal("\n");
1666 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1667 WDC_CAPABILITY_MODE;
1668 if (sc->sc_dma_ok) {
1669 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1670 sc->sc_wdcdev.irqack = pciide_irqack;
1671 switch(sc->sc_pp->ide_product) {
1672 case PCI_PRODUCT_INTEL_82371AB_IDE:
1673 case PCI_PRODUCT_INTEL_82440MX_IDE:
1674 case PCI_PRODUCT_INTEL_82801AA_IDE:
1675 case PCI_PRODUCT_INTEL_82801AB_IDE:
1676 case PCI_PRODUCT_INTEL_82801BA_IDE:
1677 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1678 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1679 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1680 case PCI_PRODUCT_INTEL_82801DB_IDE:
1681 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1682 case PCI_PRODUCT_INTEL_82801EB_IDE:
1683 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1684 }
1685 }
1686 sc->sc_wdcdev.PIO_cap = 4;
1687 sc->sc_wdcdev.DMA_cap = 2;
1688 switch(sc->sc_pp->ide_product) {
1689 case PCI_PRODUCT_INTEL_82801AA_IDE:
1690 sc->sc_wdcdev.UDMA_cap = 4;
1691 break;
1692 case PCI_PRODUCT_INTEL_82801BA_IDE:
1693 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1694 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1695 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1696 case PCI_PRODUCT_INTEL_82801DB_IDE:
1697 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1698 case PCI_PRODUCT_INTEL_82801EB_IDE:
1699 sc->sc_wdcdev.UDMA_cap = 5;
1700 break;
1701 default:
1702 sc->sc_wdcdev.UDMA_cap = 2;
1703 }
1704 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1705 sc->sc_wdcdev.set_modes = piix_setup_channel;
1706 else
1707 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1708 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1709 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1710
1711 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1712 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1713 DEBUG_PROBE);
1714 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1715 WDCDEBUG_PRINT((", sidetim=0x%x",
1716 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1717 DEBUG_PROBE);
1718 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1719			WDCDEBUG_PRINT((", udmareg 0x%x",
1720 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1721 DEBUG_PROBE);
1722 }
1723 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1724 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1725 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1726 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1727 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1728 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1729 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1730 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1731 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1732 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1733 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1734 DEBUG_PROBE);
1735 }
1736
1737 }
1738 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1739
1740 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1741 cp = &sc->pciide_channels[channel];
1742 /* PIIX is compat-only */
1743 if (pciide_chansetup(sc, channel, 0) == 0)
1744 continue;
1745 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1746 if ((PIIX_IDETIM_READ(idetim, channel) &
1747 PIIX_IDETIM_IDE) == 0) {
1748 aprint_normal("%s: %s channel ignored (disabled)\n",
1749 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1750 continue;
1751 }
1752 /* PIIX are compat-only pciide devices */
1753 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1754 if (cp->hw_ok == 0)
1755 continue;
1756 if (pciide_chan_candisable(cp)) {
1757 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1758 channel);
1759 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1760 idetim);
1761 }
1762 pciide_map_compat_intr(pa, cp, channel, 0);
1763 if (cp->hw_ok == 0)
1764 continue;
1765 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1766 }
1767
1768 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1769 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1770 DEBUG_PROBE);
1771 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1772 WDCDEBUG_PRINT((", sidetim=0x%x",
1773 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1774 DEBUG_PROBE);
1775 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1776			WDCDEBUG_PRINT((", udmareg 0x%x",
1777 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1778 DEBUG_PROBE);
1779 }
1780 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1781 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1782 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1783 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1784 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1785 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1786 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1787 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1788 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1789 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1790 DEBUG_PROBE);
1791 }
1792 }
1793 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1794 }
1795
1796 void
1797 piix_setup_channel(chp)
1798 struct channel_softc *chp;
1799 {
1800 u_int8_t mode[2], drive;
1801 u_int32_t oidetim, idetim, idedma_ctl;
1802 struct pciide_channel *cp = (struct pciide_channel*)chp;
1803 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1804 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1805
1806 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1807 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1808 idedma_ctl = 0;
1809
1810 /* set up new idetim: Enable IDE registers decode */
1811 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1812 chp->channel);
1813
1814 /* setup DMA */
1815 pciide_channel_dma_setup(cp);
1816
1817 	/*
1818 	 * Here we have to juggle the drive modes: the PIIX can't use
1819 	 * different timings for the master and slave drives, so we
1820 	 * need to find the best combination.
1821 	 */
1822
1823 	/* If both drives support DMA, take the lower mode */
1824 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1825 (drvp[1].drive_flags & DRIVE_DMA)) {
1826 mode[0] = mode[1] =
1827 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1828 drvp[0].DMA_mode = mode[0];
1829 drvp[1].DMA_mode = mode[1];
1830 goto ok;
1831 }
1832 	/*
1833 	 * If only one drive supports DMA, use its mode, and
1834 	 * put the other one in PIO mode 0 if its mode is not compatible
1835 	 */
1836 if (drvp[0].drive_flags & DRIVE_DMA) {
1837 mode[0] = drvp[0].DMA_mode;
1838 mode[1] = drvp[1].PIO_mode;
1839 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1840 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1841 mode[1] = drvp[1].PIO_mode = 0;
1842 goto ok;
1843 }
1844 if (drvp[1].drive_flags & DRIVE_DMA) {
1845 mode[1] = drvp[1].DMA_mode;
1846 mode[0] = drvp[0].PIO_mode;
1847 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1848 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1849 mode[0] = drvp[0].PIO_mode = 0;
1850 goto ok;
1851 }
1852 	/*
1853 	 * If neither drive uses DMA, take the lower mode, unless
1854 	 * one of them is below PIO mode 2
1855 	 */
1856 if (drvp[0].PIO_mode < 2) {
1857 mode[0] = drvp[0].PIO_mode = 0;
1858 mode[1] = drvp[1].PIO_mode;
1859 } else if (drvp[1].PIO_mode < 2) {
1860 mode[1] = drvp[1].PIO_mode = 0;
1861 mode[0] = drvp[0].PIO_mode;
1862 } else {
1863 mode[0] = mode[1] =
1864 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1865 drvp[0].PIO_mode = mode[0];
1866 drvp[1].PIO_mode = mode[1];
1867 }
1868 ok:	/* The modes are set up */
1869 for (drive = 0; drive < 2; drive++) {
1870 if (drvp[drive].drive_flags & DRIVE_DMA) {
1871 idetim |= piix_setup_idetim_timings(
1872 mode[drive], 1, chp->channel);
1873 goto end;
1874 }
1875 }
1876 	/* If we get here, neither drive is using DMA */
1877 if (mode[0] >= 2)
1878 idetim |= piix_setup_idetim_timings(
1879 mode[0], 0, chp->channel);
1880 else
1881 idetim |= piix_setup_idetim_timings(
1882 mode[1], 0, chp->channel);
1883 end:	/*
1884 	 * The timing mode is now set up in the controller. Enable
1885 	 * it per drive.
1886 	 */
1887 for (drive = 0; drive < 2; drive++) {
1888 /* If no drive, skip */
1889 if ((drvp[drive].drive_flags & DRIVE) == 0)
1890 continue;
1891 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1892 if (drvp[drive].drive_flags & DRIVE_DMA)
1893 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1894 }
1895 if (idedma_ctl != 0) {
1896 /* Add software bits in status register */
1897 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1898 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1899 idedma_ctl);
1900 }
1901 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1902 pciide_print_modes(cp);
1903 }
1904
1905 void
1906 piix3_4_setup_channel(chp)
1907 struct channel_softc *chp;
1908 {
1909 struct ata_drive_datas *drvp;
1910 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1911 struct pciide_channel *cp = (struct pciide_channel*)chp;
1912 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1913 int drive;
1914 int channel = chp->channel;
1915
1916 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1917 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1918 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1919 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1920 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1921 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1922 PIIX_SIDETIM_RTC_MASK(channel));
1923
1924 idedma_ctl = 0;
1925 /* If channel disabled, no need to go further */
1926 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1927 return;
1928 /* set up new idetim: Enable IDE registers decode */
1929 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1930
1931 /* setup DMA if needed */
1932 pciide_channel_dma_setup(cp);
1933
1934 for (drive = 0; drive < 2; drive++) {
1935 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1936 PIIX_UDMATIM_SET(0x3, channel, drive));
1937 drvp = &chp->ch_drive[drive];
1938 /* If no drive, skip */
1939 if ((drvp->drive_flags & DRIVE) == 0)
1940 continue;
1941 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1942 (drvp->drive_flags & DRIVE_UDMA) == 0))
1943 goto pio;
1944
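		/*
		 * On the ICH-family chips listed below, set the ping-pong
		 * bit in the IDE_CONFIG register; this presumably enables
		 * the chip's ping-pong (dual) buffer for better throughput.
		 */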
1945 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1946 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1947 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1948 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1949 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1950 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1951 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1952 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1953 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1954 ideconf |= PIIX_CONFIG_PINGPONG;
1955 }
1956 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1957 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1958 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1959 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1960 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1961 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1962 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1963 /* setup Ultra/100 */
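			/*
			 * If the cable-report bit (PIIX_CONFIG_CR) is clear,
			 * presumably no 80-conductor cable is present, so cap
			 * the drive at UDMA2 (Ultra/33).
			 */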
1964 if (drvp->UDMA_mode > 2 &&
1965 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1966 drvp->UDMA_mode = 2;
1967 if (drvp->UDMA_mode > 4) {
1968 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1969 } else {
1970 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1971 if (drvp->UDMA_mode > 2) {
1972 ideconf |= PIIX_CONFIG_UDMA66(channel,
1973 drive);
1974 } else {
1975 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1976 drive);
1977 }
1978 }
1979 }
1980 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1981 /* setup Ultra/66 */
1982 if (drvp->UDMA_mode > 2 &&
1983 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1984 drvp->UDMA_mode = 2;
1985 if (drvp->UDMA_mode > 2)
1986 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1987 else
1988 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1989 }
1990 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1991 (drvp->drive_flags & DRIVE_UDMA)) {
1992 /* use Ultra/DMA */
1993 drvp->drive_flags &= ~DRIVE_DMA;
1994 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1995 udmareg |= PIIX_UDMATIM_SET(
1996 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1997 } else {
1998 /* use Multiword DMA */
1999 drvp->drive_flags &= ~DRIVE_UDMA;
2000 if (drive == 0) {
2001 idetim |= piix_setup_idetim_timings(
2002 drvp->DMA_mode, 1, channel);
2003 } else {
2004 sidetim |= piix_setup_sidetim_timings(
2005 drvp->DMA_mode, 1, channel);
2006 				idetim = PIIX_IDETIM_SET(idetim,
2007 PIIX_IDETIM_SITRE, channel);
2008 }
2009 }
2010 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2011
2012 pio: /* use PIO mode */
2013 idetim |= piix_setup_idetim_drvs(drvp);
2014 if (drive == 0) {
2015 idetim |= piix_setup_idetim_timings(
2016 drvp->PIO_mode, 0, channel);
2017 } else {
2018 sidetim |= piix_setup_sidetim_timings(
2019 drvp->PIO_mode, 0, channel);
2020 			idetim = PIIX_IDETIM_SET(idetim,
2021 PIIX_IDETIM_SITRE, channel);
2022 }
2023 }
2024 if (idedma_ctl != 0) {
2025 /* Add software bits in status register */
2026 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2027 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
2028 idedma_ctl);
2029 }
2030 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
2031 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
2032 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
2033 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
2034 pciide_print_modes(cp);
2035 }
2036
2037
2038 /* setup ISP and RTC fields, based on mode */
2039 static u_int32_t
2040 piix_setup_idetim_timings(mode, dma, channel)
2041 u_int8_t mode;
2042 u_int8_t dma;
2043 u_int8_t channel;
2044 {
2045
2046 if (dma)
2047 return PIIX_IDETIM_SET(0,
2048 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
2049 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
2050 channel);
2051 else
2052 return PIIX_IDETIM_SET(0,
2053 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
2054 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
2055 channel);
2056 }
2057
2058 /* setup DTE, PPE, IE and TIME field based on PIO mode */
2059 static u_int32_t
2060 piix_setup_idetim_drvs(drvp)
2061 struct ata_drive_datas *drvp;
2062 {
2063 u_int32_t ret = 0;
2064 struct channel_softc *chp = drvp->chnl_softc;
2065 u_int8_t channel = chp->channel;
2066 u_int8_t drive = drvp->drive;
2067
2068 	/*
2069 	 * If the drive is using UDMA, the timing setups are independent,
2070 	 * so just check DMA and PIO here.
2071 	 */
2072 if (drvp->drive_flags & DRIVE_DMA) {
2073 /* if mode = DMA mode 0, use compatible timings */
2074 if ((drvp->drive_flags & DRIVE_DMA) &&
2075 drvp->DMA_mode == 0) {
2076 drvp->PIO_mode = 0;
2077 return ret;
2078 }
2079 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2080 		/*
2081 		 * If the PIO and DMA timings are the same, use fast timings
2082 		 * for PIO too; otherwise fall back to compat timings.
2083 		 */
2084 if ((piix_isp_pio[drvp->PIO_mode] !=
2085 piix_isp_dma[drvp->DMA_mode]) ||
2086 (piix_rtc_pio[drvp->PIO_mode] !=
2087 piix_rtc_dma[drvp->DMA_mode]))
2088 drvp->PIO_mode = 0;
2089 /* if PIO mode <= 2, use compat timings for PIO */
2090 if (drvp->PIO_mode <= 2) {
2091 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2092 channel);
2093 return ret;
2094 }
2095 }
2096
2097 /*
2098 * Now setup PIO modes. If mode < 2, use compat timings.
2099 * Else enable fast timings. Enable IORDY and prefetch/post
2100 * if PIO mode >= 3.
2101 */
2102
2103 if (drvp->PIO_mode < 2)
2104 return ret;
2105
2106 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2107 if (drvp->PIO_mode >= 3) {
2108 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2109 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2110 }
2111 return ret;
2112 }
2113
2114 /* setup values in SIDETIM registers, based on mode */
2115 static u_int32_t
2116 piix_setup_sidetim_timings(mode, dma, channel)
2117 u_int8_t mode;
2118 u_int8_t dma;
2119 u_int8_t channel;
2120 {
2121 if (dma)
2122 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2123 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2124 else
2125 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2126 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2127 }
2128
2129 void
2130 amd7x6_chip_map(sc, pa)
2131 struct pciide_softc *sc;
2132 struct pci_attach_args *pa;
2133 {
2134 struct pciide_channel *cp;
2135 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2136 int channel;
2137 pcireg_t chanenable;
2138 bus_size_t cmdsize, ctlsize;
2139
2140 if (pciide_chipen(sc, pa) == 0)
2141 return;
2142 aprint_normal("%s: bus-master DMA support present",
2143 sc->sc_wdcdev.sc_dev.dv_xname);
2144 pciide_mapreg_dma(sc, pa);
2145 aprint_normal("\n");
2146 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2147 WDC_CAPABILITY_MODE;
2148 if (sc->sc_dma_ok) {
2149 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2150 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2151 sc->sc_wdcdev.irqack = pciide_irqack;
2152 }
2153 sc->sc_wdcdev.PIO_cap = 4;
2154 sc->sc_wdcdev.DMA_cap = 2;
2155
2156 switch (sc->sc_pci_vendor) {
2157 case PCI_VENDOR_AMD:
2158 switch (sc->sc_pp->ide_product) {
2159 case PCI_PRODUCT_AMD_PBC766_IDE:
2160 case PCI_PRODUCT_AMD_PBC768_IDE:
2161 case PCI_PRODUCT_AMD_PBC8111_IDE:
2162 sc->sc_wdcdev.UDMA_cap = 5;
2163 break;
2164 default:
2165 sc->sc_wdcdev.UDMA_cap = 4;
2166 }
2167 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2168 break;
2169
2170 case PCI_VENDOR_NVIDIA:
2171 switch (sc->sc_pp->ide_product) {
2172 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2173 sc->sc_wdcdev.UDMA_cap = 5;
2174 break;
2175 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2176 sc->sc_wdcdev.UDMA_cap = 6;
2177 break;
2178 }
2179 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2180 break;
2181
2182 default:
2183 panic("amd7x6_chip_map: unknown vendor");
2184 }
2185 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2186 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2187 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2188 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2189 AMD7X6_CHANSTATUS_EN(sc));
2190
2191 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2192 DEBUG_PROBE);
2193 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2194 cp = &sc->pciide_channels[channel];
2195 if (pciide_chansetup(sc, channel, interface) == 0)
2196 continue;
2197
2198 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2199 aprint_normal("%s: %s channel ignored (disabled)\n",
2200 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2201 continue;
2202 }
2203 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2204 pciide_pci_intr);
2205
2206 if (pciide_chan_candisable(cp))
2207 chanenable &= ~AMD7X6_CHAN_EN(channel);
2208 pciide_map_compat_intr(pa, cp, channel, interface);
2209 if (cp->hw_ok == 0)
2210 continue;
2211
2212 amd7x6_setup_channel(&cp->wdc_channel);
2213 }
2214 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2215 chanenable);
2216 return;
2217 }
2218
2219 void
2220 amd7x6_setup_channel(chp)
2221 struct channel_softc *chp;
2222 {
2223 u_int32_t udmatim_reg, datatim_reg;
2224 u_int8_t idedma_ctl;
2225 int mode, drive;
2226 struct ata_drive_datas *drvp;
2227 struct pciide_channel *cp = (struct pciide_channel*)chp;
2228 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2229 #ifndef PCIIDE_AMD756_ENABLEDMA
2230 int rev = PCI_REVISION(
2231 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2232 #endif
2233
2234 idedma_ctl = 0;
2235 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2236 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2237 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2238 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2239
2240 /* setup DMA if needed */
2241 pciide_channel_dma_setup(cp);
2242
2243 for (drive = 0; drive < 2; drive++) {
2244 drvp = &chp->ch_drive[drive];
2245 /* If no drive, skip */
2246 if ((drvp->drive_flags & DRIVE) == 0)
2247 continue;
2248 /* add timing values, setup DMA if needed */
2249 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2250 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2251 mode = drvp->PIO_mode;
2252 goto pio;
2253 }
2254 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2255 (drvp->drive_flags & DRIVE_UDMA)) {
2256 /* use Ultra/DMA */
2257 drvp->drive_flags &= ~DRIVE_DMA;
2258 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2259 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2260 AMD7X6_UDMA_TIME(chp->channel, drive,
2261 amd7x6_udma_tim[drvp->UDMA_mode]);
2262 /* can use PIO timings, MW DMA unused */
2263 mode = drvp->PIO_mode;
2264 } else {
2265 /* use Multiword DMA, but only if revision is OK */
2266 drvp->drive_flags &= ~DRIVE_UDMA;
2267 #ifndef PCIIDE_AMD756_ENABLEDMA
2268 			/*
2269 			 * The workaround doesn't seem to be necessary
2270 			 * with all drives, so it can be disabled with
2271 			 * PCIIDE_AMD756_ENABLEDMA. The underlying bug causes
2272 			 * a hard hang if triggered.
2273 			 */
2274 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2275 sc->sc_pp->ide_product ==
2276 PCI_PRODUCT_AMD_PBC756_IDE &&
2277 AMD756_CHIPREV_DISABLEDMA(rev)) {
2278 aprint_normal(
2279 "%s:%d:%d: multi-word DMA disabled due "
2280 "to chip revision\n",
2281 sc->sc_wdcdev.sc_dev.dv_xname,
2282 chp->channel, drive);
2283 mode = drvp->PIO_mode;
2284 drvp->drive_flags &= ~DRIVE_DMA;
2285 goto pio;
2286 }
2287 #endif
2288 /* mode = min(pio, dma+2) */
2289 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2290 mode = drvp->PIO_mode;
2291 else
2292 mode = drvp->DMA_mode + 2;
2293 }
2294 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2295
2296 pio: /* setup PIO mode */
2297 if (mode <= 2) {
2298 drvp->DMA_mode = 0;
2299 drvp->PIO_mode = 0;
2300 mode = 0;
2301 } else {
2302 drvp->PIO_mode = mode;
2303 drvp->DMA_mode = mode - 2;
2304 }
2305 datatim_reg |=
2306 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2307 amd7x6_pio_set[mode]) |
2308 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2309 amd7x6_pio_rec[mode]);
2310 }
2311 if (idedma_ctl != 0) {
2312 /* Add software bits in status register */
2313 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2314 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2315 idedma_ctl);
2316 }
2317 pciide_print_modes(cp);
2318 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2319 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2320 }
2321
2322 void
2323 apollo_chip_map(sc, pa)
2324 struct pciide_softc *sc;
2325 struct pci_attach_args *pa;
2326 {
2327 struct pciide_channel *cp;
2328 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2329 int channel;
2330 u_int32_t ideconf;
2331 bus_size_t cmdsize, ctlsize;
2332 pcitag_t pcib_tag;
2333 pcireg_t pcib_id, pcib_class;
2334
2335 if (pciide_chipen(sc, pa) == 0)
2336 return;
2337 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2338 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2339 /* and read ID and rev of the ISA bridge */
2340 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2341 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2342 aprint_normal(": VIA Technologies ");
2343 switch (PCI_PRODUCT(pcib_id)) {
2344 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2345 aprint_normal("VT82C586 (Apollo VP) ");
2346 if(PCI_REVISION(pcib_class) >= 0x02) {
2347 aprint_normal("ATA33 controller\n");
2348 sc->sc_wdcdev.UDMA_cap = 2;
2349 } else {
2350 aprint_normal("controller\n");
2351 sc->sc_wdcdev.UDMA_cap = 0;
2352 }
2353 break;
2354 case PCI_PRODUCT_VIATECH_VT82C596A:
2355 aprint_normal("VT82C596A (Apollo Pro) ");
2356 if (PCI_REVISION(pcib_class) >= 0x12) {
2357 aprint_normal("ATA66 controller\n");
2358 sc->sc_wdcdev.UDMA_cap = 4;
2359 } else {
2360 aprint_normal("ATA33 controller\n");
2361 sc->sc_wdcdev.UDMA_cap = 2;
2362 }
2363 break;
2364 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2365 aprint_normal("VT82C686A (Apollo KX133) ");
2366 if (PCI_REVISION(pcib_class) >= 0x40) {
2367 aprint_normal("ATA100 controller\n");
2368 sc->sc_wdcdev.UDMA_cap = 5;
2369 } else {
2370 aprint_normal("ATA66 controller\n");
2371 sc->sc_wdcdev.UDMA_cap = 4;
2372 }
2373 break;
2374 case PCI_PRODUCT_VIATECH_VT8231:
2375 aprint_normal("VT8231 ATA100 controller\n");
2376 sc->sc_wdcdev.UDMA_cap = 5;
2377 break;
2378 case PCI_PRODUCT_VIATECH_VT8233:
2379 aprint_normal("VT8233 ATA100 controller\n");
2380 sc->sc_wdcdev.UDMA_cap = 5;
2381 break;
2382 case PCI_PRODUCT_VIATECH_VT8233A:
2383 aprint_normal("VT8233A ATA133 controller\n");
2384 sc->sc_wdcdev.UDMA_cap = 6;
2385 break;
2386 case PCI_PRODUCT_VIATECH_VT8235:
2387 aprint_normal("VT8235 ATA133 controller\n");
2388 sc->sc_wdcdev.UDMA_cap = 6;
2389 break;
2390 default:
2391 aprint_normal("unknown ATA controller\n");
2392 sc->sc_wdcdev.UDMA_cap = 0;
2393 }
2394
2395 aprint_normal("%s: bus-master DMA support present",
2396 sc->sc_wdcdev.sc_dev.dv_xname);
2397 pciide_mapreg_dma(sc, pa);
2398 aprint_normal("\n");
2399 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2400 WDC_CAPABILITY_MODE;
2401 if (sc->sc_dma_ok) {
2402 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2403 sc->sc_wdcdev.irqack = pciide_irqack;
2404 if (sc->sc_wdcdev.UDMA_cap > 0)
2405 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2406 }
2407 sc->sc_wdcdev.PIO_cap = 4;
2408 sc->sc_wdcdev.DMA_cap = 2;
2409 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2410 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2411 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2412
2413 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2414 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2415 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2416 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2417 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2418 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2419 DEBUG_PROBE);
2420
2421 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2422 cp = &sc->pciide_channels[channel];
2423 if (pciide_chansetup(sc, channel, interface) == 0)
2424 continue;
2425
2426 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2427 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2428 aprint_normal("%s: %s channel ignored (disabled)\n",
2429 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2430 continue;
2431 }
2432 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2433 pciide_pci_intr);
2434 if (cp->hw_ok == 0)
2435 continue;
2436 if (pciide_chan_candisable(cp)) {
2437 ideconf &= ~APO_IDECONF_EN(channel);
2438 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2439 ideconf);
2440 }
2441 pciide_map_compat_intr(pa, cp, channel, interface);
2442
2443 if (cp->hw_ok == 0)
2444 continue;
2445 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2446 }
2447 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2448 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2449 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2450 }
2451
2452 void
2453 apollo_setup_channel(chp)
2454 struct channel_softc *chp;
2455 {
2456 u_int32_t udmatim_reg, datatim_reg;
2457 u_int8_t idedma_ctl;
2458 int mode, drive;
2459 struct ata_drive_datas *drvp;
2460 struct pciide_channel *cp = (struct pciide_channel*)chp;
2461 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2462
2463 idedma_ctl = 0;
2464 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2465 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2466 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2467 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2468
2469 /* setup DMA if needed */
2470 pciide_channel_dma_setup(cp);
2471
2472 for (drive = 0; drive < 2; drive++) {
2473 drvp = &chp->ch_drive[drive];
2474 /* If no drive, skip */
2475 if ((drvp->drive_flags & DRIVE) == 0)
2476 continue;
2477 /* add timing values, setup DMA if needed */
2478 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2479 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2480 mode = drvp->PIO_mode;
2481 goto pio;
2482 }
2483 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2484 (drvp->drive_flags & DRIVE_UDMA)) {
2485 /* use Ultra/DMA */
2486 drvp->drive_flags &= ~DRIVE_DMA;
2487 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2488 APO_UDMA_EN_MTH(chp->channel, drive);
2489 if (sc->sc_wdcdev.UDMA_cap == 6) {
2490 /* 8233a */
2491 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2492 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2493 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2494 /* 686b */
2495 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2496 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2497 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2498 /* 596b or 686a */
2499 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2500 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2501 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2502 } else {
2503 /* 596a or 586b */
2504 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2505 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2506 }
2507 /* can use PIO timings, MW DMA unused */
2508 mode = drvp->PIO_mode;
2509 } else {
2510 /* use Multiword DMA */
2511 drvp->drive_flags &= ~DRIVE_UDMA;
2512 /* mode = min(pio, dma+2) */
2513 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2514 mode = drvp->PIO_mode;
2515 else
2516 mode = drvp->DMA_mode + 2;
2517 }
2518 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2519
2520 pio: /* setup PIO mode */
2521 if (mode <= 2) {
2522 drvp->DMA_mode = 0;
2523 drvp->PIO_mode = 0;
2524 mode = 0;
2525 } else {
2526 drvp->PIO_mode = mode;
2527 drvp->DMA_mode = mode - 2;
2528 }
2529 datatim_reg |=
2530 APO_DATATIM_PULSE(chp->channel, drive,
2531 apollo_pio_set[mode]) |
2532 APO_DATATIM_RECOV(chp->channel, drive,
2533 apollo_pio_rec[mode]);
2534 }
2535 if (idedma_ctl != 0) {
2536 /* Add software bits in status register */
2537 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2538 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2539 idedma_ctl);
2540 }
2541 pciide_print_modes(cp);
2542 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2543 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2544 }
2545
2546 void
2547 apollo_sata_chip_map(sc, pa)
2548 struct pciide_softc *sc;
2549 struct pci_attach_args *pa;
2550 {
2551 struct pciide_channel *cp;
2552 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2553 int channel;
2554 bus_size_t cmdsize, ctlsize;
2555
2556 if (pciide_chipen(sc, pa) == 0)
2557 return;
2558
2559 	if (interface == 0) {
2560 WDCDEBUG_PRINT(("apollo_sata_chip_map interface == 0\n"),
2561 DEBUG_PROBE);
2562 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2563 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2564 }
2565
2566 aprint_normal("%s: bus-master DMA support present",
2567 sc->sc_wdcdev.sc_dev.dv_xname);
2568 pciide_mapreg_dma(sc, pa);
2569 aprint_normal("\n");
2570
2571 if (sc->sc_dma_ok) {
2572 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2573 sc->sc_wdcdev.irqack = pciide_irqack;
2574 }
2575 sc->sc_wdcdev.PIO_cap = 4;
2576 sc->sc_wdcdev.DMA_cap = 2;
2577 sc->sc_wdcdev.UDMA_cap = 6;
2578
2579 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2580 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2581 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2582 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SINGLE_DRIVE;
2583 sc->sc_wdcdev.set_modes = sata_setup_channel;
2584
2585 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2586 cp = &sc->pciide_channels[channel];
2587 if (pciide_chansetup(sc, channel, interface) == 0)
2588 continue;
2589 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2590 pciide_pci_intr);
2591
2592 pciide_map_compat_intr(pa, cp, channel, interface);
2593 sata_setup_channel(&cp->wdc_channel);
2594 }
2595 }
2596
2597 void
2598 cmd_channel_map(pa, sc, channel)
2599 struct pci_attach_args *pa;
2600 struct pciide_softc *sc;
2601 int channel;
2602 {
2603 struct pciide_channel *cp = &sc->pciide_channels[channel];
2604 bus_size_t cmdsize, ctlsize;
2605 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2606 int interface, one_channel;
2607
2608 	/*
2609 	 * The 0648/0649 can be told to identify as a RAID controller.
2610 	 * In this case, we have to fake the interface value.
2611 	 */
2612 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2613 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2614 PCIIDE_INTERFACE_SETTABLE(1);
2615 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2616 CMD_CONF_DSA1)
2617 interface |= PCIIDE_INTERFACE_PCI(0) |
2618 PCIIDE_INTERFACE_PCI(1);
2619 } else {
2620 interface = PCI_INTERFACE(pa->pa_class);
2621 }
2622
2623 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2624 cp->name = PCIIDE_CHANNEL_NAME(channel);
2625 cp->wdc_channel.channel = channel;
2626 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2627
2628 	/*
2629 	 * Older CMD64X chips don't have independent channels
2630 	 */
2631 switch (sc->sc_pp->ide_product) {
2632 case PCI_PRODUCT_CMDTECH_649:
2633 one_channel = 0;
2634 break;
2635 default:
2636 one_channel = 1;
2637 break;
2638 }
2639
2640 if (channel > 0 && one_channel) {
2641 cp->wdc_channel.ch_queue =
2642 sc->pciide_channels[0].wdc_channel.ch_queue;
2643 } else {
2644 cp->wdc_channel.ch_queue =
2645 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2646 }
2647 if (cp->wdc_channel.ch_queue == NULL) {
2648 aprint_error("%s %s channel: "
2649 "can't allocate memory for command queue",
2650 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2651 return;
2652 }
2653
2654 aprint_normal("%s: %s channel %s to %s mode\n",
2655 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2656 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2657 "configured" : "wired",
2658 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2659 "native-PCI" : "compatibility");
2660
2661 	/*
2662 	 * With a CMD PCI64x, if we get here, the first channel is enabled:
2663 	 * there's no way to disable the first channel without disabling
2664 	 * the whole device
2665 	 */
2666 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2667 aprint_normal("%s: %s channel ignored (disabled)\n",
2668 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2669 return;
2670 }
2671
2672 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2673 if (cp->hw_ok == 0)
2674 return;
2675 if (channel == 1) {
2676 if (pciide_chan_candisable(cp)) {
2677 ctrl &= ~CMD_CTRL_2PORT;
2678 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2679 CMD_CTRL, ctrl);
2680 }
2681 }
2682 pciide_map_compat_intr(pa, cp, channel, interface);
2683 }
2684
2685 int
2686 cmd_pci_intr(arg)
2687 void *arg;
2688 {
2689 struct pciide_softc *sc = arg;
2690 struct pciide_channel *cp;
2691 struct channel_softc *wdc_cp;
2692 int i, rv, crv;
2693 u_int32_t priirq, secirq;
2694
2695 rv = 0;
2696 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2697 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2698 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2699 cp = &sc->pciide_channels[i];
2700 wdc_cp = &cp->wdc_channel;
2701 		/* If this is a compat channel, skip it. */
2702 if (cp->compat)
2703 continue;
2704 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2705 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2706 crv = wdcintr(wdc_cp);
2707 if (crv == 0)
2708 printf("%s:%d: bogus intr\n",
2709 sc->sc_wdcdev.sc_dev.dv_xname, i);
2710 else
2711 rv = 1;
2712 }
2713 }
2714 return rv;
2715 }
2716
2717 void
2718 cmd_chip_map(sc, pa)
2719 struct pciide_softc *sc;
2720 struct pci_attach_args *pa;
2721 {
2722 int channel;
2723
2724 	/*
2725 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2726 	 * and the base address registers can be disabled at the
2727 	 * hardware level. In this case, the device is wired
2728 	 * in compat mode and its first channel is always enabled,
2729 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2730 	 * In fact, it seems that the first channel of the CMD PCI0640
2731 	 * can't be disabled.
2732 	 */
2733
2734 #ifdef PCIIDE_CMD064x_DISABLE
2735 if (pciide_chipen(sc, pa) == 0)
2736 return;
2737 #endif
2738
2739 aprint_normal("%s: hardware does not support DMA\n",
2740 sc->sc_wdcdev.sc_dev.dv_xname);
2741 sc->sc_dma_ok = 0;
2742
2743 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2744 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2745 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2746
2747 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2748 cmd_channel_map(pa, sc, channel);
2749 }
2750 }
2751
2752 void
2753 cmd0643_9_chip_map(sc, pa)
2754 struct pciide_softc *sc;
2755 struct pci_attach_args *pa;
2756 {
2757 struct pciide_channel *cp;
2758 int channel;
2759 pcireg_t rev = PCI_REVISION(pa->pa_class);
2760
2761 	/*
2762 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2763 	 * and the base address registers can be disabled at the
2764 	 * hardware level. In this case, the device is wired
2765 	 * in compat mode and its first channel is always enabled,
2766 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2767 	 * In fact, it seems that the first channel of the CMD PCI0640
2768 	 * can't be disabled.
2769 	 */
2770
2771 #ifdef PCIIDE_CMD064x_DISABLE
2772 if (pciide_chipen(sc, pa) == 0)
2773 return;
2774 #endif
2775 aprint_normal("%s: bus-master DMA support present",
2776 sc->sc_wdcdev.sc_dev.dv_xname);
2777 pciide_mapreg_dma(sc, pa);
2778 aprint_normal("\n");
2779 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2780 WDC_CAPABILITY_MODE;
2781 if (sc->sc_dma_ok) {
2782 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2783 switch (sc->sc_pp->ide_product) {
2784 case PCI_PRODUCT_CMDTECH_649:
2785 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2786 sc->sc_wdcdev.UDMA_cap = 5;
2787 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2788 break;
2789 case PCI_PRODUCT_CMDTECH_648:
2790 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2791 sc->sc_wdcdev.UDMA_cap = 4;
2792 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2793 break;
2794 case PCI_PRODUCT_CMDTECH_646:
2795 if (rev >= CMD0646U2_REV) {
2796 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2797 sc->sc_wdcdev.UDMA_cap = 2;
2798 } else if (rev >= CMD0646U_REV) {
2799 				/*
2800 				 * Linux's driver claims that the 646U is broken
2801 				 * with UDMA. Only enable it if we know what
2802 				 * we're doing.
2803 				 */
2804 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2805 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2806 sc->sc_wdcdev.UDMA_cap = 2;
2807 #endif
2808 /* explicitly disable UDMA */
2809 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2810 CMD_UDMATIM(0), 0);
2811 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2812 CMD_UDMATIM(1), 0);
2813 }
2814 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2815 break;
2816 default:
2817 sc->sc_wdcdev.irqack = pciide_irqack;
2818 }
2819 }
2820
2821 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2822 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2823 sc->sc_wdcdev.PIO_cap = 4;
2824 sc->sc_wdcdev.DMA_cap = 2;
2825 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2826
2827 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2828 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2829 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2830 DEBUG_PROBE);
2831
2832 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2833 cp = &sc->pciide_channels[channel];
2834 cmd_channel_map(pa, sc, channel);
2835 if (cp->hw_ok == 0)
2836 continue;
2837 cmd0643_9_setup_channel(&cp->wdc_channel);
2838 }
2839 	/*
2840 	 * Note: this also makes sure we clear the IRQ disable and reset
2841 	 * bits
2842 	 */
2843 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2844 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2845 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2846 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2847 DEBUG_PROBE);
2848 }
2849
2850 void
2851 cmd0643_9_setup_channel(chp)
2852 struct channel_softc *chp;
2853 {
2854 struct ata_drive_datas *drvp;
2855 u_int8_t tim;
2856 u_int32_t idedma_ctl, udma_reg;
2857 int drive;
2858 struct pciide_channel *cp = (struct pciide_channel*)chp;
2859 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2860
2861 idedma_ctl = 0;
2862 /* setup DMA if needed */
2863 pciide_channel_dma_setup(cp);
2864
2865 for (drive = 0; drive < 2; drive++) {
2866 drvp = &chp->ch_drive[drive];
2867 /* If no drive, skip */
2868 if ((drvp->drive_flags & DRIVE) == 0)
2869 continue;
2870 /* add timing values, setup DMA if needed */
2871 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2872 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2873 if (drvp->drive_flags & DRIVE_UDMA) {
2874 /* UltraDMA on a 646U2, 0648 or 0649 */
2875 drvp->drive_flags &= ~DRIVE_DMA;
2876 udma_reg = pciide_pci_read(sc->sc_pc,
2877 sc->sc_tag, CMD_UDMATIM(chp->channel));
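				/*
				 * CMD_BICSR_80 apparently reports an
				 * 80-conductor cable; without it, cap the
				 * drive at UDMA2 (Ultra/33).
				 */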
2878 if (drvp->UDMA_mode > 2 &&
2879 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2880 CMD_BICSR) &
2881 CMD_BICSR_80(chp->channel)) == 0)
2882 drvp->UDMA_mode = 2;
2883 if (drvp->UDMA_mode > 2)
2884 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2885 else if (sc->sc_wdcdev.UDMA_cap > 2)
2886 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2887 udma_reg |= CMD_UDMATIM_UDMA(drive);
2888 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2889 CMD_UDMATIM_TIM_OFF(drive));
2890 udma_reg |=
2891 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2892 CMD_UDMATIM_TIM_OFF(drive));
2893 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2894 CMD_UDMATIM(chp->channel), udma_reg);
2895 } else {
2896 				/*
2897 				 * Use Multiword DMA.
2898 				 * Timings will be used for both PIO and DMA,
2899 				 * so adjust the DMA mode if needed.
2900 				 * If we have a 0646U2/8/9, turn off UDMA.
2901 				 */
2902 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2903 udma_reg = pciide_pci_read(sc->sc_pc,
2904 sc->sc_tag,
2905 CMD_UDMATIM(chp->channel));
2906 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2907 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2908 CMD_UDMATIM(chp->channel),
2909 udma_reg);
2910 }
2911 if (drvp->PIO_mode >= 3 &&
2912 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2913 drvp->DMA_mode = drvp->PIO_mode - 2;
2914 }
2915 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2916 }
2917 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2918 }
2919 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2920 CMD_DATA_TIM(chp->channel, drive), tim);
2921 }
2922 if (idedma_ctl != 0) {
2923 /* Add software bits in status register */
2924 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2925 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2926 idedma_ctl);
2927 }
2928 pciide_print_modes(cp);
2929 }
2930
2931 void
2932 cmd646_9_irqack(chp)
2933 struct channel_softc *chp;
2934 {
2935 u_int32_t priirq, secirq;
2936 struct pciide_channel *cp = (struct pciide_channel*)chp;
2937 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2938
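	/*
	 * Reading the per-channel interrupt status register and writing
	 * the same value back appears to acknowledge (clear) the pending
	 * interrupt bit on the 646/648/649.
	 */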
2939 if (chp->channel == 0) {
2940 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2941 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2942 } else {
2943 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2944 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2945 }
2946 pciide_irqack(chp);
2947 }
2948
2949 void
2950 cmd680_chip_map(sc, pa)
2951 struct pciide_softc *sc;
2952 struct pci_attach_args *pa;
2953 {
2954 struct pciide_channel *cp;
2955 int channel;
2956
2957 if (pciide_chipen(sc, pa) == 0)
2958 return;
2959 aprint_normal("%s: bus-master DMA support present",
2960 sc->sc_wdcdev.sc_dev.dv_xname);
2961 pciide_mapreg_dma(sc, pa);
2962 aprint_normal("\n");
2963 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2964 WDC_CAPABILITY_MODE;
2965 if (sc->sc_dma_ok) {
2966 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2967 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2968 sc->sc_wdcdev.UDMA_cap = 6;
2969 sc->sc_wdcdev.irqack = pciide_irqack;
2970 }
2971
2972 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2973 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2974 sc->sc_wdcdev.PIO_cap = 4;
2975 sc->sc_wdcdev.DMA_cap = 2;
2976 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2977
2978 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2979 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2980 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2981 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2982 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2983 cp = &sc->pciide_channels[channel];
2984 cmd680_channel_map(pa, sc, channel);
2985 if (cp->hw_ok == 0)
2986 continue;
2987 cmd680_setup_channel(&cp->wdc_channel);
2988 }
2989 }
2990
2991 void
2992 cmd680_channel_map(pa, sc, channel)
2993 struct pci_attach_args *pa;
2994 struct pciide_softc *sc;
2995 int channel;
2996 {
2997 struct pciide_channel *cp = &sc->pciide_channels[channel];
2998 bus_size_t cmdsize, ctlsize;
2999 int interface, i, reg;
3000 static const u_int8_t init_val[] =
3001 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
3002 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
3003
3004 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
3005 interface = PCIIDE_INTERFACE_SETTABLE(0) |
3006 PCIIDE_INTERFACE_SETTABLE(1);
3007 interface |= PCIIDE_INTERFACE_PCI(0) |
3008 PCIIDE_INTERFACE_PCI(1);
3009 } else {
3010 interface = PCI_INTERFACE(pa->pa_class);
3011 }
3012
3013 sc->wdc_chanarray[channel] = &cp->wdc_channel;
3014 cp->name = PCIIDE_CHANNEL_NAME(channel);
3015 cp->wdc_channel.channel = channel;
3016 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3017
3018 cp->wdc_channel.ch_queue =
3019 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3020 if (cp->wdc_channel.ch_queue == NULL) {
3021 aprint_error("%s %s channel: "
3022 "can't allocate memory for command queue",
3023 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3024 return;
3025 }
3026
3027 /* XXX */
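	/*
	 * The loop below appears to load conservative default timing
	 * values (PIO mode 0 style) into the per-channel timing registers
	 * starting at 0xa2/0xb2; this layout is an assumption based on
	 * how cmd680_setup_channel() uses the 0xa4/0xa8/0xac offsets.
	 */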
3028 reg = 0xa2 + channel * 16;
3029 for (i = 0; i < sizeof(init_val); i++)
3030 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
3031
3032 aprint_normal("%s: %s channel %s to %s mode\n",
3033 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
3034 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
3035 "configured" : "wired",
3036 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
3037 "native-PCI" : "compatibility");
3038
3039 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
3040 if (cp->hw_ok == 0)
3041 return;
3042 pciide_map_compat_intr(pa, cp, channel, interface);
3043 }
3044
3045 void
3046 cmd680_setup_channel(chp)
3047 struct channel_softc *chp;
3048 {
3049 struct ata_drive_datas *drvp;
3050 u_int8_t mode, off, scsc;
3051 u_int16_t val;
3052 u_int32_t idedma_ctl;
3053 int drive;
3054 struct pciide_channel *cp = (struct pciide_channel*)chp;
3055 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3056 pci_chipset_tag_t pc = sc->sc_pc;
3057 pcitag_t pa = sc->sc_tag;
3058 static const u_int8_t udma2_tbl[] =
3059 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
3060 static const u_int8_t udma_tbl[] =
3061 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
3062 static const u_int16_t dma_tbl[] =
3063 { 0x2208, 0x10c2, 0x10c1 };
3064 static const u_int16_t pio_tbl[] =
3065 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
3066
3067 idedma_ctl = 0;
3068 pciide_channel_dma_setup(cp);
3069 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
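	/*
	 * Registers 0x80/0x84 appear to hold a 2-bit transfer-mode
	 * selector per drive: 01 = PIO, 10 = multiword DMA, 11 = UDMA
	 * (an assumption inferred from the values written below).
	 */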
3070
3071 for (drive = 0; drive < 2; drive++) {
3072 drvp = &chp->ch_drive[drive];
3073 /* If no drive, skip */
3074 if ((drvp->drive_flags & DRIVE) == 0)
3075 continue;
3076 mode &= ~(0x03 << (drive * 4));
3077 if (drvp->drive_flags & DRIVE_UDMA) {
3078 drvp->drive_flags &= ~DRIVE_DMA;
3079 off = 0xa0 + chp->channel * 16;
3080 if (drvp->UDMA_mode > 2 &&
3081 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
3082 drvp->UDMA_mode = 2;
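			/*
			 * Register 0x8a apparently reflects the clock
			 * selection: bits 0x30 must be set for the faster
			 * clock needed by UDMA mode 6, otherwise fall back
			 * to mode 5.
			 */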
3083 scsc = pciide_pci_read(pc, pa, 0x8a);
3084 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
3085 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
3086 scsc = pciide_pci_read(pc, pa, 0x8a);
3087 if ((scsc & 0x30) == 0)
3088 drvp->UDMA_mode = 5;
3089 }
3090 mode |= 0x03 << (drive * 4);
3091 off = 0xac + chp->channel * 16 + drive * 2;
3092 val = pciide_pci_read(pc, pa, off) & ~0x3f;
3093 if (scsc & 0x30)
3094 val |= udma2_tbl[drvp->UDMA_mode];
3095 else
3096 val |= udma_tbl[drvp->UDMA_mode];
3097 pciide_pci_write(pc, pa, off, val);
3098 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3099 } else if (drvp->drive_flags & DRIVE_DMA) {
3100 mode |= 0x02 << (drive * 4);
3101 off = 0xa8 + chp->channel * 16 + drive * 2;
3102 val = dma_tbl[drvp->DMA_mode];
3103 pciide_pci_write(pc, pa, off, val & 0xff);
3104 pciide_pci_write(pc, pa, off, val >> 8);
3105 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3106 } else {
3107 mode |= 0x01 << (drive * 4);
3108 off = 0xa4 + chp->channel * 16 + drive * 2;
3109 val = pio_tbl[drvp->PIO_mode];
3110 pciide_pci_write(pc, pa, off, val & 0xff);
3111 pciide_pci_write(pc, pa, off, val >> 8);
3112 }
3113 }
3114
3115 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3116 if (idedma_ctl != 0) {
3117 /* Add software bits in status register */
3118 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3119 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3120 idedma_ctl);
3121 }
3122 pciide_print_modes(cp);
3123 }
3124
3125 void
3126 cmd3112_chip_map(sc, pa)
3127 struct pciide_softc *sc;
3128 struct pci_attach_args *pa;
3129 {
3130 struct pciide_channel *cp;
3131 bus_size_t cmdsize, ctlsize;
3132 pcireg_t interface;
3133 int channel;
3134
3135 if (pciide_chipen(sc, pa) == 0)
3136 return;
3137
3138 aprint_normal("%s: bus-master DMA support present",
3139 sc->sc_wdcdev.sc_dev.dv_xname);
3140 pciide_mapreg_dma(sc, pa);
3141 aprint_normal("\n");
3142
3143 	/*
3144 	 * Revisions <= 0x01 of the 3112 have a bug that can cause data
3145 	 * corruption if DMA transfers cross an 8K boundary. This is
3146 	 * apparently hard to tickle, but we'll go ahead and play it
3147 	 * safe.
3148 	 */
3149 if (PCI_REVISION(pa->pa_class) <= 0x01) {
3150 sc->sc_dma_maxsegsz = 8192;
3151 sc->sc_dma_boundary = 8192;
3152 }
3153
3154 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3155 WDC_CAPABILITY_MODE;
3156 sc->sc_wdcdev.PIO_cap = 4;
3157 if (sc->sc_dma_ok) {
3158 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3159 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3160 sc->sc_wdcdev.irqack = pciide_irqack;
3161 sc->sc_wdcdev.DMA_cap = 2;
3162 sc->sc_wdcdev.UDMA_cap = 6;
3163 }
3164 sc->sc_wdcdev.set_modes = cmd3112_setup_channel;
3165
3166 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3167 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3168
3169 	/*
3170 	 * The 3112 can be told to identify as a RAID controller.
3171 	 * In this case, we have to fake the interface value.
3172 	 */
3173 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3174 interface = PCI_INTERFACE(pa->pa_class);
3175 } else {
3176 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3177 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3178 }
3179
3180 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3181 cp = &sc->pciide_channels[channel];
3182 if (pciide_chansetup(sc, channel, interface) == 0)
3183 continue;
3184 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3185 pciide_pci_intr);
3186 if (cp->hw_ok == 0)
3187 continue;
3188 pciide_map_compat_intr(pa, cp, channel, interface);
3189 cmd3112_setup_channel(&cp->wdc_channel);
3190 }
3191 }
3192
3193 void
3194 cmd3112_setup_channel(chp)
3195 struct channel_softc *chp;
3196 {
3197 struct ata_drive_datas *drvp;
3198 int drive;
3199 u_int32_t idedma_ctl, dtm;
3200 struct pciide_channel *cp = (struct pciide_channel*)chp;
3201 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
3202
3203 /* setup DMA if needed */
3204 pciide_channel_dma_setup(cp);
3205
3206 idedma_ctl = 0;
3207 dtm = 0;
3208
3209 for (drive = 0; drive < 2; drive++) {
3210 drvp = &chp->ch_drive[drive];
3211 /* If no drive, skip */
3212 if ((drvp->drive_flags & DRIVE) == 0)
3213 continue;
3214 if (drvp->drive_flags & DRIVE_UDMA) {
3215 /* use Ultra/DMA */
3216 drvp->drive_flags &= ~DRIVE_DMA;
3217 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3218 dtm |= DTM_IDEx_DMA;
3219 } else if (drvp->drive_flags & DRIVE_DMA) {
3220 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3221 dtm |= DTM_IDEx_DMA;
3222 } else {
3223 dtm |= DTM_IDEx_PIO;
3224 }
3225 }
3226
3227 	/*
3228 	 * Nothing to do to set up modes; they are meaningless for S-ATA
3229 	 * (but many S-ATA drives still want to get the SET_FEATURE
3230 	 * command).
3231 	 */
3232 if (idedma_ctl != 0) {
3233 /* Add software bits in status register */
3234 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3235 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3236 idedma_ctl);
3237 }
3238 pci_conf_write(sc->sc_pc, sc->sc_tag,
3239 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
3240 pciide_print_modes(cp);
3241 }
3242
3243 void
3244 cy693_chip_map(sc, pa)
3245 struct pciide_softc *sc;
3246 struct pci_attach_args *pa;
3247 {
3248 struct pciide_channel *cp;
3249 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3250 bus_size_t cmdsize, ctlsize;
3251
3252 if (pciide_chipen(sc, pa) == 0)
3253 return;
3254 	/*
3255 	 * This chip has 2 PCI IDE functions, one for the primary and one
3256 	 * for the secondary channel, so we need to call
3257 	 * pciide_mapregs_compat() with the real channel
3258 	 */
3259 if (pa->pa_function == 1) {
3260 sc->sc_cy_compatchan = 0;
3261 } else if (pa->pa_function == 2) {
3262 sc->sc_cy_compatchan = 1;
3263 } else {
3264 aprint_error("%s: unexpected PCI function %d\n",
3265 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3266 return;
3267 }
3268 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
3269 aprint_normal("%s: bus-master DMA support present",
3270 sc->sc_wdcdev.sc_dev.dv_xname);
3271 pciide_mapreg_dma(sc, pa);
3272 } else {
3273 aprint_normal("%s: hardware does not support DMA",
3274 sc->sc_wdcdev.sc_dev.dv_xname);
3275 sc->sc_dma_ok = 0;
3276 }
3277 aprint_normal("\n");
3278
3279 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3280 if (sc->sc_cy_handle == NULL) {
3281 aprint_error("%s: unable to map hyperCache control registers\n",
3282 sc->sc_wdcdev.sc_dev.dv_xname);
3283 sc->sc_dma_ok = 0;
3284 }
3285
3286 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3287 WDC_CAPABILITY_MODE;
3288 if (sc->sc_dma_ok) {
3289 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3290 sc->sc_wdcdev.irqack = pciide_irqack;
3291 }
3292 sc->sc_wdcdev.PIO_cap = 4;
3293 sc->sc_wdcdev.DMA_cap = 2;
3294 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3295
3296 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3297 sc->sc_wdcdev.nchannels = 1;
3298
3299 /* Only one channel for this chip; if we are here it's enabled */
3300 cp = &sc->pciide_channels[0];
3301 sc->wdc_chanarray[0] = &cp->wdc_channel;
3302 cp->name = PCIIDE_CHANNEL_NAME(0);
3303 cp->wdc_channel.channel = 0;
3304 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3305 cp->wdc_channel.ch_queue =
3306 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3307 if (cp->wdc_channel.ch_queue == NULL) {
3308 aprint_error("%s primary channel: "
3309 "can't allocate memory for command queue",
3310 sc->sc_wdcdev.sc_dev.dv_xname);
3311 return;
3312 }
3313 aprint_normal("%s: primary channel %s to ",
3314 sc->sc_wdcdev.sc_dev.dv_xname,
3315 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3316 "configured" : "wired");
3317 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3318 aprint_normal("native-PCI");
3319 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3320 pciide_pci_intr);
3321 } else {
3322 aprint_normal("compatibility");
3323 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3324 &cmdsize, &ctlsize);
3325 }
3326 aprint_normal(" mode\n");
3327 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3328 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3329 wdcattach(&cp->wdc_channel);
3330 if (pciide_chan_candisable(cp)) {
3331 pci_conf_write(sc->sc_pc, sc->sc_tag,
3332 PCI_COMMAND_STATUS_REG, 0);
3333 }
3334 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3335 if (cp->hw_ok == 0)
3336 return;
3337 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3338 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3339 cy693_setup_channel(&cp->wdc_channel);
3340 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3341 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3342 }
3343
3344 void
3345 cy693_setup_channel(chp)
3346 struct channel_softc *chp;
3347 {
3348 struct ata_drive_datas *drvp;
3349 int drive;
3350 u_int32_t cy_cmd_ctrl;
3351 u_int32_t idedma_ctl;
3352 struct pciide_channel *cp = (struct pciide_channel*)chp;
3353 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3354 int dma_mode = -1;
3355
3356 cy_cmd_ctrl = idedma_ctl = 0;
3357
3358 /* setup DMA if needed */
3359 pciide_channel_dma_setup(cp);
3360
3361 for (drive = 0; drive < 2; drive++) {
3362 drvp = &chp->ch_drive[drive];
3363 /* If no drive, skip */
3364 if ((drvp->drive_flags & DRIVE) == 0)
3365 continue;
3366 /* add timing values, setup DMA if needed */
3367 if (drvp->drive_flags & DRIVE_DMA) {
3368 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3369 /* use Multiword DMA */
3370 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3371 dma_mode = drvp->DMA_mode;
3372 }
3373 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3374 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3375 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3376 CY_CMD_CTRL_IOW_REC_OFF(drive));
3377 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3378 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3379 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3380 CY_CMD_CTRL_IOR_REC_OFF(drive));
3381 }
3382 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
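	/*
	 * The chip has a single DMA timing setting shared by both drives,
	 * so both are forced to the common (lowest) DMA mode found above.
	 */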
3383 chp->ch_drive[0].DMA_mode = dma_mode;
3384 chp->ch_drive[1].DMA_mode = dma_mode;
3385
3386 if (dma_mode == -1)
3387 dma_mode = 0;
3388
3389 if (sc->sc_cy_handle != NULL) {
3390 /* Note: `multiple' is implied. */
3391 cy82c693_write(sc->sc_cy_handle,
3392 (sc->sc_cy_compatchan == 0) ?
3393 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3394 }
3395
3396 pciide_print_modes(cp);
3397
3398 if (idedma_ctl != 0) {
3399 /* Add software bits in status register */
3400 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3401 IDEDMA_CTL, idedma_ctl);
3402 }
3403 }
3404
3405 static struct sis_hostbr_type {
3406 u_int16_t id;
3407 u_int8_t rev;
3408 u_int8_t udma_mode;
3409 char *name;
3410 u_int8_t type;
3411 #define SIS_TYPE_NOUDMA 0
3412 #define SIS_TYPE_66 1
3413 #define SIS_TYPE_100OLD 2
3414 #define SIS_TYPE_100NEW 3
3415 #define SIS_TYPE_133OLD 4
3416 #define SIS_TYPE_133NEW 5
3417 #define SIS_TYPE_SOUTH 6
3418 } sis_hostbr_type[] = {
3419 	/* Most of the info here is from sos (at) freebsd.org */
3420 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
3421 #if 0
3422 	/*
3423 	 * Controllers associated with a rev 0x2 530 Host-to-PCI Bridge
3424 	 * have problems with UDMA (info provided by Christos)
3425 	 */
3426 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
3427 #endif
3428 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
3429 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
3430 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
3431 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
3432 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
3433 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
3434 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
3435 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
3436 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
3437 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
3438 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
3439 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
3440 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
3441 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
3442 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
3443 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
3444 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
3445 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
3446 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
3447 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
3448 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
3449 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
3450 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
3451 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
3452 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
3453 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
3454 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
3455 	/*
3456 	 * From sos (at) freebsd.org: the 0x961 ID will never be found in the real world
3457 	 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
3458 	 */
3459 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
3460 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
3461 };
3462
3463 static struct sis_hostbr_type *sis_hostbr_type_match;
3464
3465 static int
3466 sis_hostbr_match(pa)
3467 struct pci_attach_args *pa;
3468 {
3469 int i;
3470 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3471 return 0;
3472 sis_hostbr_type_match = NULL;
3473 for (i = 0;
3474 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3475 i++) {
3476 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3477 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3478 sis_hostbr_type_match = &sis_hostbr_type[i];
3479 }
3480 return (sis_hostbr_type_match != NULL);
3481 }
3482
3483 static int sis_south_match(pa)
3484 struct pci_attach_args *pa;
3485 {
3486 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3487 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3488 PCI_REVISION(pa->pa_class) >= 0x10);
3489 }
3490
3491 void
3492 sis_chip_map(sc, pa)
3493 struct pciide_softc *sc;
3494 struct pci_attach_args *pa;
3495 {
3496 struct pciide_channel *cp;
3497 int channel;
3498 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3499 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3500 pcireg_t rev = PCI_REVISION(pa->pa_class);
3501 bus_size_t cmdsize, ctlsize;
3502
3503 if (pciide_chipen(sc, pa) == 0)
3504 return;
3505 aprint_normal(": Silicon Integrated System ");
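	/*
	 * The SiS IDE function carries the same PCI ID across many chips,
	 * so scan for the host bridge (and, for the newer families, the
	 * southbridge) to work out which chip, and thus which UDMA modes,
	 * we really have.
	 */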
3506 pci_find_device(NULL, sis_hostbr_match);
3507 if (sis_hostbr_type_match) {
3508 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
3509 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3510 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3511 SIS_REG_57) & 0x7f);
3512 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3513 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3514 aprint_normal("96X UDMA%d",
3515 sis_hostbr_type_match->udma_mode);
3516 sc->sis_type = SIS_TYPE_133NEW;
3517 sc->sc_wdcdev.UDMA_cap =
3518 sis_hostbr_type_match->udma_mode;
3519 } else {
3520 if (pci_find_device(NULL, sis_south_match)) {
3521 sc->sis_type = SIS_TYPE_133OLD;
3522 sc->sc_wdcdev.UDMA_cap =
3523 sis_hostbr_type_match->udma_mode;
3524 } else {
3525 sc->sis_type = SIS_TYPE_100NEW;
3526 sc->sc_wdcdev.UDMA_cap =
3527 sis_hostbr_type_match->udma_mode;
3528 }
3529 }
3530 } else {
3531 sc->sis_type = sis_hostbr_type_match->type;
3532 sc->sc_wdcdev.UDMA_cap =
3533 sis_hostbr_type_match->udma_mode;
3534 }
3535 		aprint_normal("%s", sis_hostbr_type_match->name);
3536 } else {
3537 aprint_normal("5597/5598");
3538 if (rev >= 0xd0) {
3539 sc->sc_wdcdev.UDMA_cap = 2;
3540 sc->sis_type = SIS_TYPE_66;
3541 } else {
3542 sc->sc_wdcdev.UDMA_cap = 0;
3543 sc->sis_type = SIS_TYPE_NOUDMA;
3544 }
3545 }
3546 aprint_normal(" IDE controller (rev. 0x%02x)\n",
3547 PCI_REVISION(pa->pa_class));
3548 aprint_normal("%s: bus-master DMA support present",
3549 sc->sc_wdcdev.sc_dev.dv_xname);
3550 pciide_mapreg_dma(sc, pa);
3551 aprint_normal("\n");
3552
3553 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3554 WDC_CAPABILITY_MODE;
3555 if (sc->sc_dma_ok) {
3556 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3557 sc->sc_wdcdev.irqack = pciide_irqack;
3558 if (sc->sis_type >= SIS_TYPE_66)
3559 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3560 }
3561
3562 sc->sc_wdcdev.PIO_cap = 4;
3563 sc->sc_wdcdev.DMA_cap = 2;
3564
3565 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3566 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3567 switch(sc->sis_type) {
3568 case SIS_TYPE_NOUDMA:
3569 case SIS_TYPE_66:
3570 case SIS_TYPE_100OLD:
3571 sc->sc_wdcdev.set_modes = sis_setup_channel;
3572 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3573 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3574 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3575 break;
3576 case SIS_TYPE_100NEW:
3577 case SIS_TYPE_133OLD:
3578 sc->sc_wdcdev.set_modes = sis_setup_channel;
3579 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3580 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3581 break;
3582 case SIS_TYPE_133NEW:
3583 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3584 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3585 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3586 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3587 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3588 break;
3589 }
3590
3591
3592 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3593 cp = &sc->pciide_channels[channel];
3594 if (pciide_chansetup(sc, channel, interface) == 0)
3595 continue;
3596 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3597 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3598 aprint_normal("%s: %s channel ignored (disabled)\n",
3599 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3600 continue;
3601 }
3602 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3603 pciide_pci_intr);
3604 if (cp->hw_ok == 0)
3605 continue;
3606 if (pciide_chan_candisable(cp)) {
3607 if (channel == 0)
3608 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3609 else
3610 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3611 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3612 sis_ctr0);
3613 }
3614 pciide_map_compat_intr(pa, cp, channel, interface);
3615 if (cp->hw_ok == 0)
3616 continue;
3617 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3618 }
3619 }
3620
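/*
 * sis96x_setup_channel:
 *	Per-drive timing setup for the SiS 96x family (SIS_TYPE_133NEW).
 *	Timings are written to the per-drive register located via
 *	SIS_TIM133(); UDMA modes above 2 are clamped when the cable
 *	detect bit (SIS96x_REG_CBL_33) is set for the channel.
 */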
3621 void
3622 sis96x_setup_channel(chp)
3623 struct channel_softc *chp;
3624 {
3625 struct ata_drive_datas *drvp;
3626 int drive;
3627 u_int32_t sis_tim;
3628 u_int32_t idedma_ctl;
3629 int regtim;
3630 struct pciide_channel *cp = (struct pciide_channel*)chp;
3631 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3632
3633 sis_tim = 0;
3634 idedma_ctl = 0;
3635 /* setup DMA if needed */
3636 pciide_channel_dma_setup(cp);
3637
3638 for (drive = 0; drive < 2; drive++) {
3639 regtim = SIS_TIM133(
3640 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3641 chp->channel, drive);
3642 drvp = &chp->ch_drive[drive];
3643 /* If no drive, skip */
3644 if ((drvp->drive_flags & DRIVE) == 0)
3645 continue;
3646 /* add timing values, setup DMA if needed */
3647 if (drvp->drive_flags & DRIVE_UDMA) {
3648 /* use Ultra/DMA */
3649 drvp->drive_flags &= ~DRIVE_DMA;
3650 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3651 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3652 if (drvp->UDMA_mode > 2)
3653 drvp->UDMA_mode = 2;
3654 }
3655 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3656 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3657 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3658 } else if (drvp->drive_flags & DRIVE_DMA) {
3659 /*
3660 * use Multiword DMA
3661 * Timings will be used for both PIO and DMA,
3662 * so adjust DMA mode if needed
3663 */
3664 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3665 drvp->PIO_mode = drvp->DMA_mode + 2;
3666 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3667 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3668 drvp->PIO_mode - 2 : 0;
3669 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3670 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3671 } else {
3672 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3673 }
3674 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3675 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3676 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3677 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3678 }
3679 if (idedma_ctl != 0) {
3680 /* Add software bits in status register */
3681 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3682 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3683 idedma_ctl);
3684 }
3685 pciide_print_modes(cp);
3686 }
3687
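/*
 * sis_setup_channel:
 *	Timing setup for the older SiS types (NOUDMA/66/100OLD/100NEW/
 *	133OLD). All timings for a channel are packed into the single
 *	SIS_TIM(channel) register; UDMA modes above 2 are clamped when
 *	SIS_REG_CBL flags the channel, and the UDMA/PIO bit layout is
 *	selected according to sc->sis_type.
 */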
3688 void
3689 sis_setup_channel(chp)
3690 struct channel_softc *chp;
3691 {
3692 struct ata_drive_datas *drvp;
3693 int drive;
3694 u_int32_t sis_tim;
3695 u_int32_t idedma_ctl;
3696 struct pciide_channel *cp = (struct pciide_channel*)chp;
3697 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3698
3699 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3700 "channel %d 0x%x\n", chp->channel,
3701 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3702 DEBUG_PROBE);
3703 sis_tim = 0;
3704 idedma_ctl = 0;
3705 /* setup DMA if needed */
3706 pciide_channel_dma_setup(cp);
3707
3708 for (drive = 0; drive < 2; drive++) {
3709 drvp = &chp->ch_drive[drive];
3710 /* If no drive, skip */
3711 if ((drvp->drive_flags & DRIVE) == 0)
3712 continue;
3713 /* add timing values, setup DMA if needed */
3714 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3715 (drvp->drive_flags & DRIVE_UDMA) == 0)
3716 goto pio;
3717
3718 if (drvp->drive_flags & DRIVE_UDMA) {
3719 /* use Ultra/DMA */
3720 drvp->drive_flags &= ~DRIVE_DMA;
3721 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3722 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3723 if (drvp->UDMA_mode > 2)
3724 drvp->UDMA_mode = 2;
3725 }
3726 switch (sc->sis_type) {
3727 case SIS_TYPE_66:
3728 case SIS_TYPE_100OLD:
3729 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3730 SIS_TIM66_UDMA_TIME_OFF(drive);
3731 break;
3732 case SIS_TYPE_100NEW:
3733 sis_tim |=
3734 sis_udma100new_tim[drvp->UDMA_mode] <<
3735 				    SIS_TIM100_UDMA_TIME_OFF(drive);
				break;
3736 case SIS_TYPE_133OLD:
3737 sis_tim |=
3738 sis_udma133old_tim[drvp->UDMA_mode] <<
3739 SIS_TIM100_UDMA_TIME_OFF(drive);
3740 break;
3741 default:
3742 aprint_error("unknown SiS IDE type %d\n",
3743 sc->sis_type);
3744 }
3745 } else {
3746 /*
3747 * use Multiword DMA
3748 * Timings will be used for both PIO and DMA,
3749 * so adjust DMA mode if needed
3750 */
3751 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3752 drvp->PIO_mode = drvp->DMA_mode + 2;
3753 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3754 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3755 drvp->PIO_mode - 2 : 0;
3756 if (drvp->DMA_mode == 0)
3757 drvp->PIO_mode = 0;
3758 }
3759 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3760 pio: switch (sc->sis_type) {
3761 case SIS_TYPE_NOUDMA:
3762 case SIS_TYPE_66:
3763 case SIS_TYPE_100OLD:
3764 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3765 SIS_TIM66_ACT_OFF(drive);
3766 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3767 SIS_TIM66_REC_OFF(drive);
3768 break;
3769 case SIS_TYPE_100NEW:
3770 case SIS_TYPE_133OLD:
3771 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3772 SIS_TIM100_ACT_OFF(drive);
3773 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3774 SIS_TIM100_REC_OFF(drive);
3775 break;
3776 default:
3777 aprint_error("unknown SiS IDE type %d\n",
3778 sc->sis_type);
3779 }
3780 }
3781 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3782 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3783 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3784 if (idedma_ctl != 0) {
3785 /* Add software bits in status register */
3786 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3787 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3788 idedma_ctl);
3789 }
3790 pciide_print_modes(cp);
3791 }
3792
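/*
 * acer_chip_map:
 *	Attach an Acer Labs (ALi) IDE controller. The UDMA capability is
 *	derived from the chip revision (>= 0xC4: UDMA5, >= 0xC2: UDMA4,
 *	>= 0x20: UDMA2), DMA is turned on in the CD-ROM control register
 *	(ACER_CDRC), the channel enable/status bits are made writable,
 *	and cable detection is enabled on revisions >= 0xC2 before the
 *	channels are mapped.
 */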
3793 void
3794 acer_chip_map(sc, pa)
3795 struct pciide_softc *sc;
3796 struct pci_attach_args *pa;
3797 {
3798 struct pciide_channel *cp;
3799 int channel;
3800 pcireg_t cr, interface;
3801 bus_size_t cmdsize, ctlsize;
3802 pcireg_t rev = PCI_REVISION(pa->pa_class);
3803
3804 if (pciide_chipen(sc, pa) == 0)
3805 return;
3806 aprint_normal("%s: bus-master DMA support present",
3807 sc->sc_wdcdev.sc_dev.dv_xname);
3808 pciide_mapreg_dma(sc, pa);
3809 aprint_normal("\n");
3810 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3811 WDC_CAPABILITY_MODE;
3812 if (sc->sc_dma_ok) {
3813 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3814 if (rev >= 0x20) {
3815 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3816 if (rev >= 0xC4)
3817 sc->sc_wdcdev.UDMA_cap = 5;
3818 else if (rev >= 0xC2)
3819 sc->sc_wdcdev.UDMA_cap = 4;
3820 else
3821 sc->sc_wdcdev.UDMA_cap = 2;
3822 }
3823 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3824 sc->sc_wdcdev.irqack = pciide_irqack;
3825 }
3826
3827 sc->sc_wdcdev.PIO_cap = 4;
3828 sc->sc_wdcdev.DMA_cap = 2;
3829 sc->sc_wdcdev.set_modes = acer_setup_channel;
3830 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3831 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3832
3833 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3834 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3835 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3836
3837 /* Enable "microsoft register bits" R/W. */
3838 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3839 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3840 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3841 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3842 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3843 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3844 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3845 ~ACER_CHANSTATUSREGS_RO);
3846 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3847 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3848 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3849 /* Don't use cr, re-read the real register content instead */
3850 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3851 PCI_CLASS_REG));
3852
3853 /* From linux: enable "Cable Detection" */
3854 if (rev >= 0xC2) {
3855 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3856 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3857 | ACER_0x4B_CDETECT);
3858 }
3859
3860 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3861 cp = &sc->pciide_channels[channel];
3862 if (pciide_chansetup(sc, channel, interface) == 0)
3863 continue;
3864 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3865 aprint_normal("%s: %s channel ignored (disabled)\n",
3866 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3867 continue;
3868 }
3869 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3870 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3871 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3872 if (cp->hw_ok == 0)
3873 continue;
3874 if (pciide_chan_candisable(cp)) {
3875 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3876 pci_conf_write(sc->sc_pc, sc->sc_tag,
3877 PCI_CLASS_REG, cr);
3878 }
3879 pciide_map_compat_intr(pa, cp, channel, interface);
3880 acer_setup_channel(&cp->wdc_channel);
3881 }
3882 }
3883
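/*
 * acer_setup_channel:
 *	Per-channel timing setup. If the ACER_0x4A cable-detect bit for
 *	the channel is set, both drives are limited to UDMA2. FIFO
 *	threshold and UDMA timings go into ACER_FTH_UDMA, PIO timings
 *	into ACER_IDETIM; UDMA3 and above additionally set the
 *	ACER_0x4B_UDMA66 bit.
 */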
3884 void
3885 acer_setup_channel(chp)
3886 struct channel_softc *chp;
3887 {
3888 struct ata_drive_datas *drvp;
3889 int drive;
3890 u_int32_t acer_fifo_udma;
3891 u_int32_t idedma_ctl;
3892 struct pciide_channel *cp = (struct pciide_channel*)chp;
3893 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3894
3895 idedma_ctl = 0;
3896 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3897 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3898 acer_fifo_udma), DEBUG_PROBE);
3899 /* setup DMA if needed */
3900 pciide_channel_dma_setup(cp);
3901
3902 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3903 	    DRIVE_UDMA) { /* check for 80-pin cable */
3904 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3905 ACER_0x4A_80PIN(chp->channel)) {
3906 if (chp->ch_drive[0].UDMA_mode > 2)
3907 chp->ch_drive[0].UDMA_mode = 2;
3908 if (chp->ch_drive[1].UDMA_mode > 2)
3909 chp->ch_drive[1].UDMA_mode = 2;
3910 }
3911 }
3912
3913 for (drive = 0; drive < 2; drive++) {
3914 drvp = &chp->ch_drive[drive];
3915 /* If no drive, skip */
3916 if ((drvp->drive_flags & DRIVE) == 0)
3917 continue;
3918 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3919 "channel %d drive %d 0x%x\n", chp->channel, drive,
3920 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3921 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3922 /* clear FIFO/DMA mode */
3923 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3924 ACER_UDMA_EN(chp->channel, drive) |
3925 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3926
3927 /* add timing values, setup DMA if needed */
3928 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3929 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3930 acer_fifo_udma |=
3931 ACER_FTH_OPL(chp->channel, drive, 0x1);
3932 goto pio;
3933 }
3934
3935 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3936 if (drvp->drive_flags & DRIVE_UDMA) {
3937 /* use Ultra/DMA */
3938 drvp->drive_flags &= ~DRIVE_DMA;
3939 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3940 acer_fifo_udma |=
3941 ACER_UDMA_TIM(chp->channel, drive,
3942 acer_udma[drvp->UDMA_mode]);
3943 /* XXX disable if one drive < UDMA3 ? */
3944 if (drvp->UDMA_mode >= 3) {
3945 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3946 ACER_0x4B,
3947 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3948 ACER_0x4B) | ACER_0x4B_UDMA66);
3949 }
3950 } else {
3951 /*
3952 * use Multiword DMA
3953 * Timings will be used for both PIO and DMA,
3954 * so adjust DMA mode if needed
3955 */
3956 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3957 drvp->PIO_mode = drvp->DMA_mode + 2;
3958 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3959 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3960 drvp->PIO_mode - 2 : 0;
3961 if (drvp->DMA_mode == 0)
3962 drvp->PIO_mode = 0;
3963 }
3964 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3965 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3966 ACER_IDETIM(chp->channel, drive),
3967 acer_pio[drvp->PIO_mode]);
3968 }
3969 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3970 acer_fifo_udma), DEBUG_PROBE);
3971 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3972 if (idedma_ctl != 0) {
3973 /* Add software bits in status register */
3974 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3975 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3976 idedma_ctl);
3977 }
3978 pciide_print_modes(cp);
3979 }
3980
3981 int
3982 acer_pci_intr(arg)
3983 void *arg;
3984 {
3985 struct pciide_softc *sc = arg;
3986 struct pciide_channel *cp;
3987 struct channel_softc *wdc_cp;
3988 int i, rv, crv;
3989 u_int32_t chids;
3990
3991 rv = 0;
3992 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3993 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3994 cp = &sc->pciide_channels[i];
3995 wdc_cp = &cp->wdc_channel;
3996 /* If a compat channel skip. */
3997 if (cp->compat)
3998 continue;
3999 if (chids & ACER_CHIDS_INT(i)) {
4000 crv = wdcintr(wdc_cp);
4001 if (crv == 0)
4002 printf("%s:%d: bogus intr\n",
4003 sc->sc_wdcdev.sc_dev.dv_xname, i);
4004 else
4005 rv = 1;
4006 }
4007 }
4008 return rv;
4009 }
4010
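/*
 * hpt_chip_map:
 *	Attach a Triones/Highpoint HPT36x/37x controller. The exact chip
 *	is identified from the PCI product and revision. The HPT366 has
 *	one wdc channel per PCI function, so pa_function selects the
 *	compat channel; the later chips expose both channels on a single
 *	function. On HPT370 and newer the interrupt-disable bit in
 *	HPT_CSEL is cleared, and on 372/374 the clock is set up via
 *	HPT_SC2.
 */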
4011 void
4012 hpt_chip_map(sc, pa)
4013 struct pciide_softc *sc;
4014 struct pci_attach_args *pa;
4015 {
4016 struct pciide_channel *cp;
4017 int i, compatchan, revision;
4018 pcireg_t interface;
4019 bus_size_t cmdsize, ctlsize;
4020
4021 if (pciide_chipen(sc, pa) == 0)
4022 return;
4023 revision = PCI_REVISION(pa->pa_class);
4024 aprint_normal(": Triones/Highpoint ");
4025 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4026 aprint_normal("HPT374 IDE Controller\n");
4027 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
4028 aprint_normal("HPT372 IDE Controller\n");
4029 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
4030 if (revision == HPT372_REV)
4031 aprint_normal("HPT372 IDE Controller\n");
4032 else if (revision == HPT370_REV)
4033 aprint_normal("HPT370 IDE Controller\n");
4034 else if (revision == HPT370A_REV)
4035 aprint_normal("HPT370A IDE Controller\n");
4036 else if (revision == HPT366_REV)
4037 aprint_normal("HPT366 IDE Controller\n");
4038 else
4039 aprint_normal("unknown HPT IDE controller rev %d\n",
4040 revision);
4041 } else
4042 aprint_normal("unknown HPT IDE controller 0x%x\n",
4043 sc->sc_pp->ide_product);
4044
4045 /*
4046 * when the chip is in native mode it identifies itself as a
4047 * 'misc mass storage'. Fake interface in this case.
4048 */
4049 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4050 interface = PCI_INTERFACE(pa->pa_class);
4051 } else {
4052 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4053 PCIIDE_INTERFACE_PCI(0);
4054 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4055 (revision == HPT370_REV || revision == HPT370A_REV ||
4056 revision == HPT372_REV)) ||
4057 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4058 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4059 interface |= PCIIDE_INTERFACE_PCI(1);
4060 }
4061
4062 aprint_normal("%s: bus-master DMA support present",
4063 sc->sc_wdcdev.sc_dev.dv_xname);
4064 pciide_mapreg_dma(sc, pa);
4065 aprint_normal("\n");
4066 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4067 WDC_CAPABILITY_MODE;
4068 if (sc->sc_dma_ok) {
4069 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4070 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4071 sc->sc_wdcdev.irqack = pciide_irqack;
4072 }
4073 sc->sc_wdcdev.PIO_cap = 4;
4074 sc->sc_wdcdev.DMA_cap = 2;
4075
4076 sc->sc_wdcdev.set_modes = hpt_setup_channel;
4077 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4078 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4079 revision == HPT366_REV) {
4080 sc->sc_wdcdev.UDMA_cap = 4;
4081 /*
4082 * The 366 has 2 PCI IDE functions, one for primary and one
4083 * for secondary. So we need to call pciide_mapregs_compat()
4084 * with the real channel
4085 */
4086 if (pa->pa_function == 0) {
4087 compatchan = 0;
4088 } else if (pa->pa_function == 1) {
4089 compatchan = 1;
4090 } else {
4091 aprint_error("%s: unexpected PCI function %d\n",
4092 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
4093 return;
4094 }
4095 sc->sc_wdcdev.nchannels = 1;
4096 } else {
4097 sc->sc_wdcdev.nchannels = 2;
4098 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
4099 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4100 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4101 revision == HPT372_REV))
4102 sc->sc_wdcdev.UDMA_cap = 6;
4103 else
4104 sc->sc_wdcdev.UDMA_cap = 5;
4105 }
4106 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4107 cp = &sc->pciide_channels[i];
4108 if (sc->sc_wdcdev.nchannels > 1) {
4109 compatchan = i;
4110 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
4111 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
4112 aprint_normal(
4113 "%s: %s channel ignored (disabled)\n",
4114 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4115 continue;
4116 }
4117 }
4118 if (pciide_chansetup(sc, i, interface) == 0)
4119 continue;
4120 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4121 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4122 &ctlsize, hpt_pci_intr);
4123 } else {
4124 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
4125 &cmdsize, &ctlsize);
4126 }
4127 if (cp->hw_ok == 0)
4128 return;
4129 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4130 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4131 wdcattach(&cp->wdc_channel);
4132 hpt_setup_channel(&cp->wdc_channel);
4133 }
4134 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4135 (revision == HPT370_REV || revision == HPT370A_REV ||
4136 revision == HPT372_REV)) ||
4137 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4138 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
4139 /*
4140 		 * HPT370_REV and higher have a bit to disable interrupts;
4141 		 * make sure to clear it.
4142 */
4143 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
4144 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
4145 ~HPT_CSEL_IRQDIS);
4146 }
4147 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
4148 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4149 revision == HPT372_REV ) ||
4150 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4151 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4152 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
4153 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
4154 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
4155 return;
4156 }
4157
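/*
 * hpt_setup_channel:
 *	Per-drive timing setup. The timing word written to HPT_IDETIM is
 *	picked from product- and revision-specific tables (the hpt366,
 *	hpt370, hpt372 and hpt374 pio/dma/udma arrays); UDMA modes above
 *	2 are clamped when the HPT_CSEL cable-ID bit for the channel is
 *	set.
 */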
4158 void
4159 hpt_setup_channel(chp)
4160 struct channel_softc *chp;
4161 {
4162 struct ata_drive_datas *drvp;
4163 int drive;
4164 int cable;
4165 u_int32_t before, after;
4166 u_int32_t idedma_ctl;
4167 struct pciide_channel *cp = (struct pciide_channel*)chp;
4168 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4169 int revision =
4170 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
4171
4172 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
4173
4174 /* setup DMA if needed */
4175 pciide_channel_dma_setup(cp);
4176
4177 idedma_ctl = 0;
4178
4179 /* Per drive settings */
4180 for (drive = 0; drive < 2; drive++) {
4181 drvp = &chp->ch_drive[drive];
4182 /* If no drive, skip */
4183 if ((drvp->drive_flags & DRIVE) == 0)
4184 continue;
4185 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
4186 HPT_IDETIM(chp->channel, drive));
4187
4188 /* add timing values, setup DMA if needed */
4189 if (drvp->drive_flags & DRIVE_UDMA) {
4190 /* use Ultra/DMA */
4191 drvp->drive_flags &= ~DRIVE_DMA;
4192 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
4193 drvp->UDMA_mode > 2)
4194 drvp->UDMA_mode = 2;
4195 switch (sc->sc_pp->ide_product) {
4196 case PCI_PRODUCT_TRIONES_HPT374:
4197 after = hpt374_udma[drvp->UDMA_mode];
4198 break;
4199 case PCI_PRODUCT_TRIONES_HPT372:
4200 after = hpt372_udma[drvp->UDMA_mode];
4201 break;
4202 case PCI_PRODUCT_TRIONES_HPT366:
4203 default:
4204 switch(revision) {
4205 case HPT372_REV:
4206 after = hpt372_udma[drvp->UDMA_mode];
4207 break;
4208 case HPT370_REV:
4209 case HPT370A_REV:
4210 after = hpt370_udma[drvp->UDMA_mode];
4211 break;
4212 case HPT366_REV:
4213 default:
4214 after = hpt366_udma[drvp->UDMA_mode];
4215 break;
4216 }
4217 }
4218 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4219 } else if (drvp->drive_flags & DRIVE_DMA) {
4220 /*
4221 * use Multiword DMA.
4222 * Timings will be used for both PIO and DMA, so adjust
4223 * DMA mode if needed
4224 */
4225 if (drvp->PIO_mode >= 3 &&
4226 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
4227 drvp->DMA_mode = drvp->PIO_mode - 2;
4228 }
4229 switch (sc->sc_pp->ide_product) {
4230 case PCI_PRODUCT_TRIONES_HPT374:
4231 after = hpt374_dma[drvp->DMA_mode];
4232 break;
4233 case PCI_PRODUCT_TRIONES_HPT372:
4234 after = hpt372_dma[drvp->DMA_mode];
4235 break;
4236 case PCI_PRODUCT_TRIONES_HPT366:
4237 default:
4238 switch(revision) {
4239 case HPT372_REV:
4240 after = hpt372_dma[drvp->DMA_mode];
4241 break;
4242 case HPT370_REV:
4243 case HPT370A_REV:
4244 after = hpt370_dma[drvp->DMA_mode];
4245 break;
4246 case HPT366_REV:
4247 default:
4248 after = hpt366_dma[drvp->DMA_mode];
4249 break;
4250 }
4251 }
4252 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4253 } else {
4254 /* PIO only */
4255 switch (sc->sc_pp->ide_product) {
4256 case PCI_PRODUCT_TRIONES_HPT374:
4257 after = hpt374_pio[drvp->PIO_mode];
4258 break;
4259 case PCI_PRODUCT_TRIONES_HPT372:
4260 after = hpt372_pio[drvp->PIO_mode];
4261 break;
4262 case PCI_PRODUCT_TRIONES_HPT366:
4263 default:
4264 switch(revision) {
4265 case HPT372_REV:
4266 after = hpt372_pio[drvp->PIO_mode];
4267 break;
4268 case HPT370_REV:
4269 case HPT370A_REV:
4270 after = hpt370_pio[drvp->PIO_mode];
4271 break;
4272 case HPT366_REV:
4273 default:
4274 after = hpt366_pio[drvp->PIO_mode];
4275 break;
4276 }
4277 }
4278 }
4279 pci_conf_write(sc->sc_pc, sc->sc_tag,
4280 HPT_IDETIM(chp->channel, drive), after);
4281 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
4282 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
4283 after, before), DEBUG_PROBE);
4284 }
4285 if (idedma_ctl != 0) {
4286 /* Add software bits in status register */
4287 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4288 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4289 idedma_ctl);
4290 }
4291 pciide_print_modes(cp);
4292 }
4293
4294 int
4295 hpt_pci_intr(arg)
4296 void *arg;
4297 {
4298 struct pciide_softc *sc = arg;
4299 struct pciide_channel *cp;
4300 struct channel_softc *wdc_cp;
4301 int rv = 0;
4302 int dmastat, i, crv;
4303
4304 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4305 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4306 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4307 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4308 IDEDMA_CTL_INTR)
4309 continue;
4310 cp = &sc->pciide_channels[i];
4311 wdc_cp = &cp->wdc_channel;
4312 crv = wdcintr(wdc_cp);
4313 if (crv == 0) {
4314 printf("%s:%d: bogus intr\n",
4315 sc->sc_wdcdev.sc_dev.dv_xname, i);
4316 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4317 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4318 } else
4319 rv = 1;
4320 }
4321 return rv;
4322 }
4323
4324
4325 /* Macros to test product */
4326 #define PDC_IS_262(sc) \
4327 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
4328 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4329 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4330 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4331 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4332 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4333 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4334 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4335 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4336 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4337 #define PDC_IS_265(sc) \
4338 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4339 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4340 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4341 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4342 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4343 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4344 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4345 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4346 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4347 #define PDC_IS_268(sc) \
4348 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4349 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4350 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4351 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4352 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4353 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4354 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4355 #define PDC_IS_276(sc) \
4356 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4357 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4358 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4359 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4360 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4361
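/*
 * pdc202xx_chip_map:
 *	Attach a Promise PDC202xx controller. For pre-20268 chips the
 *	PDC2xx_STATE register is used to turn off RAID mode, check which
 *	channels are enabled and program failsafe default timings, DMA
 *	and clock settings; the 20268 and newer chips need none of this.
 *	The UDMA capability is selected with the PDC_IS_262/265/276
 *	macros above.
 */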
4362 void
4363 pdc202xx_chip_map(sc, pa)
4364 struct pciide_softc *sc;
4365 struct pci_attach_args *pa;
4366 {
4367 struct pciide_channel *cp;
4368 int channel;
4369 pcireg_t interface, st, mode;
4370 bus_size_t cmdsize, ctlsize;
4371
4372 if (!PDC_IS_268(sc)) {
4373 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4374 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
4375 st), DEBUG_PROBE);
4376 }
4377 if (pciide_chipen(sc, pa) == 0)
4378 return;
4379
4380 /* turn off RAID mode */
4381 if (!PDC_IS_268(sc))
4382 st &= ~PDC2xx_STATE_IDERAID;
4383
4384 /*
4385 	 * can't rely on the PCI_CLASS_REG content if the chip was in RAID
4386 	 * mode. We have to fake the interface.
4387 */
4388 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
4389 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
4390 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4391
4392 aprint_normal("%s: bus-master DMA support present",
4393 sc->sc_wdcdev.sc_dev.dv_xname);
4394 pciide_mapreg_dma(sc, pa);
4395 aprint_normal("\n");
4396 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4397 WDC_CAPABILITY_MODE;
4398 if (sc->sc_dma_ok) {
4399 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4400 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4401 sc->sc_wdcdev.irqack = pciide_irqack;
4402 }
4403 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
4404 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
4405 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
4406 sc->sc_wdcdev.PIO_cap = 4;
4407 sc->sc_wdcdev.DMA_cap = 2;
4408 if (PDC_IS_276(sc))
4409 sc->sc_wdcdev.UDMA_cap = 6;
4410 else if (PDC_IS_265(sc))
4411 sc->sc_wdcdev.UDMA_cap = 5;
4412 else if (PDC_IS_262(sc))
4413 sc->sc_wdcdev.UDMA_cap = 4;
4414 else
4415 sc->sc_wdcdev.UDMA_cap = 2;
4416 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
4417 pdc20268_setup_channel : pdc202xx_setup_channel;
4418 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4419 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4420
4421 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
4422 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
4423 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
4424 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
4425 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
4426 }
4427
4428 if (!PDC_IS_268(sc)) {
4429 /* setup failsafe defaults */
4430 mode = 0;
4431 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
4432 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
4433 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
4434 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
4435 for (channel = 0;
4436 channel < sc->sc_wdcdev.nchannels;
4437 channel++) {
4438 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4439 "drive 0 initial timings 0x%x, now 0x%x\n",
4440 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4441 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
4442 DEBUG_PROBE);
4443 pci_conf_write(sc->sc_pc, sc->sc_tag,
4444 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
4445 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4446 "drive 1 initial timings 0x%x, now 0x%x\n",
4447 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4448 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
4449 pci_conf_write(sc->sc_pc, sc->sc_tag,
4450 PDC2xx_TIM(channel, 1), mode);
4451 }
4452
4453 mode = PDC2xx_SCR_DMA;
4454 if (PDC_IS_265(sc)) {
4455 mode = PDC2xx_SCR_SET_GEN(mode, PDC265_SCR_GEN_LAT);
4456 } else if (PDC_IS_262(sc)) {
4457 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
4458 } else {
4459 /* the BIOS set it up this way */
4460 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
4461 }
4462 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
4463 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
4464 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
4465 "now 0x%x\n",
4466 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4467 PDC2xx_SCR),
4468 mode), DEBUG_PROBE);
4469 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4470 PDC2xx_SCR, mode);
4471
4472 /* controller initial state register is OK even without BIOS */
4473 /* Set DMA mode to IDE DMA compatibility */
4474 mode =
4475 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
4476 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
4477 DEBUG_PROBE);
4478 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
4479 mode | 0x1);
4480 mode =
4481 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
4482 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
4483 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
4484 mode | 0x1);
4485 }
4486
4487 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4488 cp = &sc->pciide_channels[channel];
4489 if (pciide_chansetup(sc, channel, interface) == 0)
4490 continue;
4491 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
4492 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
4493 aprint_normal("%s: %s channel ignored (disabled)\n",
4494 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4495 continue;
4496 }
4497 if (PDC_IS_265(sc))
4498 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4499 pdc20265_pci_intr);
4500 else
4501 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4502 pdc202xx_pci_intr);
4503 if (cp->hw_ok == 0)
4504 continue;
4505 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
4506 st &= ~(PDC_IS_262(sc) ?
4507 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
4508 pciide_map_compat_intr(pa, cp, channel, interface);
4509 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
4510 }
4511 if (!PDC_IS_268(sc)) {
4512 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4513 "0x%x\n", st), DEBUG_PROBE);
4514 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4515 }
4516 return;
4517 }
4518
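/*
 * pdc202xx_setup_channel:
 *	Timing setup for the pre-20268 Promise chips. On the 262-class
 *	chips the per-channel PDC262_U66 bit is set when a drive uses a
 *	UDMA mode above 2, UDMA modes are trimmed according to the
 *	PDC262_STATE_80P cable bit, and the PDC262_ATAPI register is
 *	adjusted when ATAPI devices share the channel. The per-drive
 *	timing word is built from the pdc2xx_* tables and written to
 *	PDC2xx_TIM().
 */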
4519 void
4520 pdc202xx_setup_channel(chp)
4521 struct channel_softc *chp;
4522 {
4523 struct ata_drive_datas *drvp;
4524 int drive;
4525 pcireg_t mode, st;
4526 u_int32_t idedma_ctl, scr, atapi;
4527 struct pciide_channel *cp = (struct pciide_channel*)chp;
4528 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4529 int channel = chp->channel;
4530
4531 /* setup DMA if needed */
4532 pciide_channel_dma_setup(cp);
4533
4534 idedma_ctl = 0;
4535 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4536 sc->sc_wdcdev.sc_dev.dv_xname,
4537 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4538 DEBUG_PROBE);
4539
4540 /* Per channel settings */
4541 if (PDC_IS_262(sc)) {
4542 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4543 PDC262_U66);
4544 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4545 /* Trim UDMA mode */
4546 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4547 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4548 chp->ch_drive[0].UDMA_mode <= 2) ||
4549 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4550 chp->ch_drive[1].UDMA_mode <= 2)) {
4551 if (chp->ch_drive[0].UDMA_mode > 2)
4552 chp->ch_drive[0].UDMA_mode = 2;
4553 if (chp->ch_drive[1].UDMA_mode > 2)
4554 chp->ch_drive[1].UDMA_mode = 2;
4555 }
4556 /* Set U66 if needed */
4557 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4558 chp->ch_drive[0].UDMA_mode > 2) ||
4559 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4560 chp->ch_drive[1].UDMA_mode > 2))
4561 scr |= PDC262_U66_EN(channel);
4562 else
4563 scr &= ~PDC262_U66_EN(channel);
4564 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4565 PDC262_U66, scr);
4566 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4567 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4568 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4569 PDC262_ATAPI(channel))), DEBUG_PROBE);
4570 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4571 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4572 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4573 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4574 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4575 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4576 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4577 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4578 atapi = 0;
4579 else
4580 atapi = PDC262_ATAPI_UDMA;
4581 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4582 PDC262_ATAPI(channel), atapi);
4583 }
4584 }
4585 for (drive = 0; drive < 2; drive++) {
4586 drvp = &chp->ch_drive[drive];
4587 /* If no drive, skip */
4588 if ((drvp->drive_flags & DRIVE) == 0)
4589 continue;
4590 mode = 0;
4591 if (drvp->drive_flags & DRIVE_UDMA) {
4592 /* use Ultra/DMA */
4593 drvp->drive_flags &= ~DRIVE_DMA;
4594 mode = PDC2xx_TIM_SET_MB(mode,
4595 pdc2xx_udma_mb[drvp->UDMA_mode]);
4596 mode = PDC2xx_TIM_SET_MC(mode,
4597 pdc2xx_udma_mc[drvp->UDMA_mode]);
4598 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4599 } else if (drvp->drive_flags & DRIVE_DMA) {
4600 mode = PDC2xx_TIM_SET_MB(mode,
4601 pdc2xx_dma_mb[drvp->DMA_mode]);
4602 mode = PDC2xx_TIM_SET_MC(mode,
4603 pdc2xx_dma_mc[drvp->DMA_mode]);
4604 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4605 } else {
4606 mode = PDC2xx_TIM_SET_MB(mode,
4607 pdc2xx_dma_mb[0]);
4608 mode = PDC2xx_TIM_SET_MC(mode,
4609 pdc2xx_dma_mc[0]);
4610 }
4611 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4612 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4613 if (drvp->drive_flags & DRIVE_ATA)
4614 mode |= PDC2xx_TIM_PRE;
4615 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4616 if (drvp->PIO_mode >= 3) {
4617 mode |= PDC2xx_TIM_IORDY;
4618 if (drive == 0)
4619 mode |= PDC2xx_TIM_IORDYp;
4620 }
4621 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4622 "timings 0x%x\n",
4623 sc->sc_wdcdev.sc_dev.dv_xname,
4624 chp->channel, drive, mode), DEBUG_PROBE);
4625 pci_conf_write(sc->sc_pc, sc->sc_tag,
4626 PDC2xx_TIM(chp->channel, drive), mode);
4627 }
4628 if (idedma_ctl != 0) {
4629 /* Add software bits in status register */
4630 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4631 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4632 idedma_ctl);
4633 }
4634 pciide_print_modes(cp);
4635 }
4636
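/*
 * pdc20268_setup_channel:
 *	The 20268 and newer chips snoop the SET FEATURES command, so no
 *	timing registers are programmed here; only the cable type is
 *	checked (UDMA modes above 2 are clamped when the cable detect
 *	says they are not possible) and the software DMA status bits are
 *	set.
 */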
4637 void
4638 pdc20268_setup_channel(chp)
4639 struct channel_softc *chp;
4640 {
4641 struct ata_drive_datas *drvp;
4642 int drive;
4643 u_int32_t idedma_ctl;
4644 struct pciide_channel *cp = (struct pciide_channel*)chp;
4645 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4646 int u100;
4647
4648 /* setup DMA if needed */
4649 pciide_channel_dma_setup(cp);
4650
4651 idedma_ctl = 0;
4652
4653 /* I don't know what this is for, FreeBSD does it ... */
4654 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4655 IDEDMA_CMD + 0x1 + IDEDMA_SCH_OFFSET * chp->channel, 0x0b);
4656
4657 /*
4658 * cable type detect, from FreeBSD
4659 */
4660 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4661 IDEDMA_CMD + 0x3 + IDEDMA_SCH_OFFSET * chp->channel) & 0x04) ?
4662 0 : 1;
4663
4664 for (drive = 0; drive < 2; drive++) {
4665 drvp = &chp->ch_drive[drive];
4666 /* If no drive, skip */
4667 if ((drvp->drive_flags & DRIVE) == 0)
4668 continue;
4669 if (drvp->drive_flags & DRIVE_UDMA) {
4670 /* use Ultra/DMA */
4671 drvp->drive_flags &= ~DRIVE_DMA;
4672 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4673 if (drvp->UDMA_mode > 2 && u100 == 0)
4674 drvp->UDMA_mode = 2;
4675 } else if (drvp->drive_flags & DRIVE_DMA) {
4676 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4677 }
4678 }
4679 	/* nothing to do to set up modes; the controller snoops the SET_FEATURES command */
4680 if (idedma_ctl != 0) {
4681 /* Add software bits in status register */
4682 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4683 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4684 idedma_ctl);
4685 }
4686 pciide_print_modes(cp);
4687 }
4688
4689 int
4690 pdc202xx_pci_intr(arg)
4691 void *arg;
4692 {
4693 struct pciide_softc *sc = arg;
4694 struct pciide_channel *cp;
4695 struct channel_softc *wdc_cp;
4696 int i, rv, crv;
4697 u_int32_t scr;
4698
4699 rv = 0;
4700 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4701 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4702 cp = &sc->pciide_channels[i];
4703 wdc_cp = &cp->wdc_channel;
4704 /* If a compat channel skip. */
4705 if (cp->compat)
4706 continue;
4707 if (scr & PDC2xx_SCR_INT(i)) {
4708 crv = wdcintr(wdc_cp);
4709 if (crv == 0)
4710 printf("%s:%d: bogus intr (reg 0x%x)\n",
4711 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4712 else
4713 rv = 1;
4714 }
4715 }
4716 return rv;
4717 }
4718
4719 int
4720 pdc20265_pci_intr(arg)
4721 void *arg;
4722 {
4723 struct pciide_softc *sc = arg;
4724 struct pciide_channel *cp;
4725 struct channel_softc *wdc_cp;
4726 int i, rv, crv;
4727 u_int32_t dmastat;
4728
4729 rv = 0;
4730 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4731 cp = &sc->pciide_channels[i];
4732 wdc_cp = &cp->wdc_channel;
4733 /* If a compat channel skip. */
4734 if (cp->compat)
4735 continue;
4736 /*
4737 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
4738 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
4739 * So use it instead (requires 2 reg reads instead of 1,
4740 * but we can't do it another way).
4741 */
4742 dmastat = bus_space_read_1(sc->sc_dma_iot,
4743 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4744 if((dmastat & IDEDMA_CTL_INTR) == 0)
4745 continue;
4746 crv = wdcintr(wdc_cp);
4747 if (crv == 0)
4748 printf("%s:%d: bogus intr\n",
4749 sc->sc_wdcdev.sc_dev.dv_xname, i);
4750 else
4751 rv = 1;
4752 }
4753 return rv;
4754 }
4755
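/*
 * pdc20262_dma_start / pdc20262_dma_finish:
 *	DMA start/finish hooks for the Ultra/66 and Ultra/100 chips.
 *	For LBA48 transfers the transfer direction and length are
 *	written to the PDC262_ATAPI register before the transfer is
 *	started, and the register is restored once it completes.
 */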
4756 static void
4757 pdc20262_dma_start(v, channel, drive)
4758 void *v;
4759 int channel, drive;
4760 {
4761 struct pciide_softc *sc = v;
4762 struct pciide_dma_maps *dma_maps =
4763 &sc->pciide_channels[channel].dma_maps[drive];
4764 int atapi;
4765
4766 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4767 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4768 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4769 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4770 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4771 PDC262_ATAPI(channel), atapi);
4772 }
4773
4774 pciide_dma_start(v, channel, drive);
4775 }
4776
4777 int
4778 pdc20262_dma_finish(v, channel, drive, force)
4779 void *v;
4780 int channel, drive;
4781 int force;
4782 {
4783 struct pciide_softc *sc = v;
4784 struct pciide_dma_maps *dma_maps =
4785 &sc->pciide_channels[channel].dma_maps[drive];
4786 struct channel_softc *chp;
4787 int atapi, error;
4788
4789 error = pciide_dma_finish(v, channel, drive, force);
4790
4791 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4792 chp = sc->wdc_chanarray[channel];
4793 atapi = 0;
4794 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4795 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4796 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4797 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4798 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4799 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4800 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4801 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4802 atapi = PDC262_ATAPI_UDMA;
4803 }
4804 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4805 PDC262_ATAPI(channel), atapi);
4806 }
4807
4808 return error;
4809 }
4810
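/*
 * opti_chip_map:
 *	Attach an OPTi IDE controller. Bus-master DMA is left disabled on
 *	chip revisions <= 0x12 to work around buggy implementations (see
 *	PR/11644); otherwise the two channels are mapped and their
 *	timings programmed by opti_setup_channel().
 */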
4811 void
4812 opti_chip_map(sc, pa)
4813 struct pciide_softc *sc;
4814 struct pci_attach_args *pa;
4815 {
4816 struct pciide_channel *cp;
4817 bus_size_t cmdsize, ctlsize;
4818 pcireg_t interface;
4819 u_int8_t init_ctrl;
4820 int channel;
4821
4822 if (pciide_chipen(sc, pa) == 0)
4823 return;
4824 aprint_normal("%s: bus-master DMA support present",
4825 sc->sc_wdcdev.sc_dev.dv_xname);
4826
4827 /*
4828 * XXXSCW:
4829 * There seem to be a couple of buggy revisions/implementations
4830 * of the OPTi pciide chipset. This kludge seems to fix one of
4831 * the reported problems (PR/11644) but still fails for the
4832 * other (PR/13151), although the latter may be due to other
4833 * issues too...
4834 */
4835 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4836 aprint_normal(" but disabled due to chip rev. <= 0x12");
4837 sc->sc_dma_ok = 0;
4838 } else
4839 pciide_mapreg_dma(sc, pa);
4840
4841 aprint_normal("\n");
4842
4843 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4844 WDC_CAPABILITY_MODE;
4845 sc->sc_wdcdev.PIO_cap = 4;
4846 if (sc->sc_dma_ok) {
4847 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4848 sc->sc_wdcdev.irqack = pciide_irqack;
4849 sc->sc_wdcdev.DMA_cap = 2;
4850 }
4851 sc->sc_wdcdev.set_modes = opti_setup_channel;
4852
4853 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4854 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4855
4856 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4857 OPTI_REG_INIT_CONTROL);
4858
4859 interface = PCI_INTERFACE(pa->pa_class);
4860
4861 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4862 cp = &sc->pciide_channels[channel];
4863 if (pciide_chansetup(sc, channel, interface) == 0)
4864 continue;
4865 if (channel == 1 &&
4866 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4867 aprint_normal("%s: %s channel ignored (disabled)\n",
4868 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4869 continue;
4870 }
4871 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4872 pciide_pci_intr);
4873 if (cp->hw_ok == 0)
4874 continue;
4875 pciide_map_compat_intr(pa, cp, channel, interface);
4876 if (cp->hw_ok == 0)
4877 continue;
4878 opti_setup_channel(&cp->wdc_channel);
4879 }
4880 }
4881
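/*
 * opti_setup_channel:
 *	Timing setup for the OPTi controller. Timings are disabled via
 *	OPTI_REG_CONTROL while reprogramming, the PCI bus speed is read
 *	from the strap register to index the timing tables, and since
 *	both drives must share the same address setup time the faster
 *	drive is slowed down to match the slower one.
 */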
4882 void
4883 opti_setup_channel(chp)
4884 struct channel_softc *chp;
4885 {
4886 struct ata_drive_datas *drvp;
4887 struct pciide_channel *cp = (struct pciide_channel*)chp;
4888 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4889 int drive, spd;
4890 int mode[2];
4891 u_int8_t rv, mr;
4892
4893 /*
4894 * The `Delay' and `Address Setup Time' fields of the
4895 * Miscellaneous Register are always zero initially.
4896 */
4897 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4898 mr &= ~(OPTI_MISC_DELAY_MASK |
4899 OPTI_MISC_ADDR_SETUP_MASK |
4900 OPTI_MISC_INDEX_MASK);
4901
4902 /* Prime the control register before setting timing values */
4903 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4904
4905 /* Determine the clockrate of the PCIbus the chip is attached to */
4906 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4907 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4908
4909 /* setup DMA if needed */
4910 pciide_channel_dma_setup(cp);
4911
4912 for (drive = 0; drive < 2; drive++) {
4913 drvp = &chp->ch_drive[drive];
4914 /* If no drive, skip */
4915 if ((drvp->drive_flags & DRIVE) == 0) {
4916 mode[drive] = -1;
4917 continue;
4918 }
4919
4920 if ((drvp->drive_flags & DRIVE_DMA)) {
4921 /*
4922 * Timings will be used for both PIO and DMA,
4923 * so adjust DMA mode if needed
4924 */
4925 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4926 drvp->PIO_mode = drvp->DMA_mode + 2;
4927 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4928 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4929 drvp->PIO_mode - 2 : 0;
4930 if (drvp->DMA_mode == 0)
4931 drvp->PIO_mode = 0;
4932
4933 mode[drive] = drvp->DMA_mode + 5;
4934 } else
4935 mode[drive] = drvp->PIO_mode;
4936
4937 if (drive && mode[0] >= 0 &&
4938 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4939 /*
4940 * Can't have two drives using different values
4941 * for `Address Setup Time'.
4942 * Slow down the faster drive to compensate.
4943 */
4944 int d = (opti_tim_as[spd][mode[0]] >
4945 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4946
4947 mode[d] = mode[1-d];
4948 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4949 chp->ch_drive[d].DMA_mode = 0;
4950 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4951 }
4952 }
4953
4954 for (drive = 0; drive < 2; drive++) {
4955 int m;
4956 if ((m = mode[drive]) < 0)
4957 continue;
4958
4959 /* Set the Address Setup Time and select appropriate index */
4960 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4961 rv |= OPTI_MISC_INDEX(drive);
4962 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4963
4964 /* Set the pulse width and recovery timing parameters */
4965 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4966 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4967 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4968 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4969
4970 /* Set the Enhanced Mode register appropriately */
4971 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4972 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4973 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4974 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4975 }
4976
4977 /* Finally, enable the timings */
4978 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4979
4980 pciide_print_modes(cp);
4981 }
4982
4983 #define ACARD_IS_850(sc) \
4984 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4985
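/*
 * acard_chip_map:
 *	Attach an Acard ATP850/ATP860 controller. The ATP850U is limited
 *	to UDMA2, the newer chips to UDMA4. Both channels are mapped and
 *	attached here; on the non-850 chips the ATP860_CTRL_INT bit is
 *	cleared once the channels are up.
 */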
4986 void
4987 acard_chip_map(sc, pa)
4988 struct pciide_softc *sc;
4989 struct pci_attach_args *pa;
4990 {
4991 struct pciide_channel *cp;
4992 int i;
4993 pcireg_t interface;
4994 bus_size_t cmdsize, ctlsize;
4995
4996 if (pciide_chipen(sc, pa) == 0)
4997 return;
4998
4999 /*
5000 * when the chip is in native mode it identifies itself as a
5001 * 'misc mass storage'. Fake interface in this case.
5002 */
5003 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
5004 interface = PCI_INTERFACE(pa->pa_class);
5005 } else {
5006 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
5007 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
5008 }
5009
5010 aprint_normal("%s: bus-master DMA support present",
5011 sc->sc_wdcdev.sc_dev.dv_xname);
5012 pciide_mapreg_dma(sc, pa);
5013 aprint_normal("\n");
5014 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5015 WDC_CAPABILITY_MODE;
5016
5017 if (sc->sc_dma_ok) {
5018 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5019 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5020 sc->sc_wdcdev.irqack = pciide_irqack;
5021 }
5022 sc->sc_wdcdev.PIO_cap = 4;
5023 sc->sc_wdcdev.DMA_cap = 2;
5024 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
5025
5026 sc->sc_wdcdev.set_modes = acard_setup_channel;
5027 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5028 sc->sc_wdcdev.nchannels = 2;
5029
5030 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5031 cp = &sc->pciide_channels[i];
5032 if (pciide_chansetup(sc, i, interface) == 0)
5033 continue;
5034 if (interface & PCIIDE_INTERFACE_PCI(i)) {
5035 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
5036 &ctlsize, pciide_pci_intr);
5037 } else {
5038 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
5039 &cmdsize, &ctlsize);
5040 }
5041 if (cp->hw_ok == 0)
5042 return;
5043 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
5044 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
5045 wdcattach(&cp->wdc_channel);
5046 acard_setup_channel(&cp->wdc_channel);
5047 }
5048 if (!ACARD_IS_850(sc)) {
5049 u_int32_t reg;
5050 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
5051 reg &= ~ATP860_CTRL_INT;
5052 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
5053 }
5054 }
5055
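/*
 * acard_setup_channel:
 *	Per-drive timing setup. The ATP850 and ATP860 use different
 *	IDETIME/UDMA register layouts; on the ATP860 UDMA modes above 2
 *	are clamped when the ATP860_CTRL_80P cable bit is set. Active and
 *	recovery timings come from the acard_act_ and acard_rec_ tables.
 */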
5056 void
5057 acard_setup_channel(chp)
5058 struct channel_softc *chp;
5059 {
5060 struct ata_drive_datas *drvp;
5061 struct pciide_channel *cp = (struct pciide_channel*)chp;
5062 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5063 int channel = chp->channel;
5064 int drive;
5065 u_int32_t idetime, udma_mode;
5066 u_int32_t idedma_ctl;
5067
5068 /* setup DMA if needed */
5069 pciide_channel_dma_setup(cp);
5070
5071 if (ACARD_IS_850(sc)) {
5072 idetime = 0;
5073 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
5074 udma_mode &= ~ATP850_UDMA_MASK(channel);
5075 } else {
5076 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
5077 idetime &= ~ATP860_SETTIME_MASK(channel);
5078 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
5079 udma_mode &= ~ATP860_UDMA_MASK(channel);
5080
5081 		/* check for 80-pin cable */
5082 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
5083 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
5084 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5085 & ATP860_CTRL_80P(chp->channel)) {
5086 if (chp->ch_drive[0].UDMA_mode > 2)
5087 chp->ch_drive[0].UDMA_mode = 2;
5088 if (chp->ch_drive[1].UDMA_mode > 2)
5089 chp->ch_drive[1].UDMA_mode = 2;
5090 }
5091 }
5092 }
5093
5094 idedma_ctl = 0;
5095
5096 /* Per drive settings */
5097 for (drive = 0; drive < 2; drive++) {
5098 drvp = &chp->ch_drive[drive];
5099 /* If no drive, skip */
5100 if ((drvp->drive_flags & DRIVE) == 0)
5101 continue;
5102 /* add timing values, setup DMA if needed */
5103 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5104 (drvp->drive_flags & DRIVE_UDMA)) {
5105 /* use Ultra/DMA */
5106 if (ACARD_IS_850(sc)) {
5107 idetime |= ATP850_SETTIME(drive,
5108 acard_act_udma[drvp->UDMA_mode],
5109 acard_rec_udma[drvp->UDMA_mode]);
5110 udma_mode |= ATP850_UDMA_MODE(channel, drive,
5111 acard_udma_conf[drvp->UDMA_mode]);
5112 } else {
5113 idetime |= ATP860_SETTIME(channel, drive,
5114 acard_act_udma[drvp->UDMA_mode],
5115 acard_rec_udma[drvp->UDMA_mode]);
5116 udma_mode |= ATP860_UDMA_MODE(channel, drive,
5117 acard_udma_conf[drvp->UDMA_mode]);
5118 }
5119 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5120 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5121 (drvp->drive_flags & DRIVE_DMA)) {
5122 /* use Multiword DMA */
5123 drvp->drive_flags &= ~DRIVE_UDMA;
5124 if (ACARD_IS_850(sc)) {
5125 idetime |= ATP850_SETTIME(drive,
5126 acard_act_dma[drvp->DMA_mode],
5127 acard_rec_dma[drvp->DMA_mode]);
5128 } else {
5129 idetime |= ATP860_SETTIME(channel, drive,
5130 acard_act_dma[drvp->DMA_mode],
5131 acard_rec_dma[drvp->DMA_mode]);
5132 }
5133 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5134 } else {
5135 /* PIO only */
5136 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5137 if (ACARD_IS_850(sc)) {
5138 idetime |= ATP850_SETTIME(drive,
5139 acard_act_pio[drvp->PIO_mode],
5140 acard_rec_pio[drvp->PIO_mode]);
5141 } else {
5142 idetime |= ATP860_SETTIME(channel, drive,
5143 acard_act_pio[drvp->PIO_mode],
5144 acard_rec_pio[drvp->PIO_mode]);
5145 }
5146 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
5147 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5148 | ATP8x0_CTRL_EN(channel));
5149 }
5150 }
5151
5152 if (idedma_ctl != 0) {
5153 /* Add software bits in status register */
5154 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5155 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5156 }
5157 pciide_print_modes(cp);
5158
5159 if (ACARD_IS_850(sc)) {
5160 pci_conf_write(sc->sc_pc, sc->sc_tag,
5161 ATP850_IDETIME(channel), idetime);
5162 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
5163 } else {
5164 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
5165 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
5166 }
5167 }
5168
5169 int
5170 acard_pci_intr(arg)
5171 void *arg;
5172 {
5173 struct pciide_softc *sc = arg;
5174 struct pciide_channel *cp;
5175 struct channel_softc *wdc_cp;
5176 int rv = 0;
5177 int dmastat, i, crv;
5178
5179 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5180 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5181 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5182 if ((dmastat & IDEDMA_CTL_INTR) == 0)
5183 continue;
5184 cp = &sc->pciide_channels[i];
5185 wdc_cp = &cp->wdc_channel;
5186 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
5187 (void)wdcintr(wdc_cp);
5188 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5189 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5190 continue;
5191 }
5192 crv = wdcintr(wdc_cp);
5193 if (crv == 0)
5194 printf("%s:%d: bogus intr\n",
5195 sc->sc_wdcdev.sc_dev.dv_xname, i);
5196 else if (crv == 1)
5197 rv = 1;
5198 else if (rv == 0)
5199 rv = crv;
5200 }
5201 return rv;
5202 }
5203
5204 static int
5205 sl82c105_bugchk(struct pci_attach_args *pa)
5206 {
5207
5208 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
5209 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
5210 return (0);
5211
5212 if (PCI_REVISION(pa->pa_class) <= 0x05)
5213 return (1);
5214
5215 return (0);
5216 }
5217
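/*
 * Symphony Labs SL82C105 IDE, as found embedded in the Winbond
 * W83C553F southbridge.
 */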
5218 void
5219 sl82c105_chip_map(sc, pa)
5220 struct pciide_softc *sc;
5221 struct pci_attach_args *pa;
5222 {
5223 struct pciide_channel *cp;
5224 bus_size_t cmdsize, ctlsize;
5225 pcireg_t interface, idecr;
5226 int channel;
5227
5228 if (pciide_chipen(sc, pa) == 0)
5229 return;
5230
5231 aprint_normal("%s: bus-master DMA support present",
5232 sc->sc_wdcdev.sc_dev.dv_xname);
5233
5234 /*
5235 * Check to see if we're part of the Winbond 83c553 Southbridge.
5236 * If so, we need to disable DMA on rev. <= 5 of that chip.
5237 */
5238 if (pci_find_device(pa, sl82c105_bugchk)) {
5239 aprint_normal(" but disabled due to 83c553 rev. <= 0x05");
5240 sc->sc_dma_ok = 0;
5241 } else
5242 pciide_mapreg_dma(sc, pa);
5243 aprint_normal("\n");
5244
5245 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
5246 WDC_CAPABILITY_MODE;
5247 sc->sc_wdcdev.PIO_cap = 4;
5248 if (sc->sc_dma_ok) {
5249 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
5250 sc->sc_wdcdev.irqack = pciide_irqack;
5251 sc->sc_wdcdev.DMA_cap = 2;
5252 }
5253 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
5254
5255 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5256 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5257
5258 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
5259
5260 interface = PCI_INTERFACE(pa->pa_class);
5261
5262 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5263 cp = &sc->pciide_channels[channel];
5264 if (pciide_chansetup(sc, channel, interface) == 0)
5265 continue;
5266 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
5267 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
5268 aprint_normal("%s: %s channel ignored (disabled)\n",
5269 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
5270 continue;
5271 }
5272 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5273 pciide_pci_intr);
5274 if (cp->hw_ok == 0)
5275 continue;
5276 pciide_map_compat_intr(pa, cp, channel, interface);
5277 if (cp->hw_ok == 0)
5278 continue;
5279 sl82c105_setup_channel(&cp->wdc_channel);
5280 }
5281 }
5282
5283 void
5284 sl82c105_setup_channel(chp)
5285 struct channel_softc *chp;
5286 {
5287 struct ata_drive_datas *drvp;
5288 struct pciide_channel *cp = (struct pciide_channel*)chp;
5289 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5290 int pxdx_reg, drive;
5291 pcireg_t pxdx;
5292
5293 /* Set up DMA if needed. */
5294 pciide_channel_dma_setup(cp);
5295
5296 for (drive = 0; drive < 2; drive++) {
5297 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
5298 : SYMPH_P1D0CR) + (drive * 4);
5299
5300 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
5301
5302 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
5303 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
5304
5305 drvp = &chp->ch_drive[drive];
5306 /* If no drive, skip. */
5307 if ((drvp->drive_flags & DRIVE) == 0) {
5308 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5309 continue;
5310 }
5311
5312 if (drvp->drive_flags & DRIVE_DMA) {
5313 /*
5314 * Timings will be used for both PIO and DMA,
5315 * so adjust DMA mode if needed.
5316 */
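			/*
			 * In effect this caps multi-word DMA at
			 * PIO_mode - 2 (e.g. PIO 4 -> DMA 2, PIO 3 ->
			 * DMA 1) and falls back to PIO only when the
			 * drive is below PIO 3 or would be left with
			 * DMA mode 0.
			 */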
5317 if (drvp->PIO_mode >= 3) {
5318 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
5319 drvp->DMA_mode = drvp->PIO_mode - 2;
5320 if (drvp->DMA_mode < 1) {
5321 /*
5322 * Can't mix both PIO and DMA.
5323 * Disable DMA.
5324 */
5325 drvp->drive_flags &= ~DRIVE_DMA;
5326 }
5327 } else {
5328 /*
5329 * Can't mix both PIO and DMA. Disable
5330 * DMA.
5331 */
5332 drvp->drive_flags &= ~DRIVE_DMA;
5333 }
5334 }
5335
5336 if (drvp->drive_flags & DRIVE_DMA) {
5337 /* Use multi-word DMA. */
5338 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
5339 PxDx_CMD_ON_SHIFT;
5340 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
5341 } else {
5342 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
5343 PxDx_CMD_ON_SHIFT;
5344 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
5345 }
5346
5347 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
5348
5349 /* ...and set the mode for this drive. */
5350 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5351 }
5352
5353 pciide_print_modes(cp);
5354 }
5355
5356 void
5357 serverworks_chip_map(sc, pa)
5358 struct pciide_softc *sc;
5359 struct pci_attach_args *pa;
5360 {
5361 struct pciide_channel *cp;
5362 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
5363 pcitag_t pcib_tag;
5364 int channel;
5365 bus_size_t cmdsize, ctlsize;
5366
5367 if (pciide_chipen(sc, pa) == 0)
5368 return;
5369
5370 aprint_normal("%s: bus-master DMA support present",
5371 sc->sc_wdcdev.sc_dev.dv_xname);
5372 pciide_mapreg_dma(sc, pa);
5373 aprint_normal("\n");
5374 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5375 WDC_CAPABILITY_MODE;
5376
5377 if (sc->sc_dma_ok) {
5378 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5379 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5380 sc->sc_wdcdev.irqack = pciide_irqack;
5381 }
5382 sc->sc_wdcdev.PIO_cap = 4;
5383 sc->sc_wdcdev.DMA_cap = 2;
5384 switch (sc->sc_pp->ide_product) {
5385 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
5386 sc->sc_wdcdev.UDMA_cap = 2;
5387 break;
5388 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
5389 if (PCI_REVISION(pa->pa_class) < 0x92)
5390 sc->sc_wdcdev.UDMA_cap = 4;
5391 else
5392 sc->sc_wdcdev.UDMA_cap = 5;
5393 break;
5394 case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
5395 sc->sc_wdcdev.UDMA_cap = 5;
5396 break;
5397 }
5398
5399 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
5400 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5401 sc->sc_wdcdev.nchannels = 2;
5402
5403 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5404 cp = &sc->pciide_channels[channel];
5405 if (pciide_chansetup(sc, channel, interface) == 0)
5406 continue;
5407 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5408 serverworks_pci_intr);
5409 if (cp->hw_ok == 0)
5410 return;
5411 pciide_map_compat_intr(pa, cp, channel, interface);
5412 if (cp->hw_ok == 0)
5413 return;
5414 serverworks_setup_channel(&cp->wdc_channel);
5415 }
5416
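	/*
	 * Tweak config register 0x64 of the PCI-ISA bridge (function 0
	 * of this device): clear bit 13 and set bit 14.  The meaning of
	 * these bits is not documented here.
	 */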
5417 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
5418 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
5419 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
5420 }
5421
5422 void
5423 serverworks_setup_channel(chp)
5424 struct channel_softc *chp;
5425 {
5426 struct ata_drive_datas *drvp;
5427 struct pciide_channel *cp = (struct pciide_channel*)chp;
5428 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5429 int channel = chp->channel;
5430 int drive, unit;
5431 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
5432 u_int32_t idedma_ctl;
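	/*
	 * Per-mode timing bytes merged into the PIO (reg 0x40) and
	 * multi-word DMA (reg 0x44) timing registers below, indexed by
	 * PIO and DMA mode respectively.
	 */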
5433 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
5434 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
5435
5436 /* setup DMA if needed */
5437 pciide_channel_dma_setup(cp);
5438
5439 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
5440 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
5441 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
5442 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
5443
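	/*
	 * Clear this channel's fields in each register before the
	 * per-drive values are merged back in below.
	 */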
5444 pio_time &= ~(0xffff << (16 * channel));
5445 dma_time &= ~(0xffff << (16 * channel));
5446 pio_mode &= ~(0xff << (8 * channel + 16));
5447 udma_mode &= ~(0xff << (8 * channel + 16));
5448 udma_mode &= ~(3 << (2 * channel));
5449
5450 idedma_ctl = 0;
5451
5452 /* Per drive settings */
5453 for (drive = 0; drive < 2; drive++) {
5454 drvp = &chp->ch_drive[drive];
5455 /* If no drive, skip */
5456 if ((drvp->drive_flags & DRIVE) == 0)
5457 continue;
5458 unit = drive + 2 * channel;
5459 /* add timing values, setup DMA if needed */
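		/*
		 * The timing registers keep drive 0 in the upper byte of
		 * each channel's 16-bit field, hence the (unit ^ 1) byte
		 * index below.
		 */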
5460 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
5461 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
5462 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5463 (drvp->drive_flags & DRIVE_UDMA)) {
5464 /* use Ultra/DMA, check for 80-pin cable */
5465 if (drvp->UDMA_mode > 2 &&
5466 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
5467 drvp->UDMA_mode = 2;
5468 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5469 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
5470 udma_mode |= 1 << unit;
5471 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5472 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5473 (drvp->drive_flags & DRIVE_DMA)) {
5474 /* use Multiword DMA */
5475 drvp->drive_flags &= ~DRIVE_UDMA;
5476 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5477 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5478 } else {
5479 /* PIO only */
5480 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5481 }
5482 }
5483
5484 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
5485 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
5486 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
5487 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
5488 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
5489
5490 if (idedma_ctl != 0) {
5491 /* Add software bits in status register */
5492 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5493 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5494 }
5495 pciide_print_modes(cp);
5496 }
5497
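/*
 * Interrupt handler shared by both ServerWorks channels.  A channel is
 * only serviced when its bus-master status shows the interrupt bit set
 * and the active bit clear; spurious interrupts are acked by writing
 * the status back.
 */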
5498 int
5499 serverworks_pci_intr(arg)
5500 void *arg;
5501 {
5502 struct pciide_softc *sc = arg;
5503 struct pciide_channel *cp;
5504 struct channel_softc *wdc_cp;
5505 int rv = 0;
5506 int dmastat, i, crv;
5507
5508 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5509 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5510 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5511 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
5512 IDEDMA_CTL_INTR)
5513 continue;
5514 cp = &sc->pciide_channels[i];
5515 wdc_cp = &cp->wdc_channel;
5516 crv = wdcintr(wdc_cp);
5517 if (crv == 0) {
5518 printf("%s:%d: bogus intr\n",
5519 sc->sc_wdcdev.sc_dev.dv_xname, i);
5520 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5521 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5522 } else
5523 rv = 1;
5524 }
5525 return rv;
5526 }
5527
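/*
 * Intel i31244 "Artisea" serial ATA controller, driven here through the
 * standard pciide channel interface; drive modes are programmed with the
 * generic sata_setup_channel().
 */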
5528 void
5529 artisea_chip_map(sc, pa)
5530 struct pciide_softc *sc;
5531 struct pci_attach_args *pa;
5532 {
5533 struct pciide_channel *cp;
5534 bus_size_t cmdsize, ctlsize;
5535 pcireg_t interface;
5536 int channel;
5537
5538 if (pciide_chipen(sc, pa) == 0)
5539 return;
5540
5541 	aprint_normal("%s: bus-master DMA support present",
5542 sc->sc_wdcdev.sc_dev.dv_xname);
5543 #ifndef PCIIDE_I31244_ENABLEDMA
5544 if (PCI_REVISION(pa->pa_class) == 0) {
5545 aprint_normal(" but disabled due to rev. 0");
5546 sc->sc_dma_ok = 0;
5547 } else
5548 #endif
5549 pciide_mapreg_dma(sc, pa);
5550 aprint_normal("\n");
5551
5552 /*
5553 * XXX Configure LEDs to show activity.
5554 */
5555
5556 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5557 WDC_CAPABILITY_MODE;
5558 sc->sc_wdcdev.PIO_cap = 4;
5559 if (sc->sc_dma_ok) {
5560 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5561 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5562 sc->sc_wdcdev.irqack = pciide_irqack;
5563 sc->sc_wdcdev.DMA_cap = 2;
5564 sc->sc_wdcdev.UDMA_cap = 6;
5565 }
5566 sc->sc_wdcdev.set_modes = sata_setup_channel;
5567
5568 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5569 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5570
5571 interface = PCI_INTERFACE(pa->pa_class);
5572
5573 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5574 cp = &sc->pciide_channels[channel];
5575 if (pciide_chansetup(sc, channel, interface) == 0)
5576 continue;
5577 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5578 pciide_pci_intr);
5579 if (cp->hw_ok == 0)
5580 continue;
5581 pciide_map_compat_intr(pa, cp, channel, interface);
5582 sata_setup_channel(&cp->wdc_channel);
5583 }
5584 }
5585