1 /* $NetBSD: pciide.c,v 1.204 2003/09/21 11:30:43 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.204 2003/09/21 11:30:43 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
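/*
 * Debug output is gated by wdcdebug_pciide_mask: set it (for instance from
 * the kernel debugger, or by patching the variable) to an OR of the DEBUG_*
 * bits above and the matching WDCDEBUG_PRINT() calls below are printed.
 */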
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/pciide_i31244_reg.h>
123 #include <dev/pci/pciide_sii3112_reg.h>
124 #include <dev/pci/cy82c693var.h>
125
126 #include "opt_pciide.h"
127
128 static const char dmaerrfmt[] =
129 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
130
131 /* inlines for reading/writing 8-bit PCI registers */
132 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
133 int));
134 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
135 int, u_int8_t));
136
137 static __inline u_int8_t
138 pciide_pci_read(pc, pa, reg)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 {
143
144 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
145 ((reg & 0x03) * 8) & 0xff);
146 }
147
148 static __inline void
149 pciide_pci_write(pc, pa, reg, val)
150 pci_chipset_tag_t pc;
151 pcitag_t pa;
152 int reg;
153 u_int8_t val;
154 {
155 pcireg_t pcival;
156
157 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
158 pcival &= ~(0xff << ((reg & 0x03) * 8));
159 pcival |= (val << ((reg & 0x03) * 8));
160 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
161 }
162
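/*
 * For example, pciide_pci_read(pc, tag, 0x4d) reads the 32-bit word at
 * offset 0x4c and returns bits 15:8, and pciide_pci_write() does the
 * matching read-modify-write; this is needed because pci_conf_read() and
 * pci_conf_write() only operate on aligned 32-bit configuration registers.
 */
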
163 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164
165 void sata_setup_channel __P((struct channel_softc*));
166
167 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void piix_setup_channel __P((struct channel_softc*));
169 void piix3_4_setup_channel __P((struct channel_softc*));
170 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
171 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
172 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
173
174 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void amd7x6_setup_channel __P((struct channel_softc*));
176
177 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void apollo_sata_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void apollo_setup_channel __P((struct channel_softc*));
180
181 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void cmd0643_9_setup_channel __P((struct channel_softc*));
184 void cmd_channel_map __P((struct pci_attach_args *,
185 struct pciide_softc *, int));
186 int cmd_pci_intr __P((void *));
187 void cmd646_9_irqack __P((struct channel_softc *));
188 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void cmd680_setup_channel __P((struct channel_softc*));
190 void cmd680_channel_map __P((struct pci_attach_args *,
191 struct pciide_softc *, int));
192
193 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void cmd3112_setup_channel __P((struct channel_softc*));
195
196 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void cy693_setup_channel __P((struct channel_softc*));
198
199 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
200 void sis_setup_channel __P((struct channel_softc*));
201 void sis96x_setup_channel __P((struct channel_softc*));
202 static int sis_hostbr_match __P(( struct pci_attach_args *));
203 static int sis_south_match __P(( struct pci_attach_args *));
204
205 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void acer_setup_channel __P((struct channel_softc*));
207 int acer_pci_intr __P((void *));
208
209 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void pdc202xx_setup_channel __P((struct channel_softc*));
211 void pdc20268_setup_channel __P((struct channel_softc*));
212 int pdc202xx_pci_intr __P((void *));
213 int pdc20265_pci_intr __P((void *));
214 static void pdc20262_dma_start __P((void*, int, int));
215 static int pdc20262_dma_finish __P((void*, int, int, int));
216
217 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void opti_setup_channel __P((struct channel_softc*));
219
220 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
221 void hpt_setup_channel __P((struct channel_softc*));
222 int hpt_pci_intr __P((void *));
223
224 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
225 void acard_setup_channel __P((struct channel_softc*));
226 int acard_pci_intr __P((void *));
227
228 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
229 void serverworks_setup_channel __P((struct channel_softc*));
230 int serverworks_pci_intr __P((void *));
231
232 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
233 void sl82c105_setup_channel __P((struct channel_softc*));
234
235 void pciide_channel_dma_setup __P((struct pciide_channel *));
236 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
237 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
238 void pciide_dma_start __P((void*, int, int));
239 int pciide_dma_finish __P((void*, int, int, int));
240 void pciide_irqack __P((struct channel_softc *));
241
242 void artisea_chip_map __P((struct pciide_softc*, struct pci_attach_args *));
243
244 struct pciide_product_desc {
245 u_int32_t ide_product;
246 int ide_flags;
247 const char *ide_name;
248 /* map and setup chip, probe drives */
249 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
250 };
251
252 /* Flags for ide_flags */
253 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
254 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
255
256 /* Default product description for devices not known to this driver */
257 const struct pciide_product_desc default_product_desc = {
258 0,
259 0,
260 "Generic PCI IDE controller",
261 default_chip_map,
262 };
263
264 const struct pciide_product_desc pciide_intel_products[] = {
265 { PCI_PRODUCT_INTEL_82092AA,
266 0,
267 "Intel 82092AA IDE controller",
268 default_chip_map,
269 },
270 { PCI_PRODUCT_INTEL_82371FB_IDE,
271 0,
272 "Intel 82371FB IDE controller (PIIX)",
273 piix_chip_map,
274 },
275 { PCI_PRODUCT_INTEL_82371SB_IDE,
276 0,
277 "Intel 82371SB IDE Interface (PIIX3)",
278 piix_chip_map,
279 },
280 { PCI_PRODUCT_INTEL_82371AB_IDE,
281 0,
282 "Intel 82371AB IDE controller (PIIX4)",
283 piix_chip_map,
284 },
285 { PCI_PRODUCT_INTEL_82440MX_IDE,
286 0,
287 "Intel 82440MX IDE controller",
288 piix_chip_map
289 },
290 { PCI_PRODUCT_INTEL_82801AA_IDE,
291 0,
292 "Intel 82801AA IDE Controller (ICH)",
293 piix_chip_map,
294 },
295 { PCI_PRODUCT_INTEL_82801AB_IDE,
296 0,
297 "Intel 82801AB IDE Controller (ICH0)",
298 piix_chip_map,
299 },
300 { PCI_PRODUCT_INTEL_82801BA_IDE,
301 0,
302 "Intel 82801BA IDE Controller (ICH2)",
303 piix_chip_map,
304 },
305 { PCI_PRODUCT_INTEL_82801BAM_IDE,
306 0,
307 "Intel 82801BAM IDE Controller (ICH2-M)",
308 piix_chip_map,
309 },
310 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
311 0,
312 "Intel 82801CA IDE Controller (ICH3)",
313 piix_chip_map,
314 },
315 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
316 0,
317 "Intel 82801CA IDE Controller (ICH3)",
318 piix_chip_map,
319 },
320 { PCI_PRODUCT_INTEL_82801DB_IDE,
321 0,
322 "Intel 82801DB IDE Controller (ICH4)",
323 piix_chip_map,
324 },
325 { PCI_PRODUCT_INTEL_82801DBM_IDE,
326 0,
327 "Intel 82801DBM IDE Controller (ICH4-M)",
328 piix_chip_map,
329 },
330 { PCI_PRODUCT_INTEL_82801EB_IDE,
331 0,
332 "Intel 82801EB IDE Controller (ICH5)",
333 piix_chip_map,
334 },
335 { PCI_PRODUCT_INTEL_31244,
336 0,
337 "Intel 31244 Serial ATA Controller",
338 artisea_chip_map,
339 },
340 { PCI_PRODUCT_INTEL_82801EB_SATA,
341 0,
342 "Intel 82801EB Serial ATA Controller",
343 artisea_chip_map,
344 },
345 { 0,
346 0,
347 NULL,
348 NULL
349 }
350 };
351
352 const struct pciide_product_desc pciide_amd_products[] = {
353 { PCI_PRODUCT_AMD_PBC756_IDE,
354 0,
355 "Advanced Micro Devices AMD756 IDE Controller",
356 amd7x6_chip_map
357 },
358 { PCI_PRODUCT_AMD_PBC766_IDE,
359 0,
360 "Advanced Micro Devices AMD766 IDE Controller",
361 amd7x6_chip_map
362 },
363 { PCI_PRODUCT_AMD_PBC768_IDE,
364 0,
365 "Advanced Micro Devices AMD768 IDE Controller",
366 amd7x6_chip_map
367 },
368 { PCI_PRODUCT_AMD_PBC8111_IDE,
369 0,
370 "Advanced Micro Devices AMD8111 IDE Controller",
371 amd7x6_chip_map
372 },
373 { 0,
374 0,
375 NULL,
376 NULL
377 }
378 };
379
380 const struct pciide_product_desc pciide_nvidia_products[] = {
381 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
382 0,
383 "NVIDIA nForce IDE Controller",
384 amd7x6_chip_map
385 },
386 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
387 0,
388 "NVIDIA nForce2 IDE Controller",
389 amd7x6_chip_map
390 },
391 { 0,
392 0,
393 NULL,
394 NULL
395 }
396 };
397
398 const struct pciide_product_desc pciide_cmd_products[] = {
399 { PCI_PRODUCT_CMDTECH_640,
400 0,
401 "CMD Technology PCI0640",
402 cmd_chip_map
403 },
404 { PCI_PRODUCT_CMDTECH_643,
405 0,
406 "CMD Technology PCI0643",
407 cmd0643_9_chip_map,
408 },
409 { PCI_PRODUCT_CMDTECH_646,
410 0,
411 "CMD Technology PCI0646",
412 cmd0643_9_chip_map,
413 },
414 { PCI_PRODUCT_CMDTECH_648,
415 IDE_PCI_CLASS_OVERRIDE,
416 "CMD Technology PCI0648",
417 cmd0643_9_chip_map,
418 },
419 { PCI_PRODUCT_CMDTECH_649,
420 IDE_PCI_CLASS_OVERRIDE,
421 "CMD Technology PCI0649",
422 cmd0643_9_chip_map,
423 },
424 { PCI_PRODUCT_CMDTECH_680,
425 IDE_PCI_CLASS_OVERRIDE,
426 "Silicon Image 0680",
427 cmd680_chip_map,
428 },
429 { PCI_PRODUCT_CMDTECH_3112,
430 IDE_PCI_CLASS_OVERRIDE,
431 "Silicon Image SATALink 3112",
432 cmd3112_chip_map,
433 },
434 { 0,
435 0,
436 NULL,
437 NULL
438 }
439 };
440
441 const struct pciide_product_desc pciide_via_products[] = {
442 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
443 0,
444 NULL,
445 apollo_chip_map,
446 },
447 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
448 0,
449 NULL,
450 apollo_chip_map,
451 },
452 { PCI_PRODUCT_VIATECH_VT8237_SATA,
453 IDE_PCI_CLASS_OVERRIDE,
454 "VIA Technologies VT8237 SATA Controller",
455 apollo_sata_chip_map,
456 },
457 { 0,
458 0,
459 NULL,
460 NULL
461 }
462 };
463
464 const struct pciide_product_desc pciide_cypress_products[] = {
465 { PCI_PRODUCT_CONTAQ_82C693,
466 IDE_16BIT_IOSPACE,
467 "Cypress 82C693 IDE Controller",
468 cy693_chip_map,
469 },
470 { 0,
471 0,
472 NULL,
473 NULL
474 }
475 };
476
477 const struct pciide_product_desc pciide_sis_products[] = {
478 { PCI_PRODUCT_SIS_5597_IDE,
479 0,
480 NULL,
481 sis_chip_map,
482 },
483 { 0,
484 0,
485 NULL,
486 NULL
487 }
488 };
489
490 const struct pciide_product_desc pciide_acer_products[] = {
491 { PCI_PRODUCT_ALI_M5229,
492 0,
493 "Acer Labs M5229 UDMA IDE Controller",
494 acer_chip_map,
495 },
496 { 0,
497 0,
498 NULL,
499 NULL
500 }
501 };
502
503 const struct pciide_product_desc pciide_promise_products[] = {
504 { PCI_PRODUCT_PROMISE_ULTRA33,
505 IDE_PCI_CLASS_OVERRIDE,
506 "Promise Ultra33/ATA Bus Master IDE Accelerator",
507 pdc202xx_chip_map,
508 },
509 { PCI_PRODUCT_PROMISE_ULTRA66,
510 IDE_PCI_CLASS_OVERRIDE,
511 "Promise Ultra66/ATA Bus Master IDE Accelerator",
512 pdc202xx_chip_map,
513 },
514 { PCI_PRODUCT_PROMISE_ULTRA100,
515 IDE_PCI_CLASS_OVERRIDE,
516 "Promise Ultra100/ATA Bus Master IDE Accelerator",
517 pdc202xx_chip_map,
518 },
519 { PCI_PRODUCT_PROMISE_ULTRA100X,
520 IDE_PCI_CLASS_OVERRIDE,
521 "Promise Ultra100/ATA Bus Master IDE Accelerator",
522 pdc202xx_chip_map,
523 },
524 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
525 IDE_PCI_CLASS_OVERRIDE,
526 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
527 pdc202xx_chip_map,
528 },
529 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
530 IDE_PCI_CLASS_OVERRIDE,
531 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
532 pdc202xx_chip_map,
533 },
534 { PCI_PRODUCT_PROMISE_ULTRA133,
535 IDE_PCI_CLASS_OVERRIDE,
536 "Promise Ultra133/ATA Bus Master IDE Accelerator",
537 pdc202xx_chip_map,
538 },
539 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
540 IDE_PCI_CLASS_OVERRIDE,
541 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
542 pdc202xx_chip_map,
543 },
544 { PCI_PRODUCT_PROMISE_MBULTRA133,
545 IDE_PCI_CLASS_OVERRIDE,
546 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
547 pdc202xx_chip_map,
548 },
549 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
550 IDE_PCI_CLASS_OVERRIDE,
551 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
552 pdc202xx_chip_map,
553 },
554 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
555 IDE_PCI_CLASS_OVERRIDE,
556 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
557 pdc202xx_chip_map,
558 },
559 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
560 IDE_PCI_CLASS_OVERRIDE,
561 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
562 pdc202xx_chip_map,
563 },
564 { 0,
565 0,
566 NULL,
567 NULL
568 }
569 };
570
571 const struct pciide_product_desc pciide_opti_products[] = {
572 { PCI_PRODUCT_OPTI_82C621,
573 0,
574 "OPTi 82c621 PCI IDE controller",
575 opti_chip_map,
576 },
577 { PCI_PRODUCT_OPTI_82C568,
578 0,
579 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
580 opti_chip_map,
581 },
582 { PCI_PRODUCT_OPTI_82D568,
583 0,
584 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
585 opti_chip_map,
586 },
587 { 0,
588 0,
589 NULL,
590 NULL
591 }
592 };
593
594 const struct pciide_product_desc pciide_triones_products[] = {
595 { PCI_PRODUCT_TRIONES_HPT366,
596 IDE_PCI_CLASS_OVERRIDE,
597 NULL,
598 hpt_chip_map,
599 },
600 { PCI_PRODUCT_TRIONES_HPT372,
601 IDE_PCI_CLASS_OVERRIDE,
602 NULL,
603 hpt_chip_map
604 },
605 { PCI_PRODUCT_TRIONES_HPT374,
606 IDE_PCI_CLASS_OVERRIDE,
607 NULL,
608 hpt_chip_map
609 },
610 { 0,
611 0,
612 NULL,
613 NULL
614 }
615 };
616
617 const struct pciide_product_desc pciide_acard_products[] = {
618 { PCI_PRODUCT_ACARD_ATP850U,
619 IDE_PCI_CLASS_OVERRIDE,
620 "Acard ATP850U Ultra33 IDE Controller",
621 acard_chip_map,
622 },
623 { PCI_PRODUCT_ACARD_ATP860,
624 IDE_PCI_CLASS_OVERRIDE,
625 "Acard ATP860 Ultra66 IDE Controller",
626 acard_chip_map,
627 },
628 { PCI_PRODUCT_ACARD_ATP860A,
629 IDE_PCI_CLASS_OVERRIDE,
630 "Acard ATP860-A Ultra66 IDE Controller",
631 acard_chip_map,
632 },
633 { 0,
634 0,
635 NULL,
636 NULL
637 }
638 };
639
640 const struct pciide_product_desc pciide_serverworks_products[] = {
641 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
642 0,
643 "ServerWorks OSB4 IDE Controller",
644 serverworks_chip_map,
645 },
646 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
647 0,
648 "ServerWorks CSB5 IDE Controller",
649 serverworks_chip_map,
650 },
651 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
652 0,
653 "ServerWorks CSB6 RAID/IDE Controller",
654 serverworks_chip_map,
655 },
656 { 0,
657 0,
658 NULL,
659 }
660 };
661
662 const struct pciide_product_desc pciide_symphony_products[] = {
663 { PCI_PRODUCT_SYMPHONY_82C105,
664 0,
665 "Symphony Labs 82C105 IDE controller",
666 sl82c105_chip_map,
667 },
668 { 0,
669 0,
670 NULL,
671 }
672 };
673
674 const struct pciide_product_desc pciide_winbond_products[] = {
675 { PCI_PRODUCT_WINBOND_W83C553F_1,
676 0,
677 "Winbond W83C553F IDE controller",
678 sl82c105_chip_map,
679 },
680 { 0,
681 0,
682 NULL,
683 }
684 };
685
686 struct pciide_vendor_desc {
687 u_int32_t ide_vendor;
688 const struct pciide_product_desc *ide_products;
689 };
690
691 const struct pciide_vendor_desc pciide_vendors[] = {
692 { PCI_VENDOR_INTEL, pciide_intel_products },
693 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
694 { PCI_VENDOR_VIATECH, pciide_via_products },
695 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
696 { PCI_VENDOR_SIS, pciide_sis_products },
697 { PCI_VENDOR_ALI, pciide_acer_products },
698 { PCI_VENDOR_PROMISE, pciide_promise_products },
699 { PCI_VENDOR_AMD, pciide_amd_products },
700 { PCI_VENDOR_OPTI, pciide_opti_products },
701 { PCI_VENDOR_TRIONES, pciide_triones_products },
702 { PCI_VENDOR_ACARD, pciide_acard_products },
703 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
704 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
705 { PCI_VENDOR_WINBOND, pciide_winbond_products },
706 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
707 { 0, NULL }
708 };
709
710 /* options passed via the 'flags' config keyword */
711 #define PCIIDE_OPTIONS_DMA 0x01
712 #define PCIIDE_OPTIONS_NODMA 0x02
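/*
 * These bits come from cf_flags, i.e. the "flags" keyword on the pciide
 * line of the kernel configuration file; as a sketch (assuming the usual
 * config(8) syntax), a line like
 *	pciide* at pci? dev ? function ? flags 0x0002
 * would force DMA off for all pciide devices.
 */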
713
714 int pciide_match __P((struct device *, struct cfdata *, void *));
715 void pciide_attach __P((struct device *, struct device *, void *));
716
717 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
718 pciide_match, pciide_attach, NULL, NULL);
719
720 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
721 void pciide_mapregs_compat __P(( struct pci_attach_args *,
722 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
723 void pciide_mapregs_native __P((struct pci_attach_args *,
724 struct pciide_channel *, bus_size_t *, bus_size_t *,
725 int (*pci_intr) __P((void *))));
726 void pciide_mapreg_dma __P((struct pciide_softc *,
727 struct pci_attach_args *));
728 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
729 void pciide_mapchan __P((struct pci_attach_args *,
730 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
731 int (*pci_intr) __P((void *))));
732 void pciide_map_compat_intr __P(( struct pci_attach_args *,
733 struct pciide_channel *, int));
734 int pciide_compat_intr __P((void *));
735 int pciide_pci_intr __P((void *));
736 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
737
738 const struct pciide_product_desc *
739 pciide_lookup_product(id)
740 u_int32_t id;
741 {
742 const struct pciide_product_desc *pp;
743 const struct pciide_vendor_desc *vp;
744
745 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
746 if (PCI_VENDOR(id) == vp->ide_vendor)
747 break;
748
749 if ((pp = vp->ide_products) == NULL)
750 return NULL;
751
752 for (; pp->chip_map != NULL; pp++)
753 if (PCI_PRODUCT(id) == pp->ide_product)
754 break;
755
756 if (pp->chip_map == NULL)
757 return NULL;
758 return pp;
759 }
760
761 int
762 pciide_match(parent, match, aux)
763 struct device *parent;
764 struct cfdata *match;
765 void *aux;
766 {
767 struct pci_attach_args *pa = aux;
768 const struct pciide_product_desc *pp;
769
770 /*
771 * Check the class register to see that it's a PCI IDE controller.
772 * If it is, we assume that we can deal with it; it _should_
773 * work in a standardized way...
774 */
775 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
776 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
777 return (1);
778 }
779
780 /*
781 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
782 * controllers. Let's see if we can deal with them anyway.
783 */
784 pp = pciide_lookup_product(pa->pa_id);
785 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
786 return (1);
787 }
788
789 return (0);
790 }
791
792 void
793 pciide_attach(parent, self, aux)
794 struct device *parent, *self;
795 void *aux;
796 {
797 struct pci_attach_args *pa = aux;
798 pci_chipset_tag_t pc = pa->pa_pc;
799 pcitag_t tag = pa->pa_tag;
800 struct pciide_softc *sc = (struct pciide_softc *)self;
801 pcireg_t csr;
802 char devinfo[256];
803 const char *displaydev;
804
805 aprint_naive(": disk controller\n");
806 aprint_normal("\n");
807
808 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
809 sc->sc_pp = pciide_lookup_product(pa->pa_id);
810 if (sc->sc_pp == NULL) {
811 sc->sc_pp = &default_product_desc;
812 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
813 displaydev = devinfo;
814 } else
815 displaydev = sc->sc_pp->ide_name;
816
817 /* if displaydev == NULL, printf is done in chip-specific map */
818 if (displaydev)
819 aprint_normal("%s: %s (rev. 0x%02x)\n",
820 sc->sc_wdcdev.sc_dev.dv_xname, displaydev,
821 PCI_REVISION(pa->pa_class));
822
823 sc->sc_pc = pa->pa_pc;
824 sc->sc_tag = pa->pa_tag;
825
826 /* Set up DMA defaults; these might be adjusted by chip_map. */
827 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
828 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
829
830 #ifdef WDCDEBUG
831 if (wdcdebug_pciide_mask & DEBUG_PROBE)
832 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
833 #endif
834 sc->sc_pp->chip_map(sc, pa);
835
836 if (sc->sc_dma_ok) {
837 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
838 csr |= PCI_COMMAND_MASTER_ENABLE;
839 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
840 }
841 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
842 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
843
844 config_interrupts(self, wdcattach);
845 }
846
847 /* tell whether the chip is enabled or not */
848 int
849 pciide_chipen(sc, pa)
850 struct pciide_softc *sc;
851 struct pci_attach_args *pa;
852 {
853 pcireg_t csr;
854
855 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
856 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
857 PCI_COMMAND_STATUS_REG);
858 aprint_normal("%s: device disabled (at %s)\n",
859 sc->sc_wdcdev.sc_dev.dv_xname,
860 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
861 "device" : "bridge");
862 return 0;
863 }
864 return 1;
865 }
866
867 void
868 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
869 struct pci_attach_args *pa;
870 struct pciide_channel *cp;
871 int compatchan;
872 bus_size_t *cmdsizep, *ctlsizep;
873 {
874 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
875 struct channel_softc *wdc_cp = &cp->wdc_channel;
876
877 cp->compat = 1;
878 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
879 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
880
881 wdc_cp->cmd_iot = pa->pa_iot;
882 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
883 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
884 aprint_error("%s: couldn't map %s channel cmd regs\n",
885 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
886 goto bad;
887 }
888
889 wdc_cp->ctl_iot = pa->pa_iot;
890 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
891 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
892 aprint_error("%s: couldn't map %s channel ctl regs\n",
893 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
894 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
895 PCIIDE_COMPAT_CMD_SIZE);
896 goto bad;
897 }
898
899 wdc_cp->data32iot = wdc_cp->cmd_iot;
900 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
901 pciide_map_compat_intr(pa, cp, compatchan);
902 return;
903
904 bad:
905 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
906 return;
907 }
908
909 void
910 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
911 struct pci_attach_args * pa;
912 struct pciide_channel *cp;
913 bus_size_t *cmdsizep, *ctlsizep;
914 int (*pci_intr) __P((void *));
915 {
916 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
917 struct channel_softc *wdc_cp = &cp->wdc_channel;
918 const char *intrstr;
919 pci_intr_handle_t intrhandle;
920
921 cp->compat = 0;
922
923 if (sc->sc_pci_ih == NULL) {
924 if (pci_intr_map(pa, &intrhandle) != 0) {
925 aprint_error("%s: couldn't map native-PCI interrupt\n",
926 sc->sc_wdcdev.sc_dev.dv_xname);
927 goto bad;
928 }
929 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
930 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
931 intrhandle, IPL_BIO, pci_intr, sc);
932 if (sc->sc_pci_ih != NULL) {
933 aprint_normal("%s: using %s for native-PCI interrupt\n",
934 sc->sc_wdcdev.sc_dev.dv_xname,
935 intrstr ? intrstr : "unknown interrupt");
936 } else {
937 aprint_error(
938 "%s: couldn't establish native-PCI interrupt",
939 sc->sc_wdcdev.sc_dev.dv_xname);
940 if (intrstr != NULL)
941 aprint_normal(" at %s", intrstr);
942 aprint_normal("\n");
943 goto bad;
944 }
945 }
946 cp->ih = sc->sc_pci_ih;
947 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
948 PCI_MAPREG_TYPE_IO, 0,
949 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
950 aprint_error("%s: couldn't map %s channel cmd regs\n",
951 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
952 goto bad;
953 }
954
955 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
956 PCI_MAPREG_TYPE_IO, 0,
957 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
958 aprint_error("%s: couldn't map %s channel ctl regs\n",
959 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
960 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
961 goto bad;
962 }
963 /*
964 * In native mode, 4 bytes of I/O space are mapped for the control
965 * register; the control register itself is at offset 2. Pass the
966 * generic code a handle for only one byte at the right offset.
967 */
968 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
969 &wdc_cp->ctl_ioh) != 0) {
970 aprint_error("%s: unable to subregion %s channel ctl regs\n",
971 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
972 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
973 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
974 goto bad;
975 }
976
977 wdc_cp->data32iot = wdc_cp->cmd_iot;
978 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
979 return;
980
981 bad:
982 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
983 return;
984 }
985
986 void
987 pciide_mapreg_dma(sc, pa)
988 struct pciide_softc *sc;
989 struct pci_attach_args *pa;
990 {
991 pcireg_t maptype;
992 bus_addr_t addr;
993
994 /*
995 * Map DMA registers
996 *
997 * Note that sc_dma_ok is the right variable to test to see if
998 * DMA can be done. If the interface doesn't support DMA,
999 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
1000 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
1001 * non-zero if the interface supports DMA and the registers
1002 * could be mapped.
1003 *
1004 * XXX Note that despite the fact that the Bus Master IDE specs
1005 * XXX say that "The bus master IDE function uses 16 bytes of IO
1006 * XXX space," some controllers (at least the United
1007 * XXX Microelectronics UM8886BF) place it in memory space.
1008 */
1009 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
1010 PCIIDE_REG_BUS_MASTER_DMA);
1011
1012 switch (maptype) {
1013 case PCI_MAPREG_TYPE_IO:
1014 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
1015 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
1016 &addr, NULL, NULL) == 0);
1017 if (sc->sc_dma_ok == 0) {
1018 aprint_normal(
1019 ", but unused (couldn't query registers)");
1020 break;
1021 }
1022 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
1023 && addr >= 0x10000) {
1024 sc->sc_dma_ok = 0;
1025 aprint_normal(
1026 ", but unused (registers at unsafe address "
1027 "%#lx)", (unsigned long)addr);
1028 break;
1029 }
1030 /* FALLTHROUGH */
1031
1032 case PCI_MAPREG_MEM_TYPE_32BIT:
1033 sc->sc_dma_ok = (pci_mapreg_map(pa,
1034 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
1035 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
1036 sc->sc_dmat = pa->pa_dmat;
1037 if (sc->sc_dma_ok == 0) {
1038 aprint_normal(", but unused (couldn't map registers)");
1039 } else {
1040 sc->sc_wdcdev.dma_arg = sc;
1041 sc->sc_wdcdev.dma_init = pciide_dma_init;
1042 sc->sc_wdcdev.dma_start = pciide_dma_start;
1043 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
1044 }
1045
1046 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1047 PCIIDE_OPTIONS_NODMA) {
1048 aprint_normal(
1049 ", but unused (forced off by config file)");
1050 sc->sc_dma_ok = 0;
1051 }
1052 break;
1053
1054 default:
1055 sc->sc_dma_ok = 0;
1056 aprint_normal(
1057 ", but unsupported register maptype (0x%x)", maptype);
1058 }
1059 }
1060
1061 int
1062 pciide_compat_intr(arg)
1063 void *arg;
1064 {
1065 struct pciide_channel *cp = arg;
1066
1067 #ifdef DIAGNOSTIC
1068 /* should only be called for a compat channel */
1069 if (cp->compat == 0)
1070 panic("pciide compat intr called for non-compat chan %p", cp);
1071 #endif
1072 return (wdcintr(&cp->wdc_channel));
1073 }
1074
1075 int
1076 pciide_pci_intr(arg)
1077 void *arg;
1078 {
1079 struct pciide_softc *sc = arg;
1080 struct pciide_channel *cp;
1081 struct channel_softc *wdc_cp;
1082 int i, rv, crv;
1083
1084 rv = 0;
1085 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1086 cp = &sc->pciide_channels[i];
1087 wdc_cp = &cp->wdc_channel;
1088
1089 /* If a compat channel, skip. */
1090 if (cp->compat)
1091 continue;
1092 /* If this channel is not waiting for an intr, skip. */
1093 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1094 continue;
1095
1096 crv = wdcintr(wdc_cp);
1097 if (crv == 0)
1098 ; /* leave rv alone */
1099 else if (crv == 1)
1100 rv = 1; /* claim the intr */
1101 else if (rv == 0) /* crv should be -1 in this case */
1102 rv = crv; /* if we've done no better, take it */
1103 }
1104 return (rv);
1105 }
1106
1107 void
1108 pciide_channel_dma_setup(cp)
1109 struct pciide_channel *cp;
1110 {
1111 int drive;
1112 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1113 struct ata_drive_datas *drvp;
1114
1115 for (drive = 0; drive < 2; drive++) {
1116 drvp = &cp->wdc_channel.ch_drive[drive];
1117 /* If no drive, skip */
1118 if ((drvp->drive_flags & DRIVE) == 0)
1119 continue;
1120 /* setup DMA if needed */
1121 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1122 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1123 sc->sc_dma_ok == 0) {
1124 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1125 continue;
1126 }
1127 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1128 != 0) {
1129 /* Abort DMA setup */
1130 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1131 continue;
1132 }
1133 }
1134 }
1135
1136 int
1137 pciide_dma_table_setup(sc, channel, drive)
1138 struct pciide_softc *sc;
1139 int channel, drive;
1140 {
1141 bus_dma_segment_t seg;
1142 int error, rseg;
1143 const bus_size_t dma_table_size =
1144 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1145 struct pciide_dma_maps *dma_maps =
1146 &sc->pciide_channels[channel].dma_maps[drive];
1147
1148 /* If table was already allocated, just return */
1149 if (dma_maps->dma_table)
1150 return 0;
1151
1152 /* Allocate memory for the DMA tables and map it */
1153 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1154 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1155 BUS_DMA_NOWAIT)) != 0) {
1156 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1157 "allocate", drive, error);
1158 return error;
1159 }
1160 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1161 dma_table_size,
1162 (caddr_t *)&dma_maps->dma_table,
1163 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1164 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1165 "map", drive, error);
1166 return error;
1167 }
1168 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1169 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1170 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1171 /* Create and load table DMA map for this disk */
1172 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1173 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1174 &dma_maps->dmamap_table)) != 0) {
1175 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1176 "create", drive, error);
1177 return error;
1178 }
1179 if ((error = bus_dmamap_load(sc->sc_dmat,
1180 dma_maps->dmamap_table,
1181 dma_maps->dma_table,
1182 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1183 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1184 "load", drive, error);
1185 return error;
1186 }
1187 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1188 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1189 DEBUG_PROBE);
1190 /* Create a xfer DMA map for this drive */
1191 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1192 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1193 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1194 &dma_maps->dmamap_xfer)) != 0) {
1195 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1196 "create xfer", drive, error);
1197 return error;
1198 }
1199 return 0;
1200 }
1201
1202 int
1203 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1204 void *v;
1205 int channel, drive;
1206 void *databuf;
1207 size_t datalen;
1208 int flags;
1209 {
1210 struct pciide_softc *sc = v;
1211 int error, seg;
1212 struct pciide_dma_maps *dma_maps =
1213 &sc->pciide_channels[channel].dma_maps[drive];
1214
1215 error = bus_dmamap_load(sc->sc_dmat,
1216 dma_maps->dmamap_xfer,
1217 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1218 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1219 if (error) {
1220 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1221 "load xfer", drive, error);
1222 return error;
1223 }
1224
1225 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1226 dma_maps->dmamap_xfer->dm_mapsize,
1227 (flags & WDC_DMA_READ) ?
1228 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1229
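/*
 * Fill in the physical region descriptor table: one entry per DMA
 * segment, each carrying the segment's 32-bit base address and its
 * byte count (stored little-endian), with the end-of-table bit set
 * in the last entry, as required by the bus master IDE programming
 * interface referenced at the top of this file.
 */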
1230 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1231 #ifdef DIAGNOSTIC
1232 /* A segment must not cross a 64k boundary */
1233 {
1234 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1235 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1236 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1237 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1238 printf("pciide_dma: segment %d physical addr 0x%lx"
1239 " len 0x%lx not properly aligned\n",
1240 seg, phys, len);
1241 panic("pciide_dma: buf align");
1242 }
1243 }
1244 #endif
1245 dma_maps->dma_table[seg].base_addr =
1246 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1247 dma_maps->dma_table[seg].byte_count =
1248 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1249 IDEDMA_BYTE_COUNT_MASK);
1250 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1251 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1252 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1253
1254 }
1255 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1256 htole32(IDEDMA_BYTE_COUNT_EOT);
1257
1258 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1259 dma_maps->dmamap_table->dm_mapsize,
1260 BUS_DMASYNC_PREWRITE);
1261
1262 /* Maps are ready. Start DMA function */
1263 #ifdef DIAGNOSTIC
1264 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1265 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1266 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1267 panic("pciide_dma_init: table align");
1268 }
1269 #endif
1270
1271 /* Clear status bits */
1272 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1273 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1274 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1275 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1276 /* Write table addr */
1277 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1278 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1279 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1280 /* set read/write */
1281 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1282 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1283 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1284 /* remember flags */
1285 dma_maps->dma_flags = flags;
1286 return 0;
1287 }
1288
1289 void
1290 pciide_dma_start(v, channel, drive)
1291 void *v;
1292 int channel, drive;
1293 {
1294 struct pciide_softc *sc = v;
1295
1296 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1297 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1298 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1299 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1300 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1301 }
1302
1303 int
1304 pciide_dma_finish(v, channel, drive, force)
1305 void *v;
1306 int channel, drive;
1307 int force;
1308 {
1309 struct pciide_softc *sc = v;
1310 u_int8_t status;
1311 int error = 0;
1312 struct pciide_dma_maps *dma_maps =
1313 &sc->pciide_channels[channel].dma_maps[drive];
1314
1315 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1316 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1317 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1318 DEBUG_XFERS);
1319
1320 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1321 return WDC_DMAST_NOIRQ;
1322
1323 /* stop DMA channel */
1324 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1325 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1326 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1327 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1328
1329 /* Unload the map of the data buffer */
1330 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1331 dma_maps->dmamap_xfer->dm_mapsize,
1332 (dma_maps->dma_flags & WDC_DMA_READ) ?
1333 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1334 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1335
1336 if ((status & IDEDMA_CTL_ERR) != 0) {
1337 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1338 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1339 error |= WDC_DMAST_ERR;
1340 }
1341
1342 if ((status & IDEDMA_CTL_INTR) == 0) {
1343 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1344 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1345 drive, status);
1346 error |= WDC_DMAST_NOIRQ;
1347 }
1348
1349 if ((status & IDEDMA_CTL_ACT) != 0) {
1350 /* data underrun, may be a valid condition for ATAPI */
1351 error |= WDC_DMAST_UNDER;
1352 }
1353 return error;
1354 }
1355
1356 void
1357 pciide_irqack(chp)
1358 struct channel_softc *chp;
1359 {
1360 struct pciide_channel *cp = (struct pciide_channel*)chp;
1361 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1362
1363 /* clear status bits in IDE DMA registers */
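/*
 * The interrupt and error bits of the bus-master status register are
 * write-one-to-clear, so writing back the value just read acknowledges
 * the pending condition.
 */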
1364 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1365 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1366 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1367 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1368 }
1369
1370 /* some common code used by several chip_map functions */
1371 int
1372 pciide_chansetup(sc, channel, interface)
1373 struct pciide_softc *sc;
1374 int channel;
1375 pcireg_t interface;
1376 {
1377 struct pciide_channel *cp = &sc->pciide_channels[channel];
1378 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1379 cp->name = PCIIDE_CHANNEL_NAME(channel);
1380 cp->wdc_channel.channel = channel;
1381 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1382 cp->wdc_channel.ch_queue =
1383 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1384 if (cp->wdc_channel.ch_queue == NULL) {
1385 aprint_error("%s %s channel: "
1386 "can't allocate memory for command queue",
1387 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1388 return 0;
1389 }
1390 aprint_normal("%s: %s channel %s to %s mode\n",
1391 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1392 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1393 "configured" : "wired",
1394 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1395 "native-PCI" : "compatibility");
1396 return 1;
1397 }
1398
1399 /* some common code used by several chip_map functions to map a channel */
1400 void
1401 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1402 struct pci_attach_args *pa;
1403 struct pciide_channel *cp;
1404 pcireg_t interface;
1405 bus_size_t *cmdsizep, *ctlsizep;
1406 int (*pci_intr) __P((void *));
1407 {
1408 struct channel_softc *wdc_cp = &cp->wdc_channel;
1409
1410 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1411 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr);
1412 else
1413 pciide_mapregs_compat(pa, cp, wdc_cp->channel, cmdsizep,
1414 ctlsizep);
1415 }
1416
1417 /*
1418 * generic code to map the compat intr.
1419 */
1420 void
1421 pciide_map_compat_intr(pa, cp, compatchan)
1422 struct pci_attach_args *pa;
1423 struct pciide_channel *cp;
1424 int compatchan;
1425 {
1426 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1427
1428 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1429 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1430 pa, compatchan, pciide_compat_intr, cp);
1431 if (cp->ih == NULL) {
1432 #endif
1433 aprint_error("%s: no compatibility interrupt for use by %s "
1434 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1435 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
1436 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1437 }
1438 #endif
1439 }
1440
1441 void
1442 default_chip_map(sc, pa)
1443 struct pciide_softc *sc;
1444 struct pci_attach_args *pa;
1445 {
1446 struct pciide_channel *cp;
1447 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1448 pcireg_t csr;
1449 int channel, drive;
1450 struct ata_drive_datas *drvp;
1451 u_int8_t idedma_ctl;
1452 bus_size_t cmdsize, ctlsize;
1453 char *failreason;
1454
1455 if (pciide_chipen(sc, pa) == 0)
1456 return;
1457
1458 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1459 aprint_normal("%s: bus-master DMA support present",
1460 sc->sc_wdcdev.sc_dev.dv_xname);
1461 if (sc->sc_pp == &default_product_desc &&
1462 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1463 PCIIDE_OPTIONS_DMA) == 0) {
1464 aprint_normal(", but unused (no driver support)");
1465 sc->sc_dma_ok = 0;
1466 } else {
1467 pciide_mapreg_dma(sc, pa);
1468 if (sc->sc_dma_ok != 0)
1469 aprint_normal(", used without full driver "
1470 "support");
1471 }
1472 } else {
1473 aprint_normal("%s: hardware does not support DMA",
1474 sc->sc_wdcdev.sc_dev.dv_xname);
1475 sc->sc_dma_ok = 0;
1476 }
1477 aprint_normal("\n");
1478 if (sc->sc_dma_ok) {
1479 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1480 sc->sc_wdcdev.irqack = pciide_irqack;
1481 }
1482 sc->sc_wdcdev.PIO_cap = 0;
1483 sc->sc_wdcdev.DMA_cap = 0;
1484
1485 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1486 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1487 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1488
1489 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1490 cp = &sc->pciide_channels[channel];
1491 if (pciide_chansetup(sc, channel, interface) == 0)
1492 continue;
1493 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1494 pciide_pci_intr);
1495 if (cp->wdc_channel.ch_flags & WDCF_DISABLED)
1496 continue;
1497 /*
1498 * Check to see if something appears to be there.
1499 */
1500 failreason = NULL;
1501 if (!wdcprobe(&cp->wdc_channel)) {
1502 failreason = "not responding; disabled or no drives?";
1503 goto next;
1504 }
1505 /*
1506 * Now, make sure it's actually attributable to this PCI IDE
1507 * channel by trying to access the channel again while the
1508 * PCI IDE controller's I/O space is disabled. (If the
1509 * channel no longer appears to be there, it belongs to
1510 * this controller.) YUCK!
1511 */
1512 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1513 PCI_COMMAND_STATUS_REG);
1514 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1515 csr & ~PCI_COMMAND_IO_ENABLE);
1516 if (wdcprobe(&cp->wdc_channel))
1517 failreason = "other hardware responding at addresses";
1518 pci_conf_write(sc->sc_pc, sc->sc_tag,
1519 PCI_COMMAND_STATUS_REG, csr);
1520 next:
1521 if (failreason) {
1522 aprint_error("%s: %s channel ignored (%s)\n",
1523 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1524 failreason);
1525 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
1526 }
1527 }
1528
1529 if (sc->sc_dma_ok == 0)
1530 return;
1531
1532 /* Allocate DMA maps */
1533 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1534 idedma_ctl = 0;
1535 cp = &sc->pciide_channels[channel];
1536 for (drive = 0; drive < 2; drive++) {
1537 drvp = &cp->wdc_channel.ch_drive[drive];
1538 /* If no drive, skip */
1539 if ((drvp->drive_flags & DRIVE) == 0)
1540 continue;
1541 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1542 continue;
1543 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1544 /* Abort DMA setup */
1545 aprint_error(
1546 "%s:%d:%d: can't allocate DMA maps, "
1547 "using PIO transfers\n",
1548 sc->sc_wdcdev.sc_dev.dv_xname,
1549 channel, drive);
1550 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1551 }
1552 aprint_normal("%s:%d:%d: using DMA data transfers\n",
1553 sc->sc_wdcdev.sc_dev.dv_xname,
1554 channel, drive);
1555 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1556 }
1557 if (idedma_ctl != 0) {
1558 /* Add software bits in status register */
1559 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1560 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1561 idedma_ctl);
1562 }
1563 }
1564 }
1565
1566 void
1567 sata_setup_channel(chp)
1568 struct channel_softc *chp;
1569 {
1570 struct ata_drive_datas *drvp;
1571 int drive;
1572 u_int32_t idedma_ctl;
1573 struct pciide_channel *cp = (struct pciide_channel*)chp;
1574 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
1575
1576 /* setup DMA if needed */
1577 pciide_channel_dma_setup(cp);
1578
1579 idedma_ctl = 0;
1580
1581 for (drive = 0; drive < 2; drive++) {
1582 drvp = &chp->ch_drive[drive];
1583 /* If no drive, skip */
1584 if ((drvp->drive_flags & DRIVE) == 0)
1585 continue;
1586 if (drvp->drive_flags & DRIVE_UDMA) {
1587 /* use Ultra/DMA */
1588 drvp->drive_flags &= ~DRIVE_DMA;
1589 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1590 } else if (drvp->drive_flags & DRIVE_DMA) {
1591 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1592 }
1593 }
1594
1595 /*
1596 * Nothing to do to set up modes; mode setting is meaningless for S-ATA
1597 * (but many S-ATA drives still want to receive the SET_FEATURES
1598 * command).
1599 */
1600 if (idedma_ctl != 0) {
1601 /* Add software bits in status register */
1602 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1603 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1604 idedma_ctl);
1605 }
1606 }
1607
1608 void
1609 piix_chip_map(sc, pa)
1610 struct pciide_softc *sc;
1611 struct pci_attach_args *pa;
1612 {
1613 struct pciide_channel *cp;
1614 int channel;
1615 u_int32_t idetim;
1616 bus_size_t cmdsize, ctlsize;
1617
1618 if (pciide_chipen(sc, pa) == 0)
1619 return;
1620
1621 aprint_normal("%s: bus-master DMA support present",
1622 sc->sc_wdcdev.sc_dev.dv_xname);
1623 pciide_mapreg_dma(sc, pa);
1624 aprint_normal("\n");
1625 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1626 WDC_CAPABILITY_MODE;
1627 if (sc->sc_dma_ok) {
1628 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1629 sc->sc_wdcdev.irqack = pciide_irqack;
1630 switch(sc->sc_pp->ide_product) {
1631 case PCI_PRODUCT_INTEL_82371AB_IDE:
1632 case PCI_PRODUCT_INTEL_82440MX_IDE:
1633 case PCI_PRODUCT_INTEL_82801AA_IDE:
1634 case PCI_PRODUCT_INTEL_82801AB_IDE:
1635 case PCI_PRODUCT_INTEL_82801BA_IDE:
1636 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1637 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1638 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1639 case PCI_PRODUCT_INTEL_82801DB_IDE:
1640 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1641 case PCI_PRODUCT_INTEL_82801EB_IDE:
1642 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1643 }
1644 }
1645 sc->sc_wdcdev.PIO_cap = 4;
1646 sc->sc_wdcdev.DMA_cap = 2;
1647 switch(sc->sc_pp->ide_product) {
1648 case PCI_PRODUCT_INTEL_82801AA_IDE:
1649 sc->sc_wdcdev.UDMA_cap = 4;
1650 break;
1651 case PCI_PRODUCT_INTEL_82801BA_IDE:
1652 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1653 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1654 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1655 case PCI_PRODUCT_INTEL_82801DB_IDE:
1656 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1657 case PCI_PRODUCT_INTEL_82801EB_IDE:
1658 sc->sc_wdcdev.UDMA_cap = 5;
1659 break;
1660 default:
1661 sc->sc_wdcdev.UDMA_cap = 2;
1662 }
1663 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1664 sc->sc_wdcdev.set_modes = piix_setup_channel;
1665 else
1666 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1667 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1668 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1669
1670 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1671 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1672 DEBUG_PROBE);
1673 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1674 WDCDEBUG_PRINT((", sidetim=0x%x",
1675 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1676 DEBUG_PROBE);
1677 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1678 WDCDEBUG_PRINT((", udamreg 0x%x",
1679 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1680 DEBUG_PROBE);
1681 }
1682 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1683 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1684 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1685 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1686 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1687 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1688 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1689 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1690 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1691 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1692 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1693 DEBUG_PROBE);
1694 }
1695
1696 }
1697 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1698
1699 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1700 cp = &sc->pciide_channels[channel];
1701 /* PIIX is compat-only */
1702 if (pciide_chansetup(sc, channel, 0) == 0)
1703 continue;
1704 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1705 if ((PIIX_IDETIM_READ(idetim, channel) &
1706 PIIX_IDETIM_IDE) == 0) {
1707 #if 1
1708 aprint_normal("%s: %s channel ignored (disabled)\n",
1709 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1710 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
1711 continue;
1712 #else
1713 pcireg_t interface;
1714
1715 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1716 channel);
1717 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1718 idetim);
1719 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1720 sc->sc_tag, PCI_CLASS_REG));
1721 aprint_normal("channel %d idetim=%08x interface=%02x\n",
1722 channel, idetim, interface);
1723 #endif
1724 }
1725 /* PIIX is a compat-only pciide device */
1726 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1727 }
1728
1729 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1730 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1731 DEBUG_PROBE);
1732 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1733 WDCDEBUG_PRINT((", sidetim=0x%x",
1734 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1735 DEBUG_PROBE);
1736 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1737 WDCDEBUG_PRINT((", udamreg 0x%x",
1738 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1739 DEBUG_PROBE);
1740 }
1741 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1742 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1743 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1744 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1745 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1746 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1747 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1748 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1749 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1750 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1751 DEBUG_PROBE);
1752 }
1753 }
1754 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1755 }
1756
1757 void
1758 piix_setup_channel(chp)
1759 struct channel_softc *chp;
1760 {
1761 u_int8_t mode[2], drive;
1762 u_int32_t oidetim, idetim, idedma_ctl;
1763 struct pciide_channel *cp = (struct pciide_channel*)chp;
1764 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1765 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1766
1767 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1768 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1769 idedma_ctl = 0;
1770
1771 /* set up new idetim: Enable IDE registers decode */
1772 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1773 chp->channel);
1774
1775 /* setup DMA */
1776 pciide_channel_dma_setup(cp);
1777
1778 /*
1779 * Here we have to juggle the drive modes: the PIIX can't have
1780 * different timings for master and slave drives.
1781 * We need to find the best combination.
1782 */
1783
1784 /* If both drives support DMA, take the lower mode */
1785 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1786 (drvp[1].drive_flags & DRIVE_DMA)) {
1787 mode[0] = mode[1] =
1788 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1789 drvp[0].DMA_mode = mode[0];
1790 drvp[1].DMA_mode = mode[1];
1791 goto ok;
1792 }
1793 /*
1794 * If only one drive supports DMA, use its mode, and
1795 	 * put the other one in PIO mode 0 if its mode is not compatible
1796 */
1797 if (drvp[0].drive_flags & DRIVE_DMA) {
1798 mode[0] = drvp[0].DMA_mode;
1799 mode[1] = drvp[1].PIO_mode;
1800 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1801 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1802 mode[1] = drvp[1].PIO_mode = 0;
1803 goto ok;
1804 }
1805 if (drvp[1].drive_flags & DRIVE_DMA) {
1806 mode[1] = drvp[1].DMA_mode;
1807 mode[0] = drvp[0].PIO_mode;
1808 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1809 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1810 mode[0] = drvp[0].PIO_mode = 0;
1811 goto ok;
1812 }
1813 /*
1814 	 * If neither drive uses DMA, take the lower mode, unless
1815 	 * one of them is at PIO mode < 2.
1816 */
1817 if (drvp[0].PIO_mode < 2) {
1818 mode[0] = drvp[0].PIO_mode = 0;
1819 mode[1] = drvp[1].PIO_mode;
1820 } else if (drvp[1].PIO_mode < 2) {
1821 mode[1] = drvp[1].PIO_mode = 0;
1822 mode[0] = drvp[0].PIO_mode;
1823 } else {
1824 mode[0] = mode[1] =
1825 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1826 drvp[0].PIO_mode = mode[0];
1827 drvp[1].PIO_mode = mode[1];
1828 }
1829 ok:	/* The modes are set up */
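	/*
	 * If at least one drive uses DMA, program the shared timing from
	 * its DMA mode (when both drives do DMA, the modes are equal).
	 */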
1830 for (drive = 0; drive < 2; drive++) {
1831 if (drvp[drive].drive_flags & DRIVE_DMA) {
1832 idetim |= piix_setup_idetim_timings(
1833 mode[drive], 1, chp->channel);
1834 goto end;
1835 }
1836 }
1837 	/* If we get here, neither drive uses DMA */
1838 if (mode[0] >= 2)
1839 idetim |= piix_setup_idetim_timings(
1840 mode[0], 0, chp->channel);
1841 else
1842 idetim |= piix_setup_idetim_timings(
1843 mode[1], 0, chp->channel);
1844 end: /*
1845 * timing mode is now set up in the controller. Enable
1846 * it per-drive
1847 */
1848 for (drive = 0; drive < 2; drive++) {
1849 /* If no drive, skip */
1850 if ((drvp[drive].drive_flags & DRIVE) == 0)
1851 continue;
1852 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1853 if (drvp[drive].drive_flags & DRIVE_DMA)
1854 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1855 }
1856 if (idedma_ctl != 0) {
1857 /* Add software bits in status register */
1858 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1859 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1860 idedma_ctl);
1861 }
1862 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1863 }
1864
1865 void
1866 piix3_4_setup_channel(chp)
1867 struct channel_softc *chp;
1868 {
1869 struct ata_drive_datas *drvp;
1870 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1871 struct pciide_channel *cp = (struct pciide_channel*)chp;
1872 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1873 int drive;
1874 int channel = chp->channel;
1875
1876 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1877 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1878 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1879 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1880 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1881 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1882 PIIX_SIDETIM_RTC_MASK(channel));
1883 idedma_ctl = 0;
1884
1885 /* set up new idetim: Enable IDE registers decode */
1886 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1887
1888 /* setup DMA if needed */
1889 pciide_channel_dma_setup(cp);
1890
1891 for (drive = 0; drive < 2; drive++) {
1892 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1893 PIIX_UDMATIM_SET(0x3, channel, drive));
1894 drvp = &chp->ch_drive[drive];
1895 /* If no drive, skip */
1896 if ((drvp->drive_flags & DRIVE) == 0)
1897 continue;
1898 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1899 (drvp->drive_flags & DRIVE_UDMA) == 0))
1900 goto pio;
1901
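		/* ICH-family controllers: enable the ping-pong buffer */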
1902 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1903 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1904 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1905 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1906 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1907 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1908 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1909 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1910 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1911 ideconf |= PIIX_CONFIG_PINGPONG;
1912 }
1913 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1914 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1915 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1916 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1917 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1918 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1919 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1920 /* setup Ultra/100 */
1921 if (drvp->UDMA_mode > 2 &&
1922 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1923 drvp->UDMA_mode = 2;
1924 if (drvp->UDMA_mode > 4) {
1925 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1926 } else {
1927 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1928 if (drvp->UDMA_mode > 2) {
1929 ideconf |= PIIX_CONFIG_UDMA66(channel,
1930 drive);
1931 } else {
1932 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1933 drive);
1934 }
1935 }
1936 }
1937 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1938 /* setup Ultra/66 */
1939 if (drvp->UDMA_mode > 2 &&
1940 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1941 drvp->UDMA_mode = 2;
1942 if (drvp->UDMA_mode > 2)
1943 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1944 else
1945 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1946 }
1947 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1948 (drvp->drive_flags & DRIVE_UDMA)) {
1949 /* use Ultra/DMA */
1950 drvp->drive_flags &= ~DRIVE_DMA;
1951 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1952 udmareg |= PIIX_UDMATIM_SET(
1953 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1954 } else {
1955 /* use Multiword DMA */
1956 drvp->drive_flags &= ~DRIVE_UDMA;
1957 if (drive == 0) {
1958 idetim |= piix_setup_idetim_timings(
1959 drvp->DMA_mode, 1, channel);
1960 } else {
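				/*
				 * Slave timings live in the SIDETIM register;
				 * SITRE tells the chip to use them.
				 */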
1961 sidetim |= piix_setup_sidetim_timings(
1962 drvp->DMA_mode, 1, channel);
1963 				idetim = PIIX_IDETIM_SET(idetim,
1964 PIIX_IDETIM_SITRE, channel);
1965 }
1966 }
1967 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1968
1969 pio: /* use PIO mode */
1970 idetim |= piix_setup_idetim_drvs(drvp);
1971 if (drive == 0) {
1972 idetim |= piix_setup_idetim_timings(
1973 drvp->PIO_mode, 0, channel);
1974 } else {
1975 sidetim |= piix_setup_sidetim_timings(
1976 drvp->PIO_mode, 0, channel);
1977 			idetim = PIIX_IDETIM_SET(idetim,
1978 PIIX_IDETIM_SITRE, channel);
1979 }
1980 }
1981 if (idedma_ctl != 0) {
1982 /* Add software bits in status register */
1983 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1984 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1985 idedma_ctl);
1986 }
1987 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1988 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1989 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1990 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1991 }
1992
1993
1994 /* setup ISP and RTC fields, based on mode */
1995 static u_int32_t
1996 piix_setup_idetim_timings(mode, dma, channel)
1997 u_int8_t mode;
1998 u_int8_t dma;
1999 u_int8_t channel;
2000 {
2001
2002 if (dma)
2003 return PIIX_IDETIM_SET(0,
2004 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
2005 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
2006 channel);
2007 else
2008 return PIIX_IDETIM_SET(0,
2009 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
2010 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
2011 channel);
2012 }
2013
2014 /* setup DTE, PPE, IE and TIME field based on PIO mode */
2015 static u_int32_t
2016 piix_setup_idetim_drvs(drvp)
2017 struct ata_drive_datas *drvp;
2018 {
2019 u_int32_t ret = 0;
2020 struct channel_softc *chp = drvp->chnl_softc;
2021 u_int8_t channel = chp->channel;
2022 u_int8_t drive = drvp->drive;
2023
2024 /*
2025 	 * If the drive is using UDMA, the timing setup is independent,
2026 	 * so just check DMA and PIO here.
2027 */
2028 if (drvp->drive_flags & DRIVE_DMA) {
2029 /* if mode = DMA mode 0, use compatible timings */
2030 if ((drvp->drive_flags & DRIVE_DMA) &&
2031 drvp->DMA_mode == 0) {
2032 drvp->PIO_mode = 0;
2033 return ret;
2034 }
2035 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2036 /*
2037 		 * If the PIO and DMA timings are the same, use fast timings
2038 		 * for PIO too; otherwise use compat timings.
2039 */
2040 if ((piix_isp_pio[drvp->PIO_mode] !=
2041 piix_isp_dma[drvp->DMA_mode]) ||
2042 (piix_rtc_pio[drvp->PIO_mode] !=
2043 piix_rtc_dma[drvp->DMA_mode]))
2044 drvp->PIO_mode = 0;
2045 /* if PIO mode <= 2, use compat timings for PIO */
2046 if (drvp->PIO_mode <= 2) {
2047 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2048 channel);
2049 return ret;
2050 }
2051 }
2052
2053 /*
2054 * Now setup PIO modes. If mode < 2, use compat timings.
2055 * Else enable fast timings. Enable IORDY and prefetch/post
2056 * if PIO mode >= 3.
2057 */
2058
2059 if (drvp->PIO_mode < 2)
2060 return ret;
2061
2062 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2063 if (drvp->PIO_mode >= 3) {
2064 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2065 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2066 }
2067 return ret;
2068 }
2069
2070 /* setup values in SIDETIM registers, based on mode */
2071 static u_int32_t
2072 piix_setup_sidetim_timings(mode, dma, channel)
2073 u_int8_t mode;
2074 u_int8_t dma;
2075 u_int8_t channel;
2076 {
2077 if (dma)
2078 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2079 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2080 else
2081 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2082 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2083 }
2084
2085 void
2086 amd7x6_chip_map(sc, pa)
2087 struct pciide_softc *sc;
2088 struct pci_attach_args *pa;
2089 {
2090 struct pciide_channel *cp;
2091 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2092 int channel;
2093 pcireg_t chanenable;
2094 bus_size_t cmdsize, ctlsize;
2095
2096 if (pciide_chipen(sc, pa) == 0)
2097 return;
2098
2099 aprint_normal("%s: bus-master DMA support present",
2100 sc->sc_wdcdev.sc_dev.dv_xname);
2101 pciide_mapreg_dma(sc, pa);
2102 aprint_normal("\n");
2103 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2104 WDC_CAPABILITY_MODE;
2105 if (sc->sc_dma_ok) {
2106 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2107 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2108 sc->sc_wdcdev.irqack = pciide_irqack;
2109 }
2110 sc->sc_wdcdev.PIO_cap = 4;
2111 sc->sc_wdcdev.DMA_cap = 2;
2112
2113 switch (sc->sc_pci_vendor) {
2114 case PCI_VENDOR_AMD:
2115 switch (sc->sc_pp->ide_product) {
2116 case PCI_PRODUCT_AMD_PBC766_IDE:
2117 case PCI_PRODUCT_AMD_PBC768_IDE:
2118 case PCI_PRODUCT_AMD_PBC8111_IDE:
2119 sc->sc_wdcdev.UDMA_cap = 5;
2120 break;
2121 default:
2122 sc->sc_wdcdev.UDMA_cap = 4;
2123 }
2124 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2125 break;
2126
2127 case PCI_VENDOR_NVIDIA:
2128 switch (sc->sc_pp->ide_product) {
2129 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2130 sc->sc_wdcdev.UDMA_cap = 5;
2131 break;
2132 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2133 sc->sc_wdcdev.UDMA_cap = 6;
2134 break;
2135 }
2136 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2137 break;
2138
2139 default:
2140 panic("amd7x6_chip_map: unknown vendor");
2141 }
2142 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2143 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2144 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2145 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2146 AMD7X6_CHANSTATUS_EN(sc));
2147
2148 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2149 DEBUG_PROBE);
2150 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2151 cp = &sc->pciide_channels[channel];
2152 if (pciide_chansetup(sc, channel, interface) == 0)
2153 continue;
2154
2155 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2156 aprint_normal("%s: %s channel ignored (disabled)\n",
2157 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2158 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
2159 continue;
2160 }
2161 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2162 pciide_pci_intr);
2163 }
2164 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2165 chanenable);
2166 return;
2167 }
2168
2169 void
2170 amd7x6_setup_channel(chp)
2171 struct channel_softc *chp;
2172 {
2173 u_int32_t udmatim_reg, datatim_reg;
2174 u_int8_t idedma_ctl;
2175 int mode, drive;
2176 struct ata_drive_datas *drvp;
2177 struct pciide_channel *cp = (struct pciide_channel*)chp;
2178 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2179 #ifndef PCIIDE_AMD756_ENABLEDMA
2180 int rev = PCI_REVISION(
2181 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2182 #endif
2183
2184 idedma_ctl = 0;
2185 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2186 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2187 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2188 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2189
2190 /* setup DMA if needed */
2191 pciide_channel_dma_setup(cp);
2192
2193 for (drive = 0; drive < 2; drive++) {
2194 drvp = &chp->ch_drive[drive];
2195 /* If no drive, skip */
2196 if ((drvp->drive_flags & DRIVE) == 0)
2197 continue;
2198 /* add timing values, setup DMA if needed */
2199 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2200 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2201 mode = drvp->PIO_mode;
2202 goto pio;
2203 }
2204 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2205 (drvp->drive_flags & DRIVE_UDMA)) {
2206 /* use Ultra/DMA */
2207 drvp->drive_flags &= ~DRIVE_DMA;
2208 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2209 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2210 AMD7X6_UDMA_TIME(chp->channel, drive,
2211 amd7x6_udma_tim[drvp->UDMA_mode]);
2212 /* can use PIO timings, MW DMA unused */
2213 mode = drvp->PIO_mode;
2214 } else {
2215 /* use Multiword DMA, but only if revision is OK */
2216 drvp->drive_flags &= ~DRIVE_UDMA;
2217 #ifndef PCIIDE_AMD756_ENABLEDMA
2218 /*
2219 			 * The workaround doesn't seem to be necessary
2220 			 * with all drives, so it can be disabled by defining
2221 			 * PCIIDE_AMD756_ENABLEDMA. The erratum causes a hard
2222 			 * hang if triggered.
2223 */
2224 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2225 sc->sc_pp->ide_product ==
2226 PCI_PRODUCT_AMD_PBC756_IDE &&
2227 AMD756_CHIPREV_DISABLEDMA(rev)) {
2228 aprint_normal(
2229 "%s:%d:%d: multi-word DMA disabled due "
2230 "to chip revision\n",
2231 sc->sc_wdcdev.sc_dev.dv_xname,
2232 chp->channel, drive);
2233 mode = drvp->PIO_mode;
2234 drvp->drive_flags &= ~DRIVE_DMA;
2235 goto pio;
2236 }
2237 #endif
2238 /* mode = min(pio, dma+2) */
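			/*
			 * A single data timing is programmed per drive and
			 * used for both PIO and MW DMA; MW DMA mode N gets
			 * the PIO mode N+2 timings.
			 */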
2239 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2240 mode = drvp->PIO_mode;
2241 else
2242 mode = drvp->DMA_mode + 2;
2243 }
2244 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2245
2246 pio: /* setup PIO mode */
2247 if (mode <= 2) {
2248 drvp->DMA_mode = 0;
2249 drvp->PIO_mode = 0;
2250 mode = 0;
2251 } else {
2252 drvp->PIO_mode = mode;
2253 drvp->DMA_mode = mode - 2;
2254 }
2255 datatim_reg |=
2256 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2257 amd7x6_pio_set[mode]) |
2258 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2259 amd7x6_pio_rec[mode]);
2260 }
2261 if (idedma_ctl != 0) {
2262 /* Add software bits in status register */
2263 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2264 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2265 idedma_ctl);
2266 }
2267 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2268 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2269 }
2270
2271 void
2272 apollo_chip_map(sc, pa)
2273 struct pciide_softc *sc;
2274 struct pci_attach_args *pa;
2275 {
2276 struct pciide_channel *cp;
2277 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2278 int channel;
2279 u_int32_t ideconf;
2280 bus_size_t cmdsize, ctlsize;
2281 pcitag_t pcib_tag;
2282 pcireg_t pcib_id, pcib_class;
2283
2284 if (pciide_chipen(sc, pa) == 0)
2285 return;
2286
2287 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2288 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2289 /* and read ID and rev of the ISA bridge */
2290 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2291 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2292 aprint_normal("%s: VIA Technologies ", sc->sc_wdcdev.sc_dev.dv_xname);
2293 switch (PCI_PRODUCT(pcib_id)) {
2294 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2295 aprint_normal("VT82C586 (Apollo VP) ");
2296 		if (PCI_REVISION(pcib_class) >= 0x02) {
2297 aprint_normal("ATA33 controller\n");
2298 sc->sc_wdcdev.UDMA_cap = 2;
2299 } else {
2300 aprint_normal("controller\n");
2301 sc->sc_wdcdev.UDMA_cap = 0;
2302 }
2303 break;
2304 case PCI_PRODUCT_VIATECH_VT82C596A:
2305 aprint_normal("VT82C596A (Apollo Pro) ");
2306 if (PCI_REVISION(pcib_class) >= 0x12) {
2307 aprint_normal("ATA66 controller\n");
2308 sc->sc_wdcdev.UDMA_cap = 4;
2309 } else {
2310 aprint_normal("ATA33 controller\n");
2311 sc->sc_wdcdev.UDMA_cap = 2;
2312 }
2313 break;
2314 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2315 aprint_normal("VT82C686A (Apollo KX133) ");
2316 if (PCI_REVISION(pcib_class) >= 0x40) {
2317 aprint_normal("ATA100 controller\n");
2318 sc->sc_wdcdev.UDMA_cap = 5;
2319 } else {
2320 aprint_normal("ATA66 controller\n");
2321 sc->sc_wdcdev.UDMA_cap = 4;
2322 }
2323 break;
2324 case PCI_PRODUCT_VIATECH_VT8231:
2325 aprint_normal("VT8231 ATA100 controller\n");
2326 sc->sc_wdcdev.UDMA_cap = 5;
2327 break;
2328 case PCI_PRODUCT_VIATECH_VT8233:
2329 aprint_normal("VT8233 ATA100 controller\n");
2330 sc->sc_wdcdev.UDMA_cap = 5;
2331 break;
2332 case PCI_PRODUCT_VIATECH_VT8233A:
2333 aprint_normal("VT8233A ATA133 controller\n");
2334 sc->sc_wdcdev.UDMA_cap = 6;
2335 break;
2336 case PCI_PRODUCT_VIATECH_VT8235:
2337 aprint_normal("VT8235 ATA133 controller\n");
2338 sc->sc_wdcdev.UDMA_cap = 6;
2339 break;
2340 case PCI_PRODUCT_VIATECH_VT8237_SATA:
2341 aprint_normal("VT8237 ATA133 controller\n");
2342 sc->sc_wdcdev.UDMA_cap = 6;
2343 break;
2344 default:
2345 aprint_normal("unknown ATA controller\n");
2346 sc->sc_wdcdev.UDMA_cap = 0;
2347 }
2348
2349 aprint_normal("%s: bus-master DMA support present",
2350 sc->sc_wdcdev.sc_dev.dv_xname);
2351 pciide_mapreg_dma(sc, pa);
2352 aprint_normal("\n");
2353 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2354 WDC_CAPABILITY_MODE;
2355 if (sc->sc_dma_ok) {
2356 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2357 sc->sc_wdcdev.irqack = pciide_irqack;
2358 if (sc->sc_wdcdev.UDMA_cap > 0)
2359 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2360 }
2361 sc->sc_wdcdev.PIO_cap = 4;
2362 sc->sc_wdcdev.DMA_cap = 2;
2363 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2364 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2365 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2366
2367 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2368 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2369 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2370 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2371 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2372 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2373 DEBUG_PROBE);
2374
2375 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2376 cp = &sc->pciide_channels[channel];
2377 if (pciide_chansetup(sc, channel, interface) == 0)
2378 continue;
2379
2380 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2381 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2382 aprint_normal("%s: %s channel ignored (disabled)\n",
2383 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2384 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
2385 continue;
2386 }
2387 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2388 pciide_pci_intr);
2389 }
2390 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2391 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2392 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2393 }
2394
2395 void
2396 apollo_setup_channel(chp)
2397 struct channel_softc *chp;
2398 {
2399 u_int32_t udmatim_reg, datatim_reg;
2400 u_int8_t idedma_ctl;
2401 int mode, drive;
2402 struct ata_drive_datas *drvp;
2403 struct pciide_channel *cp = (struct pciide_channel*)chp;
2404 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2405
2406 idedma_ctl = 0;
2407 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2408 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2409 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2410 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2411
2412 /* setup DMA if needed */
2413 pciide_channel_dma_setup(cp);
2414
2415 for (drive = 0; drive < 2; drive++) {
2416 drvp = &chp->ch_drive[drive];
2417 /* If no drive, skip */
2418 if ((drvp->drive_flags & DRIVE) == 0)
2419 continue;
2420 /* add timing values, setup DMA if needed */
2421 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2422 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2423 mode = drvp->PIO_mode;
2424 goto pio;
2425 }
2426 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2427 (drvp->drive_flags & DRIVE_UDMA)) {
2428 /* use Ultra/DMA */
2429 drvp->drive_flags &= ~DRIVE_DMA;
2430 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2431 APO_UDMA_EN_MTH(chp->channel, drive);
2432 if (sc->sc_wdcdev.UDMA_cap == 6) {
2433 /* 8233a */
2434 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2435 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2436 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2437 /* 686b */
2438 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2439 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2440 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2441 /* 596b or 686a */
2442 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2443 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2444 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2445 } else {
2446 /* 596a or 586b */
2447 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2448 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2449 }
2450 /* can use PIO timings, MW DMA unused */
2451 mode = drvp->PIO_mode;
2452 } else {
2453 /* use Multiword DMA */
2454 drvp->drive_flags &= ~DRIVE_UDMA;
2455 /* mode = min(pio, dma+2) */
2456 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2457 mode = drvp->PIO_mode;
2458 else
2459 mode = drvp->DMA_mode + 2;
2460 }
2461 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2462
2463 pio: /* setup PIO mode */
2464 if (mode <= 2) {
2465 drvp->DMA_mode = 0;
2466 drvp->PIO_mode = 0;
2467 mode = 0;
2468 } else {
2469 drvp->PIO_mode = mode;
2470 drvp->DMA_mode = mode - 2;
2471 }
2472 datatim_reg |=
2473 APO_DATATIM_PULSE(chp->channel, drive,
2474 apollo_pio_set[mode]) |
2475 APO_DATATIM_RECOV(chp->channel, drive,
2476 apollo_pio_rec[mode]);
2477 }
2478 if (idedma_ctl != 0) {
2479 /* Add software bits in status register */
2480 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2481 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2482 idedma_ctl);
2483 }
2484 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2485 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2486 }
2487
2488 void
2489 apollo_sata_chip_map(sc, pa)
2490 struct pciide_softc *sc;
2491 struct pci_attach_args *pa;
2492 {
2493 struct pciide_channel *cp;
2494 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2495 int channel;
2496 bus_size_t cmdsize, ctlsize;
2497
2498 if (pciide_chipen(sc, pa) == 0)
2499 return;
2500
2501 	if (interface == 0) {
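		/*
		 * If the interface register reads 0, assume native-PCI
		 * mode on both channels with bus-master DMA.
		 */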
2502 WDCDEBUG_PRINT(("apollo_sata_chip_map interface == 0\n"),
2503 DEBUG_PROBE);
2504 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2505 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2506 }
2507
2508 aprint_normal("%s: bus-master DMA support present",
2509 sc->sc_wdcdev.sc_dev.dv_xname);
2510 pciide_mapreg_dma(sc, pa);
2511 aprint_normal("\n");
2512
2513 if (sc->sc_dma_ok) {
2514 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2515 sc->sc_wdcdev.irqack = pciide_irqack;
2516 }
2517 sc->sc_wdcdev.PIO_cap = 4;
2518 sc->sc_wdcdev.DMA_cap = 2;
2519 sc->sc_wdcdev.UDMA_cap = 6;
2520
2521 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2522 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2523 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2524 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SINGLE_DRIVE;
2525 sc->sc_wdcdev.set_modes = sata_setup_channel;
2526
2527 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2528 cp = &sc->pciide_channels[channel];
2529 if (pciide_chansetup(sc, channel, interface) == 0)
2530 continue;
2531 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2532 pciide_pci_intr);
2533 }
2534 }
2535
2536 void
2537 cmd_channel_map(pa, sc, channel)
2538 struct pci_attach_args *pa;
2539 struct pciide_softc *sc;
2540 int channel;
2541 {
2542 struct pciide_channel *cp = &sc->pciide_channels[channel];
2543 bus_size_t cmdsize, ctlsize;
2544 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2545 int interface, one_channel;
2546
2547 /*
2548 * The 0648/0649 can be told to identify as a RAID controller.
2549 	 * In this case, we have to fake the interface value.
2550 */
2551 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2552 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2553 PCIIDE_INTERFACE_SETTABLE(1);
2554 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2555 CMD_CONF_DSA1)
2556 interface |= PCIIDE_INTERFACE_PCI(0) |
2557 PCIIDE_INTERFACE_PCI(1);
2558 } else {
2559 interface = PCI_INTERFACE(pa->pa_class);
2560 }
2561
2562 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2563 cp->name = PCIIDE_CHANNEL_NAME(channel);
2564 cp->wdc_channel.channel = channel;
2565 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2566
2567 /*
2568 	 * Older CMD64x controllers don't have independent channels.
2569 */
2570 switch (sc->sc_pp->ide_product) {
2571 case PCI_PRODUCT_CMDTECH_649:
2572 one_channel = 0;
2573 break;
2574 default:
2575 one_channel = 1;
2576 break;
2577 }
2578
2579 if (channel > 0 && one_channel) {
2580 cp->wdc_channel.ch_queue =
2581 sc->pciide_channels[0].wdc_channel.ch_queue;
2582 } else {
2583 cp->wdc_channel.ch_queue =
2584 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2585 }
2586 if (cp->wdc_channel.ch_queue == NULL) {
2587 aprint_error("%s %s channel: "
2588 		    "can't allocate memory for command queue\n",
2589 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2590 return;
2591 }
2592
2593 aprint_normal("%s: %s channel %s to %s mode\n",
2594 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2595 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2596 "configured" : "wired",
2597 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2598 "native-PCI" : "compatibility");
2599
2600 /*
2601 	 * With a CMD PCI64x, if we get here, the first channel is enabled:
2602 	 * there's no way to disable the first channel without disabling
2603 	 * the whole device.
2604 */
2605 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2606 aprint_normal("%s: %s channel ignored (disabled)\n",
2607 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2608 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
2609 return;
2610 }
2611
2612 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2613 }
2614
2615 int
2616 cmd_pci_intr(arg)
2617 void *arg;
2618 {
2619 struct pciide_softc *sc = arg;
2620 struct pciide_channel *cp;
2621 struct channel_softc *wdc_cp;
2622 int i, rv, crv;
2623 u_int32_t priirq, secirq;
2624
2625 rv = 0;
2626 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2627 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
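	/*
	 * The pending-interrupt bits of the two channels live in
	 * different configuration registers (CMD_CONF and CMD_ARTTIM23).
	 */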
2628 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2629 cp = &sc->pciide_channels[i];
2630 wdc_cp = &cp->wdc_channel;
2631 		/* If a compat channel, skip. */
2632 if (cp->compat)
2633 continue;
2634 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2635 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2636 crv = wdcintr(wdc_cp);
2637 if (crv == 0)
2638 printf("%s:%d: bogus intr\n",
2639 sc->sc_wdcdev.sc_dev.dv_xname, i);
2640 else
2641 rv = 1;
2642 }
2643 }
2644 return rv;
2645 }
2646
2647 void
2648 cmd_chip_map(sc, pa)
2649 struct pciide_softc *sc;
2650 struct pci_attach_args *pa;
2651 {
2652 int channel;
2653
2654 /*
2655 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2656 	 * and the base address registers can be disabled at the
2657 	 * hardware level. In this case, the device is wired
2658 * in compat mode and its first channel is always enabled,
2659 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2660 * In fact, it seems that the first channel of the CMD PCI0640
2661 * can't be disabled.
2662 */
2663
2664 #ifdef PCIIDE_CMD064x_DISABLE
2665 if (pciide_chipen(sc, pa) == 0)
2666 return;
2667 #endif
2668
2669 aprint_normal("%s: hardware does not support DMA\n",
2670 sc->sc_wdcdev.sc_dev.dv_xname);
2671 sc->sc_dma_ok = 0;
2672
2673 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2674 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2675 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2676
2677 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2678 cmd_channel_map(pa, sc, channel);
2679 }
2680 }
2681
2682 void
2683 cmd0643_9_chip_map(sc, pa)
2684 struct pciide_softc *sc;
2685 struct pci_attach_args *pa;
2686 {
2687 struct pciide_channel *cp;
2688 int channel;
2689 pcireg_t rev = PCI_REVISION(pa->pa_class);
2690
2691 /*
2692 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2693 	 * and the base address registers can be disabled at the
2694 	 * hardware level. In this case, the device is wired
2695 * in compat mode and its first channel is always enabled,
2696 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2697 * In fact, it seems that the first channel of the CMD PCI0640
2698 * can't be disabled.
2699 */
2700
2701 #ifdef PCIIDE_CMD064x_DISABLE
2702 if (pciide_chipen(sc, pa) == 0)
2703 return;
2704 #endif
2705
2706 aprint_normal("%s: bus-master DMA support present",
2707 sc->sc_wdcdev.sc_dev.dv_xname);
2708 pciide_mapreg_dma(sc, pa);
2709 aprint_normal("\n");
2710 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2711 WDC_CAPABILITY_MODE;
2712 if (sc->sc_dma_ok) {
2713 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2714 switch (sc->sc_pp->ide_product) {
2715 case PCI_PRODUCT_CMDTECH_649:
2716 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2717 sc->sc_wdcdev.UDMA_cap = 5;
2718 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2719 break;
2720 case PCI_PRODUCT_CMDTECH_648:
2721 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2722 sc->sc_wdcdev.UDMA_cap = 4;
2723 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2724 break;
2725 case PCI_PRODUCT_CMDTECH_646:
2726 if (rev >= CMD0646U2_REV) {
2727 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2728 sc->sc_wdcdev.UDMA_cap = 2;
2729 } else if (rev >= CMD0646U_REV) {
2730 /*
2731 * Linux's driver claims that the 646U is broken
2732 * with UDMA. Only enable it if we know what we're
2733 * doing
2734 */
2735 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2736 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2737 sc->sc_wdcdev.UDMA_cap = 2;
2738 #endif
2739 /* explicitly disable UDMA */
2740 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2741 CMD_UDMATIM(0), 0);
2742 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2743 CMD_UDMATIM(1), 0);
2744 }
2745 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2746 break;
2747 default:
2748 sc->sc_wdcdev.irqack = pciide_irqack;
2749 }
2750 }
2751
2752 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2753 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2754 sc->sc_wdcdev.PIO_cap = 4;
2755 sc->sc_wdcdev.DMA_cap = 2;
2756 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2757
2758 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2759 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2760 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2761 DEBUG_PROBE);
2762
2763 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2764 cp = &sc->pciide_channels[channel];
2765 cmd_channel_map(pa, sc, channel);
2766 }
2767 /*
2768 * note - this also makes sure we clear the irq disable and reset
2769 * bits
2770 */
2771 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2772 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2773 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2774 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2775 DEBUG_PROBE);
2776 }
2777
2778 void
2779 cmd0643_9_setup_channel(chp)
2780 struct channel_softc *chp;
2781 {
2782 struct ata_drive_datas *drvp;
2783 u_int8_t tim;
2784 u_int32_t idedma_ctl, udma_reg;
2785 int drive;
2786 struct pciide_channel *cp = (struct pciide_channel*)chp;
2787 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2788
2789 idedma_ctl = 0;
2790 /* setup DMA if needed */
2791 pciide_channel_dma_setup(cp);
2792
2793 for (drive = 0; drive < 2; drive++) {
2794 drvp = &chp->ch_drive[drive];
2795 /* If no drive, skip */
2796 if ((drvp->drive_flags & DRIVE) == 0)
2797 continue;
2798 /* add timing values, setup DMA if needed */
2799 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2800 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2801 if (drvp->drive_flags & DRIVE_UDMA) {
2802 /* UltraDMA on a 646U2, 0648 or 0649 */
2803 drvp->drive_flags &= ~DRIVE_DMA;
2804 udma_reg = pciide_pci_read(sc->sc_pc,
2805 sc->sc_tag, CMD_UDMATIM(chp->channel));
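				/*
				 * Cap at UDMA2 unless the 80-wire cable bit
				 * is set for this channel.
				 */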
2806 if (drvp->UDMA_mode > 2 &&
2807 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2808 CMD_BICSR) &
2809 CMD_BICSR_80(chp->channel)) == 0)
2810 drvp->UDMA_mode = 2;
2811 if (drvp->UDMA_mode > 2)
2812 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2813 else if (sc->sc_wdcdev.UDMA_cap > 2)
2814 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2815 udma_reg |= CMD_UDMATIM_UDMA(drive);
2816 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2817 CMD_UDMATIM_TIM_OFF(drive));
2818 udma_reg |=
2819 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2820 CMD_UDMATIM_TIM_OFF(drive));
2821 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2822 CMD_UDMATIM(chp->channel), udma_reg);
2823 } else {
2824 /*
2825 * use Multiword DMA.
2826 * Timings will be used for both PIO and DMA,
2827 				 * so adjust the DMA mode if needed.
2828 				 * If we have a 0646U2/8/9, turn off UDMA.
2829 */
2830 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2831 udma_reg = pciide_pci_read(sc->sc_pc,
2832 sc->sc_tag,
2833 CMD_UDMATIM(chp->channel));
2834 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2835 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2836 CMD_UDMATIM(chp->channel),
2837 udma_reg);
2838 }
2839 if (drvp->PIO_mode >= 3 &&
2840 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2841 drvp->DMA_mode = drvp->PIO_mode - 2;
2842 }
2843 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2844 }
2845 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2846 }
2847 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2848 CMD_DATA_TIM(chp->channel, drive), tim);
2849 }
2850 if (idedma_ctl != 0) {
2851 /* Add software bits in status register */
2852 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2853 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2854 idedma_ctl);
2855 }
2856 }
2857
2858 void
2859 cmd646_9_irqack(chp)
2860 struct channel_softc *chp;
2861 {
2862 u_int32_t priirq, secirq;
2863 struct pciide_channel *cp = (struct pciide_channel*)chp;
2864 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2865
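	/*
	 * Re-writing the register contents acks the channel's pending
	 * interrupt.
	 */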
2866 if (chp->channel == 0) {
2867 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2868 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2869 } else {
2870 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2871 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2872 }
2873 pciide_irqack(chp);
2874 }
2875
2876 void
2877 cmd680_chip_map(sc, pa)
2878 struct pciide_softc *sc;
2879 struct pci_attach_args *pa;
2880 {
2881 struct pciide_channel *cp;
2882 int channel;
2883
2884 if (pciide_chipen(sc, pa) == 0)
2885 return;
2886
2887 aprint_normal("%s: bus-master DMA support present",
2888 sc->sc_wdcdev.sc_dev.dv_xname);
2889 pciide_mapreg_dma(sc, pa);
2890 aprint_normal("\n");
2891 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2892 WDC_CAPABILITY_MODE;
2893 if (sc->sc_dma_ok) {
2894 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2895 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2896 sc->sc_wdcdev.UDMA_cap = 6;
2897 sc->sc_wdcdev.irqack = pciide_irqack;
2898 }
2899
2900 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2901 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2902 sc->sc_wdcdev.PIO_cap = 4;
2903 sc->sc_wdcdev.DMA_cap = 2;
2904 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2905
2906 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2907 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2908 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2909 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2910 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2911 cp = &sc->pciide_channels[channel];
2912 cmd680_channel_map(pa, sc, channel);
2913 }
2914 }
2915
2916 void
2917 cmd680_channel_map(pa, sc, channel)
2918 struct pci_attach_args *pa;
2919 struct pciide_softc *sc;
2920 int channel;
2921 {
2922 struct pciide_channel *cp = &sc->pciide_channels[channel];
2923 bus_size_t cmdsize, ctlsize;
2924 int interface, i, reg;
2925 static const u_int8_t init_val[] =
2926 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2927 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2928
2929 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2930 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2931 PCIIDE_INTERFACE_SETTABLE(1);
2932 interface |= PCIIDE_INTERFACE_PCI(0) |
2933 PCIIDE_INTERFACE_PCI(1);
2934 } else {
2935 interface = PCI_INTERFACE(pa->pa_class);
2936 }
2937
2938 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2939 cp->name = PCIIDE_CHANNEL_NAME(channel);
2940 cp->wdc_channel.channel = channel;
2941 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2942
2943 cp->wdc_channel.ch_queue =
2944 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2945 if (cp->wdc_channel.ch_queue == NULL) {
2946 aprint_error("%s %s channel: "
2947 		    "can't allocate memory for command queue\n",
2948 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2949 return;
2950 }
2951
2952 /* XXX */
2953 reg = 0xa2 + channel * 16;
2954 for (i = 0; i < sizeof(init_val); i++)
2955 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2956
2957 aprint_normal("%s: %s channel %s to %s mode\n",
2958 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2959 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2960 "configured" : "wired",
2961 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2962 "native-PCI" : "compatibility");
2963
2964 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2965 }
2966
2967 void
2968 cmd680_setup_channel(chp)
2969 struct channel_softc *chp;
2970 {
2971 struct ata_drive_datas *drvp;
2972 u_int8_t mode, off, scsc;
2973 u_int16_t val;
2974 u_int32_t idedma_ctl;
2975 int drive;
2976 struct pciide_channel *cp = (struct pciide_channel*)chp;
2977 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2978 pci_chipset_tag_t pc = sc->sc_pc;
2979 pcitag_t pa = sc->sc_tag;
2980 static const u_int8_t udma2_tbl[] =
2981 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2982 static const u_int8_t udma_tbl[] =
2983 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2984 static const u_int16_t dma_tbl[] =
2985 { 0x2208, 0x10c2, 0x10c1 };
2986 static const u_int16_t pio_tbl[] =
2987 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2988
2989 idedma_ctl = 0;
2990 pciide_channel_dma_setup(cp);
2991 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
2992
2993 for (drive = 0; drive < 2; drive++) {
2994 drvp = &chp->ch_drive[drive];
2995 /* If no drive, skip */
2996 if ((drvp->drive_flags & DRIVE) == 0)
2997 continue;
2998 mode &= ~(0x03 << (drive * 4));
2999 if (drvp->drive_flags & DRIVE_UDMA) {
3000 drvp->drive_flags &= ~DRIVE_DMA;
3001 off = 0xa0 + chp->channel * 16;
3002 if (drvp->UDMA_mode > 2 &&
3003 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
3004 drvp->UDMA_mode = 2;
3005 scsc = pciide_pci_read(pc, pa, 0x8a);
3006 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
3007 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
3008 scsc = pciide_pci_read(pc, pa, 0x8a);
3009 if ((scsc & 0x30) == 0)
3010 drvp->UDMA_mode = 5;
3011 }
3012 mode |= 0x03 << (drive * 4);
3013 off = 0xac + chp->channel * 16 + drive * 2;
3014 val = pciide_pci_read(pc, pa, off) & ~0x3f;
3015 if (scsc & 0x30)
3016 val |= udma2_tbl[drvp->UDMA_mode];
3017 else
3018 val |= udma_tbl[drvp->UDMA_mode];
3019 pciide_pci_write(pc, pa, off, val);
3020 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3021 } else if (drvp->drive_flags & DRIVE_DMA) {
3022 mode |= 0x02 << (drive * 4);
3023 off = 0xa8 + chp->channel * 16 + drive * 2;
3024 val = dma_tbl[drvp->DMA_mode];
3025 pciide_pci_write(pc, pa, off, val & 0xff);
3026 pciide_pci_write(pc, pa, off, val >> 8);
3027 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3028 } else {
3029 mode |= 0x01 << (drive * 4);
3030 off = 0xa4 + chp->channel * 16 + drive * 2;
3031 val = pio_tbl[drvp->PIO_mode];
3032 pciide_pci_write(pc, pa, off, val & 0xff);
3033 pciide_pci_write(pc, pa, off, val >> 8);
3034 }
3035 }
3036
3037 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3038 if (idedma_ctl != 0) {
3039 /* Add software bits in status register */
3040 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3041 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3042 idedma_ctl);
3043 }
3044 }
3045
3046 void
3047 cmd3112_chip_map(sc, pa)
3048 struct pciide_softc *sc;
3049 struct pci_attach_args *pa;
3050 {
3051 struct pciide_channel *cp;
3052 bus_size_t cmdsize, ctlsize;
3053 pcireg_t interface;
3054 int channel;
3055
3056 if (pciide_chipen(sc, pa) == 0)
3057 return;
3058
3059 aprint_normal("%s: bus-master DMA support present",
3060 sc->sc_wdcdev.sc_dev.dv_xname);
3061 pciide_mapreg_dma(sc, pa);
3062 aprint_normal("\n");
3063
3064 /*
3065 * Rev. <= 0x01 of the 3112 have a bug that can cause data
3066 * corruption if DMA transfers cross an 8K boundary. This is
3067 * apparently hard to tickle, but we'll go ahead and play it
3068 * safe.
3069 */
3070 if (PCI_REVISION(pa->pa_class) <= 0x01) {
3071 sc->sc_dma_maxsegsz = 8192;
3072 sc->sc_dma_boundary = 8192;
3073 }
3074
3075 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3076 WDC_CAPABILITY_MODE;
3077 sc->sc_wdcdev.PIO_cap = 4;
3078 if (sc->sc_dma_ok) {
3079 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3080 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3081 sc->sc_wdcdev.irqack = pciide_irqack;
3082 sc->sc_wdcdev.DMA_cap = 2;
3083 sc->sc_wdcdev.UDMA_cap = 6;
3084 }
3085 sc->sc_wdcdev.set_modes = cmd3112_setup_channel;
3086
3087 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3088 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3089
3090 /*
3091 * The 3112 can be told to identify as a RAID controller.
3092 	 * In this case, we have to fake the interface value.
3093 */
3094 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3095 interface = PCI_INTERFACE(pa->pa_class);
3096 } else {
3097 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3098 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3099 }
3100
3101 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3102 cp = &sc->pciide_channels[channel];
3103 if (pciide_chansetup(sc, channel, interface) == 0)
3104 continue;
3105 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3106 pciide_pci_intr);
3107 }
3108 }
3109
3110 void
3111 cmd3112_setup_channel(chp)
3112 struct channel_softc *chp;
3113 {
3114 struct ata_drive_datas *drvp;
3115 int drive;
3116 u_int32_t idedma_ctl, dtm;
3117 struct pciide_channel *cp = (struct pciide_channel*)chp;
3118 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
3119
3120 /* setup DMA if needed */
3121 pciide_channel_dma_setup(cp);
3122
3123 idedma_ctl = 0;
3124 dtm = 0;
3125
3126 for (drive = 0; drive < 2; drive++) {
3127 drvp = &chp->ch_drive[drive];
3128 /* If no drive, skip */
3129 if ((drvp->drive_flags & DRIVE) == 0)
3130 continue;
3131 if (drvp->drive_flags & DRIVE_UDMA) {
3132 /* use Ultra/DMA */
3133 drvp->drive_flags &= ~DRIVE_DMA;
3134 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3135 dtm |= DTM_IDEx_DMA;
3136 } else if (drvp->drive_flags & DRIVE_DMA) {
3137 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3138 dtm |= DTM_IDEx_DMA;
3139 } else {
3140 dtm |= DTM_IDEx_PIO;
3141 }
3142 }
3143
3144 /*
3145 	 * Nothing to do to set up modes; it is meaningless for S-ATA
3146 * (but many S-ATA drives still want to get the SET_FEATURE
3147 * command).
3148 */
3149 if (idedma_ctl != 0) {
3150 /* Add software bits in status register */
3151 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3152 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3153 idedma_ctl);
3154 }
3155 pci_conf_write(sc->sc_pc, sc->sc_tag,
3156 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
3157 }
3158
3159 void
3160 cy693_chip_map(sc, pa)
3161 struct pciide_softc *sc;
3162 struct pci_attach_args *pa;
3163 {
3164 struct pciide_channel *cp;
3165 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3166 bus_size_t cmdsize, ctlsize;
3167
3168 if (pciide_chipen(sc, pa) == 0)
3169 return;
3170
3171 /*
3172 	 * This chip has 2 PCI IDE functions, one for primary and one for
3173 	 * secondary, so we need to call pciide_mapregs_compat() with
3174 	 * the real channel.
3175 */
3176 if (pa->pa_function == 1) {
3177 sc->sc_cy_compatchan = 0;
3178 } else if (pa->pa_function == 2) {
3179 sc->sc_cy_compatchan = 1;
3180 } else {
3181 aprint_error("%s: unexpected PCI function %d\n",
3182 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3183 return;
3184 }
3185 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
3186 aprint_normal("%s: bus-master DMA support present",
3187 sc->sc_wdcdev.sc_dev.dv_xname);
3188 pciide_mapreg_dma(sc, pa);
3189 } else {
3190 aprint_normal("%s: hardware does not support DMA",
3191 sc->sc_wdcdev.sc_dev.dv_xname);
3192 sc->sc_dma_ok = 0;
3193 }
3194 aprint_normal("\n");
3195
3196 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3197 if (sc->sc_cy_handle == NULL) {
3198 aprint_error("%s: unable to map hyperCache control registers\n",
3199 sc->sc_wdcdev.sc_dev.dv_xname);
3200 sc->sc_dma_ok = 0;
3201 }
3202
3203 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3204 WDC_CAPABILITY_MODE;
3205 if (sc->sc_dma_ok) {
3206 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3207 sc->sc_wdcdev.irqack = pciide_irqack;
3208 }
3209 sc->sc_wdcdev.PIO_cap = 4;
3210 sc->sc_wdcdev.DMA_cap = 2;
3211 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3212
3213 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3214 sc->sc_wdcdev.nchannels = 1;
3215
3216 /* Only one channel for this chip; if we are here it's enabled */
3217 cp = &sc->pciide_channels[0];
3218 sc->wdc_chanarray[0] = &cp->wdc_channel;
3219 cp->name = PCIIDE_CHANNEL_NAME(0);
3220 cp->wdc_channel.channel = 0;
3221 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3222 cp->wdc_channel.ch_queue =
3223 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3224 if (cp->wdc_channel.ch_queue == NULL) {
3225 aprint_error("%s primary channel: "
3226 		    "can't allocate memory for command queue\n",
3227 sc->sc_wdcdev.sc_dev.dv_xname);
3228 return;
3229 }
3230 aprint_normal("%s: primary channel %s to ",
3231 sc->sc_wdcdev.sc_dev.dv_xname,
3232 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3233 "configured" : "wired");
3234 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3235 aprint_normal("native-PCI mode\n");
3236 pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3237 pciide_pci_intr);
3238 } else {
3239 aprint_normal("compatibility mode\n");
3240 pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, &cmdsize,
3241 &ctlsize);
3242 }
3243 }
3244
3245 void
3246 cy693_setup_channel(chp)
3247 struct channel_softc *chp;
3248 {
3249 struct ata_drive_datas *drvp;
3250 int drive;
3251 u_int32_t cy_cmd_ctrl;
3252 u_int32_t idedma_ctl;
3253 struct pciide_channel *cp = (struct pciide_channel*)chp;
3254 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3255 int dma_mode = -1;
3256
3257 cy_cmd_ctrl = idedma_ctl = 0;
3258
3259 /* setup DMA if needed */
3260 pciide_channel_dma_setup(cp);
3261
3262 for (drive = 0; drive < 2; drive++) {
3263 drvp = &chp->ch_drive[drive];
3264 /* If no drive, skip */
3265 if ((drvp->drive_flags & DRIVE) == 0)
3266 continue;
3267 /* add timing values, setup DMA if needed */
3268 if (drvp->drive_flags & DRIVE_DMA) {
3269 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3270 /* use Multiword DMA */
3271 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3272 dma_mode = drvp->DMA_mode;
3273 }
3274 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3275 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3276 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3277 CY_CMD_CTRL_IOW_REC_OFF(drive));
3278 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3279 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3280 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3281 CY_CMD_CTRL_IOR_REC_OFF(drive));
3282 }
3283 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
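	/* Only one DMA timing per channel: both drives get the slowest mode */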
3284 chp->ch_drive[0].DMA_mode = dma_mode;
3285 chp->ch_drive[1].DMA_mode = dma_mode;
3286
3287 if (dma_mode == -1)
3288 dma_mode = 0;
3289
3290 if (sc->sc_cy_handle != NULL) {
3291 /* Note: `multiple' is implied. */
3292 cy82c693_write(sc->sc_cy_handle,
3293 (sc->sc_cy_compatchan == 0) ?
3294 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3295 }
3296
3297 if (idedma_ctl != 0) {
3298 /* Add software bits in status register */
3299 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3300 IDEDMA_CTL, idedma_ctl);
3301 }
3302 }
3303
3304 static struct sis_hostbr_type {
3305 u_int16_t id;
3306 u_int8_t rev;
3307 u_int8_t udma_mode;
3308 char *name;
3309 u_int8_t type;
3310 #define SIS_TYPE_NOUDMA 0
3311 #define SIS_TYPE_66 1
3312 #define SIS_TYPE_100OLD 2
3313 #define SIS_TYPE_100NEW 3
3314 #define SIS_TYPE_133OLD 4
3315 #define SIS_TYPE_133NEW 5
3316 #define SIS_TYPE_SOUTH 6
3317 } sis_hostbr_type[] = {
3318 	/* Most of the info here is from sos (at) freebsd.org */
3319 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
3320 #if 0
3321 /*
3322 	 * Controllers associated with a rev 0x02 530 host-to-PCI bridge
3323 * have problems with UDMA (info provided by Christos)
3324 */
3325 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
3326 #endif
3327 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
3328 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
3329 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
3330 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
3331 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
3332 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
3333 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
3334 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
3335 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
3336 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
3337 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
3338 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
3339 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
3340 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
3341 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
3342 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
3343 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
3344 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
3345 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
3346 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
3347 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
3348 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
3349 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
3350 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
3351 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
3352 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
3353 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
3354 /*
3355 	 * From sos (at) freebsd.org: the 0x961 ID will never be found in the real world
3356 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
3357 */
3358 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
3359 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
3360 };
3361
3362 static struct sis_hostbr_type *sis_hostbr_type_match;
3363
3364 static int
3365 sis_hostbr_match(pa)
3366 struct pci_attach_args *pa;
3367 {
3368 int i;
3369 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3370 return 0;
3371 sis_hostbr_type_match = NULL;
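	/*
	 * Scan the whole table and keep the last match, so the entry with
	 * the highest matching revision wins for a given product.
	 */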
3372 for (i = 0;
3373 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3374 i++) {
3375 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3376 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3377 sis_hostbr_type_match = &sis_hostbr_type[i];
3378 }
3379 return (sis_hostbr_type_match != NULL);
3380 }
3381
3382 static int sis_south_match(pa)
3383 struct pci_attach_args *pa;
3384 {
3385 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3386 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3387 PCI_REVISION(pa->pa_class) >= 0x10);
3388 }
3389
3390 void
3391 sis_chip_map(sc, pa)
3392 struct pciide_softc *sc;
3393 struct pci_attach_args *pa;
3394 {
3395 struct pciide_channel *cp;
3396 int channel;
3397 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3398 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3399 pcireg_t rev = PCI_REVISION(pa->pa_class);
3400 bus_size_t cmdsize, ctlsize;
3401
3402 if (pciide_chipen(sc, pa) == 0)
3403 return;
3404
3405 	aprint_normal("%s: Silicon Integrated Systems ",
3406 sc->sc_wdcdev.sc_dev.dv_xname);
3407 pci_find_device(NULL, sis_hostbr_match);
3408 if (sis_hostbr_type_match) {
3409 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
3410 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3411 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3412 SIS_REG_57) & 0x7f);
3413 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3414 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3415 aprint_normal("96X UDMA%d",
3416 sis_hostbr_type_match->udma_mode);
3417 sc->sis_type = SIS_TYPE_133NEW;
3418 sc->sc_wdcdev.UDMA_cap =
3419 sis_hostbr_type_match->udma_mode;
3420 } else {
3421 if (pci_find_device(NULL, sis_south_match)) {
3422 sc->sis_type = SIS_TYPE_133OLD;
3423 sc->sc_wdcdev.UDMA_cap =
3424 sis_hostbr_type_match->udma_mode;
3425 } else {
3426 sc->sis_type = SIS_TYPE_100NEW;
3427 sc->sc_wdcdev.UDMA_cap =
3428 sis_hostbr_type_match->udma_mode;
3429 }
3430 }
3431 } else {
3432 sc->sis_type = sis_hostbr_type_match->type;
3433 sc->sc_wdcdev.UDMA_cap =
3434 sis_hostbr_type_match->udma_mode;
3435 }
3436 		aprint_normal("%s", sis_hostbr_type_match->name);
3437 } else {
3438 aprint_normal("5597/5598");
3439 if (rev >= 0xd0) {
3440 sc->sc_wdcdev.UDMA_cap = 2;
3441 sc->sis_type = SIS_TYPE_66;
3442 } else {
3443 sc->sc_wdcdev.UDMA_cap = 0;
3444 sc->sis_type = SIS_TYPE_NOUDMA;
3445 }
3446 }
3447 aprint_normal(" IDE controller (rev. 0x%02x)\n",
3448 PCI_REVISION(pa->pa_class));
3449 aprint_normal("%s: bus-master DMA support present",
3450 sc->sc_wdcdev.sc_dev.dv_xname);
3451 pciide_mapreg_dma(sc, pa);
3452 aprint_normal("\n");
3453
3454 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3455 WDC_CAPABILITY_MODE;
3456 if (sc->sc_dma_ok) {
3457 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3458 sc->sc_wdcdev.irqack = pciide_irqack;
3459 if (sc->sis_type >= SIS_TYPE_66)
3460 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3461 }
3462
3463 sc->sc_wdcdev.PIO_cap = 4;
3464 sc->sc_wdcdev.DMA_cap = 2;
3465
3466 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3467 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3468 switch(sc->sis_type) {
3469 case SIS_TYPE_NOUDMA:
3470 case SIS_TYPE_66:
3471 case SIS_TYPE_100OLD:
3472 sc->sc_wdcdev.set_modes = sis_setup_channel;
3473 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3474 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3475 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3476 break;
3477 case SIS_TYPE_100NEW:
3478 case SIS_TYPE_133OLD:
3479 sc->sc_wdcdev.set_modes = sis_setup_channel;
3480 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3481 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3482 break;
3483 case SIS_TYPE_133NEW:
3484 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3485 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3486 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3487 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3488 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3489 break;
3490 }
3491
3492
3493 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3494 cp = &sc->pciide_channels[channel];
3495 if (pciide_chansetup(sc, channel, interface) == 0)
3496 continue;
3497 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3498 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3499 aprint_normal("%s: %s channel ignored (disabled)\n",
3500 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3501 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
3502 continue;
3503 }
3504 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3505 pciide_pci_intr);
3506 }
3507 }
3508
3509 void
3510 sis96x_setup_channel(chp)
3511 struct channel_softc *chp;
3512 {
3513 struct ata_drive_datas *drvp;
3514 int drive;
3515 u_int32_t sis_tim;
3516 u_int32_t idedma_ctl;
3517 int regtim;
3518 struct pciide_channel *cp = (struct pciide_channel*)chp;
3519 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3520
3521 sis_tim = 0;
3522 idedma_ctl = 0;
3523 /* setup DMA if needed */
3524 pciide_channel_dma_setup(cp);
3525
3526 for (drive = 0; drive < 2; drive++) {
3527 regtim = SIS_TIM133(
3528 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3529 chp->channel, drive);
3530 drvp = &chp->ch_drive[drive];
3531 /* If no drive, skip */
3532 if ((drvp->drive_flags & DRIVE) == 0)
3533 continue;
3534 /* add timing values, setup DMA if needed */
3535 if (drvp->drive_flags & DRIVE_UDMA) {
3536 /* use Ultra/DMA */
3537 drvp->drive_flags &= ~DRIVE_DMA;
3538 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3539 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3540 if (drvp->UDMA_mode > 2)
3541 drvp->UDMA_mode = 2;
3542 }
3543 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3544 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3545 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3546 } else if (drvp->drive_flags & DRIVE_DMA) {
3547 /*
3548 * use Multiword DMA
3549 * Timings will be used for both PIO and DMA,
3550 * so adjust DMA mode if needed
3551 */
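			/*
			 * e.g. a drive reporting PIO 4 / MW DMA 1 gets
			 * clamped to PIO 3 / MW DMA 1 here, so a single
			 * timing value can serve both transfer types.
			 */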
3552 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3553 drvp->PIO_mode = drvp->DMA_mode + 2;
3554 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3555 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3556 drvp->PIO_mode - 2 : 0;
3557 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3558 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3559 } else {
3560 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3561 }
3562 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3563 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3564 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3565 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3566 }
3567 if (idedma_ctl != 0) {
3568 /* Add software bits in status register */
3569 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3570 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3571 idedma_ctl);
3572 }
3573 }
3574
3575 void
3576 sis_setup_channel(chp)
3577 struct channel_softc *chp;
3578 {
3579 struct ata_drive_datas *drvp;
3580 int drive;
3581 u_int32_t sis_tim;
3582 u_int32_t idedma_ctl;
3583 struct pciide_channel *cp = (struct pciide_channel*)chp;
3584 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3585
3586 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3587 "channel %d 0x%x\n", chp->channel,
3588 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3589 DEBUG_PROBE);
3590 sis_tim = 0;
3591 idedma_ctl = 0;
3592 /* setup DMA if needed */
3593 pciide_channel_dma_setup(cp);
3594
3595 for (drive = 0; drive < 2; drive++) {
3596 drvp = &chp->ch_drive[drive];
3597 /* If no drive, skip */
3598 if ((drvp->drive_flags & DRIVE) == 0)
3599 continue;
3600 /* add timing values, setup DMA if needed */
3601 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3602 (drvp->drive_flags & DRIVE_UDMA) == 0)
3603 goto pio;
3604
3605 if (drvp->drive_flags & DRIVE_UDMA) {
3606 /* use Ultra/DMA */
3607 drvp->drive_flags &= ~DRIVE_DMA;
3608 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3609 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3610 if (drvp->UDMA_mode > 2)
3611 drvp->UDMA_mode = 2;
3612 }
3613 switch (sc->sis_type) {
3614 case SIS_TYPE_66:
3615 case SIS_TYPE_100OLD:
3616 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3617 SIS_TIM66_UDMA_TIME_OFF(drive);
3618 break;
3619 case SIS_TYPE_100NEW:
3620 sis_tim |=
3621 sis_udma100new_tim[drvp->UDMA_mode] <<
3622 				    SIS_TIM100_UDMA_TIME_OFF(drive);
				break;
3623 case SIS_TYPE_133OLD:
3624 sis_tim |=
3625 sis_udma133old_tim[drvp->UDMA_mode] <<
3626 SIS_TIM100_UDMA_TIME_OFF(drive);
3627 break;
3628 default:
3629 aprint_error("unknown SiS IDE type %d\n",
3630 sc->sis_type);
3631 }
3632 } else {
3633 /*
3634 * use Multiword DMA
3635 * Timings will be used for both PIO and DMA,
3636 * so adjust DMA mode if needed
3637 */
3638 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3639 drvp->PIO_mode = drvp->DMA_mode + 2;
3640 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3641 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3642 drvp->PIO_mode - 2 : 0;
3643 if (drvp->DMA_mode == 0)
3644 drvp->PIO_mode = 0;
3645 }
3646 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3647 pio: switch (sc->sis_type) {
3648 case SIS_TYPE_NOUDMA:
3649 case SIS_TYPE_66:
3650 case SIS_TYPE_100OLD:
3651 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3652 SIS_TIM66_ACT_OFF(drive);
3653 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3654 SIS_TIM66_REC_OFF(drive);
3655 break;
3656 case SIS_TYPE_100NEW:
3657 case SIS_TYPE_133OLD:
3658 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3659 SIS_TIM100_ACT_OFF(drive);
3660 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3661 SIS_TIM100_REC_OFF(drive);
3662 break;
3663 default:
3664 aprint_error("unknown SiS IDE type %d\n",
3665 sc->sis_type);
3666 }
3667 }
3668 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3669 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3670 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3671 if (idedma_ctl != 0) {
3672 /* Add software bits in status register */
3673 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3674 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3675 idedma_ctl);
3676 }
3677 }
3678
3679 void
3680 acer_chip_map(sc, pa)
3681 struct pciide_softc *sc;
3682 struct pci_attach_args *pa;
3683 {
3684 struct pciide_channel *cp;
3685 int channel;
3686 pcireg_t cr, interface;
3687 bus_size_t cmdsize, ctlsize;
3688 pcireg_t rev = PCI_REVISION(pa->pa_class);
3689
3690 if (pciide_chipen(sc, pa) == 0)
3691 return;
3692
3693 aprint_normal("%s: bus-master DMA support present",
3694 sc->sc_wdcdev.sc_dev.dv_xname);
3695 pciide_mapreg_dma(sc, pa);
3696 aprint_normal("\n");
3697 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3698 WDC_CAPABILITY_MODE;
3699 if (sc->sc_dma_ok) {
3700 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3701 if (rev >= 0x20) {
3702 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3703 if (rev >= 0xC4)
3704 sc->sc_wdcdev.UDMA_cap = 5;
3705 else if (rev >= 0xC2)
3706 sc->sc_wdcdev.UDMA_cap = 4;
3707 else
3708 sc->sc_wdcdev.UDMA_cap = 2;
3709 }
3710 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3711 sc->sc_wdcdev.irqack = pciide_irqack;
3712 }
3713
3714 sc->sc_wdcdev.PIO_cap = 4;
3715 sc->sc_wdcdev.DMA_cap = 2;
3716 sc->sc_wdcdev.set_modes = acer_setup_channel;
3717 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3718 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3719
3720 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3721 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3722 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3723
3724 /* Enable "microsoft register bits" R/W. */
3725 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3726 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3727 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3728 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3729 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3730 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3731 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3732 ~ACER_CHANSTATUSREGS_RO);
3733 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3734 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3735 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3736 /* Don't use cr, re-read the real register content instead */
3737 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3738 PCI_CLASS_REG));
3739
3740 /* From linux: enable "Cable Detection" */
3741 if (rev >= 0xC2) {
3742 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3743 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3744 | ACER_0x4B_CDETECT);
3745 }
3746
3747 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3748 cp = &sc->pciide_channels[channel];
3749 if (pciide_chansetup(sc, channel, interface) == 0)
3750 continue;
3751 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3752 aprint_normal("%s: %s channel ignored (disabled)\n",
3753 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3754 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
3755 continue;
3756 }
3757 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3758 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3759 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3760 }
3761 }
3762
3763 void
3764 acer_setup_channel(chp)
3765 struct channel_softc *chp;
3766 {
3767 struct ata_drive_datas *drvp;
3768 int drive;
3769 u_int32_t acer_fifo_udma;
3770 u_int32_t idedma_ctl;
3771 struct pciide_channel *cp = (struct pciide_channel*)chp;
3772 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3773
3774 idedma_ctl = 0;
3775 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3776 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3777 acer_fifo_udma), DEBUG_PROBE);
3778 /* setup DMA if needed */
3779 pciide_channel_dma_setup(cp);
3780
3781 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3782 	    DRIVE_UDMA) { /* check for 80-conductor cable */
3783 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3784 ACER_0x4A_80PIN(chp->channel)) {
3785 if (chp->ch_drive[0].UDMA_mode > 2)
3786 chp->ch_drive[0].UDMA_mode = 2;
3787 if (chp->ch_drive[1].UDMA_mode > 2)
3788 chp->ch_drive[1].UDMA_mode = 2;
3789 }
3790 }
3791
3792 for (drive = 0; drive < 2; drive++) {
3793 drvp = &chp->ch_drive[drive];
3794 /* If no drive, skip */
3795 if ((drvp->drive_flags & DRIVE) == 0)
3796 continue;
3797 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3798 "channel %d drive %d 0x%x\n", chp->channel, drive,
3799 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3800 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3801 /* clear FIFO/DMA mode */
3802 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3803 ACER_UDMA_EN(chp->channel, drive) |
3804 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3805
3806 /* add timing values, setup DMA if needed */
3807 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3808 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3809 acer_fifo_udma |=
3810 ACER_FTH_OPL(chp->channel, drive, 0x1);
3811 goto pio;
3812 }
3813
3814 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3815 if (drvp->drive_flags & DRIVE_UDMA) {
3816 /* use Ultra/DMA */
3817 drvp->drive_flags &= ~DRIVE_DMA;
3818 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3819 acer_fifo_udma |=
3820 ACER_UDMA_TIM(chp->channel, drive,
3821 acer_udma[drvp->UDMA_mode]);
3822 /* XXX disable if one drive < UDMA3 ? */
3823 if (drvp->UDMA_mode >= 3) {
3824 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3825 ACER_0x4B,
3826 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3827 ACER_0x4B) | ACER_0x4B_UDMA66);
3828 }
3829 } else {
3830 /*
3831 * use Multiword DMA
3832 * Timings will be used for both PIO and DMA,
3833 * so adjust DMA mode if needed
3834 */
3835 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3836 drvp->PIO_mode = drvp->DMA_mode + 2;
3837 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3838 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3839 drvp->PIO_mode - 2 : 0;
3840 if (drvp->DMA_mode == 0)
3841 drvp->PIO_mode = 0;
3842 }
3843 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3844 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3845 ACER_IDETIM(chp->channel, drive),
3846 acer_pio[drvp->PIO_mode]);
3847 }
3848 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3849 acer_fifo_udma), DEBUG_PROBE);
3850 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3851 if (idedma_ctl != 0) {
3852 /* Add software bits in status register */
3853 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3854 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3855 idedma_ctl);
3856 }
3857 }
3858
3859 int
3860 acer_pci_intr(arg)
3861 void *arg;
3862 {
3863 struct pciide_softc *sc = arg;
3864 struct pciide_channel *cp;
3865 struct channel_softc *wdc_cp;
3866 int i, rv, crv;
3867 u_int32_t chids;
3868
3869 rv = 0;
3870 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3871 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3872 cp = &sc->pciide_channels[i];
3873 wdc_cp = &cp->wdc_channel;
3874 		/* If a compat channel, skip. */
3875 if (cp->compat)
3876 continue;
3877 if (chids & ACER_CHIDS_INT(i)) {
3878 crv = wdcintr(wdc_cp);
3879 if (crv == 0)
3880 printf("%s:%d: bogus intr\n",
3881 sc->sc_wdcdev.sc_dev.dv_xname, i);
3882 else
3883 rv = 1;
3884 }
3885 }
3886 return rv;
3887 }
3888
3889 void
3890 hpt_chip_map(sc, pa)
3891 struct pciide_softc *sc;
3892 struct pci_attach_args *pa;
3893 {
3894 struct pciide_channel *cp;
3895 int i, compatchan, revision;
3896 pcireg_t interface;
3897 bus_size_t cmdsize, ctlsize;
3898
3899 if (pciide_chipen(sc, pa) == 0)
3900 return;
3901
3902 revision = PCI_REVISION(pa->pa_class);
3903 aprint_normal("%s: Triones/Highpoint ",
3904 sc->sc_wdcdev.sc_dev.dv_xname);
3905 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3906 aprint_normal("HPT374 IDE Controller\n");
3907 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3908 aprint_normal("HPT372 IDE Controller\n");
3909 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3910 if (revision == HPT372_REV)
3911 aprint_normal("HPT372 IDE Controller\n");
3912 else if (revision == HPT370_REV)
3913 aprint_normal("HPT370 IDE Controller\n");
3914 else if (revision == HPT370A_REV)
3915 aprint_normal("HPT370A IDE Controller\n");
3916 else if (revision == HPT366_REV)
3917 aprint_normal("HPT366 IDE Controller\n");
3918 else
3919 aprint_normal("unknown HPT IDE controller rev %d\n",
3920 revision);
3921 } else
3922 aprint_normal("unknown HPT IDE controller 0x%x\n",
3923 sc->sc_pp->ide_product);
3924
3925 /*
3926 	 * When the chip is in native mode it identifies itself as a
3927 	 * 'misc mass storage' device. Fake the interface in this case.
3928 */
3929 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3930 interface = PCI_INTERFACE(pa->pa_class);
3931 } else {
3932 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3933 PCIIDE_INTERFACE_PCI(0);
3934 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3935 (revision == HPT370_REV || revision == HPT370A_REV ||
3936 revision == HPT372_REV)) ||
3937 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3938 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3939 interface |= PCIIDE_INTERFACE_PCI(1);
3940 }
3941
3942 aprint_normal("%s: bus-master DMA support present",
3943 sc->sc_wdcdev.sc_dev.dv_xname);
3944 pciide_mapreg_dma(sc, pa);
3945 aprint_normal("\n");
3946 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3947 WDC_CAPABILITY_MODE;
3948 if (sc->sc_dma_ok) {
3949 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3950 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3951 sc->sc_wdcdev.irqack = pciide_irqack;
3952 }
3953 sc->sc_wdcdev.PIO_cap = 4;
3954 sc->sc_wdcdev.DMA_cap = 2;
3955
3956 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3957 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3958 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3959 revision == HPT366_REV) {
3960 sc->sc_wdcdev.UDMA_cap = 4;
3961 /*
3962 * The 366 has 2 PCI IDE functions, one for primary and one
3963 * for secondary. So we need to call pciide_mapregs_compat()
3964 * with the real channel
3965 */
3966 if (pa->pa_function == 0) {
3967 compatchan = 0;
3968 } else if (pa->pa_function == 1) {
3969 compatchan = 1;
3970 } else {
3971 aprint_error("%s: unexpected PCI function %d\n",
3972 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3973 return;
3974 }
3975 sc->sc_wdcdev.nchannels = 1;
3976 } else {
3977 sc->sc_wdcdev.nchannels = 2;
3978 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3979 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3980 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3981 revision == HPT372_REV))
3982 sc->sc_wdcdev.UDMA_cap = 6;
3983 else
3984 sc->sc_wdcdev.UDMA_cap = 5;
3985 }
3986 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3987 cp = &sc->pciide_channels[i];
3988 if (sc->sc_wdcdev.nchannels > 1) {
3989 compatchan = i;
3990 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3991 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3992 aprint_normal(
3993 "%s: %s channel ignored (disabled)\n",
3994 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3995 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
3996 continue;
3997 }
3998 }
3999 if (pciide_chansetup(sc, i, interface) == 0)
4000 continue;
4001 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4002 pciide_mapregs_native(pa, cp, &cmdsize,
4003 &ctlsize, hpt_pci_intr);
4004 } else {
4005 pciide_mapregs_compat(pa, cp, compatchan,
4006 &cmdsize, &ctlsize);
4007 }
4008 }
4009 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4010 (revision == HPT370_REV || revision == HPT370A_REV ||
4011 revision == HPT372_REV)) ||
4012 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4013 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
4014 /*
4015 		 * HPT370_REV and higher have a bit to disable interrupts;
4016 		 * make sure to clear it.
4017 */
4018 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
4019 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
4020 ~HPT_CSEL_IRQDIS);
4021 }
4022 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
4023 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4024 revision == HPT372_REV ) ||
4025 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4026 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4027 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
4028 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
4029 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
4030 return;
4031 }
4032
4033 void
4034 hpt_setup_channel(chp)
4035 struct channel_softc *chp;
4036 {
4037 struct ata_drive_datas *drvp;
4038 int drive;
4039 int cable;
4040 u_int32_t before, after;
4041 u_int32_t idedma_ctl;
4042 struct pciide_channel *cp = (struct pciide_channel*)chp;
4043 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4044 int revision =
4045 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
4046
4047 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
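	/*
	 * HPT_CSEL has one cable-ID bit per channel; a set bit is
	 * treated below as "no 80-conductor cable", which caps UDMA
	 * at mode 2.
	 */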
4048
4049 /* setup DMA if needed */
4050 pciide_channel_dma_setup(cp);
4051
4052 idedma_ctl = 0;
4053
4054 /* Per drive settings */
4055 for (drive = 0; drive < 2; drive++) {
4056 drvp = &chp->ch_drive[drive];
4057 /* If no drive, skip */
4058 if ((drvp->drive_flags & DRIVE) == 0)
4059 continue;
4060 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
4061 HPT_IDETIM(chp->channel, drive));
4062
4063 /* add timing values, setup DMA if needed */
4064 if (drvp->drive_flags & DRIVE_UDMA) {
4065 /* use Ultra/DMA */
4066 drvp->drive_flags &= ~DRIVE_DMA;
4067 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
4068 drvp->UDMA_mode > 2)
4069 drvp->UDMA_mode = 2;
4070 switch (sc->sc_pp->ide_product) {
4071 case PCI_PRODUCT_TRIONES_HPT374:
4072 after = hpt374_udma[drvp->UDMA_mode];
4073 break;
4074 case PCI_PRODUCT_TRIONES_HPT372:
4075 after = hpt372_udma[drvp->UDMA_mode];
4076 break;
4077 case PCI_PRODUCT_TRIONES_HPT366:
4078 default:
4079 switch(revision) {
4080 case HPT372_REV:
4081 after = hpt372_udma[drvp->UDMA_mode];
4082 break;
4083 case HPT370_REV:
4084 case HPT370A_REV:
4085 after = hpt370_udma[drvp->UDMA_mode];
4086 break;
4087 case HPT366_REV:
4088 default:
4089 after = hpt366_udma[drvp->UDMA_mode];
4090 break;
4091 }
4092 }
4093 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4094 } else if (drvp->drive_flags & DRIVE_DMA) {
4095 /*
4096 * use Multiword DMA.
4097 * Timings will be used for both PIO and DMA, so adjust
4098 * DMA mode if needed
4099 */
4100 if (drvp->PIO_mode >= 3 &&
4101 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
4102 drvp->DMA_mode = drvp->PIO_mode - 2;
4103 }
4104 switch (sc->sc_pp->ide_product) {
4105 case PCI_PRODUCT_TRIONES_HPT374:
4106 after = hpt374_dma[drvp->DMA_mode];
4107 break;
4108 case PCI_PRODUCT_TRIONES_HPT372:
4109 after = hpt372_dma[drvp->DMA_mode];
4110 break;
4111 case PCI_PRODUCT_TRIONES_HPT366:
4112 default:
4113 switch(revision) {
4114 case HPT372_REV:
4115 after = hpt372_dma[drvp->DMA_mode];
4116 break;
4117 case HPT370_REV:
4118 case HPT370A_REV:
4119 after = hpt370_dma[drvp->DMA_mode];
4120 break;
4121 case HPT366_REV:
4122 default:
4123 after = hpt366_dma[drvp->DMA_mode];
4124 break;
4125 }
4126 }
4127 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4128 } else {
4129 /* PIO only */
4130 switch (sc->sc_pp->ide_product) {
4131 case PCI_PRODUCT_TRIONES_HPT374:
4132 after = hpt374_pio[drvp->PIO_mode];
4133 break;
4134 case PCI_PRODUCT_TRIONES_HPT372:
4135 after = hpt372_pio[drvp->PIO_mode];
4136 break;
4137 case PCI_PRODUCT_TRIONES_HPT366:
4138 default:
4139 switch(revision) {
4140 case HPT372_REV:
4141 after = hpt372_pio[drvp->PIO_mode];
4142 break;
4143 case HPT370_REV:
4144 case HPT370A_REV:
4145 after = hpt370_pio[drvp->PIO_mode];
4146 break;
4147 case HPT366_REV:
4148 default:
4149 after = hpt366_pio[drvp->PIO_mode];
4150 break;
4151 }
4152 }
4153 }
4154 pci_conf_write(sc->sc_pc, sc->sc_tag,
4155 HPT_IDETIM(chp->channel, drive), after);
4156 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
4157 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
4158 after, before), DEBUG_PROBE);
4159 }
4160 if (idedma_ctl != 0) {
4161 /* Add software bits in status register */
4162 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4163 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4164 idedma_ctl);
4165 }
4166 }
4167
4168 int
4169 hpt_pci_intr(arg)
4170 void *arg;
4171 {
4172 struct pciide_softc *sc = arg;
4173 struct pciide_channel *cp;
4174 struct channel_softc *wdc_cp;
4175 int rv = 0;
4176 int dmastat, i, crv;
4177
4178 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4179 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4180 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4181 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4182 IDEDMA_CTL_INTR)
4183 continue;
4184 cp = &sc->pciide_channels[i];
4185 wdc_cp = &cp->wdc_channel;
4186 crv = wdcintr(wdc_cp);
4187 if (crv == 0) {
4188 printf("%s:%d: bogus intr\n",
4189 sc->sc_wdcdev.sc_dev.dv_xname, i);
4190 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4191 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4192 } else
4193 rv = 1;
4194 }
4195 return rv;
4196 }
4197
4198
4199 /*
 * Macros to test the product: each macro matches one Promise chip
 * generation and everything newer that this driver handles.
 */
4200 #define PDC_IS_262(sc) \
4201 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
4202 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4203 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4204 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4205 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4206 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4207 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4208 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4209 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4210 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4211 #define PDC_IS_265(sc) \
4212 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4213 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4214 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4215 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4216 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4217 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4218 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4219 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4220 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4221 #define PDC_IS_268(sc) \
4222 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4223 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4224 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4225 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4226 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4227 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4228 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4229 #define PDC_IS_276(sc) \
4230 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4231 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4232 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4233 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4234 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4235
4236 void
4237 pdc202xx_chip_map(sc, pa)
4238 struct pciide_softc *sc;
4239 struct pci_attach_args *pa;
4240 {
4241 struct pciide_channel *cp;
4242 int channel;
4243 pcireg_t interface, st, mode;
4244 bus_size_t cmdsize, ctlsize;
4245
4246 if (!PDC_IS_268(sc)) {
4247 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4248 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
4249 st), DEBUG_PROBE);
4250 }
4251 if (pciide_chipen(sc, pa) == 0)
4252 return;
4253
4254 /* turn off RAID mode */
4255 if (!PDC_IS_268(sc))
4256 st &= ~PDC2xx_STATE_IDERAID;
4257
4258 /*
4259 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
4260 	 * RAID mode; we have to fake the interface.
4261 */
4262 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
4263 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
4264 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4265
4266 aprint_normal("%s: bus-master DMA support present",
4267 sc->sc_wdcdev.sc_dev.dv_xname);
4268 pciide_mapreg_dma(sc, pa);
4269 aprint_normal("\n");
4270 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4271 WDC_CAPABILITY_MODE;
4272 if (sc->sc_dma_ok) {
4273 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4274 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4275 sc->sc_wdcdev.irqack = pciide_irqack;
4276 }
4277 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
4278 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
4279 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
4280 sc->sc_wdcdev.PIO_cap = 4;
4281 sc->sc_wdcdev.DMA_cap = 2;
4282 if (PDC_IS_276(sc))
4283 sc->sc_wdcdev.UDMA_cap = 6;
4284 else if (PDC_IS_265(sc))
4285 sc->sc_wdcdev.UDMA_cap = 5;
4286 else if (PDC_IS_262(sc))
4287 sc->sc_wdcdev.UDMA_cap = 4;
4288 else
4289 sc->sc_wdcdev.UDMA_cap = 2;
4290 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
4291 pdc20268_setup_channel : pdc202xx_setup_channel;
4292 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4293 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4294
4295 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
4296 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
4297 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
4298 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
4299 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
4300 }
4301
4302 if (!PDC_IS_268(sc)) {
4303 /* setup failsafe defaults */
4304 mode = 0;
4305 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
4306 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
4307 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
4308 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
4309 for (channel = 0;
4310 channel < sc->sc_wdcdev.nchannels;
4311 channel++) {
4312 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4313 "drive 0 initial timings 0x%x, now 0x%x\n",
4314 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4315 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
4316 DEBUG_PROBE);
4317 pci_conf_write(sc->sc_pc, sc->sc_tag,
4318 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
4319 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4320 "drive 1 initial timings 0x%x, now 0x%x\n",
4321 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4322 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
4323 pci_conf_write(sc->sc_pc, sc->sc_tag,
4324 PDC2xx_TIM(channel, 1), mode);
4325 }
4326
4327 mode = PDC2xx_SCR_DMA;
4328 if (PDC_IS_265(sc)) {
4329 mode = PDC2xx_SCR_SET_GEN(mode, PDC265_SCR_GEN_LAT);
4330 } else if (PDC_IS_262(sc)) {
4331 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
4332 } else {
4333 /* the BIOS set it up this way */
4334 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
4335 }
4336 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
4337 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
4338 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
4339 "now 0x%x\n",
4340 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4341 PDC2xx_SCR),
4342 mode), DEBUG_PROBE);
4343 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4344 PDC2xx_SCR, mode);
4345
4346 /* controller initial state register is OK even without BIOS */
4347 /* Set DMA mode to IDE DMA compatibility */
4348 mode =
4349 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
4350 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
4351 DEBUG_PROBE);
4352 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
4353 mode | 0x1);
4354 mode =
4355 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
4356 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
4357 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
4358 mode | 0x1);
4359 }
4360
4361 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4362 cp = &sc->pciide_channels[channel];
4363 if (pciide_chansetup(sc, channel, interface) == 0)
4364 continue;
4365 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
4366 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
4367 aprint_normal("%s: %s channel ignored (disabled)\n",
4368 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4369 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
4370 continue;
4371 }
4372 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4373 PDC_IS_265(sc) ? pdc20265_pci_intr : pdc202xx_pci_intr);
4374 }
4375 if (!PDC_IS_268(sc)) {
4376 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4377 "0x%x\n", st), DEBUG_PROBE);
4378 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4379 }
4380 return;
4381 }
4382
4383 void
4384 pdc202xx_setup_channel(chp)
4385 struct channel_softc *chp;
4386 {
4387 struct ata_drive_datas *drvp;
4388 int drive;
4389 pcireg_t mode, st;
4390 u_int32_t idedma_ctl, scr, atapi;
4391 struct pciide_channel *cp = (struct pciide_channel*)chp;
4392 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4393 int channel = chp->channel;
4394
4395 /* setup DMA if needed */
4396 pciide_channel_dma_setup(cp);
4397
4398 idedma_ctl = 0;
4399 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4400 sc->sc_wdcdev.sc_dev.dv_xname,
4401 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4402 DEBUG_PROBE);
4403
4404 /* Per channel settings */
4405 if (PDC_IS_262(sc)) {
4406 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4407 PDC262_U66);
4408 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4409 /* Trim UDMA mode */
4410 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4411 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4412 chp->ch_drive[0].UDMA_mode <= 2) ||
4413 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4414 chp->ch_drive[1].UDMA_mode <= 2)) {
4415 if (chp->ch_drive[0].UDMA_mode > 2)
4416 chp->ch_drive[0].UDMA_mode = 2;
4417 if (chp->ch_drive[1].UDMA_mode > 2)
4418 chp->ch_drive[1].UDMA_mode = 2;
4419 }
4420 /* Set U66 if needed */
4421 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4422 chp->ch_drive[0].UDMA_mode > 2) ||
4423 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4424 chp->ch_drive[1].UDMA_mode > 2))
4425 scr |= PDC262_U66_EN(channel);
4426 else
4427 scr &= ~PDC262_U66_EN(channel);
4428 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4429 PDC262_U66, scr);
4430 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4431 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4432 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4433 PDC262_ATAPI(channel))), DEBUG_PROBE);
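		/*
		 * With an ATAPI device on the channel, enable UDMA in the
		 * ATAPI register unless one drive uses UDMA while the
		 * other uses plain multiword DMA; the two apparently
		 * can't be mixed with PDC262_ATAPI_UDMA set.
		 */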
4434 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4435 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4436 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4437 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4438 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4439 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4440 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4441 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4442 atapi = 0;
4443 else
4444 atapi = PDC262_ATAPI_UDMA;
4445 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4446 PDC262_ATAPI(channel), atapi);
4447 }
4448 }
4449 for (drive = 0; drive < 2; drive++) {
4450 drvp = &chp->ch_drive[drive];
4451 /* If no drive, skip */
4452 if ((drvp->drive_flags & DRIVE) == 0)
4453 continue;
4454 mode = 0;
4455 if (drvp->drive_flags & DRIVE_UDMA) {
4456 /* use Ultra/DMA */
4457 drvp->drive_flags &= ~DRIVE_DMA;
4458 mode = PDC2xx_TIM_SET_MB(mode,
4459 pdc2xx_udma_mb[drvp->UDMA_mode]);
4460 mode = PDC2xx_TIM_SET_MC(mode,
4461 pdc2xx_udma_mc[drvp->UDMA_mode]);
4462 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4463 } else if (drvp->drive_flags & DRIVE_DMA) {
4464 mode = PDC2xx_TIM_SET_MB(mode,
4465 pdc2xx_dma_mb[drvp->DMA_mode]);
4466 mode = PDC2xx_TIM_SET_MC(mode,
4467 pdc2xx_dma_mc[drvp->DMA_mode]);
4468 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4469 } else {
4470 mode = PDC2xx_TIM_SET_MB(mode,
4471 pdc2xx_dma_mb[0]);
4472 mode = PDC2xx_TIM_SET_MC(mode,
4473 pdc2xx_dma_mc[0]);
4474 }
4475 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4476 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4477 if (drvp->drive_flags & DRIVE_ATA)
4478 mode |= PDC2xx_TIM_PRE;
4479 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4480 if (drvp->PIO_mode >= 3) {
4481 mode |= PDC2xx_TIM_IORDY;
4482 if (drive == 0)
4483 mode |= PDC2xx_TIM_IORDYp;
4484 }
4485 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4486 "timings 0x%x\n",
4487 sc->sc_wdcdev.sc_dev.dv_xname,
4488 chp->channel, drive, mode), DEBUG_PROBE);
4489 pci_conf_write(sc->sc_pc, sc->sc_tag,
4490 PDC2xx_TIM(chp->channel, drive), mode);
4491 }
4492 if (idedma_ctl != 0) {
4493 /* Add software bits in status register */
4494 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4495 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4496 idedma_ctl);
4497 }
4498 }
4499
4500 void
4501 pdc20268_setup_channel(chp)
4502 struct channel_softc *chp;
4503 {
4504 struct ata_drive_datas *drvp;
4505 int drive;
4506 u_int32_t idedma_ctl;
4507 struct pciide_channel *cp = (struct pciide_channel*)chp;
4508 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4509 int u100;
4510
4511 /* setup DMA if needed */
4512 pciide_channel_dma_setup(cp);
4513
4514 idedma_ctl = 0;
4515
4516 /* I don't know what this is for, FreeBSD does it ... */
4517 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4518 IDEDMA_CMD + 0x1 + IDEDMA_SCH_OFFSET * chp->channel, 0x0b);
4519
4520 /*
4521 * cable type detect, from FreeBSD
4522 */
4523 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4524 IDEDMA_CMD + 0x3 + IDEDMA_SCH_OFFSET * chp->channel) & 0x04) ?
4525 0 : 1;
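	/*
	 * A clear 0x04 bit is taken to mean an 80-conductor cable is
	 * present (u100 == 1); otherwise UDMA is limited to mode 2 below.
	 */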
4526
4527 for (drive = 0; drive < 2; drive++) {
4528 drvp = &chp->ch_drive[drive];
4529 /* If no drive, skip */
4530 if ((drvp->drive_flags & DRIVE) == 0)
4531 continue;
4532 if (drvp->drive_flags & DRIVE_UDMA) {
4533 /* use Ultra/DMA */
4534 drvp->drive_flags &= ~DRIVE_DMA;
4535 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4536 if (drvp->UDMA_mode > 2 && u100 == 0)
4537 drvp->UDMA_mode = 2;
4538 } else if (drvp->drive_flags & DRIVE_DMA) {
4539 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4540 }
4541 }
4542 	/* nothing to do to set up modes; the controller snoops the SET_FEATURES cmd */
4543 if (idedma_ctl != 0) {
4544 /* Add software bits in status register */
4545 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4546 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4547 idedma_ctl);
4548 }
4549 }
4550
4551 int
4552 pdc202xx_pci_intr(arg)
4553 void *arg;
4554 {
4555 struct pciide_softc *sc = arg;
4556 struct pciide_channel *cp;
4557 struct channel_softc *wdc_cp;
4558 int i, rv, crv;
4559 u_int32_t scr;
4560
4561 rv = 0;
4562 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4563 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4564 cp = &sc->pciide_channels[i];
4565 wdc_cp = &cp->wdc_channel;
4566 		/* If a compat channel, skip. */
4567 if (cp->compat)
4568 continue;
4569 if (scr & PDC2xx_SCR_INT(i)) {
4570 crv = wdcintr(wdc_cp);
4571 if (crv == 0)
4572 printf("%s:%d: bogus intr (reg 0x%x)\n",
4573 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4574 else
4575 rv = 1;
4576 }
4577 }
4578 return rv;
4579 }
4580
4581 int
4582 pdc20265_pci_intr(arg)
4583 void *arg;
4584 {
4585 struct pciide_softc *sc = arg;
4586 struct pciide_channel *cp;
4587 struct channel_softc *wdc_cp;
4588 int i, rv, crv;
4589 u_int32_t dmastat;
4590
4591 rv = 0;
4592 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4593 cp = &sc->pciide_channels[i];
4594 wdc_cp = &cp->wdc_channel;
4595 		/* If a compat channel, skip. */
4596 if (cp->compat)
4597 continue;
4598 /*
4599 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
4600 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
4601 * So use it instead (requires 2 reg reads instead of 1,
4602 * but we can't do it another way).
4603 */
4604 dmastat = bus_space_read_1(sc->sc_dma_iot,
4605 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4606 if((dmastat & IDEDMA_CTL_INTR) == 0)
4607 continue;
4608 crv = wdcintr(wdc_cp);
4609 if (crv == 0)
4610 printf("%s:%d: bogus intr\n",
4611 sc->sc_wdcdev.sc_dev.dv_xname, i);
4612 else
4613 rv = 1;
4614 }
4615 return rv;
4616 }
4617
4618 static void
4619 pdc20262_dma_start(v, channel, drive)
4620 void *v;
4621 int channel, drive;
4622 {
4623 struct pciide_softc *sc = v;
4624 struct pciide_dma_maps *dma_maps =
4625 &sc->pciide_channels[channel].dma_maps[drive];
4626 int atapi;
4627
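	/*
	 * For LBA48 transfers the ATAPI register is loaded with the
	 * LBA48 read/write opcode plus the transfer size in 16-bit
	 * words before the DMA engine is started.
	 */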
4628 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4629 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4630 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4631 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4632 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4633 PDC262_ATAPI(channel), atapi);
4634 }
4635
4636 pciide_dma_start(v, channel, drive);
4637 }
4638
4639 int
4640 pdc20262_dma_finish(v, channel, drive, force)
4641 void *v;
4642 int channel, drive;
4643 int force;
4644 {
4645 struct pciide_softc *sc = v;
4646 struct pciide_dma_maps *dma_maps =
4647 &sc->pciide_channels[channel].dma_maps[drive];
4648 struct channel_softc *chp;
4649 int atapi, error;
4650
4651 error = pciide_dma_finish(v, channel, drive, force);
4652
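	/*
	 * pdc20262_dma_start() overwrote the ATAPI register for LBA48
	 * transfers; put back the channel's ATAPI/UDMA setting, using
	 * the same decision rule as pdc202xx_setup_channel().
	 */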
4653 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4654 chp = sc->wdc_chanarray[channel];
4655 atapi = 0;
4656 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4657 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4658 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4659 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4660 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4661 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4662 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4663 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4664 atapi = PDC262_ATAPI_UDMA;
4665 }
4666 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4667 PDC262_ATAPI(channel), atapi);
4668 }
4669
4670 return error;
4671 }
4672
4673 void
4674 opti_chip_map(sc, pa)
4675 struct pciide_softc *sc;
4676 struct pci_attach_args *pa;
4677 {
4678 struct pciide_channel *cp;
4679 bus_size_t cmdsize, ctlsize;
4680 pcireg_t interface;
4681 u_int8_t init_ctrl;
4682 int channel;
4683
4684 if (pciide_chipen(sc, pa) == 0)
4685 return;
4686
4687 aprint_normal("%s: bus-master DMA support present",
4688 sc->sc_wdcdev.sc_dev.dv_xname);
4689
4690 /*
4691 * XXXSCW:
4692 * There seem to be a couple of buggy revisions/implementations
4693 * of the OPTi pciide chipset. This kludge seems to fix one of
4694 * the reported problems (PR/11644) but still fails for the
4695 * other (PR/13151), although the latter may be due to other
4696 * issues too...
4697 */
4698 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4699 aprint_normal(" but disabled due to chip rev. <= 0x12");
4700 sc->sc_dma_ok = 0;
4701 } else
4702 pciide_mapreg_dma(sc, pa);
4703
4704 aprint_normal("\n");
4705
4706 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4707 WDC_CAPABILITY_MODE;
4708 sc->sc_wdcdev.PIO_cap = 4;
4709 if (sc->sc_dma_ok) {
4710 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4711 sc->sc_wdcdev.irqack = pciide_irqack;
4712 sc->sc_wdcdev.DMA_cap = 2;
4713 }
4714 sc->sc_wdcdev.set_modes = opti_setup_channel;
4715
4716 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4717 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4718
4719 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4720 OPTI_REG_INIT_CONTROL);
4721
4722 interface = PCI_INTERFACE(pa->pa_class);
4723
4724 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4725 cp = &sc->pciide_channels[channel];
4726 if (pciide_chansetup(sc, channel, interface) == 0)
4727 continue;
4728 if (channel == 1 &&
4729 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4730 aprint_normal("%s: %s channel ignored (disabled)\n",
4731 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4732 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
4733 continue;
4734 }
4735 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4736 pciide_pci_intr);
4737 }
4738 }
4739
4740 void
4741 opti_setup_channel(chp)
4742 struct channel_softc *chp;
4743 {
4744 struct ata_drive_datas *drvp;
4745 struct pciide_channel *cp = (struct pciide_channel*)chp;
4746 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4747 int drive, spd;
4748 int mode[2];
4749 u_int8_t rv, mr;
4750
4751 /*
4752 * The `Delay' and `Address Setup Time' fields of the
4753 * Miscellaneous Register are always zero initially.
4754 */
4755 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4756 mr &= ~(OPTI_MISC_DELAY_MASK |
4757 OPTI_MISC_ADDR_SETUP_MASK |
4758 OPTI_MISC_INDEX_MASK);
4759
4760 /* Prime the control register before setting timing values */
4761 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4762
4763 	/* Determine the clock rate of the PCI bus the chip is attached to */
4764 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4765 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4766
4767 /* setup DMA if needed */
4768 pciide_channel_dma_setup(cp);
4769
4770 for (drive = 0; drive < 2; drive++) {
4771 drvp = &chp->ch_drive[drive];
4772 /* If no drive, skip */
4773 if ((drvp->drive_flags & DRIVE) == 0) {
4774 mode[drive] = -1;
4775 continue;
4776 }
4777
4778 if ((drvp->drive_flags & DRIVE_DMA)) {
4779 /*
4780 * Timings will be used for both PIO and DMA,
4781 * so adjust DMA mode if needed
4782 */
4783 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4784 drvp->PIO_mode = drvp->DMA_mode + 2;
4785 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4786 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4787 drvp->PIO_mode - 2 : 0;
4788 if (drvp->DMA_mode == 0)
4789 drvp->PIO_mode = 0;
4790
4791 mode[drive] = drvp->DMA_mode + 5;
4792 } else
4793 mode[drive] = drvp->PIO_mode;
4794
4795 if (drive && mode[0] >= 0 &&
4796 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4797 /*
4798 * Can't have two drives using different values
4799 * for `Address Setup Time'.
4800 * Slow down the faster drive to compensate.
4801 */
4802 int d = (opti_tim_as[spd][mode[0]] >
4803 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4804
4805 mode[d] = mode[1-d];
4806 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4807 chp->ch_drive[d].DMA_mode = 0;
4808 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4809 }
4810 }
4811
4812 for (drive = 0; drive < 2; drive++) {
4813 int m;
4814 if ((m = mode[drive]) < 0)
4815 continue;
4816
4817 /* Set the Address Setup Time and select appropriate index */
4818 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4819 rv |= OPTI_MISC_INDEX(drive);
4820 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4821
4822 /* Set the pulse width and recovery timing parameters */
4823 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4824 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4825 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4826 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4827
4828 /* Set the Enhanced Mode register appropriately */
4829 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4830 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4831 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4832 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4833 }
4834
4835 /* Finally, enable the timings */
4836 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4837 }
4838
4839 #define ACARD_IS_850(sc) \
4840 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4841
4842 void
4843 acard_chip_map(sc, pa)
4844 struct pciide_softc *sc;
4845 struct pci_attach_args *pa;
4846 {
4847 struct pciide_channel *cp;
4848 int i;
4849 pcireg_t interface;
4850 bus_size_t cmdsize, ctlsize;
4851
4852 if (pciide_chipen(sc, pa) == 0)
4853 return;
4854
4855 /*
4856 	 * When the chip is in native mode it identifies itself as a
4857 	 * 'misc mass storage' device. Fake the interface in this case.
4858 */
4859 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4860 interface = PCI_INTERFACE(pa->pa_class);
4861 } else {
4862 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4863 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4864 }
4865
4866 aprint_normal("%s: bus-master DMA support present",
4867 sc->sc_wdcdev.sc_dev.dv_xname);
4868 pciide_mapreg_dma(sc, pa);
4869 aprint_normal("\n");
4870 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4871 WDC_CAPABILITY_MODE;
4872
4873 if (sc->sc_dma_ok) {
4874 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4875 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4876 sc->sc_wdcdev.irqack = pciide_irqack;
4877 }
4878 sc->sc_wdcdev.PIO_cap = 4;
4879 sc->sc_wdcdev.DMA_cap = 2;
4880 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4881
4882 sc->sc_wdcdev.set_modes = acard_setup_channel;
4883 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4884 sc->sc_wdcdev.nchannels = 2;
4885
4886 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4887 cp = &sc->pciide_channels[i];
4888 if (pciide_chansetup(sc, i, interface) == 0)
4889 continue;
4890 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4891 pciide_pci_intr);
4892 }
4893 if (!ACARD_IS_850(sc)) {
4894 u_int32_t reg;
4895 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4896 reg &= ~ATP860_CTRL_INT;
4897 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4898 }
4899 }
4900
4901 void
4902 acard_setup_channel(chp)
4903 struct channel_softc *chp;
4904 {
4905 struct ata_drive_datas *drvp;
4906 struct pciide_channel *cp = (struct pciide_channel*)chp;
4907 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4908 int channel = chp->channel;
4909 int drive;
4910 u_int32_t idetime, udma_mode;
4911 u_int32_t idedma_ctl;
4912
4913 /* setup DMA if needed */
4914 pciide_channel_dma_setup(cp);
4915
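	/*
	 * The ATP850 uses a timing register per channel plus one shared
	 * UDMA register, while the 860 and later pack both channels into
	 * single IDETIME and UDMA registers; hence the two code paths
	 * here and when the values are written back below.
	 */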
4916 if (ACARD_IS_850(sc)) {
4917 idetime = 0;
4918 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4919 udma_mode &= ~ATP850_UDMA_MASK(channel);
4920 } else {
4921 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4922 idetime &= ~ATP860_SETTIME_MASK(channel);
4923 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4924 udma_mode &= ~ATP860_UDMA_MASK(channel);
4925
4926 		/* check for 80-conductor cable */
4927 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4928 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4929 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4930 & ATP860_CTRL_80P(chp->channel)) {
4931 if (chp->ch_drive[0].UDMA_mode > 2)
4932 chp->ch_drive[0].UDMA_mode = 2;
4933 if (chp->ch_drive[1].UDMA_mode > 2)
4934 chp->ch_drive[1].UDMA_mode = 2;
4935 }
4936 }
4937 }
4938
4939 idedma_ctl = 0;
4940
4941 /* Per drive settings */
4942 for (drive = 0; drive < 2; drive++) {
4943 drvp = &chp->ch_drive[drive];
4944 /* If no drive, skip */
4945 if ((drvp->drive_flags & DRIVE) == 0)
4946 continue;
4947 /* add timing values, setup DMA if needed */
4948 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4949 (drvp->drive_flags & DRIVE_UDMA)) {
4950 /* use Ultra/DMA */
4951 if (ACARD_IS_850(sc)) {
4952 idetime |= ATP850_SETTIME(drive,
4953 acard_act_udma[drvp->UDMA_mode],
4954 acard_rec_udma[drvp->UDMA_mode]);
4955 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4956 acard_udma_conf[drvp->UDMA_mode]);
4957 } else {
4958 idetime |= ATP860_SETTIME(channel, drive,
4959 acard_act_udma[drvp->UDMA_mode],
4960 acard_rec_udma[drvp->UDMA_mode]);
4961 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4962 acard_udma_conf[drvp->UDMA_mode]);
4963 }
4964 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4965 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4966 (drvp->drive_flags & DRIVE_DMA)) {
4967 /* use Multiword DMA */
4968 drvp->drive_flags &= ~DRIVE_UDMA;
4969 if (ACARD_IS_850(sc)) {
4970 idetime |= ATP850_SETTIME(drive,
4971 acard_act_dma[drvp->DMA_mode],
4972 acard_rec_dma[drvp->DMA_mode]);
4973 } else {
4974 idetime |= ATP860_SETTIME(channel, drive,
4975 acard_act_dma[drvp->DMA_mode],
4976 acard_rec_dma[drvp->DMA_mode]);
4977 }
4978 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4979 } else {
4980 /* PIO only */
4981 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4982 if (ACARD_IS_850(sc)) {
4983 idetime |= ATP850_SETTIME(drive,
4984 acard_act_pio[drvp->PIO_mode],
4985 acard_rec_pio[drvp->PIO_mode]);
4986 } else {
4987 idetime |= ATP860_SETTIME(channel, drive,
4988 acard_act_pio[drvp->PIO_mode],
4989 acard_rec_pio[drvp->PIO_mode]);
4990 }
4991 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4992 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4993 | ATP8x0_CTRL_EN(channel));
4994 }
4995 }
4996
4997 if (idedma_ctl != 0) {
4998 /* Add software bits in status register */
4999 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5000 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5001 }
5002
5003 if (ACARD_IS_850(sc)) {
5004 pci_conf_write(sc->sc_pc, sc->sc_tag,
5005 ATP850_IDETIME(channel), idetime);
5006 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
5007 } else {
5008 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
5009 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
5010 }
5011 }
5012
5013 int
5014 acard_pci_intr(arg)
5015 void *arg;
5016 {
5017 struct pciide_softc *sc = arg;
5018 struct pciide_channel *cp;
5019 struct channel_softc *wdc_cp;
5020 int rv = 0;
5021 int dmastat, i, crv;
5022
5023 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5024 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5025 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5026 if ((dmastat & IDEDMA_CTL_INTR) == 0)
5027 continue;
5028 cp = &sc->pciide_channels[i];
5029 wdc_cp = &cp->wdc_channel;
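		/*
		 * If no command is waiting for an interrupt on this
		 * channel, run wdcintr() anyway and ack the bit in
		 * IDEDMA_CTL, presumably so a stray interrupt doesn't
		 * stay asserted.
		 */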
5030 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
5031 (void)wdcintr(wdc_cp);
5032 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5033 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5034 continue;
5035 }
5036 crv = wdcintr(wdc_cp);
5037 if (crv == 0)
5038 printf("%s:%d: bogus intr\n",
5039 sc->sc_wdcdev.sc_dev.dv_xname, i);
5040 else if (crv == 1)
5041 rv = 1;
5042 else if (rv == 0)
5043 rv = crv;
5044 }
5045 return rv;
5046 }
5047
5048 static int
5049 sl82c105_bugchk(struct pci_attach_args *pa)
5050 {
5051
5052 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
5053 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
5054 return (0);
5055
5056 if (PCI_REVISION(pa->pa_class) <= 0x05)
5057 return (1);
5058
5059 return (0);
5060 }
5061
5062 void
5063 sl82c105_chip_map(sc, pa)
5064 struct pciide_softc *sc;
5065 struct pci_attach_args *pa;
5066 {
5067 struct pciide_channel *cp;
5068 bus_size_t cmdsize, ctlsize;
5069 pcireg_t interface, idecr;
5070 int channel;
5071
5072 if (pciide_chipen(sc, pa) == 0)
5073 return;
5074
5075 aprint_normal("%s: bus-master DMA support present",
5076 sc->sc_wdcdev.sc_dev.dv_xname);
5077
5078 /*
5079 * Check to see if we're part of the Winbond 83c553 Southbridge.
5080 * If so, we need to disable DMA on rev. <= 5 of that chip.
5081 */
5082 if (pci_find_device(pa, sl82c105_bugchk)) {
5083 aprint_normal(" but disabled due to 83c553 rev. <= 0x05");
5084 sc->sc_dma_ok = 0;
5085 } else
5086 pciide_mapreg_dma(sc, pa);
5087 aprint_normal("\n");
5088
5089 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
5090 WDC_CAPABILITY_MODE;
5091 sc->sc_wdcdev.PIO_cap = 4;
5092 if (sc->sc_dma_ok) {
5093 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
5094 sc->sc_wdcdev.irqack = pciide_irqack;
5095 sc->sc_wdcdev.DMA_cap = 2;
5096 }
5097 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
5098
5099 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5100 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5101
5102 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
5103
5104 interface = PCI_INTERFACE(pa->pa_class);
5105
5106 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5107 cp = &sc->pciide_channels[channel];
5108 if (pciide_chansetup(sc, channel, interface) == 0)
5109 continue;
5110 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
5111 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
5112 aprint_normal("%s: %s channel ignored (disabled)\n",
5113 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
5114 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
5115 continue;
5116 }
5117 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5118 pciide_pci_intr);
5119 }
5120 }
5121
5122 void
5123 sl82c105_setup_channel(chp)
5124 struct channel_softc *chp;
5125 {
5126 struct ata_drive_datas *drvp;
5127 struct pciide_channel *cp = (struct pciide_channel*)chp;
5128 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5129 int pxdx_reg, drive;
5130 pcireg_t pxdx;
5131
5132 /* Set up DMA if needed. */
5133 pciide_channel_dma_setup(cp);
5134
5135 for (drive = 0; drive < 2; drive++) {
5136 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
5137 : SYMPH_P1D0CR) + (drive * 4);
5138
5139 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
5140
5141 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
5142 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
5143
5144 drvp = &chp->ch_drive[drive];
5145 /* If no drive, skip. */
5146 if ((drvp->drive_flags & DRIVE) == 0) {
5147 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5148 continue;
5149 }
5150
5151 if (drvp->drive_flags & DRIVE_DMA) {
5152 /*
5153 * Timings will be used for both PIO and DMA,
5154 * so adjust DMA mode if needed.
5155 */
5156 if (drvp->PIO_mode >= 3) {
5157 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
5158 drvp->DMA_mode = drvp->PIO_mode - 2;
5159 if (drvp->DMA_mode < 1) {
5160 /*
5161 * Can't mix both PIO and DMA.
5162 * Disable DMA.
5163 */
5164 drvp->drive_flags &= ~DRIVE_DMA;
5165 }
5166 } else {
5167 /*
5168 * Can't mix both PIO and DMA. Disable
5169 * DMA.
5170 */
5171 drvp->drive_flags &= ~DRIVE_DMA;
5172 }
5173 }
5174
5175 if (drvp->drive_flags & DRIVE_DMA) {
5176 /* Use multi-word DMA. */
5177 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
5178 PxDx_CMD_ON_SHIFT;
5179 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
5180 } else {
5181 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
5182 PxDx_CMD_ON_SHIFT;
5183 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
5184 }
5185
5186 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
5187
5188 /* ...and set the mode for this drive. */
5189 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5190 }
5191 }
5192
5193 void
5194 serverworks_chip_map(sc, pa)
5195 struct pciide_softc *sc;
5196 struct pci_attach_args *pa;
5197 {
5198 struct pciide_channel *cp;
5199 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
5200 pcitag_t pcib_tag;
5201 int channel;
5202 bus_size_t cmdsize, ctlsize;
5203
5204 if (pciide_chipen(sc, pa) == 0)
5205 return;
5206
5207 aprint_normal("%s: bus-master DMA support present",
5208 sc->sc_wdcdev.sc_dev.dv_xname);
5209 pciide_mapreg_dma(sc, pa);
5210 aprint_normal("\n");
5211 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5212 WDC_CAPABILITY_MODE;
5213
5214 if (sc->sc_dma_ok) {
5215 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5216 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5217 sc->sc_wdcdev.irqack = pciide_irqack;
5218 }
5219 sc->sc_wdcdev.PIO_cap = 4;
5220 sc->sc_wdcdev.DMA_cap = 2;
5221 switch (sc->sc_pp->ide_product) {
5222 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
5223 sc->sc_wdcdev.UDMA_cap = 2;
5224 break;
5225 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
5226 if (PCI_REVISION(pa->pa_class) < 0x92)
5227 sc->sc_wdcdev.UDMA_cap = 4;
5228 else
5229 sc->sc_wdcdev.UDMA_cap = 5;
5230 break;
5231 case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
5232 sc->sc_wdcdev.UDMA_cap = 5;
5233 break;
5234 }
5235
5236 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
5237 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5238 sc->sc_wdcdev.nchannels = 2;
5239
5240 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5241 cp = &sc->pciide_channels[channel];
5242 if (pciide_chansetup(sc, channel, interface) == 0)
5243 continue;
5244 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5245 serverworks_pci_intr);
5246 }
5247
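	/*
	 * Read-modify-write config register 0x64 of function 0 of this
	 * device: clear bit 13 (0x2000) and set bit 14 (0x4000).  What
	 * these bits do is not documented here.
	 */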
5248 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
5249 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
5250 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
5251 }
5252
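/*
 * Program one channel's worth of the ServerWorks timing/mode registers:
 * PIO timing at config offset 0x40, MW DMA timing at 0x44, PIO mode at
 * 0x48 (skipped on the OSB4) and Ultra/DMA mode at 0x54.
 */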
5253 void
5254 serverworks_setup_channel(chp)
5255 struct channel_softc *chp;
5256 {
5257 struct ata_drive_datas *drvp;
5258 struct pciide_channel *cp = (struct pciide_channel*)chp;
5259 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5260 int channel = chp->channel;
5261 int drive, unit;
5262 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
5263 u_int32_t idedma_ctl;
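	/*
	 * Timing bytes written to the PIO (0x40) and MW DMA (0x44) timing
	 * registers, indexed by mode.
	 */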
5264 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
5265 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
5266
5267 	/* Set up DMA if needed. */
5268 pciide_channel_dma_setup(cp);
5269
5270 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
5271 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
5272 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
5273 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
5274
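	/* Clear this channel's fields before merging in per-drive values. */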
5275 pio_time &= ~(0xffff << (16 * channel));
5276 dma_time &= ~(0xffff << (16 * channel));
5277 pio_mode &= ~(0xff << (8 * channel + 16));
5278 udma_mode &= ~(0xff << (8 * channel + 16));
5279 udma_mode &= ~(3 << (2 * channel));
5280
5281 idedma_ctl = 0;
5282
5283 /* Per drive settings */
5284 for (drive = 0; drive < 2; drive++) {
5285 drvp = &chp->ch_drive[drive];
5286 		/* If no drive, skip. */
5287 if ((drvp->drive_flags & DRIVE) == 0)
5288 continue;
5289 unit = drive + 2 * channel;
5290 		/* Add timing values; set up DMA if needed. */
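		/*
		 * Within each channel's 16-bit timing field the two drive
		 * bytes are stored swapped (drive 0 in the upper byte),
		 * hence the unit ^ 1 below.
		 */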
5291 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
5292 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
5293 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5294 (drvp->drive_flags & DRIVE_UDMA)) {
5295 /* use Ultra/DMA, check for 80-pin cable */
5296 if (drvp->UDMA_mode > 2 &&
5297 			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
5298 drvp->UDMA_mode = 2;
5299 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5300 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
5301 udma_mode |= 1 << unit;
5302 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5303 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5304 (drvp->drive_flags & DRIVE_DMA)) {
5305 /* use Multiword DMA */
5306 drvp->drive_flags &= ~DRIVE_UDMA;
5307 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5308 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5309 } else {
5310 /* PIO only */
5311 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5312 }
5313 }
5314
5315 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
5316 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
5317 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
5318 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
5319 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
5320
5321 if (idedma_ctl != 0) {
5322 /* Add software bits in status register */
5323 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5324 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5325 }
5326 }
5327
5328 int
5329 serverworks_pci_intr(arg)
5330 void *arg;
5331 {
5332 struct pciide_softc *sc = arg;
5333 struct pciide_channel *cp;
5334 struct channel_softc *wdc_cp;
5335 int rv = 0;
5336 int dmastat, i, crv;
5337
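	/*
	 * Check each channel's bus-master DMA status: only claim the
	 * interrupt when the controller flags an interrupt and the DMA
	 * engine is no longer active, then pass it on to wdcintr().
	 */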
5338 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5339 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5340 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5341 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
5342 IDEDMA_CTL_INTR)
5343 continue;
5344 cp = &sc->pciide_channels[i];
5345 wdc_cp = &cp->wdc_channel;
5346 crv = wdcintr(wdc_cp);
5347 if (crv == 0) {
5348 printf("%s:%d: bogus intr\n",
5349 sc->sc_wdcdev.sc_dev.dv_xname, i);
5350 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5351 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5352 } else
5353 rv = 1;
5354 }
5355 return rv;
5356 }
5357
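/*
 * Intel 31244 ("Artisea") serial ATA controller, handled like a plain
 * PCI IDE controller.  Bus-master DMA stays disabled on revision 0
 * parts unless PCIIDE_I31244_ENABLEDMA is defined.
 */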
5358 void
5359 artisea_chip_map(sc, pa)
5360 struct pciide_softc *sc;
5361 struct pci_attach_args *pa;
5362 {
5363 struct pciide_channel *cp;
5364 bus_size_t cmdsize, ctlsize;
5365 pcireg_t interface;
5366 int channel;
5367
5368 if (pciide_chipen(sc, pa) == 0)
5369 return;
5370
5371 aprint_normal("%s: bus-master DMA support present",
5372 sc->sc_wdcdev.sc_dev.dv_xname);
5373 #ifndef PCIIDE_I31244_ENABLEDMA
5374 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_31244 &&
5375 PCI_REVISION(pa->pa_class) == 0) {
5376 aprint_normal(" but disabled due to rev. 0");
5377 sc->sc_dma_ok = 0;
5378 } else
5379 #endif
5380 pciide_mapreg_dma(sc, pa);
5381 aprint_normal("\n");
5382
5383 /*
5384 * XXX Configure LEDs to show activity.
5385 */
5386
5387 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5388 WDC_CAPABILITY_MODE;
5389 sc->sc_wdcdev.PIO_cap = 4;
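	/*
	 * With working DMA, advertise MW DMA mode 2 and Ultra/DMA mode 6;
	 * mode selection itself is left to the generic SATA setup routine.
	 */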
5390 if (sc->sc_dma_ok) {
5391 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5392 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5393 sc->sc_wdcdev.irqack = pciide_irqack;
5394 sc->sc_wdcdev.DMA_cap = 2;
5395 sc->sc_wdcdev.UDMA_cap = 6;
5396 }
5397 sc->sc_wdcdev.set_modes = sata_setup_channel;
5398
5399 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5400 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5401
5402 interface = PCI_INTERFACE(pa->pa_class);
5403
5404 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5405 cp = &sc->pciide_channels[channel];
5406 if (pciide_chansetup(sc, channel, interface) == 0)
5407 continue;
5408 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5409 pciide_pci_intr);
5410 }
5411 }
5412