     1 /*	$NetBSD: pciide.c,v 1.198 2003/09/15 20:24:42 bouyer Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.198 2003/09/15 20:24:42 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
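/*
 * Illustrative note (not part of the original source): wdcdebug_pciide_mask
 * selects which WDCDEBUG_PRINT classes above are emitted.  For example,
 * setting it from ddb or by patching the kernel to
 * (DEBUG_PROBE | DEBUG_DMA) enables the config-space dump at attach time
 * plus the DMA table traces further down in this file.
 */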
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/pciide_i31244_reg.h>
123 #include <dev/pci/pciide_sii3112_reg.h>
124 #include <dev/pci/cy82c693var.h>
125
126 #include "opt_pciide.h"
127
128 static const char dmaerrfmt[] =
129 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
130
131 /* inlines for reading/writing 8-bit PCI registers */
132 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
133 int));
134 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
135 int, u_int8_t));
136
137 static __inline u_int8_t
138 pciide_pci_read(pc, pa, reg)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 {
143
144 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
145 ((reg & 0x03) * 8) & 0xff);
146 }
147
148 static __inline void
149 pciide_pci_write(pc, pa, reg, val)
150 pci_chipset_tag_t pc;
151 pcitag_t pa;
152 int reg;
153 u_int8_t val;
154 {
155 pcireg_t pcival;
156
157 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
158 pcival &= ~(0xff << ((reg & 0x03) * 8));
159 pcival |= (val << ((reg & 0x03) * 8));
160 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
161 }
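/*
 * Illustrative example of the byte-lane arithmetic above: reading config
 * register 0x41 fetches the dword at 0x40 and returns (dword >> 8) & 0xff;
 * writing 0x5a to 0x41 read-modify-writes only bits 15:8 of that dword,
 * leaving the other three bytes untouched.
 */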
162
163 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164
165 void sata_setup_channel __P((struct channel_softc*));
166
167 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void piix_setup_channel __P((struct channel_softc*));
169 void piix3_4_setup_channel __P((struct channel_softc*));
170 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
171 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
172 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
173
174 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void amd7x6_setup_channel __P((struct channel_softc*));
176
177 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void apollo_sata_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void apollo_setup_channel __P((struct channel_softc*));
180
181 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void cmd0643_9_setup_channel __P((struct channel_softc*));
184 void cmd_channel_map __P((struct pci_attach_args *,
185 struct pciide_softc *, int));
186 int cmd_pci_intr __P((void *));
187 void cmd646_9_irqack __P((struct channel_softc *));
188 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void cmd680_setup_channel __P((struct channel_softc*));
190 void cmd680_channel_map __P((struct pci_attach_args *,
191 struct pciide_softc *, int));
192
193 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void cmd3112_setup_channel __P((struct channel_softc*));
195
196 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void cy693_setup_channel __P((struct channel_softc*));
198
199 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
200 void sis_setup_channel __P((struct channel_softc*));
201 void sis96x_setup_channel __P((struct channel_softc*));
202 static int sis_hostbr_match __P(( struct pci_attach_args *));
203 static int sis_south_match __P(( struct pci_attach_args *));
204
205 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void acer_setup_channel __P((struct channel_softc*));
207 int acer_pci_intr __P((void *));
208
209 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void pdc202xx_setup_channel __P((struct channel_softc*));
211 void pdc20268_setup_channel __P((struct channel_softc*));
212 int pdc202xx_pci_intr __P((void *));
213 int pdc20265_pci_intr __P((void *));
214 static void pdc20262_dma_start __P((void*, int, int));
215 static int pdc20262_dma_finish __P((void*, int, int, int));
216
217 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void opti_setup_channel __P((struct channel_softc*));
219
220 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
221 void hpt_setup_channel __P((struct channel_softc*));
222 int hpt_pci_intr __P((void *));
223
224 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
225 void acard_setup_channel __P((struct channel_softc*));
226 int acard_pci_intr __P((void *));
227
228 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
229 void serverworks_setup_channel __P((struct channel_softc*));
230 int serverworks_pci_intr __P((void *));
231
232 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
233 void sl82c105_setup_channel __P((struct channel_softc*));
234
235 void pciide_channel_dma_setup __P((struct pciide_channel *));
236 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
237 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
238 void pciide_dma_start __P((void*, int, int));
239 int pciide_dma_finish __P((void*, int, int, int));
240 void pciide_irqack __P((struct channel_softc *));
241 void pciide_print_modes __P((struct pciide_channel *));
242
243 void artisea_chip_map __P((struct pciide_softc*, struct pci_attach_args *));
244
245 struct pciide_product_desc {
246 u_int32_t ide_product;
247 int ide_flags;
248 const char *ide_name;
249 /* map and setup chip, probe drives */
250 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
251 };
252
253 /* Flags for ide_flags */
254 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
   255 #define	IDE_16BIT_IOSPACE	0x0002 /* I/O space BARs ignore upper word */
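/*
 * Summary of how these flags are consumed below (no new behaviour):
 * IDE_PCI_CLASS_OVERRIDE makes pciide_match() accept a device listed in the
 * product tables even when its PCI class/subclass is not mass-storage/IDE
 * (e.g. the Promise and Acard parts).  IDE_16BIT_IOSPACE makes
 * pciide_mapreg_dma() refuse bus-master DMA registers that decode above
 * 64K, for chips that only drive 16 address bits in I/O space.
 */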
256
   257 /* Default product description for devices not known to this driver */
258 const struct pciide_product_desc default_product_desc = {
259 0,
260 0,
261 "Generic PCI IDE controller",
262 default_chip_map,
263 };
264
265 const struct pciide_product_desc pciide_intel_products[] = {
266 { PCI_PRODUCT_INTEL_82092AA,
267 0,
268 "Intel 82092AA IDE controller",
269 default_chip_map,
270 },
271 { PCI_PRODUCT_INTEL_82371FB_IDE,
272 0,
273 "Intel 82371FB IDE controller (PIIX)",
274 piix_chip_map,
275 },
276 { PCI_PRODUCT_INTEL_82371SB_IDE,
277 0,
278 "Intel 82371SB IDE Interface (PIIX3)",
279 piix_chip_map,
280 },
281 { PCI_PRODUCT_INTEL_82371AB_IDE,
282 0,
283 "Intel 82371AB IDE controller (PIIX4)",
284 piix_chip_map,
285 },
286 { PCI_PRODUCT_INTEL_82440MX_IDE,
287 0,
288 "Intel 82440MX IDE controller",
289 piix_chip_map
290 },
291 { PCI_PRODUCT_INTEL_82801AA_IDE,
292 0,
293 "Intel 82801AA IDE Controller (ICH)",
294 piix_chip_map,
295 },
296 { PCI_PRODUCT_INTEL_82801AB_IDE,
297 0,
298 "Intel 82801AB IDE Controller (ICH0)",
299 piix_chip_map,
300 },
301 { PCI_PRODUCT_INTEL_82801BA_IDE,
302 0,
303 "Intel 82801BA IDE Controller (ICH2)",
304 piix_chip_map,
305 },
306 { PCI_PRODUCT_INTEL_82801BAM_IDE,
307 0,
308 "Intel 82801BAM IDE Controller (ICH2-M)",
309 piix_chip_map,
310 },
311 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
312 0,
313 "Intel 82801CA IDE Controller (ICH3)",
314 piix_chip_map,
315 },
316 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
317 0,
318 "Intel 82801CA IDE Controller (ICH3)",
319 piix_chip_map,
320 },
321 { PCI_PRODUCT_INTEL_82801DB_IDE,
322 0,
323 "Intel 82801DB IDE Controller (ICH4)",
324 piix_chip_map,
325 },
326 { PCI_PRODUCT_INTEL_82801DBM_IDE,
327 0,
328 "Intel 82801DBM IDE Controller (ICH4-M)",
329 piix_chip_map,
330 },
331 { PCI_PRODUCT_INTEL_82801EB_IDE,
332 0,
333 "Intel 82801EB IDE Controller (ICH5)",
334 piix_chip_map,
335 },
336 { PCI_PRODUCT_INTEL_31244,
337 0,
338 "Intel 31244 Serial ATA Controller",
339 artisea_chip_map,
340 },
341 { PCI_PRODUCT_INTEL_82801EB_SATA,
342 0,
343 "Intel 82801EB Serial ATA Controller",
344 artisea_chip_map,
345 },
346 { 0,
347 0,
348 NULL,
349 NULL
350 }
351 };
352
353 const struct pciide_product_desc pciide_amd_products[] = {
354 { PCI_PRODUCT_AMD_PBC756_IDE,
355 0,
356 "Advanced Micro Devices AMD756 IDE Controller",
357 amd7x6_chip_map
358 },
359 { PCI_PRODUCT_AMD_PBC766_IDE,
360 0,
361 "Advanced Micro Devices AMD766 IDE Controller",
362 amd7x6_chip_map
363 },
364 { PCI_PRODUCT_AMD_PBC768_IDE,
365 0,
366 "Advanced Micro Devices AMD768 IDE Controller",
367 amd7x6_chip_map
368 },
369 { PCI_PRODUCT_AMD_PBC8111_IDE,
370 0,
371 "Advanced Micro Devices AMD8111 IDE Controller",
372 amd7x6_chip_map
373 },
374 { 0,
375 0,
376 NULL,
377 NULL
378 }
379 };
380
381 const struct pciide_product_desc pciide_nvidia_products[] = {
382 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
383 0,
384 "NVIDIA nForce IDE Controller",
385 amd7x6_chip_map
386 },
387 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
388 0,
389 "NVIDIA nForce2 IDE Controller",
390 amd7x6_chip_map
391 },
392 { 0,
393 0,
394 NULL,
395 NULL
396 }
397 };
398
399 const struct pciide_product_desc pciide_cmd_products[] = {
400 { PCI_PRODUCT_CMDTECH_640,
401 0,
402 "CMD Technology PCI0640",
403 cmd_chip_map
404 },
405 { PCI_PRODUCT_CMDTECH_643,
406 0,
407 "CMD Technology PCI0643",
408 cmd0643_9_chip_map,
409 },
410 { PCI_PRODUCT_CMDTECH_646,
411 0,
412 "CMD Technology PCI0646",
413 cmd0643_9_chip_map,
414 },
415 { PCI_PRODUCT_CMDTECH_648,
416 IDE_PCI_CLASS_OVERRIDE,
417 "CMD Technology PCI0648",
418 cmd0643_9_chip_map,
419 },
420 { PCI_PRODUCT_CMDTECH_649,
421 IDE_PCI_CLASS_OVERRIDE,
422 "CMD Technology PCI0649",
423 cmd0643_9_chip_map,
424 },
425 { PCI_PRODUCT_CMDTECH_680,
426 IDE_PCI_CLASS_OVERRIDE,
427 "Silicon Image 0680",
428 cmd680_chip_map,
429 },
430 { PCI_PRODUCT_CMDTECH_3112,
431 IDE_PCI_CLASS_OVERRIDE,
432 "Silicon Image SATALink 3112",
433 cmd3112_chip_map,
434 },
435 { 0,
436 0,
437 NULL,
438 NULL
439 }
440 };
441
442 const struct pciide_product_desc pciide_via_products[] = {
443 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
444 0,
445 NULL,
446 apollo_chip_map,
447 },
448 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
449 0,
450 NULL,
451 apollo_chip_map,
452 },
453 { PCI_PRODUCT_VIATECH_VT8237_SATA,
454 IDE_PCI_CLASS_OVERRIDE,
455 "VIA Technologies VT8237 SATA Controller",
456 apollo_sata_chip_map,
457 },
458 { 0,
459 0,
460 NULL,
461 NULL
462 }
463 };
464
465 const struct pciide_product_desc pciide_cypress_products[] = {
466 { PCI_PRODUCT_CONTAQ_82C693,
467 IDE_16BIT_IOSPACE,
468 "Cypress 82C693 IDE Controller",
469 cy693_chip_map,
470 },
471 { 0,
472 0,
473 NULL,
474 NULL
475 }
476 };
477
478 const struct pciide_product_desc pciide_sis_products[] = {
479 { PCI_PRODUCT_SIS_5597_IDE,
480 0,
481 NULL,
482 sis_chip_map,
483 },
484 { 0,
485 0,
486 NULL,
487 NULL
488 }
489 };
490
491 const struct pciide_product_desc pciide_acer_products[] = {
492 { PCI_PRODUCT_ALI_M5229,
493 0,
494 "Acer Labs M5229 UDMA IDE Controller",
495 acer_chip_map,
496 },
497 { 0,
498 0,
499 NULL,
500 NULL
501 }
502 };
503
504 const struct pciide_product_desc pciide_promise_products[] = {
505 { PCI_PRODUCT_PROMISE_ULTRA33,
506 IDE_PCI_CLASS_OVERRIDE,
507 "Promise Ultra33/ATA Bus Master IDE Accelerator",
508 pdc202xx_chip_map,
509 },
510 { PCI_PRODUCT_PROMISE_ULTRA66,
511 IDE_PCI_CLASS_OVERRIDE,
512 "Promise Ultra66/ATA Bus Master IDE Accelerator",
513 pdc202xx_chip_map,
514 },
515 { PCI_PRODUCT_PROMISE_ULTRA100,
516 IDE_PCI_CLASS_OVERRIDE,
517 "Promise Ultra100/ATA Bus Master IDE Accelerator",
518 pdc202xx_chip_map,
519 },
520 { PCI_PRODUCT_PROMISE_ULTRA100X,
521 IDE_PCI_CLASS_OVERRIDE,
522 "Promise Ultra100/ATA Bus Master IDE Accelerator",
523 pdc202xx_chip_map,
524 },
525 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
526 IDE_PCI_CLASS_OVERRIDE,
527 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
528 pdc202xx_chip_map,
529 },
530 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
531 IDE_PCI_CLASS_OVERRIDE,
532 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
533 pdc202xx_chip_map,
534 },
535 { PCI_PRODUCT_PROMISE_ULTRA133,
536 IDE_PCI_CLASS_OVERRIDE,
537 "Promise Ultra133/ATA Bus Master IDE Accelerator",
538 pdc202xx_chip_map,
539 },
540 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
541 IDE_PCI_CLASS_OVERRIDE,
542 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
543 pdc202xx_chip_map,
544 },
545 { PCI_PRODUCT_PROMISE_MBULTRA133,
546 IDE_PCI_CLASS_OVERRIDE,
547 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
548 pdc202xx_chip_map,
549 },
550 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
551 IDE_PCI_CLASS_OVERRIDE,
552 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
553 pdc202xx_chip_map,
554 },
555 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
556 IDE_PCI_CLASS_OVERRIDE,
557 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
558 pdc202xx_chip_map,
559 },
560 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
561 IDE_PCI_CLASS_OVERRIDE,
562 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
563 pdc202xx_chip_map,
564 },
565 { 0,
566 0,
567 NULL,
568 NULL
569 }
570 };
571
572 const struct pciide_product_desc pciide_opti_products[] = {
573 { PCI_PRODUCT_OPTI_82C621,
574 0,
575 "OPTi 82c621 PCI IDE controller",
576 opti_chip_map,
577 },
578 { PCI_PRODUCT_OPTI_82C568,
579 0,
580 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
581 opti_chip_map,
582 },
583 { PCI_PRODUCT_OPTI_82D568,
584 0,
585 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
586 opti_chip_map,
587 },
588 { 0,
589 0,
590 NULL,
591 NULL
592 }
593 };
594
595 const struct pciide_product_desc pciide_triones_products[] = {
596 { PCI_PRODUCT_TRIONES_HPT366,
597 IDE_PCI_CLASS_OVERRIDE,
598 NULL,
599 hpt_chip_map,
600 },
601 { PCI_PRODUCT_TRIONES_HPT372,
602 IDE_PCI_CLASS_OVERRIDE,
603 NULL,
604 hpt_chip_map
605 },
606 { PCI_PRODUCT_TRIONES_HPT374,
607 IDE_PCI_CLASS_OVERRIDE,
608 NULL,
609 hpt_chip_map
610 },
611 { 0,
612 0,
613 NULL,
614 NULL
615 }
616 };
617
618 const struct pciide_product_desc pciide_acard_products[] = {
619 { PCI_PRODUCT_ACARD_ATP850U,
620 IDE_PCI_CLASS_OVERRIDE,
621 "Acard ATP850U Ultra33 IDE Controller",
622 acard_chip_map,
623 },
624 { PCI_PRODUCT_ACARD_ATP860,
625 IDE_PCI_CLASS_OVERRIDE,
626 "Acard ATP860 Ultra66 IDE Controller",
627 acard_chip_map,
628 },
629 { PCI_PRODUCT_ACARD_ATP860A,
630 IDE_PCI_CLASS_OVERRIDE,
631 "Acard ATP860-A Ultra66 IDE Controller",
632 acard_chip_map,
633 },
634 { 0,
635 0,
636 NULL,
637 NULL
638 }
639 };
640
641 const struct pciide_product_desc pciide_serverworks_products[] = {
642 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
643 0,
644 "ServerWorks OSB4 IDE Controller",
645 serverworks_chip_map,
646 },
647 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
648 0,
649 "ServerWorks CSB5 IDE Controller",
650 serverworks_chip_map,
651 },
652 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
653 0,
654 "ServerWorks CSB6 RAID/IDE Controller",
655 serverworks_chip_map,
656 },
657 { 0,
658 0,
659 NULL,
660 }
661 };
662
663 const struct pciide_product_desc pciide_symphony_products[] = {
664 { PCI_PRODUCT_SYMPHONY_82C105,
665 0,
666 "Symphony Labs 82C105 IDE controller",
667 sl82c105_chip_map,
668 },
669 { 0,
670 0,
671 NULL,
672 }
673 };
674
675 const struct pciide_product_desc pciide_winbond_products[] = {
676 { PCI_PRODUCT_WINBOND_W83C553F_1,
677 0,
678 "Winbond W83C553F IDE controller",
679 sl82c105_chip_map,
680 },
681 { 0,
682 0,
683 NULL,
684 }
685 };
686
687 struct pciide_vendor_desc {
688 u_int32_t ide_vendor;
689 const struct pciide_product_desc *ide_products;
690 };
691
692 const struct pciide_vendor_desc pciide_vendors[] = {
693 { PCI_VENDOR_INTEL, pciide_intel_products },
694 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
695 { PCI_VENDOR_VIATECH, pciide_via_products },
696 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
697 { PCI_VENDOR_SIS, pciide_sis_products },
698 { PCI_VENDOR_ALI, pciide_acer_products },
699 { PCI_VENDOR_PROMISE, pciide_promise_products },
700 { PCI_VENDOR_AMD, pciide_amd_products },
701 { PCI_VENDOR_OPTI, pciide_opti_products },
702 { PCI_VENDOR_TRIONES, pciide_triones_products },
703 { PCI_VENDOR_ACARD, pciide_acard_products },
704 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
705 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
706 { PCI_VENDOR_WINBOND, pciide_winbond_products },
707 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
708 { 0, NULL }
709 };
710
711 /* options passed via the 'flags' config keyword */
712 #define PCIIDE_OPTIONS_DMA 0x01
713 #define PCIIDE_OPTIONS_NODMA 0x02
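/*
 * Illustrative usage (kernel config file syntax; the flags value shown is
 * an example only):
 *	pciide* at pci? dev ? function ? flags 0x0002
 * sets PCIIDE_OPTIONS_NODMA and forces PIO transfers, while flags 0x0001
 * (PCIIDE_OPTIONS_DMA) lets default_chip_map() try bus-master DMA on
 * otherwise unknown controllers.
 */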
714
715 int pciide_match __P((struct device *, struct cfdata *, void *));
716 void pciide_attach __P((struct device *, struct device *, void *));
717
718 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
719 pciide_match, pciide_attach, NULL, NULL);
720
721 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
722 int pciide_mapregs_compat __P(( struct pci_attach_args *,
723 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
724 int pciide_mapregs_native __P((struct pci_attach_args *,
725 struct pciide_channel *, bus_size_t *, bus_size_t *,
726 int (*pci_intr) __P((void *))));
727 void pciide_mapreg_dma __P((struct pciide_softc *,
728 struct pci_attach_args *));
729 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
730 void pciide_mapchan __P((struct pci_attach_args *,
731 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
732 int (*pci_intr) __P((void *))));
733 int pciide_chan_candisable __P((struct pciide_channel *));
734 void pciide_map_compat_intr __P(( struct pci_attach_args *,
735 struct pciide_channel *, int, int));
736 int pciide_compat_intr __P((void *));
737 int pciide_pci_intr __P((void *));
738 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
739
740 const struct pciide_product_desc *
741 pciide_lookup_product(id)
742 u_int32_t id;
743 {
744 const struct pciide_product_desc *pp;
745 const struct pciide_vendor_desc *vp;
746
747 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
748 if (PCI_VENDOR(id) == vp->ide_vendor)
749 break;
750
751 if ((pp = vp->ide_products) == NULL)
752 return NULL;
753
754 for (; pp->chip_map != NULL; pp++)
755 if (PCI_PRODUCT(id) == pp->ide_product)
756 break;
757
758 if (pp->chip_map == NULL)
759 return NULL;
760 return pp;
761 }
762
763 int
764 pciide_match(parent, match, aux)
765 struct device *parent;
766 struct cfdata *match;
767 void *aux;
768 {
769 struct pci_attach_args *pa = aux;
770 const struct pciide_product_desc *pp;
771
772 /*
773 * Check the ID register to see that it's a PCI IDE controller.
774 * If it is, we assume that we can deal with it; it _should_
775 * work in a standardized way...
776 */
777 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
778 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
779 return (1);
780 }
781
782 /*
   783 	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
   784 	 * controllers. Let's see if we can deal with them anyway.
785 */
786 pp = pciide_lookup_product(pa->pa_id);
787 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
788 return (1);
789 }
790
791 return (0);
792 }
793
794 void
795 pciide_attach(parent, self, aux)
796 struct device *parent, *self;
797 void *aux;
798 {
799 struct pci_attach_args *pa = aux;
800 pci_chipset_tag_t pc = pa->pa_pc;
801 pcitag_t tag = pa->pa_tag;
802 struct pciide_softc *sc = (struct pciide_softc *)self;
803 pcireg_t csr;
804 char devinfo[256];
805 const char *displaydev;
806
807 aprint_naive(": disk controller\n");
808
809 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
810 sc->sc_pp = pciide_lookup_product(pa->pa_id);
811 if (sc->sc_pp == NULL) {
812 sc->sc_pp = &default_product_desc;
813 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
814 displaydev = devinfo;
815 } else
816 displaydev = sc->sc_pp->ide_name;
817
818 /* if displaydev == NULL, printf is done in chip-specific map */
819 if (displaydev)
820 aprint_normal(": %s (rev. 0x%02x)\n", displaydev,
821 PCI_REVISION(pa->pa_class));
822
823 sc->sc_pc = pa->pa_pc;
824 sc->sc_tag = pa->pa_tag;
825
826 /* Set up DMA defaults; these might be adjusted by chip_map. */
827 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
828 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
829
830 #ifdef WDCDEBUG
831 if (wdcdebug_pciide_mask & DEBUG_PROBE)
832 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
833 #endif
834 sc->sc_pp->chip_map(sc, pa);
835
836 if (sc->sc_dma_ok) {
837 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
838 csr |= PCI_COMMAND_MASTER_ENABLE;
839 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
840 }
841 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
842 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
843 }
844
845 /* tell whether the chip is enabled or not */
846 int
847 pciide_chipen(sc, pa)
848 struct pciide_softc *sc;
849 struct pci_attach_args *pa;
850 {
851 pcireg_t csr;
852 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
853 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
854 PCI_COMMAND_STATUS_REG);
855 aprint_normal("%s: device disabled (at %s)\n",
856 sc->sc_wdcdev.sc_dev.dv_xname,
857 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
858 "device" : "bridge");
859 return 0;
860 }
861 return 1;
862 }
863
864 int
865 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
866 struct pci_attach_args *pa;
867 struct pciide_channel *cp;
868 int compatchan;
869 bus_size_t *cmdsizep, *ctlsizep;
870 {
871 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
872 struct channel_softc *wdc_cp = &cp->wdc_channel;
873
874 cp->compat = 1;
875 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
876 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
877
878 wdc_cp->cmd_iot = pa->pa_iot;
879 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
880 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
881 aprint_error("%s: couldn't map %s channel cmd regs\n",
882 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
883 return (0);
884 }
885
886 wdc_cp->ctl_iot = pa->pa_iot;
887 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
888 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
889 aprint_error("%s: couldn't map %s channel ctl regs\n",
890 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
891 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
892 PCIIDE_COMPAT_CMD_SIZE);
893 return (0);
894 }
895
896 return (1);
897 }
898
899 int
900 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
901 struct pci_attach_args * pa;
902 struct pciide_channel *cp;
903 bus_size_t *cmdsizep, *ctlsizep;
904 int (*pci_intr) __P((void *));
905 {
906 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
907 struct channel_softc *wdc_cp = &cp->wdc_channel;
908 const char *intrstr;
909 pci_intr_handle_t intrhandle;
910
911 cp->compat = 0;
912
913 if (sc->sc_pci_ih == NULL) {
914 if (pci_intr_map(pa, &intrhandle) != 0) {
915 aprint_error("%s: couldn't map native-PCI interrupt\n",
916 sc->sc_wdcdev.sc_dev.dv_xname);
917 return 0;
918 }
919 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
920 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
921 intrhandle, IPL_BIO, pci_intr, sc);
922 if (sc->sc_pci_ih != NULL) {
923 aprint_normal("%s: using %s for native-PCI interrupt\n",
924 sc->sc_wdcdev.sc_dev.dv_xname,
925 intrstr ? intrstr : "unknown interrupt");
926 } else {
927 aprint_error(
928 "%s: couldn't establish native-PCI interrupt",
929 sc->sc_wdcdev.sc_dev.dv_xname);
930 if (intrstr != NULL)
931 aprint_normal(" at %s", intrstr);
932 aprint_normal("\n");
933 return 0;
934 }
935 }
936 cp->ih = sc->sc_pci_ih;
937 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
938 PCI_MAPREG_TYPE_IO, 0,
939 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
940 aprint_error("%s: couldn't map %s channel cmd regs\n",
941 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
942 return 0;
943 }
944
945 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
946 PCI_MAPREG_TYPE_IO, 0,
947 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
948 aprint_error("%s: couldn't map %s channel ctl regs\n",
949 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
950 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
951 return 0;
952 }
953 /*
954 * In native mode, 4 bytes of I/O space are mapped for the control
955 * register, the control register is at offset 2. Pass the generic
956 * code a handle for only one byte at the right offset.
957 */
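	/*
	 * (For instance, with the control BAR decoding at 0x374 the 4-byte
	 * window covers 0x374-0x377 and the alternate-status/device-control
	 * register sits at 0x376; the subregion below hands wdc exactly
	 * that one byte.)
	 */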
958 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
959 &wdc_cp->ctl_ioh) != 0) {
960 aprint_error("%s: unable to subregion %s channel ctl regs\n",
961 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
962 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
   963 		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
964 return 0;
965 }
966 return (1);
967 }
968
969 void
970 pciide_mapreg_dma(sc, pa)
971 struct pciide_softc *sc;
972 struct pci_attach_args *pa;
973 {
974 pcireg_t maptype;
975 bus_addr_t addr;
976
977 /*
978 * Map DMA registers
979 *
980 * Note that sc_dma_ok is the right variable to test to see if
981 * DMA can be done. If the interface doesn't support DMA,
982 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
983 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
984 * non-zero if the interface supports DMA and the registers
985 * could be mapped.
986 *
987 * XXX Note that despite the fact that the Bus Master IDE specs
988 * XXX say that "The bus master IDE function uses 16 bytes of IO
989 * XXX space," some controllers (at least the United
990 * XXX Microelectronics UM8886BF) place it in memory space.
991 */
992 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
993 PCIIDE_REG_BUS_MASTER_DMA);
994
995 switch (maptype) {
996 case PCI_MAPREG_TYPE_IO:
997 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
998 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
999 &addr, NULL, NULL) == 0);
1000 if (sc->sc_dma_ok == 0) {
1001 aprint_normal(
1002 ", but unused (couldn't query registers)");
1003 break;
1004 }
1005 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
1006 && addr >= 0x10000) {
1007 sc->sc_dma_ok = 0;
1008 aprint_normal(
1009 ", but unused (registers at unsafe address "
1010 "%#lx)", (unsigned long)addr);
1011 break;
1012 }
1013 /* FALLTHROUGH */
1014
1015 case PCI_MAPREG_MEM_TYPE_32BIT:
1016 sc->sc_dma_ok = (pci_mapreg_map(pa,
1017 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
1018 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
1019 sc->sc_dmat = pa->pa_dmat;
1020 if (sc->sc_dma_ok == 0) {
1021 aprint_normal(", but unused (couldn't map registers)");
1022 } else {
1023 sc->sc_wdcdev.dma_arg = sc;
1024 sc->sc_wdcdev.dma_init = pciide_dma_init;
1025 sc->sc_wdcdev.dma_start = pciide_dma_start;
1026 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
1027 }
1028
1029 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1030 PCIIDE_OPTIONS_NODMA) {
1031 aprint_normal(
1032 ", but unused (forced off by config file)");
1033 sc->sc_dma_ok = 0;
1034 }
1035 break;
1036
1037 default:
1038 sc->sc_dma_ok = 0;
1039 aprint_normal(
1040 ", but unsupported register maptype (0x%x)", maptype);
1041 }
1042 }
1043
1044 int
1045 pciide_compat_intr(arg)
1046 void *arg;
1047 {
1048 struct pciide_channel *cp = arg;
1049
1050 #ifdef DIAGNOSTIC
1051 /* should only be called for a compat channel */
1052 if (cp->compat == 0)
1053 panic("pciide compat intr called for non-compat chan %p", cp);
1054 #endif
1055 return (wdcintr(&cp->wdc_channel));
1056 }
1057
1058 int
1059 pciide_pci_intr(arg)
1060 void *arg;
1061 {
1062 struct pciide_softc *sc = arg;
1063 struct pciide_channel *cp;
1064 struct channel_softc *wdc_cp;
1065 int i, rv, crv;
1066
1067 rv = 0;
1068 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1069 cp = &sc->pciide_channels[i];
1070 wdc_cp = &cp->wdc_channel;
1071
  1072 		/* If this is a compat channel, skip it. */
1073 if (cp->compat)
1074 continue;
  1075 		/* if this channel isn't waiting for an interrupt, skip it */
1076 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1077 continue;
1078
1079 crv = wdcintr(wdc_cp);
1080 if (crv == 0)
1081 ; /* leave rv alone */
1082 else if (crv == 1)
1083 rv = 1; /* claim the intr */
1084 else if (rv == 0) /* crv should be -1 in this case */
1085 rv = crv; /* if we've done no better, take it */
1086 }
1087 return (rv);
1088 }
1089
1090 void
1091 pciide_channel_dma_setup(cp)
1092 struct pciide_channel *cp;
1093 {
1094 int drive;
1095 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1096 struct ata_drive_datas *drvp;
1097
1098 for (drive = 0; drive < 2; drive++) {
1099 drvp = &cp->wdc_channel.ch_drive[drive];
1100 /* If no drive, skip */
1101 if ((drvp->drive_flags & DRIVE) == 0)
1102 continue;
1103 /* setup DMA if needed */
1104 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1105 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1106 sc->sc_dma_ok == 0) {
1107 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1108 continue;
1109 }
1110 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1111 != 0) {
1112 /* Abort DMA setup */
1113 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1114 continue;
1115 }
1116 }
1117 }
1118
1119 int
1120 pciide_dma_table_setup(sc, channel, drive)
1121 struct pciide_softc *sc;
1122 int channel, drive;
1123 {
1124 bus_dma_segment_t seg;
1125 int error, rseg;
1126 const bus_size_t dma_table_size =
1127 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1128 struct pciide_dma_maps *dma_maps =
1129 &sc->pciide_channels[channel].dma_maps[drive];
1130
1131 /* If table was already allocated, just return */
1132 if (dma_maps->dma_table)
1133 return 0;
1134
1135 /* Allocate memory for the DMA tables and map it */
1136 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1137 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1138 BUS_DMA_NOWAIT)) != 0) {
1139 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1140 "allocate", drive, error);
1141 return error;
1142 }
1143 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1144 dma_table_size,
1145 (caddr_t *)&dma_maps->dma_table,
1146 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1147 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1148 "map", drive, error);
1149 return error;
1150 }
1151 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1152 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1153 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1154 /* Create and load table DMA map for this disk */
1155 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1156 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1157 &dma_maps->dmamap_table)) != 0) {
1158 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1159 "create", drive, error);
1160 return error;
1161 }
1162 if ((error = bus_dmamap_load(sc->sc_dmat,
1163 dma_maps->dmamap_table,
1164 dma_maps->dma_table,
1165 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1166 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1167 "load", drive, error);
1168 return error;
1169 }
1170 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1171 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1172 DEBUG_PROBE);
1173 /* Create a xfer DMA map for this drive */
1174 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1175 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1176 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1177 &dma_maps->dmamap_xfer)) != 0) {
1178 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1179 "create xfer", drive, error);
1180 return error;
1181 }
1182 return 0;
1183 }
1184
1185 int
1186 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1187 void *v;
1188 int channel, drive;
1189 void *databuf;
1190 size_t datalen;
1191 int flags;
1192 {
1193 struct pciide_softc *sc = v;
1194 int error, seg;
1195 struct pciide_dma_maps *dma_maps =
1196 &sc->pciide_channels[channel].dma_maps[drive];
1197
1198 error = bus_dmamap_load(sc->sc_dmat,
1199 dma_maps->dmamap_xfer,
1200 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1201 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1202 if (error) {
1203 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1204 "load xfer", drive, error);
1205 return error;
1206 }
1207
1208 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1209 dma_maps->dmamap_xfer->dm_mapsize,
1210 (flags & WDC_DMA_READ) ?
1211 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1212
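	/*
	 * The loop below fills in the bus-master IDE descriptor table (the
	 * "PRD" table of the Bus Master IDE spec cited at the top of this
	 * file): each idedma_table entry holds a 32-bit physical base
	 * address and a byte count, stored little-endian, and the last
	 * entry gets the end-of-table bit (IDEDMA_BYTE_COUNT_EOT) or'ed
	 * into its byte count.  No segment may cross a 64K boundary, which
	 * the DIAGNOSTIC check enforces.
	 */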
1213 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1214 #ifdef DIAGNOSTIC
1215 /* A segment must not cross a 64k boundary */
1216 {
1217 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1218 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1219 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1220 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1221 printf("pciide_dma: segment %d physical addr 0x%lx"
1222 " len 0x%lx not properly aligned\n",
1223 seg, phys, len);
1224 panic("pciide_dma: buf align");
1225 }
1226 }
1227 #endif
1228 dma_maps->dma_table[seg].base_addr =
1229 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1230 dma_maps->dma_table[seg].byte_count =
1231 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1232 IDEDMA_BYTE_COUNT_MASK);
1233 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1234 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1235 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1236
1237 }
1238 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1239 htole32(IDEDMA_BYTE_COUNT_EOT);
1240
1241 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1242 dma_maps->dmamap_table->dm_mapsize,
1243 BUS_DMASYNC_PREWRITE);
1244
1245 /* Maps are ready. Start DMA function */
1246 #ifdef DIAGNOSTIC
1247 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1248 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1249 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1250 panic("pciide_dma_init: table align");
1251 }
1252 #endif
1253
1254 /* Clear status bits */
1255 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1256 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1257 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1258 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1259 /* Write table addr */
1260 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1261 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1262 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1263 /* set read/write */
1264 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1265 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1266 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1267 /* remember flags */
1268 dma_maps->dma_flags = flags;
1269 return 0;
1270 }
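/*
 * Per-transfer flow, as wired up in pciide_mapreg_dma() above: wdc calls
 * pciide_dma_init() to load the xfer map and program the descriptor table
 * and direction bit, pciide_dma_start() to set IDEDMA_CMD_START, and
 * pciide_dma_finish() to stop the engine, sync and unload the map, and
 * translate the IDEDMA_CTL status bits into WDC_DMAST_* codes (`force'
 * requests completion even if no interrupt is pending).
 */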
1271
1272 void
1273 pciide_dma_start(v, channel, drive)
1274 void *v;
1275 int channel, drive;
1276 {
1277 struct pciide_softc *sc = v;
1278
1279 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1280 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1281 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1282 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1283 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1284 }
1285
1286 int
1287 pciide_dma_finish(v, channel, drive, force)
1288 void *v;
1289 int channel, drive;
1290 int force;
1291 {
1292 struct pciide_softc *sc = v;
1293 u_int8_t status;
1294 int error = 0;
1295 struct pciide_dma_maps *dma_maps =
1296 &sc->pciide_channels[channel].dma_maps[drive];
1297
1298 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1299 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1300 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1301 DEBUG_XFERS);
1302
1303 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1304 return WDC_DMAST_NOIRQ;
1305
1306 /* stop DMA channel */
1307 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1308 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1309 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1310 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1311
1312 /* Unload the map of the data buffer */
1313 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1314 dma_maps->dmamap_xfer->dm_mapsize,
1315 (dma_maps->dma_flags & WDC_DMA_READ) ?
1316 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1317 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1318
1319 if ((status & IDEDMA_CTL_ERR) != 0) {
1320 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1321 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1322 error |= WDC_DMAST_ERR;
1323 }
1324
1325 if ((status & IDEDMA_CTL_INTR) == 0) {
1326 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1327 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1328 drive, status);
1329 error |= WDC_DMAST_NOIRQ;
1330 }
1331
1332 if ((status & IDEDMA_CTL_ACT) != 0) {
1333 /* data underrun, may be a valid condition for ATAPI */
1334 error |= WDC_DMAST_UNDER;
1335 }
1336 return error;
1337 }
1338
1339 void
1340 pciide_irqack(chp)
1341 struct channel_softc *chp;
1342 {
1343 struct pciide_channel *cp = (struct pciide_channel*)chp;
1344 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1345
1346 /* clear status bits in IDE DMA registers */
1347 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1348 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1349 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1350 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1351 }
1352
1353 /* some common code used by several chip_map */
1354 int
1355 pciide_chansetup(sc, channel, interface)
1356 struct pciide_softc *sc;
1357 int channel;
1358 pcireg_t interface;
1359 {
1360 struct pciide_channel *cp = &sc->pciide_channels[channel];
1361 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1362 cp->name = PCIIDE_CHANNEL_NAME(channel);
1363 cp->wdc_channel.channel = channel;
1364 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1365 cp->wdc_channel.ch_queue =
1366 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1367 if (cp->wdc_channel.ch_queue == NULL) {
1368 aprint_error("%s %s channel: "
1369 "can't allocate memory for command queue",
1370 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1371 return 0;
1372 }
1373 aprint_normal("%s: %s channel %s to %s mode\n",
1374 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1375 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1376 "configured" : "wired",
1377 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1378 "native-PCI" : "compatibility");
1379 return 1;
1380 }
1381
1382 /* some common code used by several chip channel_map */
1383 void
1384 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1385 struct pci_attach_args *pa;
1386 struct pciide_channel *cp;
1387 pcireg_t interface;
1388 bus_size_t *cmdsizep, *ctlsizep;
1389 int (*pci_intr) __P((void *));
1390 {
1391 struct channel_softc *wdc_cp = &cp->wdc_channel;
1392
1393 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1394 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1395 pci_intr);
1396 else
1397 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1398 wdc_cp->channel, cmdsizep, ctlsizep);
1399
1400 if (cp->hw_ok == 0)
1401 return;
1402 wdc_cp->data32iot = wdc_cp->cmd_iot;
1403 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1404 wdcattach(wdc_cp);
1405 }
1406
1407 /*
  1408  * Generic code to determine whether a channel can be disabled. Returns 1
  1409  * if the channel can be disabled, 0 if not.
1410 */
1411 int
1412 pciide_chan_candisable(cp)
1413 struct pciide_channel *cp;
1414 {
1415 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1416 struct channel_softc *wdc_cp = &cp->wdc_channel;
1417
1418 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1419 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1420 aprint_normal("%s: disabling %s channel (no drives)\n",
1421 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1422 cp->hw_ok = 0;
1423 return 1;
1424 }
1425 return 0;
1426 }
1427
1428 /*
  1429  * Generic code to map the compat interrupt if hw_ok=1 and this is a compat
  1430  * channel. Sets hw_ok=0 on failure.
1431 */
1432 void
1433 pciide_map_compat_intr(pa, cp, compatchan, interface)
1434 struct pci_attach_args *pa;
1435 struct pciide_channel *cp;
1436 int compatchan, interface;
1437 {
1438 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1439 struct channel_softc *wdc_cp = &cp->wdc_channel;
1440
1441 if (cp->hw_ok == 0)
1442 return;
1443 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1444 return;
1445
1446 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1447 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1448 pa, compatchan, pciide_compat_intr, cp);
1449 if (cp->ih == NULL) {
1450 #endif
1451 aprint_error("%s: no compatibility interrupt for use by %s "
1452 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1453 cp->hw_ok = 0;
1454 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1455 }
1456 #endif
1457 }
1458
1459 void
1460 pciide_print_modes(cp)
1461 struct pciide_channel *cp;
1462 {
1463 wdc_print_modes(&cp->wdc_channel);
1464 }
1465
1466 void
1467 default_chip_map(sc, pa)
1468 struct pciide_softc *sc;
1469 struct pci_attach_args *pa;
1470 {
1471 struct pciide_channel *cp;
1472 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1473 pcireg_t csr;
1474 int channel, drive;
1475 struct ata_drive_datas *drvp;
1476 u_int8_t idedma_ctl;
1477 bus_size_t cmdsize, ctlsize;
1478 char *failreason;
1479
1480 if (pciide_chipen(sc, pa) == 0)
1481 return;
1482
1483 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1484 aprint_normal("%s: bus-master DMA support present",
1485 sc->sc_wdcdev.sc_dev.dv_xname);
1486 if (sc->sc_pp == &default_product_desc &&
1487 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1488 PCIIDE_OPTIONS_DMA) == 0) {
1489 aprint_normal(", but unused (no driver support)");
1490 sc->sc_dma_ok = 0;
1491 } else {
1492 pciide_mapreg_dma(sc, pa);
1493 if (sc->sc_dma_ok != 0)
1494 aprint_normal(", used without full driver "
1495 "support");
1496 }
1497 } else {
1498 aprint_normal("%s: hardware does not support DMA",
1499 sc->sc_wdcdev.sc_dev.dv_xname);
1500 sc->sc_dma_ok = 0;
1501 }
1502 aprint_normal("\n");
1503 if (sc->sc_dma_ok) {
1504 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1505 sc->sc_wdcdev.irqack = pciide_irqack;
1506 }
1507 sc->sc_wdcdev.PIO_cap = 0;
1508 sc->sc_wdcdev.DMA_cap = 0;
1509
1510 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1511 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1512 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1513
1514 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1515 cp = &sc->pciide_channels[channel];
1516 if (pciide_chansetup(sc, channel, interface) == 0)
1517 continue;
1518 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1519 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1520 &ctlsize, pciide_pci_intr);
1521 } else {
1522 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1523 channel, &cmdsize, &ctlsize);
1524 }
1525 if (cp->hw_ok == 0)
1526 continue;
1527 /*
1528 * Check to see if something appears to be there.
1529 */
1530 failreason = NULL;
1531 if (!wdcprobe(&cp->wdc_channel)) {
1532 failreason = "not responding; disabled or no drives?";
1533 goto next;
1534 }
1535 /*
1536 * Now, make sure it's actually attributable to this PCI IDE
1537 * channel by trying to access the channel again while the
1538 * PCI IDE controller's I/O space is disabled. (If the
1539 * channel no longer appears to be there, it belongs to
1540 * this controller.) YUCK!
1541 */
1542 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1543 PCI_COMMAND_STATUS_REG);
1544 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1545 csr & ~PCI_COMMAND_IO_ENABLE);
1546 if (wdcprobe(&cp->wdc_channel))
1547 failreason = "other hardware responding at addresses";
1548 pci_conf_write(sc->sc_pc, sc->sc_tag,
1549 PCI_COMMAND_STATUS_REG, csr);
1550 next:
1551 if (failreason) {
1552 aprint_error("%s: %s channel ignored (%s)\n",
1553 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1554 failreason);
1555 cp->hw_ok = 0;
1556 bus_space_unmap(cp->wdc_channel.cmd_iot,
1557 cp->wdc_channel.cmd_ioh, cmdsize);
1558 if (interface & PCIIDE_INTERFACE_PCI(channel))
1559 bus_space_unmap(cp->wdc_channel.ctl_iot,
1560 cp->ctl_baseioh, ctlsize);
1561 else
1562 bus_space_unmap(cp->wdc_channel.ctl_iot,
1563 cp->wdc_channel.ctl_ioh, ctlsize);
1564 } else {
1565 pciide_map_compat_intr(pa, cp, channel, interface);
1566 }
1567 if (cp->hw_ok) {
1568 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1569 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1570 wdcattach(&cp->wdc_channel);
1571 }
1572 }
1573
1574 if (sc->sc_dma_ok == 0)
1575 return;
1576
1577 /* Allocate DMA maps */
1578 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1579 idedma_ctl = 0;
1580 cp = &sc->pciide_channels[channel];
1581 for (drive = 0; drive < 2; drive++) {
1582 drvp = &cp->wdc_channel.ch_drive[drive];
1583 /* If no drive, skip */
1584 if ((drvp->drive_flags & DRIVE) == 0)
1585 continue;
1586 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1587 continue;
1588 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1589 /* Abort DMA setup */
1590 aprint_error(
1591 "%s:%d:%d: can't allocate DMA maps, "
1592 "using PIO transfers\n",
1593 sc->sc_wdcdev.sc_dev.dv_xname,
1594 channel, drive);
  1595 				drvp->drive_flags &= ~DRIVE_DMA;
					continue;	/* fall back to PIO for this drive */
  1596 			}
1597 aprint_normal("%s:%d:%d: using DMA data transfers\n",
1598 sc->sc_wdcdev.sc_dev.dv_xname,
1599 channel, drive);
1600 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1601 }
1602 if (idedma_ctl != 0) {
1603 /* Add software bits in status register */
1604 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1605 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1606 idedma_ctl);
1607 }
1608 }
1609 }
1610
1611 void
1612 sata_setup_channel(chp)
1613 struct channel_softc *chp;
1614 {
1615 struct ata_drive_datas *drvp;
1616 int drive;
1617 u_int32_t idedma_ctl;
1618 struct pciide_channel *cp = (struct pciide_channel*)chp;
1619 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
1620
1621 /* setup DMA if needed */
1622 pciide_channel_dma_setup(cp);
1623
1624 idedma_ctl = 0;
1625
1626 for (drive = 0; drive < 2; drive++) {
1627 drvp = &chp->ch_drive[drive];
1628 /* If no drive, skip */
1629 if ((drvp->drive_flags & DRIVE) == 0)
1630 continue;
1631 if (drvp->drive_flags & DRIVE_UDMA) {
1632 /* use Ultra/DMA */
1633 drvp->drive_flags &= ~DRIVE_DMA;
1634 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1635 } else if (drvp->drive_flags & DRIVE_DMA) {
1636 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1637 }
1638 }
1639
1640 /*
  1641 	 * Nothing to do to set up modes; mode setup is meaningless in S-ATA
1642 * (but many S-ATA drives still want to get the SET_FEATURE
1643 * command).
1644 */
1645 if (idedma_ctl != 0) {
1646 /* Add software bits in status register */
1647 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1648 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1649 idedma_ctl);
1650 }
1651 pciide_print_modes(cp);
1652 }
1653
1654 void
1655 piix_chip_map(sc, pa)
1656 struct pciide_softc *sc;
1657 struct pci_attach_args *pa;
1658 {
1659 struct pciide_channel *cp;
1660 int channel;
1661 u_int32_t idetim;
1662 bus_size_t cmdsize, ctlsize;
1663
1664 if (pciide_chipen(sc, pa) == 0)
1665 return;
1666
1667 aprint_normal("%s: bus-master DMA support present",
1668 sc->sc_wdcdev.sc_dev.dv_xname);
1669 pciide_mapreg_dma(sc, pa);
1670 aprint_normal("\n");
1671 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1672 WDC_CAPABILITY_MODE;
1673 if (sc->sc_dma_ok) {
1674 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1675 sc->sc_wdcdev.irqack = pciide_irqack;
1676 switch(sc->sc_pp->ide_product) {
1677 case PCI_PRODUCT_INTEL_82371AB_IDE:
1678 case PCI_PRODUCT_INTEL_82440MX_IDE:
1679 case PCI_PRODUCT_INTEL_82801AA_IDE:
1680 case PCI_PRODUCT_INTEL_82801AB_IDE:
1681 case PCI_PRODUCT_INTEL_82801BA_IDE:
1682 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1683 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1684 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1685 case PCI_PRODUCT_INTEL_82801DB_IDE:
1686 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1687 case PCI_PRODUCT_INTEL_82801EB_IDE:
1688 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1689 }
1690 }
1691 sc->sc_wdcdev.PIO_cap = 4;
1692 sc->sc_wdcdev.DMA_cap = 2;
1693 switch(sc->sc_pp->ide_product) {
1694 case PCI_PRODUCT_INTEL_82801AA_IDE:
1695 sc->sc_wdcdev.UDMA_cap = 4;
1696 break;
1697 case PCI_PRODUCT_INTEL_82801BA_IDE:
1698 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1699 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1700 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1701 case PCI_PRODUCT_INTEL_82801DB_IDE:
1702 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1703 case PCI_PRODUCT_INTEL_82801EB_IDE:
1704 sc->sc_wdcdev.UDMA_cap = 5;
1705 break;
1706 default:
1707 sc->sc_wdcdev.UDMA_cap = 2;
1708 }
1709 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1710 sc->sc_wdcdev.set_modes = piix_setup_channel;
1711 else
1712 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1713 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1714 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1715
1716 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1717 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1718 DEBUG_PROBE);
1719 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1720 WDCDEBUG_PRINT((", sidetim=0x%x",
1721 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1722 DEBUG_PROBE);
1723 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
  1724 			WDCDEBUG_PRINT((", udmareg 0x%x",
1725 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1726 DEBUG_PROBE);
1727 }
1728 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1729 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1730 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1731 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1732 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1733 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1734 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1735 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1736 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1737 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1738 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1739 DEBUG_PROBE);
1740 }
1741
1742 }
1743 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1744
1745 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1746 cp = &sc->pciide_channels[channel];
1747 /* PIIX is compat-only */
1748 if (pciide_chansetup(sc, channel, 0) == 0)
1749 continue;
1750 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1751 if ((PIIX_IDETIM_READ(idetim, channel) &
1752 PIIX_IDETIM_IDE) == 0) {
1753 aprint_normal("%s: %s channel ignored (disabled)\n",
1754 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1755 continue;
1756 }
1757 /* PIIX are compat-only pciide devices */
1758 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1759 if (cp->hw_ok == 0)
1760 continue;
1761 if (pciide_chan_candisable(cp)) {
1762 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1763 channel);
1764 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1765 idetim);
1766 }
1767 pciide_map_compat_intr(pa, cp, channel, 0);
1768 if (cp->hw_ok == 0)
1769 continue;
1770 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1771 }
1772
1773 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1774 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1775 DEBUG_PROBE);
1776 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1777 WDCDEBUG_PRINT((", sidetim=0x%x",
1778 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1779 DEBUG_PROBE);
1780 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
  1781 			WDCDEBUG_PRINT((", udmareg 0x%x",
1782 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1783 DEBUG_PROBE);
1784 }
1785 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1786 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1787 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1788 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1789 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1790 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1791 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1792 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1793 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1794 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1795 DEBUG_PROBE);
1796 }
1797 }
1798 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1799 }
1800
1801 void
1802 piix_setup_channel(chp)
1803 struct channel_softc *chp;
1804 {
1805 u_int8_t mode[2], drive;
1806 u_int32_t oidetim, idetim, idedma_ctl;
1807 struct pciide_channel *cp = (struct pciide_channel*)chp;
1808 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1809 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1810
1811 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1812 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1813 idedma_ctl = 0;
1814
1815 	/* set up new idetim: enable IDE register decoding */
1816 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1817 chp->channel);
1818
1819 /* setup DMA */
1820 pciide_channel_dma_setup(cp);
1821
1822 	/*
1823 	 * Here we have to mess with the drive modes: the PIIX can't have
1824 	 * different timings for the master and slave drives.
1825 	 * We need to find the best combination.
1826 	 */
1827
1828 	/* If both drives support DMA, take the lower mode */
1829 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1830 (drvp[1].drive_flags & DRIVE_DMA)) {
1831 mode[0] = mode[1] =
1832 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1833 drvp[0].DMA_mode = mode[0];
1834 drvp[1].DMA_mode = mode[1];
1835 goto ok;
1836 }
1837 	/*
1838 	 * If only one drive supports DMA, use its mode, and
1839 	 * put the other one in PIO mode 0 if its mode is not compatible.
1840 	 */
1841 if (drvp[0].drive_flags & DRIVE_DMA) {
1842 mode[0] = drvp[0].DMA_mode;
1843 mode[1] = drvp[1].PIO_mode;
1844 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1845 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1846 mode[1] = drvp[1].PIO_mode = 0;
1847 goto ok;
1848 }
1849 if (drvp[1].drive_flags & DRIVE_DMA) {
1850 mode[1] = drvp[1].DMA_mode;
1851 mode[0] = drvp[0].PIO_mode;
1852 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1853 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1854 mode[0] = drvp[0].PIO_mode = 0;
1855 goto ok;
1856 }
1857 	/*
1858 	 * If neither drive uses DMA, take the lower mode, unless
1859 	 * one of them is below PIO mode 2.
1860 	 */
1861 if (drvp[0].PIO_mode < 2) {
1862 mode[0] = drvp[0].PIO_mode = 0;
1863 mode[1] = drvp[1].PIO_mode;
1864 } else if (drvp[1].PIO_mode < 2) {
1865 mode[1] = drvp[1].PIO_mode = 0;
1866 mode[0] = drvp[0].PIO_mode;
1867 } else {
1868 mode[0] = mode[1] =
1869 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1870 drvp[0].PIO_mode = mode[0];
1871 drvp[1].PIO_mode = mode[1];
1872 }
1873 ok:	/* The modes are set up */
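	/*
	 * If either drive uses DMA, program the shared timing fields from
	 * its DMA mode and skip the PIO-only case below.
	 */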
1874 for (drive = 0; drive < 2; drive++) {
1875 if (drvp[drive].drive_flags & DRIVE_DMA) {
1876 idetim |= piix_setup_idetim_timings(
1877 mode[drive], 1, chp->channel);
1878 goto end;
1879 }
1880 }
1881 	/* If we get here, neither drive uses DMA */
1882 if (mode[0] >= 2)
1883 idetim |= piix_setup_idetim_timings(
1884 mode[0], 0, chp->channel);
1885 else
1886 idetim |= piix_setup_idetim_timings(
1887 mode[1], 0, chp->channel);
1888 end:	/*
1889 	 * The timing mode is now set up in the controller.
1890 	 * Enable it per-drive.
1891 	 */
1892 for (drive = 0; drive < 2; drive++) {
1893 /* If no drive, skip */
1894 if ((drvp[drive].drive_flags & DRIVE) == 0)
1895 continue;
1896 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1897 if (drvp[drive].drive_flags & DRIVE_DMA)
1898 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1899 }
1900 if (idedma_ctl != 0) {
1901 /* Add software bits in status register */
1902 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1903 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1904 idedma_ctl);
1905 }
1906 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1907 pciide_print_modes(cp);
1908 }
1909
1910 void
1911 piix3_4_setup_channel(chp)
1912 struct channel_softc *chp;
1913 {
1914 struct ata_drive_datas *drvp;
1915 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1916 struct pciide_channel *cp = (struct pciide_channel*)chp;
1917 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1918 int drive;
1919 int channel = chp->channel;
1920
1921 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1922 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1923 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1924 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1925 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1926 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1927 PIIX_SIDETIM_RTC_MASK(channel));
1928
1929 idedma_ctl = 0;
1930 /* If channel disabled, no need to go further */
1931 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1932 return;
1933 	/* set up new idetim: enable IDE register decoding */
1934 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1935
1936 /* setup DMA if needed */
1937 pciide_channel_dma_setup(cp);
1938
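	/*
	 * For each drive: clear its UDMA enable and timing bits first,
	 * then rebuild the UDMA, multiword DMA and PIO timing fields below.
	 */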
1939 for (drive = 0; drive < 2; drive++) {
1940 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1941 PIIX_UDMATIM_SET(0x3, channel, drive));
1942 drvp = &chp->ch_drive[drive];
1943 /* If no drive, skip */
1944 if ((drvp->drive_flags & DRIVE) == 0)
1945 continue;
1946 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1947 (drvp->drive_flags & DRIVE_UDMA) == 0))
1948 goto pio;
1949
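		/*
		 * On the ICH-family controllers listed below, set
		 * PIIX_CONFIG_PINGPONG (ping-pong buffering) once a
		 * DMA-capable drive is found.
		 */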
1950 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1951 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1952 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1953 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1954 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1955 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1956 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1957 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1958 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1959 ideconf |= PIIX_CONFIG_PINGPONG;
1960 }
1961 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1962 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1963 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1964 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1965 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1966 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1967 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1968 /* setup Ultra/100 */
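			/*
			 * Cap at UDMA2 when the PIIX_CONFIG_CR bit for this
			 * drive is clear (presumably the 80-conductor cable
			 * detect).
			 */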
1969 if (drvp->UDMA_mode > 2 &&
1970 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1971 drvp->UDMA_mode = 2;
1972 if (drvp->UDMA_mode > 4) {
1973 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1974 } else {
1975 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1976 if (drvp->UDMA_mode > 2) {
1977 ideconf |= PIIX_CONFIG_UDMA66(channel,
1978 drive);
1979 } else {
1980 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1981 drive);
1982 }
1983 }
1984 }
1985 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1986 /* setup Ultra/66 */
1987 if (drvp->UDMA_mode > 2 &&
1988 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1989 drvp->UDMA_mode = 2;
1990 if (drvp->UDMA_mode > 2)
1991 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1992 else
1993 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1994 }
1995 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1996 (drvp->drive_flags & DRIVE_UDMA)) {
1997 /* use Ultra/DMA */
1998 drvp->drive_flags &= ~DRIVE_DMA;
1999 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
2000 udmareg |= PIIX_UDMATIM_SET(
2001 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
2002 } else {
2003 /* use Multiword DMA */
2004 drvp->drive_flags &= ~DRIVE_UDMA;
2005 if (drive == 0) {
2006 idetim |= piix_setup_idetim_timings(
2007 drvp->DMA_mode, 1, channel);
2008 } else {
2009 sidetim |= piix_setup_sidetim_timings(
2010 drvp->DMA_mode, 1, channel);
2011 				idetim = PIIX_IDETIM_SET(idetim,
2012 PIIX_IDETIM_SITRE, channel);
2013 }
2014 }
2015 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2016
2017 pio: /* use PIO mode */
2018 idetim |= piix_setup_idetim_drvs(drvp);
2019 if (drive == 0) {
2020 idetim |= piix_setup_idetim_timings(
2021 drvp->PIO_mode, 0, channel);
2022 } else {
2023 sidetim |= piix_setup_sidetim_timings(
2024 drvp->PIO_mode, 0, channel);
2025 			idetim = PIIX_IDETIM_SET(idetim,
2026 PIIX_IDETIM_SITRE, channel);
2027 }
2028 }
2029 if (idedma_ctl != 0) {
2030 /* Add software bits in status register */
2031 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2032 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
2033 idedma_ctl);
2034 }
2035 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
2036 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
2037 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
2038 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
2039 pciide_print_modes(cp);
2040 }
2041
2042
2043 /* setup ISP and RTC fields, based on mode */
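/*
 * ISP is the IORDY sample point and RTC the recovery time; the values come
 * from the piix_isp_pio/piix_isp_dma and piix_rtc_pio/piix_rtc_dma tables.
 */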
2044 static u_int32_t
2045 piix_setup_idetim_timings(mode, dma, channel)
2046 u_int8_t mode;
2047 u_int8_t dma;
2048 u_int8_t channel;
2049 {
2050
2051 if (dma)
2052 return PIIX_IDETIM_SET(0,
2053 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
2054 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
2055 channel);
2056 else
2057 return PIIX_IDETIM_SET(0,
2058 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
2059 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
2060 channel);
2061 }
2062
2063 /* setup DTE, PPE, IE and TIME field based on PIO mode */
2064 static u_int32_t
2065 piix_setup_idetim_drvs(drvp)
2066 struct ata_drive_datas *drvp;
2067 {
2068 u_int32_t ret = 0;
2069 struct channel_softc *chp = drvp->chnl_softc;
2070 u_int8_t channel = chp->channel;
2071 u_int8_t drive = drvp->drive;
2072
2073 	/*
2074 	 * If the drive is using UDMA, the timing setup is independent,
2075 	 * so just check DMA and PIO here.
2076 	 */
2077 if (drvp->drive_flags & DRIVE_DMA) {
2078 /* if mode = DMA mode 0, use compatible timings */
2079 if ((drvp->drive_flags & DRIVE_DMA) &&
2080 drvp->DMA_mode == 0) {
2081 drvp->PIO_mode = 0;
2082 return ret;
2083 }
2084 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2085 		/*
2086 		 * If PIO and DMA timings are the same, use fast timings for
2087 		 * PIO too; otherwise fall back to compatible timings for PIO.
2088 		 */
2089 if ((piix_isp_pio[drvp->PIO_mode] !=
2090 piix_isp_dma[drvp->DMA_mode]) ||
2091 (piix_rtc_pio[drvp->PIO_mode] !=
2092 piix_rtc_dma[drvp->DMA_mode]))
2093 drvp->PIO_mode = 0;
2094 /* if PIO mode <= 2, use compat timings for PIO */
2095 if (drvp->PIO_mode <= 2) {
2096 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2097 channel);
2098 return ret;
2099 }
2100 }
2101
2102 	/*
2103 	 * Now set up PIO modes. If the mode is < 2, use compat timings;
2104 	 * else enable fast timings. Enable IORDY and prefetch/posting
2105 	 * if the PIO mode is >= 3.
2106 	 */
2107
2108 if (drvp->PIO_mode < 2)
2109 return ret;
2110
2111 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2112 if (drvp->PIO_mode >= 3) {
2113 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2114 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2115 }
2116 return ret;
2117 }
2118
2119 /* setup values in SIDETIM registers, based on mode */
2120 static u_int32_t
2121 piix_setup_sidetim_timings(mode, dma, channel)
2122 u_int8_t mode;
2123 u_int8_t dma;
2124 u_int8_t channel;
2125 {
2126 if (dma)
2127 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2128 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2129 else
2130 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2131 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2132 }
2133
2134 void
2135 amd7x6_chip_map(sc, pa)
2136 struct pciide_softc *sc;
2137 struct pci_attach_args *pa;
2138 {
2139 struct pciide_channel *cp;
2140 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2141 int channel;
2142 pcireg_t chanenable;
2143 bus_size_t cmdsize, ctlsize;
2144
2145 if (pciide_chipen(sc, pa) == 0)
2146 return;
2147 aprint_normal("%s: bus-master DMA support present",
2148 sc->sc_wdcdev.sc_dev.dv_xname);
2149 pciide_mapreg_dma(sc, pa);
2150 aprint_normal("\n");
2151 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2152 WDC_CAPABILITY_MODE;
2153 if (sc->sc_dma_ok) {
2154 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2155 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2156 sc->sc_wdcdev.irqack = pciide_irqack;
2157 }
2158 sc->sc_wdcdev.PIO_cap = 4;
2159 sc->sc_wdcdev.DMA_cap = 2;
2160
2161 switch (sc->sc_pci_vendor) {
2162 case PCI_VENDOR_AMD:
2163 switch (sc->sc_pp->ide_product) {
2164 case PCI_PRODUCT_AMD_PBC766_IDE:
2165 case PCI_PRODUCT_AMD_PBC768_IDE:
2166 case PCI_PRODUCT_AMD_PBC8111_IDE:
2167 sc->sc_wdcdev.UDMA_cap = 5;
2168 break;
2169 default:
2170 sc->sc_wdcdev.UDMA_cap = 4;
2171 }
2172 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2173 break;
2174
2175 case PCI_VENDOR_NVIDIA:
2176 switch (sc->sc_pp->ide_product) {
2177 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2178 sc->sc_wdcdev.UDMA_cap = 5;
2179 break;
2180 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2181 sc->sc_wdcdev.UDMA_cap = 6;
2182 break;
2183 }
2184 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2185 break;
2186
2187 default:
2188 panic("amd7x6_chip_map: unknown vendor");
2189 }
2190 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2191 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2192 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2193 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2194 AMD7X6_CHANSTATUS_EN(sc));
2195
2196 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2197 DEBUG_PROBE);
2198 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2199 cp = &sc->pciide_channels[channel];
2200 if (pciide_chansetup(sc, channel, interface) == 0)
2201 continue;
2202
2203 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2204 aprint_normal("%s: %s channel ignored (disabled)\n",
2205 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2206 continue;
2207 }
2208 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2209 pciide_pci_intr);
2210
2211 if (pciide_chan_candisable(cp))
2212 chanenable &= ~AMD7X6_CHAN_EN(channel);
2213 pciide_map_compat_intr(pa, cp, channel, interface);
2214 if (cp->hw_ok == 0)
2215 continue;
2216
2217 amd7x6_setup_channel(&cp->wdc_channel);
2218 }
2219 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2220 chanenable);
2221 return;
2222 }
2223
2224 void
2225 amd7x6_setup_channel(chp)
2226 struct channel_softc *chp;
2227 {
2228 u_int32_t udmatim_reg, datatim_reg;
2229 u_int8_t idedma_ctl;
2230 int mode, drive;
2231 struct ata_drive_datas *drvp;
2232 struct pciide_channel *cp = (struct pciide_channel*)chp;
2233 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2234 #ifndef PCIIDE_AMD756_ENABLEDMA
2235 int rev = PCI_REVISION(
2236 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2237 #endif
2238
2239 idedma_ctl = 0;
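	/*
	 * Read the current data-timing and UDMA registers and clear this
	 * channel's fields before rebuilding them per drive.
	 */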
2240 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2241 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2242 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2243 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2244
2245 /* setup DMA if needed */
2246 pciide_channel_dma_setup(cp);
2247
2248 for (drive = 0; drive < 2; drive++) {
2249 drvp = &chp->ch_drive[drive];
2250 /* If no drive, skip */
2251 if ((drvp->drive_flags & DRIVE) == 0)
2252 continue;
2253 /* add timing values, setup DMA if needed */
2254 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2255 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2256 mode = drvp->PIO_mode;
2257 goto pio;
2258 }
2259 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2260 (drvp->drive_flags & DRIVE_UDMA)) {
2261 /* use Ultra/DMA */
2262 drvp->drive_flags &= ~DRIVE_DMA;
2263 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2264 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2265 AMD7X6_UDMA_TIME(chp->channel, drive,
2266 amd7x6_udma_tim[drvp->UDMA_mode]);
2267 /* can use PIO timings, MW DMA unused */
2268 mode = drvp->PIO_mode;
2269 } else {
2270 /* use Multiword DMA, but only if revision is OK */
2271 drvp->drive_flags &= ~DRIVE_UDMA;
2272 #ifndef PCIIDE_AMD756_ENABLEDMA
2273 			/*
2274 			 * The workaround doesn't seem to be necessary
2275 			 * with all drives, so it can be disabled with
2276 			 * PCIIDE_AMD756_ENABLEDMA. The bug causes a hard
2277 			 * hang if triggered.
2278 			 */
2279 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2280 sc->sc_pp->ide_product ==
2281 PCI_PRODUCT_AMD_PBC756_IDE &&
2282 AMD756_CHIPREV_DISABLEDMA(rev)) {
2283 aprint_normal(
2284 "%s:%d:%d: multi-word DMA disabled due "
2285 "to chip revision\n",
2286 sc->sc_wdcdev.sc_dev.dv_xname,
2287 chp->channel, drive);
2288 mode = drvp->PIO_mode;
2289 drvp->drive_flags &= ~DRIVE_DMA;
2290 goto pio;
2291 }
2292 #endif
2293 /* mode = min(pio, dma+2) */
2294 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2295 mode = drvp->PIO_mode;
2296 else
2297 mode = drvp->DMA_mode + 2;
2298 }
2299 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2300
2301 pio: /* setup PIO mode */
2302 if (mode <= 2) {
2303 drvp->DMA_mode = 0;
2304 drvp->PIO_mode = 0;
2305 mode = 0;
2306 } else {
2307 drvp->PIO_mode = mode;
2308 drvp->DMA_mode = mode - 2;
2309 }
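		/*
		 * Program the active-pulse and recovery counts for this
		 * drive from the per-mode PIO tables.
		 */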
2310 datatim_reg |=
2311 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2312 amd7x6_pio_set[mode]) |
2313 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2314 amd7x6_pio_rec[mode]);
2315 }
2316 if (idedma_ctl != 0) {
2317 /* Add software bits in status register */
2318 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2319 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2320 idedma_ctl);
2321 }
2322 pciide_print_modes(cp);
2323 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2324 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2325 }
2326
2327 void
2328 apollo_chip_map(sc, pa)
2329 struct pciide_softc *sc;
2330 struct pci_attach_args *pa;
2331 {
2332 struct pciide_channel *cp;
2333 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2334 int channel;
2335 u_int32_t ideconf;
2336 bus_size_t cmdsize, ctlsize;
2337 pcitag_t pcib_tag;
2338 pcireg_t pcib_id, pcib_class;
2339
2340 if (pciide_chipen(sc, pa) == 0)
2341 return;
2342 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2343 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2344 /* and read ID and rev of the ISA bridge */
2345 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2346 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2347 aprint_normal(": VIA Technologies ");
2348 switch (PCI_PRODUCT(pcib_id)) {
2349 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2350 aprint_normal("VT82C586 (Apollo VP) ");
2351 		if (PCI_REVISION(pcib_class) >= 0x02) {
2352 aprint_normal("ATA33 controller\n");
2353 sc->sc_wdcdev.UDMA_cap = 2;
2354 } else {
2355 aprint_normal("controller\n");
2356 sc->sc_wdcdev.UDMA_cap = 0;
2357 }
2358 break;
2359 case PCI_PRODUCT_VIATECH_VT82C596A:
2360 aprint_normal("VT82C596A (Apollo Pro) ");
2361 if (PCI_REVISION(pcib_class) >= 0x12) {
2362 aprint_normal("ATA66 controller\n");
2363 sc->sc_wdcdev.UDMA_cap = 4;
2364 } else {
2365 aprint_normal("ATA33 controller\n");
2366 sc->sc_wdcdev.UDMA_cap = 2;
2367 }
2368 break;
2369 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2370 aprint_normal("VT82C686A (Apollo KX133) ");
2371 if (PCI_REVISION(pcib_class) >= 0x40) {
2372 aprint_normal("ATA100 controller\n");
2373 sc->sc_wdcdev.UDMA_cap = 5;
2374 } else {
2375 aprint_normal("ATA66 controller\n");
2376 sc->sc_wdcdev.UDMA_cap = 4;
2377 }
2378 break;
2379 case PCI_PRODUCT_VIATECH_VT8231:
2380 aprint_normal("VT8231 ATA100 controller\n");
2381 sc->sc_wdcdev.UDMA_cap = 5;
2382 break;
2383 case PCI_PRODUCT_VIATECH_VT8233:
2384 aprint_normal("VT8233 ATA100 controller\n");
2385 sc->sc_wdcdev.UDMA_cap = 5;
2386 break;
2387 case PCI_PRODUCT_VIATECH_VT8233A:
2388 aprint_normal("VT8233A ATA133 controller\n");
2389 sc->sc_wdcdev.UDMA_cap = 6;
2390 break;
2391 case PCI_PRODUCT_VIATECH_VT8235:
2392 aprint_normal("VT8235 ATA133 controller\n");
2393 sc->sc_wdcdev.UDMA_cap = 6;
2394 break;
2395 default:
2396 aprint_normal("unknown ATA controller\n");
2397 sc->sc_wdcdev.UDMA_cap = 0;
2398 }
2399
2400 aprint_normal("%s: bus-master DMA support present",
2401 sc->sc_wdcdev.sc_dev.dv_xname);
2402 pciide_mapreg_dma(sc, pa);
2403 aprint_normal("\n");
2404 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2405 WDC_CAPABILITY_MODE;
2406 if (sc->sc_dma_ok) {
2407 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2408 sc->sc_wdcdev.irqack = pciide_irqack;
2409 if (sc->sc_wdcdev.UDMA_cap > 0)
2410 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2411 }
2412 sc->sc_wdcdev.PIO_cap = 4;
2413 sc->sc_wdcdev.DMA_cap = 2;
2414 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2415 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2416 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2417
2418 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2419 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2420 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2421 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2422 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2423 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2424 DEBUG_PROBE);
2425
2426 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2427 cp = &sc->pciide_channels[channel];
2428 if (pciide_chansetup(sc, channel, interface) == 0)
2429 continue;
2430
2431 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2432 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2433 aprint_normal("%s: %s channel ignored (disabled)\n",
2434 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2435 continue;
2436 }
2437 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2438 pciide_pci_intr);
2439 if (cp->hw_ok == 0)
2440 continue;
2441 if (pciide_chan_candisable(cp)) {
2442 ideconf &= ~APO_IDECONF_EN(channel);
2443 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2444 ideconf);
2445 }
2446 pciide_map_compat_intr(pa, cp, channel, interface);
2447
2448 if (cp->hw_ok == 0)
2449 continue;
2450 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2451 }
2452 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2453 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2454 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2455 }
2456
2457 void
2458 apollo_setup_channel(chp)
2459 struct channel_softc *chp;
2460 {
2461 u_int32_t udmatim_reg, datatim_reg;
2462 u_int8_t idedma_ctl;
2463 int mode, drive;
2464 struct ata_drive_datas *drvp;
2465 struct pciide_channel *cp = (struct pciide_channel*)chp;
2466 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2467
2468 idedma_ctl = 0;
2469 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2470 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2471 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2472 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2473
2474 /* setup DMA if needed */
2475 pciide_channel_dma_setup(cp);
2476
2477 for (drive = 0; drive < 2; drive++) {
2478 drvp = &chp->ch_drive[drive];
2479 /* If no drive, skip */
2480 if ((drvp->drive_flags & DRIVE) == 0)
2481 continue;
2482 /* add timing values, setup DMA if needed */
2483 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2484 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2485 mode = drvp->PIO_mode;
2486 goto pio;
2487 }
2488 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2489 (drvp->drive_flags & DRIVE_UDMA)) {
2490 /* use Ultra/DMA */
2491 drvp->drive_flags &= ~DRIVE_DMA;
2492 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2493 APO_UDMA_EN_MTH(chp->channel, drive);
2494 if (sc->sc_wdcdev.UDMA_cap == 6) {
2495 /* 8233a */
2496 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2497 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2498 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2499 /* 686b */
2500 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2501 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2502 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2503 /* 596b or 686a */
2504 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2505 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2506 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2507 } else {
2508 /* 596a or 586b */
2509 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2510 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2511 }
2512 /* can use PIO timings, MW DMA unused */
2513 mode = drvp->PIO_mode;
2514 } else {
2515 /* use Multiword DMA */
2516 drvp->drive_flags &= ~DRIVE_UDMA;
2517 /* mode = min(pio, dma+2) */
2518 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2519 mode = drvp->PIO_mode;
2520 else
2521 mode = drvp->DMA_mode + 2;
2522 }
2523 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2524
2525 pio: /* setup PIO mode */
2526 if (mode <= 2) {
2527 drvp->DMA_mode = 0;
2528 drvp->PIO_mode = 0;
2529 mode = 0;
2530 } else {
2531 drvp->PIO_mode = mode;
2532 drvp->DMA_mode = mode - 2;
2533 }
2534 datatim_reg |=
2535 APO_DATATIM_PULSE(chp->channel, drive,
2536 apollo_pio_set[mode]) |
2537 APO_DATATIM_RECOV(chp->channel, drive,
2538 apollo_pio_rec[mode]);
2539 }
2540 if (idedma_ctl != 0) {
2541 /* Add software bits in status register */
2542 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2543 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2544 idedma_ctl);
2545 }
2546 pciide_print_modes(cp);
2547 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2548 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2549 }
2550
2551 void
2552 apollo_sata_chip_map(sc, pa)
2553 struct pciide_softc *sc;
2554 struct pci_attach_args *pa;
2555 {
2556 struct pciide_channel *cp;
2557 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2558 int channel;
2559 bus_size_t cmdsize, ctlsize;
2560
2561 if (pciide_chipen(sc, pa) == 0)
2562 return;
2563
2564 	if (interface == 0) {
2565 WDCDEBUG_PRINT(("apollo_sata_chip_map interface == 0\n"),
2566 DEBUG_PROBE);
2567 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2568 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2569 }
2570
2571 aprint_normal("%s: bus-master DMA support present",
2572 sc->sc_wdcdev.sc_dev.dv_xname);
2573 pciide_mapreg_dma(sc, pa);
2574 aprint_normal("\n");
2575
2576 if (sc->sc_dma_ok) {
2577 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2578 sc->sc_wdcdev.irqack = pciide_irqack;
2579 }
2580 sc->sc_wdcdev.PIO_cap = 4;
2581 sc->sc_wdcdev.DMA_cap = 2;
2582 sc->sc_wdcdev.UDMA_cap = 6;
2583
2584 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2585 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2586 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2587 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SINGLE_DRIVE;
2588 sc->sc_wdcdev.set_modes = sata_setup_channel;
2589
2590 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2591 cp = &sc->pciide_channels[channel];
2592 if (pciide_chansetup(sc, channel, interface) == 0)
2593 continue;
2594 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2595 pciide_pci_intr);
2596
2597 pciide_map_compat_intr(pa, cp, channel, interface);
2598 sata_setup_channel(&cp->wdc_channel);
2599 }
2600 }
2601
2602 void
2603 cmd_channel_map(pa, sc, channel)
2604 struct pci_attach_args *pa;
2605 struct pciide_softc *sc;
2606 int channel;
2607 {
2608 struct pciide_channel *cp = &sc->pciide_channels[channel];
2609 bus_size_t cmdsize, ctlsize;
2610 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2611 int interface, one_channel;
2612
2613 	/*
2614 	 * The 0648/0649 can be told to identify as a RAID controller.
2615 	 * In this case, we have to fake the interface value.
2616 	 */
2617 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2618 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2619 PCIIDE_INTERFACE_SETTABLE(1);
2620 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2621 CMD_CONF_DSA1)
2622 interface |= PCIIDE_INTERFACE_PCI(0) |
2623 PCIIDE_INTERFACE_PCI(1);
2624 } else {
2625 interface = PCI_INTERFACE(pa->pa_class);
2626 }
2627
2628 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2629 cp->name = PCIIDE_CHANNEL_NAME(channel);
2630 cp->wdc_channel.channel = channel;
2631 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2632
2633 	/*
2634 	 * Older CMD64X controllers don't have independent channels.
2635 	 */
2636 switch (sc->sc_pp->ide_product) {
2637 case PCI_PRODUCT_CMDTECH_649:
2638 one_channel = 0;
2639 break;
2640 default:
2641 one_channel = 1;
2642 break;
2643 }
2644
2645 if (channel > 0 && one_channel) {
2646 cp->wdc_channel.ch_queue =
2647 sc->pciide_channels[0].wdc_channel.ch_queue;
2648 } else {
2649 cp->wdc_channel.ch_queue =
2650 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2651 }
2652 if (cp->wdc_channel.ch_queue == NULL) {
2653 aprint_error("%s %s channel: "
2654 "can't allocate memory for command queue",
2655 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2656 return;
2657 }
2658
2659 aprint_normal("%s: %s channel %s to %s mode\n",
2660 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2661 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2662 "configured" : "wired",
2663 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2664 "native-PCI" : "compatibility");
2665
2666 	/*
2667 	 * With a CMD PCI64x, if we get here, the first channel is enabled:
2668 	 * there's no way to disable the first channel without disabling
2669 	 * the whole device.
2670 	 */
2671 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2672 aprint_normal("%s: %s channel ignored (disabled)\n",
2673 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2674 return;
2675 }
2676
2677 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2678 if (cp->hw_ok == 0)
2679 return;
2680 if (channel == 1) {
2681 if (pciide_chan_candisable(cp)) {
2682 ctrl &= ~CMD_CTRL_2PORT;
2683 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2684 CMD_CTRL, ctrl);
2685 }
2686 }
2687 pciide_map_compat_intr(pa, cp, channel, interface);
2688 }
2689
2690 int
2691 cmd_pci_intr(arg)
2692 void *arg;
2693 {
2694 struct pciide_softc *sc = arg;
2695 struct pciide_channel *cp;
2696 struct channel_softc *wdc_cp;
2697 int i, rv, crv;
2698 u_int32_t priirq, secirq;
2699
2700 rv = 0;
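	/*
	 * The primary channel's interrupt flag is in CMD_CONF
	 * (CMD_CONF_DRV0_INTR) and the secondary's in CMD_ARTTIM23
	 * (CMD_ARTTIM23_IRQ); check the appropriate one below.
	 */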
2701 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2702 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2703 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2704 cp = &sc->pciide_channels[i];
2705 wdc_cp = &cp->wdc_channel;
2706 		/* If a compat channel, skip. */
2707 if (cp->compat)
2708 continue;
2709 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2710 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2711 crv = wdcintr(wdc_cp);
2712 if (crv == 0)
2713 printf("%s:%d: bogus intr\n",
2714 sc->sc_wdcdev.sc_dev.dv_xname, i);
2715 else
2716 rv = 1;
2717 }
2718 }
2719 return rv;
2720 }
2721
2722 void
2723 cmd_chip_map(sc, pa)
2724 struct pciide_softc *sc;
2725 struct pci_attach_args *pa;
2726 {
2727 int channel;
2728
2729 	/*
2730 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2731 	 * and the base address registers can be disabled at the
2732 	 * hardware level. In this case, the device is wired
2733 	 * in compat mode and its first channel is always enabled,
2734 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2735 	 * In fact, it seems that the first channel of the CMD PCI0640
2736 	 * can't be disabled.
2737 	 */
2738
2739 #ifdef PCIIDE_CMD064x_DISABLE
2740 if (pciide_chipen(sc, pa) == 0)
2741 return;
2742 #endif
2743
2744 aprint_normal("%s: hardware does not support DMA\n",
2745 sc->sc_wdcdev.sc_dev.dv_xname);
2746 sc->sc_dma_ok = 0;
2747
2748 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2749 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2750 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2751
2752 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2753 cmd_channel_map(pa, sc, channel);
2754 }
2755 }
2756
2757 void
2758 cmd0643_9_chip_map(sc, pa)
2759 struct pciide_softc *sc;
2760 struct pci_attach_args *pa;
2761 {
2762 struct pciide_channel *cp;
2763 int channel;
2764 pcireg_t rev = PCI_REVISION(pa->pa_class);
2765
2766 	/*
2767 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2768 	 * and the base address registers can be disabled at the
2769 	 * hardware level. In this case, the device is wired
2770 	 * in compat mode and its first channel is always enabled,
2771 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2772 	 * In fact, it seems that the first channel of the CMD PCI0640
2773 	 * can't be disabled.
2774 	 */
2775
2776 #ifdef PCIIDE_CMD064x_DISABLE
2777 if (pciide_chipen(sc, pa) == 0)
2778 return;
2779 #endif
2780 aprint_normal("%s: bus-master DMA support present",
2781 sc->sc_wdcdev.sc_dev.dv_xname);
2782 pciide_mapreg_dma(sc, pa);
2783 aprint_normal("\n");
2784 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2785 WDC_CAPABILITY_MODE;
2786 if (sc->sc_dma_ok) {
2787 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2788 switch (sc->sc_pp->ide_product) {
2789 case PCI_PRODUCT_CMDTECH_649:
2790 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2791 sc->sc_wdcdev.UDMA_cap = 5;
2792 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2793 break;
2794 case PCI_PRODUCT_CMDTECH_648:
2795 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2796 sc->sc_wdcdev.UDMA_cap = 4;
2797 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2798 break;
2799 case PCI_PRODUCT_CMDTECH_646:
2800 if (rev >= CMD0646U2_REV) {
2801 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2802 sc->sc_wdcdev.UDMA_cap = 2;
2803 } else if (rev >= CMD0646U_REV) {
2804 /*
2805 * Linux's driver claims that the 646U is broken
2806 * with UDMA. Only enable it if we know what we're
2807 * doing
2808 */
2809 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2810 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2811 sc->sc_wdcdev.UDMA_cap = 2;
2812 #endif
2813 /* explicitly disable UDMA */
2814 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2815 CMD_UDMATIM(0), 0);
2816 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2817 CMD_UDMATIM(1), 0);
2818 }
2819 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2820 break;
2821 default:
2822 sc->sc_wdcdev.irqack = pciide_irqack;
2823 }
2824 }
2825
2826 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2827 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2828 sc->sc_wdcdev.PIO_cap = 4;
2829 sc->sc_wdcdev.DMA_cap = 2;
2830 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2831
2832 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2833 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2834 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2835 DEBUG_PROBE);
2836
2837 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2838 cp = &sc->pciide_channels[channel];
2839 cmd_channel_map(pa, sc, channel);
2840 if (cp->hw_ok == 0)
2841 continue;
2842 cmd0643_9_setup_channel(&cp->wdc_channel);
2843 }
2844 /*
2845 * note - this also makes sure we clear the irq disable and reset
2846 * bits
2847 */
2848 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2849 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2850 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2851 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2852 DEBUG_PROBE);
2853 }
2854
2855 void
2856 cmd0643_9_setup_channel(chp)
2857 struct channel_softc *chp;
2858 {
2859 struct ata_drive_datas *drvp;
2860 u_int8_t tim;
2861 u_int32_t idedma_ctl, udma_reg;
2862 int drive;
2863 struct pciide_channel *cp = (struct pciide_channel*)chp;
2864 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2865
2866 idedma_ctl = 0;
2867 /* setup DMA if needed */
2868 pciide_channel_dma_setup(cp);
2869
2870 for (drive = 0; drive < 2; drive++) {
2871 drvp = &chp->ch_drive[drive];
2872 /* If no drive, skip */
2873 if ((drvp->drive_flags & DRIVE) == 0)
2874 continue;
2875 /* add timing values, setup DMA if needed */
2876 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2877 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2878 if (drvp->drive_flags & DRIVE_UDMA) {
2879 /* UltraDMA on a 646U2, 0648 or 0649 */
2880 drvp->drive_flags &= ~DRIVE_DMA;
2881 udma_reg = pciide_pci_read(sc->sc_pc,
2882 sc->sc_tag, CMD_UDMATIM(chp->channel));
2883 if (drvp->UDMA_mode > 2 &&
2884 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2885 CMD_BICSR) &
2886 CMD_BICSR_80(chp->channel)) == 0)
2887 drvp->UDMA_mode = 2;
2888 if (drvp->UDMA_mode > 2)
2889 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2890 else if (sc->sc_wdcdev.UDMA_cap > 2)
2891 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2892 udma_reg |= CMD_UDMATIM_UDMA(drive);
2893 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2894 CMD_UDMATIM_TIM_OFF(drive));
2895 udma_reg |=
2896 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2897 CMD_UDMATIM_TIM_OFF(drive));
2898 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2899 CMD_UDMATIM(chp->channel), udma_reg);
2900 } else {
2901 				/*
2902 				 * Use Multiword DMA.
2903 				 * Timings will be used for both PIO and DMA,
2904 				 * so adjust the DMA mode if needed.
2905 				 * If we have a 0646U2/8/9, turn off UDMA.
2906 				 */
2907 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2908 udma_reg = pciide_pci_read(sc->sc_pc,
2909 sc->sc_tag,
2910 CMD_UDMATIM(chp->channel));
2911 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2912 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2913 CMD_UDMATIM(chp->channel),
2914 udma_reg);
2915 }
2916 if (drvp->PIO_mode >= 3 &&
2917 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2918 drvp->DMA_mode = drvp->PIO_mode - 2;
2919 }
2920 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2921 }
2922 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2923 }
2924 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2925 CMD_DATA_TIM(chp->channel, drive), tim);
2926 }
2927 if (idedma_ctl != 0) {
2928 /* Add software bits in status register */
2929 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2930 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2931 idedma_ctl);
2932 }
2933 pciide_print_modes(cp);
2934 }
2935
2936 void
2937 cmd646_9_irqack(chp)
2938 struct channel_softc *chp;
2939 {
2940 u_int32_t priirq, secirq;
2941 struct pciide_channel *cp = (struct pciide_channel*)chp;
2942 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2943
2944 if (chp->channel == 0) {
2945 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2946 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2947 } else {
2948 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2949 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2950 }
2951 pciide_irqack(chp);
2952 }
2953
2954 void
2955 cmd680_chip_map(sc, pa)
2956 struct pciide_softc *sc;
2957 struct pci_attach_args *pa;
2958 {
2959 struct pciide_channel *cp;
2960 int channel;
2961
2962 if (pciide_chipen(sc, pa) == 0)
2963 return;
2964 aprint_normal("%s: bus-master DMA support present",
2965 sc->sc_wdcdev.sc_dev.dv_xname);
2966 pciide_mapreg_dma(sc, pa);
2967 aprint_normal("\n");
2968 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2969 WDC_CAPABILITY_MODE;
2970 if (sc->sc_dma_ok) {
2971 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2972 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2973 sc->sc_wdcdev.UDMA_cap = 6;
2974 sc->sc_wdcdev.irqack = pciide_irqack;
2975 }
2976
2977 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2978 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2979 sc->sc_wdcdev.PIO_cap = 4;
2980 sc->sc_wdcdev.DMA_cap = 2;
2981 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2982
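	/*
	 * Clear the per-channel mode-select registers (0x80 and 0x84) and
	 * set bit 0 of register 0x8a; cmd680_setup_channel() reprograms
	 * the same registers per drive.
	 */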
2983 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2984 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2985 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2986 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2987 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2988 cp = &sc->pciide_channels[channel];
2989 cmd680_channel_map(pa, sc, channel);
2990 if (cp->hw_ok == 0)
2991 continue;
2992 cmd680_setup_channel(&cp->wdc_channel);
2993 }
2994 }
2995
2996 void
2997 cmd680_channel_map(pa, sc, channel)
2998 struct pci_attach_args *pa;
2999 struct pciide_softc *sc;
3000 int channel;
3001 {
3002 struct pciide_channel *cp = &sc->pciide_channels[channel];
3003 bus_size_t cmdsize, ctlsize;
3004 int interface, i, reg;
3005 static const u_int8_t init_val[] =
3006 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
3007 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
3008
3009 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
3010 interface = PCIIDE_INTERFACE_SETTABLE(0) |
3011 PCIIDE_INTERFACE_SETTABLE(1);
3012 interface |= PCIIDE_INTERFACE_PCI(0) |
3013 PCIIDE_INTERFACE_PCI(1);
3014 } else {
3015 interface = PCI_INTERFACE(pa->pa_class);
3016 }
3017
3018 sc->wdc_chanarray[channel] = &cp->wdc_channel;
3019 cp->name = PCIIDE_CHANNEL_NAME(channel);
3020 cp->wdc_channel.channel = channel;
3021 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3022
3023 cp->wdc_channel.ch_queue =
3024 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3025 if (cp->wdc_channel.ch_queue == NULL) {
3026 aprint_error("%s %s channel: "
3027 "can't allocate memory for command queue",
3028 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3029 return;
3030 }
3031
3032 /* XXX */
3033 reg = 0xa2 + channel * 16;
3034 for (i = 0; i < sizeof(init_val); i++)
3035 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
3036
3037 aprint_normal("%s: %s channel %s to %s mode\n",
3038 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
3039 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
3040 "configured" : "wired",
3041 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
3042 "native-PCI" : "compatibility");
3043
3044 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
3045 if (cp->hw_ok == 0)
3046 return;
3047 pciide_map_compat_intr(pa, cp, channel, interface);
3048 }
3049
3050 void
3051 cmd680_setup_channel(chp)
3052 struct channel_softc *chp;
3053 {
3054 struct ata_drive_datas *drvp;
3055 u_int8_t mode, off, scsc;
3056 u_int16_t val;
3057 u_int32_t idedma_ctl;
3058 int drive;
3059 struct pciide_channel *cp = (struct pciide_channel*)chp;
3060 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3061 pci_chipset_tag_t pc = sc->sc_pc;
3062 pcitag_t pa = sc->sc_tag;
3063 static const u_int8_t udma2_tbl[] =
3064 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
3065 static const u_int8_t udma_tbl[] =
3066 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
3067 static const u_int16_t dma_tbl[] =
3068 { 0x2208, 0x10c2, 0x10c1 };
3069 static const u_int16_t pio_tbl[] =
3070 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
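	/*
	 * Per-mode timing values, indexed by mode number: udma2_tbl is used
	 * when the alternate clock is selected (scsc & 0x30), udma_tbl
	 * otherwise; dma_tbl and pio_tbl hold 16-bit multiword DMA and PIO
	 * timing values.
	 */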
3071
3072 idedma_ctl = 0;
3073 pciide_channel_dma_setup(cp);
3074 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
3075
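	/*
	 * Each drive has a 2-bit field in the mode-select register (shifted
	 * by drive * 4): 0x3 selects UDMA, 0x2 multiword DMA and 0x1 PIO.
	 */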
3076 for (drive = 0; drive < 2; drive++) {
3077 drvp = &chp->ch_drive[drive];
3078 /* If no drive, skip */
3079 if ((drvp->drive_flags & DRIVE) == 0)
3080 continue;
3081 mode &= ~(0x03 << (drive * 4));
3082 if (drvp->drive_flags & DRIVE_UDMA) {
3083 drvp->drive_flags &= ~DRIVE_DMA;
3084 off = 0xa0 + chp->channel * 16;
3085 if (drvp->UDMA_mode > 2 &&
3086 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
3087 drvp->UDMA_mode = 2;
3088 scsc = pciide_pci_read(pc, pa, 0x8a);
3089 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
3090 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
3091 scsc = pciide_pci_read(pc, pa, 0x8a);
3092 if ((scsc & 0x30) == 0)
3093 drvp->UDMA_mode = 5;
3094 }
3095 mode |= 0x03 << (drive * 4);
3096 off = 0xac + chp->channel * 16 + drive * 2;
3097 val = pciide_pci_read(pc, pa, off) & ~0x3f;
3098 if (scsc & 0x30)
3099 val |= udma2_tbl[drvp->UDMA_mode];
3100 else
3101 val |= udma_tbl[drvp->UDMA_mode];
3102 pciide_pci_write(pc, pa, off, val);
3103 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3104 } else if (drvp->drive_flags & DRIVE_DMA) {
3105 mode |= 0x02 << (drive * 4);
3106 off = 0xa8 + chp->channel * 16 + drive * 2;
3107 val = dma_tbl[drvp->DMA_mode];
3108 pciide_pci_write(pc, pa, off, val & 0xff);
3109 			pciide_pci_write(pc, pa, off + 1, val >> 8);
3110 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3111 } else {
3112 mode |= 0x01 << (drive * 4);
3113 off = 0xa4 + chp->channel * 16 + drive * 2;
3114 val = pio_tbl[drvp->PIO_mode];
3115 pciide_pci_write(pc, pa, off, val & 0xff);
3116 			pciide_pci_write(pc, pa, off + 1, val >> 8);
3117 }
3118 }
3119
3120 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3121 if (idedma_ctl != 0) {
3122 /* Add software bits in status register */
3123 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3124 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3125 idedma_ctl);
3126 }
3127 pciide_print_modes(cp);
3128 }
3129
3130 void
3131 cmd3112_chip_map(sc, pa)
3132 struct pciide_softc *sc;
3133 struct pci_attach_args *pa;
3134 {
3135 struct pciide_channel *cp;
3136 bus_size_t cmdsize, ctlsize;
3137 pcireg_t interface;
3138 int channel;
3139
3140 if (pciide_chipen(sc, pa) == 0)
3141 return;
3142
3143 aprint_normal("%s: bus-master DMA support present",
3144 sc->sc_wdcdev.sc_dev.dv_xname);
3145 pciide_mapreg_dma(sc, pa);
3146 aprint_normal("\n");
3147
3148 	/*
3149 	 * Revisions <= 0x01 of the 3112 have a bug that can cause data
3150 	 * corruption if DMA transfers cross an 8K boundary. This is
3151 	 * apparently hard to tickle, but we'll go ahead and play it
3152 	 * safe.
3153 	 */
3154 if (PCI_REVISION(pa->pa_class) <= 0x01) {
3155 sc->sc_dma_maxsegsz = 8192;
3156 sc->sc_dma_boundary = 8192;
3157 }
3158
3159 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3160 WDC_CAPABILITY_MODE;
3161 sc->sc_wdcdev.PIO_cap = 4;
3162 if (sc->sc_dma_ok) {
3163 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3164 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3165 sc->sc_wdcdev.irqack = pciide_irqack;
3166 sc->sc_wdcdev.DMA_cap = 2;
3167 sc->sc_wdcdev.UDMA_cap = 6;
3168 }
3169 sc->sc_wdcdev.set_modes = cmd3112_setup_channel;
3170
3171 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3172 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3173
3174 	/*
3175 	 * The 3112 can be told to identify as a RAID controller.
3176 	 * In this case, we have to fake the interface value.
3177 	 */
3178 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3179 interface = PCI_INTERFACE(pa->pa_class);
3180 } else {
3181 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3182 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3183 }
3184
3185 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3186 cp = &sc->pciide_channels[channel];
3187 if (pciide_chansetup(sc, channel, interface) == 0)
3188 continue;
3189 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3190 pciide_pci_intr);
3191 if (cp->hw_ok == 0)
3192 continue;
3193 pciide_map_compat_intr(pa, cp, channel, interface);
3194 cmd3112_setup_channel(&cp->wdc_channel);
3195 }
3196 }
3197
3198 void
3199 cmd3112_setup_channel(chp)
3200 struct channel_softc *chp;
3201 {
3202 struct ata_drive_datas *drvp;
3203 int drive;
3204 u_int32_t idedma_ctl, dtm;
3205 struct pciide_channel *cp = (struct pciide_channel*)chp;
3206 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
3207
3208 /* setup DMA if needed */
3209 pciide_channel_dma_setup(cp);
3210
3211 idedma_ctl = 0;
3212 dtm = 0;
3213
3214 for (drive = 0; drive < 2; drive++) {
3215 drvp = &chp->ch_drive[drive];
3216 /* If no drive, skip */
3217 if ((drvp->drive_flags & DRIVE) == 0)
3218 continue;
3219 if (drvp->drive_flags & DRIVE_UDMA) {
3220 /* use Ultra/DMA */
3221 drvp->drive_flags &= ~DRIVE_DMA;
3222 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3223 dtm |= DTM_IDEx_DMA;
3224 } else if (drvp->drive_flags & DRIVE_DMA) {
3225 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3226 dtm |= DTM_IDEx_DMA;
3227 } else {
3228 dtm |= DTM_IDEx_PIO;
3229 }
3230 }
3231
3232 	/*
3233 	 * Nothing to do to set up modes; it is meaningless in S-ATA
3234 	 * (but many S-ATA drives still want to get the SET_FEATURE
3235 	 * command).
3236 	 */
3237 if (idedma_ctl != 0) {
3238 /* Add software bits in status register */
3239 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3240 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3241 idedma_ctl);
3242 }
3243 pci_conf_write(sc->sc_pc, sc->sc_tag,
3244 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
3245 pciide_print_modes(cp);
3246 }
3247
3248 void
3249 cy693_chip_map(sc, pa)
3250 struct pciide_softc *sc;
3251 struct pci_attach_args *pa;
3252 {
3253 struct pciide_channel *cp;
3254 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3255 bus_size_t cmdsize, ctlsize;
3256
3257 if (pciide_chipen(sc, pa) == 0)
3258 return;
3259 	/*
3260 	 * This chip has 2 PCI IDE functions, one for the primary and one
3261 	 * for the secondary channel, so we need to call
3262 	 * pciide_mapregs_compat() with the real channel.
3263 	 */
3264 if (pa->pa_function == 1) {
3265 sc->sc_cy_compatchan = 0;
3266 } else if (pa->pa_function == 2) {
3267 sc->sc_cy_compatchan = 1;
3268 } else {
3269 aprint_error("%s: unexpected PCI function %d\n",
3270 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3271 return;
3272 }
3273 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
3274 aprint_normal("%s: bus-master DMA support present",
3275 sc->sc_wdcdev.sc_dev.dv_xname);
3276 pciide_mapreg_dma(sc, pa);
3277 } else {
3278 aprint_normal("%s: hardware does not support DMA",
3279 sc->sc_wdcdev.sc_dev.dv_xname);
3280 sc->sc_dma_ok = 0;
3281 }
3282 aprint_normal("\n");
3283
3284 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3285 if (sc->sc_cy_handle == NULL) {
3286 aprint_error("%s: unable to map hyperCache control registers\n",
3287 sc->sc_wdcdev.sc_dev.dv_xname);
3288 sc->sc_dma_ok = 0;
3289 }
3290
3291 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3292 WDC_CAPABILITY_MODE;
3293 if (sc->sc_dma_ok) {
3294 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3295 sc->sc_wdcdev.irqack = pciide_irqack;
3296 }
3297 sc->sc_wdcdev.PIO_cap = 4;
3298 sc->sc_wdcdev.DMA_cap = 2;
3299 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3300
3301 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3302 sc->sc_wdcdev.nchannels = 1;
3303
3304 /* Only one channel for this chip; if we are here it's enabled */
3305 cp = &sc->pciide_channels[0];
3306 sc->wdc_chanarray[0] = &cp->wdc_channel;
3307 cp->name = PCIIDE_CHANNEL_NAME(0);
3308 cp->wdc_channel.channel = 0;
3309 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3310 cp->wdc_channel.ch_queue =
3311 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3312 if (cp->wdc_channel.ch_queue == NULL) {
3313 aprint_error("%s primary channel: "
3314 "can't allocate memory for command queue",
3315 sc->sc_wdcdev.sc_dev.dv_xname);
3316 return;
3317 }
3318 aprint_normal("%s: primary channel %s to ",
3319 sc->sc_wdcdev.sc_dev.dv_xname,
3320 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3321 "configured" : "wired");
3322 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3323 aprint_normal("native-PCI");
3324 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3325 pciide_pci_intr);
3326 } else {
3327 aprint_normal("compatibility");
3328 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3329 &cmdsize, &ctlsize);
3330 }
3331 aprint_normal(" mode\n");
3332 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3333 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3334 wdcattach(&cp->wdc_channel);
3335 if (pciide_chan_candisable(cp)) {
3336 pci_conf_write(sc->sc_pc, sc->sc_tag,
3337 PCI_COMMAND_STATUS_REG, 0);
3338 }
3339 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3340 if (cp->hw_ok == 0)
3341 return;
3342 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3343 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3344 cy693_setup_channel(&cp->wdc_channel);
3345 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3346 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3347 }
3348
3349 void
3350 cy693_setup_channel(chp)
3351 struct channel_softc *chp;
3352 {
3353 struct ata_drive_datas *drvp;
3354 int drive;
3355 u_int32_t cy_cmd_ctrl;
3356 u_int32_t idedma_ctl;
3357 struct pciide_channel *cp = (struct pciide_channel*)chp;
3358 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3359 int dma_mode = -1;
3360
3361 cy_cmd_ctrl = idedma_ctl = 0;
3362
3363 /* setup DMA if needed */
3364 pciide_channel_dma_setup(cp);
3365
3366 for (drive = 0; drive < 2; drive++) {
3367 drvp = &chp->ch_drive[drive];
3368 /* If no drive, skip */
3369 if ((drvp->drive_flags & DRIVE) == 0)
3370 continue;
3371 /* add timing values, setup DMA if needed */
3372 if (drvp->drive_flags & DRIVE_DMA) {
3373 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3374 /* use Multiword DMA */
3375 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3376 dma_mode = drvp->DMA_mode;
3377 }
3378 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3379 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3380 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3381 CY_CMD_CTRL_IOW_REC_OFF(drive));
3382 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3383 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3384 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3385 CY_CMD_CTRL_IOR_REC_OFF(drive));
3386 }
3387 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
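	/*
	 * The channel has a single DMA timing shared by both drives, so
	 * both are set to the slowest multiword DMA mode negotiated above.
	 */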
3388 chp->ch_drive[0].DMA_mode = dma_mode;
3389 chp->ch_drive[1].DMA_mode = dma_mode;
3390
3391 if (dma_mode == -1)
3392 dma_mode = 0;
3393
3394 if (sc->sc_cy_handle != NULL) {
3395 /* Note: `multiple' is implied. */
3396 cy82c693_write(sc->sc_cy_handle,
3397 (sc->sc_cy_compatchan == 0) ?
3398 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3399 }
3400
3401 pciide_print_modes(cp);
3402
3403 if (idedma_ctl != 0) {
3404 /* Add software bits in status register */
3405 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3406 IDEDMA_CTL, idedma_ctl);
3407 }
3408 }
3409
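/*
 * Table of known SiS host-to-PCI bridges: the matching entry (by product
 * ID and minimum revision) determines the IDE controller generation and
 * the maximum UDMA mode to advertise.
 */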
3410 static struct sis_hostbr_type {
3411 u_int16_t id;
3412 u_int8_t rev;
3413 u_int8_t udma_mode;
3414 char *name;
3415 u_int8_t type;
3416 #define SIS_TYPE_NOUDMA 0
3417 #define SIS_TYPE_66 1
3418 #define SIS_TYPE_100OLD 2
3419 #define SIS_TYPE_100NEW 3
3420 #define SIS_TYPE_133OLD 4
3421 #define SIS_TYPE_133NEW 5
3422 #define SIS_TYPE_SOUTH 6
3423 } sis_hostbr_type[] = {
3424 	/* Most of the info here is from sos (at) freebsd.org */
3425 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
3426 #if 0
3427 	/*
3428 	 * Controllers associated with a rev 0x2 530 Host to PCI Bridge
3429 	 * have problems with UDMA (info provided by Christos).
3430 	 */
3431 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
3432 #endif
3433 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
3434 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
3435 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
3436 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
3437 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
3438 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
3439 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
3440 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
3441 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
3442 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
3443 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
3444 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
3445 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
3446 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
3447 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
3448 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
3449 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
3450 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
3451 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
3452 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
3453 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
3454 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
3455 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
3456 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
3457 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
3458 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
3459 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
3460 	/*
3461 	 * From sos (at) freebsd.org: the 0x961 ID will never be found in the real world
3462 	 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
3463 	 */
3464 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
3465 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
3466 };
3467
3468 static struct sis_hostbr_type *sis_hostbr_type_match;
3469
3470 static int
3471 sis_hostbr_match(pa)
3472 struct pci_attach_args *pa;
3473 {
3474 int i;
3475 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3476 return 0;
3477 sis_hostbr_type_match = NULL;
3478 for (i = 0;
3479 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3480 i++) {
3481 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3482 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3483 sis_hostbr_type_match = &sis_hostbr_type[i];
3484 }
3485 return (sis_hostbr_type_match != NULL);
3486 }
3487
3488 static int
sis_south_match(pa)
3489 	struct pci_attach_args *pa;
3490 {
3491 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3492 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3493 PCI_REVISION(pa->pa_class) >= 0x10);
3494 }
3495
3496 void
3497 sis_chip_map(sc, pa)
3498 struct pciide_softc *sc;
3499 struct pci_attach_args *pa;
3500 {
3501 struct pciide_channel *cp;
3502 int channel;
3503 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3504 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3505 pcireg_t rev = PCI_REVISION(pa->pa_class);
3506 bus_size_t cmdsize, ctlsize;
3507
3508 if (pciide_chipen(sc, pa) == 0)
3509 return;
3510 aprint_normal(": Silicon Integrated System ");
3511 pci_find_device(NULL, sis_hostbr_match);
3512 if (sis_hostbr_type_match) {
3513 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
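		/*
		 * South-bridge-style controllers: clear bit 7 of register
		 * 0x57 and re-read the controller's PCI ID.  If it now
		 * reads as SIS_PRODUCT_5518 this is a 96x-class (133NEW)
		 * controller; otherwise the 85C503 south bridge revision
		 * decides between 133OLD and 100NEW below.
		 */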
3514 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3515 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3516 SIS_REG_57) & 0x7f);
3517 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3518 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3519 aprint_normal("96X UDMA%d",
3520 sis_hostbr_type_match->udma_mode);
3521 sc->sis_type = SIS_TYPE_133NEW;
3522 sc->sc_wdcdev.UDMA_cap =
3523 sis_hostbr_type_match->udma_mode;
3524 } else {
3525 if (pci_find_device(NULL, sis_south_match)) {
3526 sc->sis_type = SIS_TYPE_133OLD;
3527 sc->sc_wdcdev.UDMA_cap =
3528 sis_hostbr_type_match->udma_mode;
3529 } else {
3530 sc->sis_type = SIS_TYPE_100NEW;
3531 sc->sc_wdcdev.UDMA_cap =
3532 sis_hostbr_type_match->udma_mode;
3533 }
3534 }
3535 } else {
3536 sc->sis_type = sis_hostbr_type_match->type;
3537 sc->sc_wdcdev.UDMA_cap =
3538 sis_hostbr_type_match->udma_mode;
3539 }
3540 aprint_normal("%s", sis_hostbr_type_match->name);
3541 } else {
3542 aprint_normal("5597/5598");
3543 if (rev >= 0xd0) {
3544 sc->sc_wdcdev.UDMA_cap = 2;
3545 sc->sis_type = SIS_TYPE_66;
3546 } else {
3547 sc->sc_wdcdev.UDMA_cap = 0;
3548 sc->sis_type = SIS_TYPE_NOUDMA;
3549 }
3550 }
3551 aprint_normal(" IDE controller (rev. 0x%02x)\n",
3552 PCI_REVISION(pa->pa_class));
3553 aprint_normal("%s: bus-master DMA support present",
3554 sc->sc_wdcdev.sc_dev.dv_xname);
3555 pciide_mapreg_dma(sc, pa);
3556 aprint_normal("\n");
3557
3558 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3559 WDC_CAPABILITY_MODE;
3560 if (sc->sc_dma_ok) {
3561 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3562 sc->sc_wdcdev.irqack = pciide_irqack;
3563 if (sc->sis_type >= SIS_TYPE_66)
3564 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3565 }
3566
3567 sc->sc_wdcdev.PIO_cap = 4;
3568 sc->sc_wdcdev.DMA_cap = 2;
3569
3570 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3571 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3572 switch(sc->sis_type) {
3573 case SIS_TYPE_NOUDMA:
3574 case SIS_TYPE_66:
3575 case SIS_TYPE_100OLD:
3576 sc->sc_wdcdev.set_modes = sis_setup_channel;
3577 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3578 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3579 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3580 break;
3581 case SIS_TYPE_100NEW:
3582 case SIS_TYPE_133OLD:
3583 sc->sc_wdcdev.set_modes = sis_setup_channel;
3584 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3585 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3586 break;
3587 case SIS_TYPE_133NEW:
3588 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3589 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3590 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3591 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3592 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3593 break;
3594 }
3595
3596
3597 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3598 cp = &sc->pciide_channels[channel];
3599 if (pciide_chansetup(sc, channel, interface) == 0)
3600 continue;
3601 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3602 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3603 aprint_normal("%s: %s channel ignored (disabled)\n",
3604 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3605 continue;
3606 }
3607 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3608 pciide_pci_intr);
3609 if (cp->hw_ok == 0)
3610 continue;
3611 if (pciide_chan_candisable(cp)) {
3612 if (channel == 0)
3613 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3614 else
3615 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3616 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3617 sis_ctr0);
3618 }
3619 pciide_map_compat_intr(pa, cp, channel, interface);
3620 if (cp->hw_ok == 0)
3621 continue;
3622 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3623 }
3624 }
3625
3626 void
3627 sis96x_setup_channel(chp)
3628 struct channel_softc *chp;
3629 {
3630 struct ata_drive_datas *drvp;
3631 int drive;
3632 u_int32_t sis_tim;
3633 u_int32_t idedma_ctl;
3634 int regtim;
3635 struct pciide_channel *cp = (struct pciide_channel*)chp;
3636 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3637
3638 sis_tim = 0;
3639 idedma_ctl = 0;
3640 /* setup DMA if needed */
3641 pciide_channel_dma_setup(cp);
3642
3643 for (drive = 0; drive < 2; drive++) {
3644 regtim = SIS_TIM133(
3645 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3646 chp->channel, drive);
3647 drvp = &chp->ch_drive[drive];
3648 /* If no drive, skip */
3649 if ((drvp->drive_flags & DRIVE) == 0)
3650 continue;
3651 /* add timing values, setup DMA if needed */
3652 if (drvp->drive_flags & DRIVE_UDMA) {
3653 /* use Ultra/DMA */
3654 drvp->drive_flags &= ~DRIVE_DMA;
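			/*
			 * Limit to UDMA2 (Ultra/33) when the cable register
			 * flags this channel, which apparently indicates
			 * that no 80-wire cable was detected.
			 */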
3655 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3656 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3657 if (drvp->UDMA_mode > 2)
3658 drvp->UDMA_mode = 2;
3659 }
3660 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3661 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3662 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3663 } else if (drvp->drive_flags & DRIVE_DMA) {
3664 /*
3665 * use Multiword DMA
3666 * Timings will be used for both PIO and DMA,
3667 * so adjust DMA mode if needed
3668 */
3669 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3670 drvp->PIO_mode = drvp->DMA_mode + 2;
3671 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3672 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3673 drvp->PIO_mode - 2 : 0;
3674 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3675 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3676 } else {
3677 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3678 }
3679 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3680 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3681 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3682 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3683 }
3684 if (idedma_ctl != 0) {
3685 /* Add software bits in status register */
3686 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3687 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3688 idedma_ctl);
3689 }
3690 pciide_print_modes(cp);
3691 }
3692
3693 void
3694 sis_setup_channel(chp)
3695 struct channel_softc *chp;
3696 {
3697 struct ata_drive_datas *drvp;
3698 int drive;
3699 u_int32_t sis_tim;
3700 u_int32_t idedma_ctl;
3701 struct pciide_channel *cp = (struct pciide_channel*)chp;
3702 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3703
3704 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3705 "channel %d 0x%x\n", chp->channel,
3706 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3707 DEBUG_PROBE);
3708 sis_tim = 0;
3709 idedma_ctl = 0;
3710 /* setup DMA if needed */
3711 pciide_channel_dma_setup(cp);
3712
3713 for (drive = 0; drive < 2; drive++) {
3714 drvp = &chp->ch_drive[drive];
3715 /* If no drive, skip */
3716 if ((drvp->drive_flags & DRIVE) == 0)
3717 continue;
3718 /* add timing values, setup DMA if needed */
3719 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3720 (drvp->drive_flags & DRIVE_UDMA) == 0)
3721 goto pio;
3722
3723 if (drvp->drive_flags & DRIVE_UDMA) {
3724 /* use Ultra/DMA */
3725 drvp->drive_flags &= ~DRIVE_DMA;
3726 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3727 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3728 if (drvp->UDMA_mode > 2)
3729 drvp->UDMA_mode = 2;
3730 }
3731 switch (sc->sis_type) {
3732 case SIS_TYPE_66:
3733 case SIS_TYPE_100OLD:
3734 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3735 SIS_TIM66_UDMA_TIME_OFF(drive);
3736 break;
3737 case SIS_TYPE_100NEW:
3738 sis_tim |=
3739 sis_udma100new_tim[drvp->UDMA_mode] <<
3740 SIS_TIM100_UDMA_TIME_OFF(drive);
			break;
3741 case SIS_TYPE_133OLD:
3742 sis_tim |=
3743 sis_udma133old_tim[drvp->UDMA_mode] <<
3744 SIS_TIM100_UDMA_TIME_OFF(drive);
3745 break;
3746 default:
3747 aprint_error("unknown SiS IDE type %d\n",
3748 sc->sis_type);
3749 }
3750 } else {
3751 /*
3752 * use Multiword DMA
3753 * Timings will be used for both PIO and DMA,
3754 * so adjust DMA mode if needed
3755 */
3756 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3757 drvp->PIO_mode = drvp->DMA_mode + 2;
3758 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3759 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3760 drvp->PIO_mode - 2 : 0;
3761 if (drvp->DMA_mode == 0)
3762 drvp->PIO_mode = 0;
3763 }
3764 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3765 pio: switch (sc->sis_type) {
3766 case SIS_TYPE_NOUDMA:
3767 case SIS_TYPE_66:
3768 case SIS_TYPE_100OLD:
3769 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3770 SIS_TIM66_ACT_OFF(drive);
3771 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3772 SIS_TIM66_REC_OFF(drive);
3773 break;
3774 case SIS_TYPE_100NEW:
3775 case SIS_TYPE_133OLD:
3776 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3777 SIS_TIM100_ACT_OFF(drive);
3778 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3779 SIS_TIM100_REC_OFF(drive);
3780 break;
3781 default:
3782 aprint_error("unknown SiS IDE type %d\n",
3783 sc->sis_type);
3784 }
3785 }
3786 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3787 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3788 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3789 if (idedma_ctl != 0) {
3790 /* Add software bits in status register */
3791 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3792 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3793 idedma_ctl);
3794 }
3795 pciide_print_modes(cp);
3796 }
3797
3798 void
3799 acer_chip_map(sc, pa)
3800 struct pciide_softc *sc;
3801 struct pci_attach_args *pa;
3802 {
3803 struct pciide_channel *cp;
3804 int channel;
3805 pcireg_t cr, interface;
3806 bus_size_t cmdsize, ctlsize;
3807 pcireg_t rev = PCI_REVISION(pa->pa_class);
3808
3809 if (pciide_chipen(sc, pa) == 0)
3810 return;
3811 aprint_normal("%s: bus-master DMA support present",
3812 sc->sc_wdcdev.sc_dev.dv_xname);
3813 pciide_mapreg_dma(sc, pa);
3814 aprint_normal("\n");
3815 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3816 WDC_CAPABILITY_MODE;
3817 if (sc->sc_dma_ok) {
3818 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3819 if (rev >= 0x20) {
3820 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3821 if (rev >= 0xC4)
3822 sc->sc_wdcdev.UDMA_cap = 5;
3823 else if (rev >= 0xC2)
3824 sc->sc_wdcdev.UDMA_cap = 4;
3825 else
3826 sc->sc_wdcdev.UDMA_cap = 2;
3827 }
3828 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3829 sc->sc_wdcdev.irqack = pciide_irqack;
3830 }
3831
3832 sc->sc_wdcdev.PIO_cap = 4;
3833 sc->sc_wdcdev.DMA_cap = 2;
3834 sc->sc_wdcdev.set_modes = acer_setup_channel;
3835 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3836 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3837
3838 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3839 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3840 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3841
3842 /* Enable "microsoft register bits" R/W. */
3843 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3844 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3845 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3846 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3847 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3848 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3849 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3850 ~ACER_CHANSTATUSREGS_RO);
3851 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3852 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3853 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3854 /* Don't use cr, re-read the real register content instead */
3855 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3856 PCI_CLASS_REG));
3857
3858 /* From linux: enable "Cable Detection" */
3859 if (rev >= 0xC2) {
3860 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3861 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3862 | ACER_0x4B_CDETECT);
3863 }
3864
3865 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3866 cp = &sc->pciide_channels[channel];
3867 if (pciide_chansetup(sc, channel, interface) == 0)
3868 continue;
3869 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3870 aprint_normal("%s: %s channel ignored (disabled)\n",
3871 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3872 continue;
3873 }
3874 /* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3875 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3876 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3877 if (cp->hw_ok == 0)
3878 continue;
3879 if (pciide_chan_candisable(cp)) {
3880 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3881 pci_conf_write(sc->sc_pc, sc->sc_tag,
3882 PCI_CLASS_REG, cr);
3883 }
3884 pciide_map_compat_intr(pa, cp, channel, interface);
3885 acer_setup_channel(&cp->wdc_channel);
3886 }
3887 }
3888
3889 void
3890 acer_setup_channel(chp)
3891 struct channel_softc *chp;
3892 {
3893 struct ata_drive_datas *drvp;
3894 int drive;
3895 u_int32_t acer_fifo_udma;
3896 u_int32_t idedma_ctl;
3897 struct pciide_channel *cp = (struct pciide_channel*)chp;
3898 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3899
3900 idedma_ctl = 0;
3901 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3902 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3903 acer_fifo_udma), DEBUG_PROBE);
3904 /* setup DMA if needed */
3905 pciide_channel_dma_setup(cp);
3906
3907 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3908 DRIVE_UDMA) { /* check 80 pins cable */
3909 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3910 ACER_0x4A_80PIN(chp->channel)) {
3911 if (chp->ch_drive[0].UDMA_mode > 2)
3912 chp->ch_drive[0].UDMA_mode = 2;
3913 if (chp->ch_drive[1].UDMA_mode > 2)
3914 chp->ch_drive[1].UDMA_mode = 2;
3915 }
3916 }
3917
3918 for (drive = 0; drive < 2; drive++) {
3919 drvp = &chp->ch_drive[drive];
3920 /* If no drive, skip */
3921 if ((drvp->drive_flags & DRIVE) == 0)
3922 continue;
3923 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3924 "channel %d drive %d 0x%x\n", chp->channel, drive,
3925 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3926 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3927 /* clear FIFO/DMA mode */
3928 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3929 ACER_UDMA_EN(chp->channel, drive) |
3930 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3931
3932 /* add timing values, setup DMA if needed */
3933 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3934 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3935 acer_fifo_udma |=
3936 ACER_FTH_OPL(chp->channel, drive, 0x1);
3937 goto pio;
3938 }
3939
3940 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3941 if (drvp->drive_flags & DRIVE_UDMA) {
3942 /* use Ultra/DMA */
3943 drvp->drive_flags &= ~DRIVE_DMA;
3944 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3945 acer_fifo_udma |=
3946 ACER_UDMA_TIM(chp->channel, drive,
3947 acer_udma[drvp->UDMA_mode]);
3948 /* XXX disable if one drive < UDMA3 ? */
3949 if (drvp->UDMA_mode >= 3) {
3950 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3951 ACER_0x4B,
3952 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3953 ACER_0x4B) | ACER_0x4B_UDMA66);
3954 }
3955 } else {
3956 /*
3957 * use Multiword DMA
3958 * Timings will be used for both PIO and DMA,
3959 * so adjust DMA mode if needed
3960 */
3961 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3962 drvp->PIO_mode = drvp->DMA_mode + 2;
3963 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3964 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3965 drvp->PIO_mode - 2 : 0;
3966 if (drvp->DMA_mode == 0)
3967 drvp->PIO_mode = 0;
3968 }
3969 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3970 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3971 ACER_IDETIM(chp->channel, drive),
3972 acer_pio[drvp->PIO_mode]);
3973 }
3974 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3975 acer_fifo_udma), DEBUG_PROBE);
3976 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3977 if (idedma_ctl != 0) {
3978 /* Add software bits in status register */
3979 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3980 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3981 idedma_ctl);
3982 }
3983 pciide_print_modes(cp);
3984 }
3985
3986 int
3987 acer_pci_intr(arg)
3988 void *arg;
3989 {
3990 struct pciide_softc *sc = arg;
3991 struct pciide_channel *cp;
3992 struct channel_softc *wdc_cp;
3993 int i, rv, crv;
3994 u_int32_t chids;
3995
3996 rv = 0;
3997 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3998 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3999 cp = &sc->pciide_channels[i];
4000 wdc_cp = &cp->wdc_channel;
4001 /* If a compat channel, skip. */
4002 if (cp->compat)
4003 continue;
4004 if (chids & ACER_CHIDS_INT(i)) {
4005 crv = wdcintr(wdc_cp);
4006 if (crv == 0)
4007 printf("%s:%d: bogus intr\n",
4008 sc->sc_wdcdev.sc_dev.dv_xname, i);
4009 else
4010 rv = 1;
4011 }
4012 }
4013 return rv;
4014 }
4015
4016 void
4017 hpt_chip_map(sc, pa)
4018 struct pciide_softc *sc;
4019 struct pci_attach_args *pa;
4020 {
4021 struct pciide_channel *cp;
4022 int i, compatchan, revision;
4023 pcireg_t interface;
4024 bus_size_t cmdsize, ctlsize;
4025
4026 if (pciide_chipen(sc, pa) == 0)
4027 return;
4028 revision = PCI_REVISION(pa->pa_class);
4029 aprint_normal(": Triones/Highpoint ");
4030 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4031 aprint_normal("HPT374 IDE Controller\n");
4032 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
4033 aprint_normal("HPT372 IDE Controller\n");
4034 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
4035 if (revision == HPT372_REV)
4036 aprint_normal("HPT372 IDE Controller\n");
4037 else if (revision == HPT370_REV)
4038 aprint_normal("HPT370 IDE Controller\n");
4039 else if (revision == HPT370A_REV)
4040 aprint_normal("HPT370A IDE Controller\n");
4041 else if (revision == HPT366_REV)
4042 aprint_normal("HPT366 IDE Controller\n");
4043 else
4044 aprint_normal("unknown HPT IDE controller rev %d\n",
4045 revision);
4046 } else
4047 aprint_normal("unknown HPT IDE controller 0x%x\n",
4048 sc->sc_pp->ide_product);
4049
4050 /*
4051 * When the chip is in native mode it identifies itself as
4052 * 'misc mass storage'; fake the interface in this case.
4053 */
4054 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4055 interface = PCI_INTERFACE(pa->pa_class);
4056 } else {
4057 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4058 PCIIDE_INTERFACE_PCI(0);
4059 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4060 (revision == HPT370_REV || revision == HPT370A_REV ||
4061 revision == HPT372_REV)) ||
4062 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4063 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4064 interface |= PCIIDE_INTERFACE_PCI(1);
4065 }
4066
4067 aprint_normal("%s: bus-master DMA support present",
4068 sc->sc_wdcdev.sc_dev.dv_xname);
4069 pciide_mapreg_dma(sc, pa);
4070 aprint_normal("\n");
4071 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4072 WDC_CAPABILITY_MODE;
4073 if (sc->sc_dma_ok) {
4074 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4075 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4076 sc->sc_wdcdev.irqack = pciide_irqack;
4077 }
4078 sc->sc_wdcdev.PIO_cap = 4;
4079 sc->sc_wdcdev.DMA_cap = 2;
4080
4081 sc->sc_wdcdev.set_modes = hpt_setup_channel;
4082 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4083 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4084 revision == HPT366_REV) {
4085 sc->sc_wdcdev.UDMA_cap = 4;
4086 /*
4087 * The 366 has 2 PCI IDE functions, one for primary and one
4088 * for secondary. So we need to call pciide_mapregs_compat()
4089 * with the real channel
4090 */
4091 if (pa->pa_function == 0) {
4092 compatchan = 0;
4093 } else if (pa->pa_function == 1) {
4094 compatchan = 1;
4095 } else {
4096 aprint_error("%s: unexpected PCI function %d\n",
4097 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
4098 return;
4099 }
4100 sc->sc_wdcdev.nchannels = 1;
4101 } else {
4102 sc->sc_wdcdev.nchannels = 2;
4103 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
4104 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4105 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4106 revision == HPT372_REV))
4107 sc->sc_wdcdev.UDMA_cap = 6;
4108 else
4109 sc->sc_wdcdev.UDMA_cap = 5;
4110 }
4111 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4112 cp = &sc->pciide_channels[i];
4113 if (sc->sc_wdcdev.nchannels > 1) {
4114 compatchan = i;
4115 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
4116 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
4117 aprint_normal(
4118 "%s: %s channel ignored (disabled)\n",
4119 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4120 continue;
4121 }
4122 }
4123 if (pciide_chansetup(sc, i, interface) == 0)
4124 continue;
4125 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4126 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4127 &ctlsize, hpt_pci_intr);
4128 } else {
4129 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
4130 &cmdsize, &ctlsize);
4131 }
4132 if (cp->hw_ok == 0)
4133 return;
4134 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4135 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4136 wdcattach(&cp->wdc_channel);
4137 hpt_setup_channel(&cp->wdc_channel);
4138 }
4139 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4140 (revision == HPT370_REV || revision == HPT370A_REV ||
4141 revision == HPT372_REV)) ||
4142 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4143 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
4144 /*
4145 * HPT370_REV and higher have a bit to disable interrupts;
4146 * make sure to clear it
4147 */
4148 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
4149 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
4150 ~HPT_CSEL_IRQDIS);
4151 }
4152 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
4153 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4154 revision == HPT372_REV ) ||
4155 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4156 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4157 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
4158 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
4159 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
4160 return;
4161 }
4162
4163 void
4164 hpt_setup_channel(chp)
4165 struct channel_softc *chp;
4166 {
4167 struct ata_drive_datas *drvp;
4168 int drive;
4169 int cable;
4170 u_int32_t before, after;
4171 u_int32_t idedma_ctl;
4172 struct pciide_channel *cp = (struct pciide_channel*)chp;
4173 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4174 int revision =
4175 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
4176
4177 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
4178
4179 /* setup DMA if needed */
4180 pciide_channel_dma_setup(cp);
4181
4182 idedma_ctl = 0;
4183
4184 /* Per drive settings */
4185 for (drive = 0; drive < 2; drive++) {
4186 drvp = &chp->ch_drive[drive];
4187 /* If no drive, skip */
4188 if ((drvp->drive_flags & DRIVE) == 0)
4189 continue;
4190 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
4191 HPT_IDETIM(chp->channel, drive));
4192
4193 /* add timing values, setup DMA if needed */
4194 if (drvp->drive_flags & DRIVE_UDMA) {
4195 /* use Ultra/DMA */
4196 drvp->drive_flags &= ~DRIVE_DMA;
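			/*
			 * Limit to UDMA2 when the cable-ID bit for this
			 * channel is set (apparently a 40-wire cable).
			 */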
4197 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
4198 drvp->UDMA_mode > 2)
4199 drvp->UDMA_mode = 2;
4200 switch (sc->sc_pp->ide_product) {
4201 case PCI_PRODUCT_TRIONES_HPT374:
4202 after = hpt374_udma[drvp->UDMA_mode];
4203 break;
4204 case PCI_PRODUCT_TRIONES_HPT372:
4205 after = hpt372_udma[drvp->UDMA_mode];
4206 break;
4207 case PCI_PRODUCT_TRIONES_HPT366:
4208 default:
4209 switch(revision) {
4210 case HPT372_REV:
4211 after = hpt372_udma[drvp->UDMA_mode];
4212 break;
4213 case HPT370_REV:
4214 case HPT370A_REV:
4215 after = hpt370_udma[drvp->UDMA_mode];
4216 break;
4217 case HPT366_REV:
4218 default:
4219 after = hpt366_udma[drvp->UDMA_mode];
4220 break;
4221 }
4222 }
4223 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4224 } else if (drvp->drive_flags & DRIVE_DMA) {
4225 /*
4226 * use Multiword DMA.
4227 * Timings will be used for both PIO and DMA, so adjust
4228 * DMA mode if needed
4229 */
4230 if (drvp->PIO_mode >= 3 &&
4231 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
4232 drvp->DMA_mode = drvp->PIO_mode - 2;
4233 }
4234 switch (sc->sc_pp->ide_product) {
4235 case PCI_PRODUCT_TRIONES_HPT374:
4236 after = hpt374_dma[drvp->DMA_mode];
4237 break;
4238 case PCI_PRODUCT_TRIONES_HPT372:
4239 after = hpt372_dma[drvp->DMA_mode];
4240 break;
4241 case PCI_PRODUCT_TRIONES_HPT366:
4242 default:
4243 switch(revision) {
4244 case HPT372_REV:
4245 after = hpt372_dma[drvp->DMA_mode];
4246 break;
4247 case HPT370_REV:
4248 case HPT370A_REV:
4249 after = hpt370_dma[drvp->DMA_mode];
4250 break;
4251 case HPT366_REV:
4252 default:
4253 after = hpt366_dma[drvp->DMA_mode];
4254 break;
4255 }
4256 }
4257 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4258 } else {
4259 /* PIO only */
4260 switch (sc->sc_pp->ide_product) {
4261 case PCI_PRODUCT_TRIONES_HPT374:
4262 after = hpt374_pio[drvp->PIO_mode];
4263 break;
4264 case PCI_PRODUCT_TRIONES_HPT372:
4265 after = hpt372_pio[drvp->PIO_mode];
4266 break;
4267 case PCI_PRODUCT_TRIONES_HPT366:
4268 default:
4269 switch(revision) {
4270 case HPT372_REV:
4271 after = hpt372_pio[drvp->PIO_mode];
4272 break;
4273 case HPT370_REV:
4274 case HPT370A_REV:
4275 after = hpt370_pio[drvp->PIO_mode];
4276 break;
4277 case HPT366_REV:
4278 default:
4279 after = hpt366_pio[drvp->PIO_mode];
4280 break;
4281 }
4282 }
4283 }
4284 pci_conf_write(sc->sc_pc, sc->sc_tag,
4285 HPT_IDETIM(chp->channel, drive), after);
4286 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
4287 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
4288 after, before), DEBUG_PROBE);
4289 }
4290 if (idedma_ctl != 0) {
4291 /* Add software bits in status register */
4292 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4293 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4294 idedma_ctl);
4295 }
4296 pciide_print_modes(cp);
4297 }
4298
4299 int
4300 hpt_pci_intr(arg)
4301 void *arg;
4302 {
4303 struct pciide_softc *sc = arg;
4304 struct pciide_channel *cp;
4305 struct channel_softc *wdc_cp;
4306 int rv = 0;
4307 int dmastat, i, crv;
4308
4309 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4310 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4311 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
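		/*
		 * Only service channels whose DMA status shows an
		 * interrupt pending and no transfer active.
		 */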
4312 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4313 IDEDMA_CTL_INTR)
4314 continue;
4315 cp = &sc->pciide_channels[i];
4316 wdc_cp = &cp->wdc_channel;
4317 crv = wdcintr(wdc_cp);
4318 if (crv == 0) {
4319 printf("%s:%d: bogus intr\n",
4320 sc->sc_wdcdev.sc_dev.dv_xname, i);
4321 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4322 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4323 } else
4324 rv = 1;
4325 }
4326 return rv;
4327 }
4328
4329
4330 /* Macros to test product */
4331 #define PDC_IS_262(sc) \
4332 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
4333 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4334 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4335 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4336 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4337 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4338 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4339 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4340 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4341 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4342 #define PDC_IS_265(sc) \
4343 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4344 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4345 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4346 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4347 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4348 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4349 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4350 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4351 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4352 #define PDC_IS_268(sc) \
4353 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4354 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4355 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4356 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4357 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4358 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4359 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4360 #define PDC_IS_276(sc) \
4361 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4362 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4363 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4364 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4365 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4366
4367 void
4368 pdc202xx_chip_map(sc, pa)
4369 struct pciide_softc *sc;
4370 struct pci_attach_args *pa;
4371 {
4372 struct pciide_channel *cp;
4373 int channel;
4374 pcireg_t interface, st, mode;
4375 bus_size_t cmdsize, ctlsize;
4376
4377 if (!PDC_IS_268(sc)) {
4378 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4379 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
4380 st), DEBUG_PROBE);
4381 }
4382 if (pciide_chipen(sc, pa) == 0)
4383 return;
4384
4385 /* turn off RAID mode */
4386 if (!PDC_IS_268(sc))
4387 st &= ~PDC2xx_STATE_IDERAID;
4388
4389 /*
4390 * We can't rely on the PCI_CLASS_REG content if the chip was in
4391 * RAID mode; we have to fake the interface
4392 */
4393 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
4394 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
4395 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4396
4397 aprint_normal("%s: bus-master DMA support present",
4398 sc->sc_wdcdev.sc_dev.dv_xname);
4399 pciide_mapreg_dma(sc, pa);
4400 aprint_normal("\n");
4401 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4402 WDC_CAPABILITY_MODE;
4403 if (sc->sc_dma_ok) {
4404 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4405 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4406 sc->sc_wdcdev.irqack = pciide_irqack;
4407 }
4408 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
4409 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
4410 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
4411 sc->sc_wdcdev.PIO_cap = 4;
4412 sc->sc_wdcdev.DMA_cap = 2;
4413 if (PDC_IS_276(sc))
4414 sc->sc_wdcdev.UDMA_cap = 6;
4415 else if (PDC_IS_265(sc))
4416 sc->sc_wdcdev.UDMA_cap = 5;
4417 else if (PDC_IS_262(sc))
4418 sc->sc_wdcdev.UDMA_cap = 4;
4419 else
4420 sc->sc_wdcdev.UDMA_cap = 2;
4421 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
4422 pdc20268_setup_channel : pdc202xx_setup_channel;
4423 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4424 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4425
4426 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
4427 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
4428 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
4429 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
4430 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
4431 }
4432
4433 if (!PDC_IS_268(sc)) {
4434 /* setup failsafe defaults */
4435 mode = 0;
4436 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
4437 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
4438 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
4439 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
4440 for (channel = 0;
4441 channel < sc->sc_wdcdev.nchannels;
4442 channel++) {
4443 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4444 "drive 0 initial timings 0x%x, now 0x%x\n",
4445 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4446 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
4447 DEBUG_PROBE);
4448 pci_conf_write(sc->sc_pc, sc->sc_tag,
4449 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
4450 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4451 "drive 1 initial timings 0x%x, now 0x%x\n",
4452 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4453 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
4454 pci_conf_write(sc->sc_pc, sc->sc_tag,
4455 PDC2xx_TIM(channel, 1), mode);
4456 }
4457
4458 mode = PDC2xx_SCR_DMA;
4459 if (PDC_IS_265(sc)) {
4460 mode = PDC2xx_SCR_SET_GEN(mode, PDC265_SCR_GEN_LAT);
4461 } else if (PDC_IS_262(sc)) {
4462 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
4463 } else {
4464 /* the BIOS set it up this way */
4465 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
4466 }
4467 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
4468 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
4469 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
4470 "now 0x%x\n",
4471 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4472 PDC2xx_SCR),
4473 mode), DEBUG_PROBE);
4474 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4475 PDC2xx_SCR, mode);
4476
4477 /* controller initial state register is OK even without BIOS */
4478 /* Set DMA mode to IDE DMA compatibility */
4479 mode =
4480 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
4481 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
4482 DEBUG_PROBE);
4483 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
4484 mode | 0x1);
4485 mode =
4486 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
4487 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
4488 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
4489 mode | 0x1);
4490 }
4491
4492 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4493 cp = &sc->pciide_channels[channel];
4494 if (pciide_chansetup(sc, channel, interface) == 0)
4495 continue;
4496 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
4497 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
4498 aprint_normal("%s: %s channel ignored (disabled)\n",
4499 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4500 continue;
4501 }
4502 if (PDC_IS_265(sc))
4503 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4504 pdc20265_pci_intr);
4505 else
4506 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4507 pdc202xx_pci_intr);
4508 if (cp->hw_ok == 0)
4509 continue;
4510 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
4511 st &= ~(PDC_IS_262(sc) ?
4512 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
4513 pciide_map_compat_intr(pa, cp, channel, interface);
4514 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
4515 }
4516 if (!PDC_IS_268(sc)) {
4517 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4518 "0x%x\n", st), DEBUG_PROBE);
4519 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4520 }
4521 return;
4522 }
4523
4524 void
4525 pdc202xx_setup_channel(chp)
4526 struct channel_softc *chp;
4527 {
4528 struct ata_drive_datas *drvp;
4529 int drive;
4530 pcireg_t mode, st;
4531 u_int32_t idedma_ctl, scr, atapi;
4532 struct pciide_channel *cp = (struct pciide_channel*)chp;
4533 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4534 int channel = chp->channel;
4535
4536 /* setup DMA if needed */
4537 pciide_channel_dma_setup(cp);
4538
4539 idedma_ctl = 0;
4540 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4541 sc->sc_wdcdev.sc_dev.dv_xname,
4542 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4543 DEBUG_PROBE);
4544
4545 /* Per channel settings */
4546 if (PDC_IS_262(sc)) {
4547 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4548 PDC262_U66);
4549 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4550 /* Trim UDMA mode */
4551 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4552 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4553 chp->ch_drive[0].UDMA_mode <= 2) ||
4554 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4555 chp->ch_drive[1].UDMA_mode <= 2)) {
4556 if (chp->ch_drive[0].UDMA_mode > 2)
4557 chp->ch_drive[0].UDMA_mode = 2;
4558 if (chp->ch_drive[1].UDMA_mode > 2)
4559 chp->ch_drive[1].UDMA_mode = 2;
4560 }
4561 /* Set U66 if needed */
4562 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4563 chp->ch_drive[0].UDMA_mode > 2) ||
4564 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4565 chp->ch_drive[1].UDMA_mode > 2))
4566 scr |= PDC262_U66_EN(channel);
4567 else
4568 scr &= ~PDC262_U66_EN(channel);
4569 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4570 PDC262_U66, scr);
4571 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4572 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4573 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4574 PDC262_ATAPI(channel))), DEBUG_PROBE);
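		/*
		 * Enable UDMA for ATAPI on this channel unless exactly one
		 * drive runs Ultra-DMA while the other runs multiword DMA.
		 */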
4575 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4576 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4577 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4578 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4579 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4580 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4581 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4582 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4583 atapi = 0;
4584 else
4585 atapi = PDC262_ATAPI_UDMA;
4586 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4587 PDC262_ATAPI(channel), atapi);
4588 }
4589 }
4590 for (drive = 0; drive < 2; drive++) {
4591 drvp = &chp->ch_drive[drive];
4592 /* If no drive, skip */
4593 if ((drvp->drive_flags & DRIVE) == 0)
4594 continue;
4595 mode = 0;
4596 if (drvp->drive_flags & DRIVE_UDMA) {
4597 /* use Ultra/DMA */
4598 drvp->drive_flags &= ~DRIVE_DMA;
4599 mode = PDC2xx_TIM_SET_MB(mode,
4600 pdc2xx_udma_mb[drvp->UDMA_mode]);
4601 mode = PDC2xx_TIM_SET_MC(mode,
4602 pdc2xx_udma_mc[drvp->UDMA_mode]);
4603 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4604 } else if (drvp->drive_flags & DRIVE_DMA) {
4605 mode = PDC2xx_TIM_SET_MB(mode,
4606 pdc2xx_dma_mb[drvp->DMA_mode]);
4607 mode = PDC2xx_TIM_SET_MC(mode,
4608 pdc2xx_dma_mc[drvp->DMA_mode]);
4609 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4610 } else {
4611 mode = PDC2xx_TIM_SET_MB(mode,
4612 pdc2xx_dma_mb[0]);
4613 mode = PDC2xx_TIM_SET_MC(mode,
4614 pdc2xx_dma_mc[0]);
4615 }
4616 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4617 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4618 if (drvp->drive_flags & DRIVE_ATA)
4619 mode |= PDC2xx_TIM_PRE;
4620 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4621 if (drvp->PIO_mode >= 3) {
4622 mode |= PDC2xx_TIM_IORDY;
4623 if (drive == 0)
4624 mode |= PDC2xx_TIM_IORDYp;
4625 }
4626 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4627 "timings 0x%x\n",
4628 sc->sc_wdcdev.sc_dev.dv_xname,
4629 chp->channel, drive, mode), DEBUG_PROBE);
4630 pci_conf_write(sc->sc_pc, sc->sc_tag,
4631 PDC2xx_TIM(chp->channel, drive), mode);
4632 }
4633 if (idedma_ctl != 0) {
4634 /* Add software bits in status register */
4635 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4636 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4637 idedma_ctl);
4638 }
4639 pciide_print_modes(cp);
4640 }
4641
4642 void
4643 pdc20268_setup_channel(chp)
4644 struct channel_softc *chp;
4645 {
4646 struct ata_drive_datas *drvp;
4647 int drive;
4648 u_int32_t idedma_ctl;
4649 struct pciide_channel *cp = (struct pciide_channel*)chp;
4650 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4651 int u100;
4652
4653 /* setup DMA if needed */
4654 pciide_channel_dma_setup(cp);
4655
4656 idedma_ctl = 0;
4657
4658 /* I don't know what this is for, FreeBSD does it ... */
4659 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4660 IDEDMA_CMD + 0x1 + IDEDMA_SCH_OFFSET * chp->channel, 0x0b);
4661
4662 /*
4663 * cable type detect, from FreeBSD
4664 */
4665 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4666 IDEDMA_CMD + 0x3 + IDEDMA_SCH_OFFSET * chp->channel) & 0x04) ?
4667 0 : 1;
4668
4669 for (drive = 0; drive < 2; drive++) {
4670 drvp = &chp->ch_drive[drive];
4671 /* If no drive, skip */
4672 if ((drvp->drive_flags & DRIVE) == 0)
4673 continue;
4674 if (drvp->drive_flags & DRIVE_UDMA) {
4675 /* use Ultra/DMA */
4676 drvp->drive_flags &= ~DRIVE_DMA;
4677 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4678 if (drvp->UDMA_mode > 2 && u100 == 0)
4679 drvp->UDMA_mode = 2;
4680 } else if (drvp->drive_flags & DRIVE_DMA) {
4681 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4682 }
4683 }
4684 /* nothing to do to set up modes; the controller snoops the SET_FEATURES command */
4685 if (idedma_ctl != 0) {
4686 /* Add software bits in status register */
4687 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4688 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4689 idedma_ctl);
4690 }
4691 pciide_print_modes(cp);
4692 }
4693
4694 int
4695 pdc202xx_pci_intr(arg)
4696 void *arg;
4697 {
4698 struct pciide_softc *sc = arg;
4699 struct pciide_channel *cp;
4700 struct channel_softc *wdc_cp;
4701 int i, rv, crv;
4702 u_int32_t scr;
4703
4704 rv = 0;
4705 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4706 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4707 cp = &sc->pciide_channels[i];
4708 wdc_cp = &cp->wdc_channel;
4709 /* If a compat channel, skip. */
4710 if (cp->compat)
4711 continue;
4712 if (scr & PDC2xx_SCR_INT(i)) {
4713 crv = wdcintr(wdc_cp);
4714 if (crv == 0)
4715 printf("%s:%d: bogus intr (reg 0x%x)\n",
4716 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4717 else
4718 rv = 1;
4719 }
4720 }
4721 return rv;
4722 }
4723
4724 int
4725 pdc20265_pci_intr(arg)
4726 void *arg;
4727 {
4728 struct pciide_softc *sc = arg;
4729 struct pciide_channel *cp;
4730 struct channel_softc *wdc_cp;
4731 int i, rv, crv;
4732 u_int32_t dmastat;
4733
4734 rv = 0;
4735 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4736 cp = &sc->pciide_channels[i];
4737 wdc_cp = &cp->wdc_channel;
4738 /* If a compat channel, skip. */
4739 if (cp->compat)
4740 continue;
4741 /*
4742 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
4743 * but it does assert INT in IDEDMA_CTL even for non-DMA ops.
4744 * So use that instead (it requires 2 register reads instead of 1,
4745 * but we can't do it any other way).
4746 */
4747 dmastat = bus_space_read_1(sc->sc_dma_iot,
4748 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4749 if((dmastat & IDEDMA_CTL_INTR) == 0)
4750 continue;
4751 crv = wdcintr(wdc_cp);
4752 if (crv == 0)
4753 printf("%s:%d: bogus intr\n",
4754 sc->sc_wdcdev.sc_dev.dv_xname, i);
4755 else
4756 rv = 1;
4757 }
4758 return rv;
4759 }
4760
4761 static void
4762 pdc20262_dma_start(v, channel, drive)
4763 void *v;
4764 int channel, drive;
4765 {
4766 struct pciide_softc *sc = v;
4767 struct pciide_dma_maps *dma_maps =
4768 &sc->pciide_channels[channel].dma_maps[drive];
4769 int atapi;
4770
4771 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
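	/*
	 * For LBA48 transfers, program the channel's ATAPI register with
	 * the transfer direction and half the byte count before starting
	 * the DMA engine.
	 */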
4772 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4773 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4774 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4775 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4776 PDC262_ATAPI(channel), atapi);
4777 }
4778
4779 pciide_dma_start(v, channel, drive);
4780 }
4781
4782 int
4783 pdc20262_dma_finish(v, channel, drive, force)
4784 void *v;
4785 int channel, drive;
4786 int force;
4787 {
4788 struct pciide_softc *sc = v;
4789 struct pciide_dma_maps *dma_maps =
4790 &sc->pciide_channels[channel].dma_maps[drive];
4791 struct channel_softc *chp;
4792 int atapi, error;
4793
4794 error = pciide_dma_finish(v, channel, drive, force);
4795
4796 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
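		/*
		 * The LBA48 transfer overrode the per-channel ATAPI
		 * register in pdc20262_dma_start(); restore the ATAPI/UDMA
		 * setting for the channel, mirroring the logic in
		 * pdc202xx_setup_channel().
		 */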
4797 chp = sc->wdc_chanarray[channel];
4798 atapi = 0;
4799 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4800 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4801 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4802 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4803 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4804 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4805 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4806 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4807 atapi = PDC262_ATAPI_UDMA;
4808 }
4809 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4810 PDC262_ATAPI(channel), atapi);
4811 }
4812
4813 return error;
4814 }
4815
4816 void
4817 opti_chip_map(sc, pa)
4818 struct pciide_softc *sc;
4819 struct pci_attach_args *pa;
4820 {
4821 struct pciide_channel *cp;
4822 bus_size_t cmdsize, ctlsize;
4823 pcireg_t interface;
4824 u_int8_t init_ctrl;
4825 int channel;
4826
4827 if (pciide_chipen(sc, pa) == 0)
4828 return;
4829 aprint_normal("%s: bus-master DMA support present",
4830 sc->sc_wdcdev.sc_dev.dv_xname);
4831
4832 /*
4833 * XXXSCW:
4834 * There seem to be a couple of buggy revisions/implementations
4835 * of the OPTi pciide chipset. This kludge seems to fix one of
4836 * the reported problems (PR/11644) but still fails for the
4837 * other (PR/13151), although the latter may be due to other
4838 * issues too...
4839 */
4840 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4841 aprint_normal(" but disabled due to chip rev. <= 0x12");
4842 sc->sc_dma_ok = 0;
4843 } else
4844 pciide_mapreg_dma(sc, pa);
4845
4846 aprint_normal("\n");
4847
4848 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4849 WDC_CAPABILITY_MODE;
4850 sc->sc_wdcdev.PIO_cap = 4;
4851 if (sc->sc_dma_ok) {
4852 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4853 sc->sc_wdcdev.irqack = pciide_irqack;
4854 sc->sc_wdcdev.DMA_cap = 2;
4855 }
4856 sc->sc_wdcdev.set_modes = opti_setup_channel;
4857
4858 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4859 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4860
4861 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4862 OPTI_REG_INIT_CONTROL);
4863
4864 interface = PCI_INTERFACE(pa->pa_class);
4865
4866 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4867 cp = &sc->pciide_channels[channel];
4868 if (pciide_chansetup(sc, channel, interface) == 0)
4869 continue;
4870 if (channel == 1 &&
4871 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4872 aprint_normal("%s: %s channel ignored (disabled)\n",
4873 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4874 continue;
4875 }
4876 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4877 pciide_pci_intr);
4878 if (cp->hw_ok == 0)
4879 continue;
4880 pciide_map_compat_intr(pa, cp, channel, interface);
4881 if (cp->hw_ok == 0)
4882 continue;
4883 opti_setup_channel(&cp->wdc_channel);
4884 }
4885 }
4886
4887 void
4888 opti_setup_channel(chp)
4889 struct channel_softc *chp;
4890 {
4891 struct ata_drive_datas *drvp;
4892 struct pciide_channel *cp = (struct pciide_channel*)chp;
4893 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4894 int drive, spd;
4895 int mode[2];
4896 u_int8_t rv, mr;
4897
4898 /*
4899 * The `Delay' and `Address Setup Time' fields of the
4900 * Miscellaneous Register are always zero initially.
4901 */
4902 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4903 mr &= ~(OPTI_MISC_DELAY_MASK |
4904 OPTI_MISC_ADDR_SETUP_MASK |
4905 OPTI_MISC_INDEX_MASK);
4906
4907 /* Prime the control register before setting timing values */
4908 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4909
4910 /* Determine the clockrate of the PCIbus the chip is attached to */
4911 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4912 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4913
4914 /* setup DMA if needed */
4915 pciide_channel_dma_setup(cp);
4916
4917 for (drive = 0; drive < 2; drive++) {
4918 drvp = &chp->ch_drive[drive];
4919 /* If no drive, skip */
4920 if ((drvp->drive_flags & DRIVE) == 0) {
4921 mode[drive] = -1;
4922 continue;
4923 }
4924
4925 if ((drvp->drive_flags & DRIVE_DMA)) {
4926 /*
4927 * Timings will be used for both PIO and DMA,
4928 * so adjust DMA mode if needed
4929 */
4930 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4931 drvp->PIO_mode = drvp->DMA_mode + 2;
4932 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4933 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4934 drvp->PIO_mode - 2 : 0;
4935 if (drvp->DMA_mode == 0)
4936 drvp->PIO_mode = 0;
4937
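			/*
			 * DMA modes apparently index the timing tables past
			 * the five PIO entries, hence "DMA_mode + 5".
			 */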
4938 mode[drive] = drvp->DMA_mode + 5;
4939 } else
4940 mode[drive] = drvp->PIO_mode;
4941
4942 if (drive && mode[0] >= 0 &&
4943 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4944 /*
4945 * Can't have two drives using different values
4946 * for `Address Setup Time'.
4947 * Slow down the faster drive to compensate.
4948 */
4949 int d = (opti_tim_as[spd][mode[0]] >
4950 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4951
4952 mode[d] = mode[1-d];
4953 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4954 chp->ch_drive[d].DMA_mode = 0;
4955 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4956 }
4957 }
4958
4959 for (drive = 0; drive < 2; drive++) {
4960 int m;
4961 if ((m = mode[drive]) < 0)
4962 continue;
4963
4964 /* Set the Address Setup Time and select appropriate index */
4965 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4966 rv |= OPTI_MISC_INDEX(drive);
4967 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4968
4969 /* Set the pulse width and recovery timing parameters */
4970 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4971 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4972 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4973 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4974
4975 /* Set the Enhanced Mode register appropriately */
4976 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4977 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4978 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4979 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4980 }
4981
4982 /* Finally, enable the timings */
4983 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4984
4985 pciide_print_modes(cp);
4986 }
4987
4988 #define ACARD_IS_850(sc) \
4989 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4990
4991 void
4992 acard_chip_map(sc, pa)
4993 struct pciide_softc *sc;
4994 struct pci_attach_args *pa;
4995 {
4996 struct pciide_channel *cp;
4997 int i;
4998 pcireg_t interface;
4999 bus_size_t cmdsize, ctlsize;
5000
5001 if (pciide_chipen(sc, pa) == 0)
5002 return;
5003
5004 /*
5005 * When the chip is in native mode it identifies itself as
5006 * 'misc mass storage'; fake the interface in this case.
5007 */
5008 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
5009 interface = PCI_INTERFACE(pa->pa_class);
5010 } else {
5011 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
5012 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
5013 }
5014
5015 aprint_normal("%s: bus-master DMA support present",
5016 sc->sc_wdcdev.sc_dev.dv_xname);
5017 pciide_mapreg_dma(sc, pa);
5018 aprint_normal("\n");
5019 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5020 WDC_CAPABILITY_MODE;
5021
5022 if (sc->sc_dma_ok) {
5023 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5024 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5025 sc->sc_wdcdev.irqack = pciide_irqack;
5026 }
5027 sc->sc_wdcdev.PIO_cap = 4;
5028 sc->sc_wdcdev.DMA_cap = 2;
5029 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
5030
5031 sc->sc_wdcdev.set_modes = acard_setup_channel;
5032 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5033 sc->sc_wdcdev.nchannels = 2;
5034
5035 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5036 cp = &sc->pciide_channels[i];
5037 if (pciide_chansetup(sc, i, interface) == 0)
5038 continue;
5039 if (interface & PCIIDE_INTERFACE_PCI(i)) {
5040 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
5041 &ctlsize, pciide_pci_intr);
5042 } else {
5043 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
5044 &cmdsize, &ctlsize);
5045 }
5046 if (cp->hw_ok == 0)
5047 return;
5048 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
5049 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
5050 wdcattach(&cp->wdc_channel);
5051 acard_setup_channel(&cp->wdc_channel);
5052 }
5053 if (!ACARD_IS_850(sc)) {
5054 u_int32_t reg;
5055 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
5056 reg &= ~ATP860_CTRL_INT;
5057 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
5058 }
5059 }
5060
5061 void
5062 acard_setup_channel(chp)
5063 struct channel_softc *chp;
5064 {
5065 struct ata_drive_datas *drvp;
5066 struct pciide_channel *cp = (struct pciide_channel*)chp;
5067 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5068 int channel = chp->channel;
5069 int drive;
5070 u_int32_t idetime, udma_mode;
5071 u_int32_t idedma_ctl;
5072
5073 /* setup DMA if needed */
5074 pciide_channel_dma_setup(cp);
5075
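	/*
	 * The ATP850 uses a per-channel IDETIME register and its own UDMA
	 * register layout; the ATP860 and later pack both channels into
	 * shared IDETIME/UDMA registers, masked per channel below.
	 */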
5076 if (ACARD_IS_850(sc)) {
5077 idetime = 0;
5078 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
5079 udma_mode &= ~ATP850_UDMA_MASK(channel);
5080 } else {
5081 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
5082 idetime &= ~ATP860_SETTIME_MASK(channel);
5083 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
5084 udma_mode &= ~ATP860_UDMA_MASK(channel);
5085
5086 /* check 80 pins cable */
5087 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
5088 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
5089 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5090 & ATP860_CTRL_80P(chp->channel)) {
5091 if (chp->ch_drive[0].UDMA_mode > 2)
5092 chp->ch_drive[0].UDMA_mode = 2;
5093 if (chp->ch_drive[1].UDMA_mode > 2)
5094 chp->ch_drive[1].UDMA_mode = 2;
5095 }
5096 }
5097 }
5098
5099 idedma_ctl = 0;
5100
5101 /* Per drive settings */
5102 for (drive = 0; drive < 2; drive++) {
5103 drvp = &chp->ch_drive[drive];
5104 /* If no drive, skip */
5105 if ((drvp->drive_flags & DRIVE) == 0)
5106 continue;
5107 /* add timing values, setup DMA if needed */
5108 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5109 (drvp->drive_flags & DRIVE_UDMA)) {
5110 /* use Ultra/DMA */
5111 if (ACARD_IS_850(sc)) {
5112 idetime |= ATP850_SETTIME(drive,
5113 acard_act_udma[drvp->UDMA_mode],
5114 acard_rec_udma[drvp->UDMA_mode]);
5115 udma_mode |= ATP850_UDMA_MODE(channel, drive,
5116 acard_udma_conf[drvp->UDMA_mode]);
5117 } else {
5118 idetime |= ATP860_SETTIME(channel, drive,
5119 acard_act_udma[drvp->UDMA_mode],
5120 acard_rec_udma[drvp->UDMA_mode]);
5121 udma_mode |= ATP860_UDMA_MODE(channel, drive,
5122 acard_udma_conf[drvp->UDMA_mode]);
5123 }
5124 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5125 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5126 (drvp->drive_flags & DRIVE_DMA)) {
5127 /* use Multiword DMA */
5128 drvp->drive_flags &= ~DRIVE_UDMA;
5129 if (ACARD_IS_850(sc)) {
5130 idetime |= ATP850_SETTIME(drive,
5131 acard_act_dma[drvp->DMA_mode],
5132 acard_rec_dma[drvp->DMA_mode]);
5133 } else {
5134 idetime |= ATP860_SETTIME(channel, drive,
5135 acard_act_dma[drvp->DMA_mode],
5136 acard_rec_dma[drvp->DMA_mode]);
5137 }
5138 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5139 } else {
5140 /* PIO only */
5141 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5142 if (ACARD_IS_850(sc)) {
5143 idetime |= ATP850_SETTIME(drive,
5144 acard_act_pio[drvp->PIO_mode],
5145 acard_rec_pio[drvp->PIO_mode]);
5146 } else {
5147 idetime |= ATP860_SETTIME(channel, drive,
5148 acard_act_pio[drvp->PIO_mode],
5149 acard_rec_pio[drvp->PIO_mode]);
5150 }
5151 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
5152 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5153 | ATP8x0_CTRL_EN(channel));
5154 }
5155 }
5156
5157 if (idedma_ctl != 0) {
5158 /* Add software bits in status register */
5159 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5160 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5161 }
5162 pciide_print_modes(cp);
5163
5164 if (ACARD_IS_850(sc)) {
5165 pci_conf_write(sc->sc_pc, sc->sc_tag,
5166 ATP850_IDETIME(channel), idetime);
5167 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
5168 } else {
5169 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
5170 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
5171 }
5172 }
5173
5174 int
5175 acard_pci_intr(arg)
5176 void *arg;
5177 {
5178 struct pciide_softc *sc = arg;
5179 struct pciide_channel *cp;
5180 struct channel_softc *wdc_cp;
5181 int rv = 0;
5182 int dmastat, i, crv;
5183
5184 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5185 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5186 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5187 if ((dmastat & IDEDMA_CTL_INTR) == 0)
5188 continue;
5189 cp = &sc->pciide_channels[i];
5190 wdc_cp = &cp->wdc_channel;
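		/*
		 * If the channel isn't expecting an interrupt, still call
		 * wdcintr() and ack the DMA status so the condition is
		 * cleared, but don't claim the interrupt.
		 */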
5191 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
5192 (void)wdcintr(wdc_cp);
5193 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5194 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5195 continue;
5196 }
5197 crv = wdcintr(wdc_cp);
5198 if (crv == 0)
5199 printf("%s:%d: bogus intr\n",
5200 sc->sc_wdcdev.sc_dev.dv_xname, i);
5201 else if (crv == 1)
5202 rv = 1;
5203 else if (rv == 0)
5204 rv = crv;
5205 }
5206 return rv;
5207 }
5208
static int
sl82c105_bugchk(struct pci_attach_args *pa)
{

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
	    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
		return (0);

	if (PCI_REVISION(pa->pa_class) <= 0x05)
		return (1);

	return (0);
}

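/*
 * Symphony Labs SL82C105 IDE, typically found paired with (or embedded
 * in) the Winbond 83c553 southbridge; revisions <= 5 of that bridge
 * have broken DMA, hence the sl82c105_bugchk() match above.
 */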
void
sl82c105_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface, idecr;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);

	/*
	 * Check to see if we're part of the Winbond 83c553 Southbridge.
	 * If so, we need to disable DMA on rev. <= 5 of that chip.
	 */
	if (pci_find_device(pa, sl82c105_bugchk)) {
		aprint_normal(" but disabled due to 83c553 rev. <= 0x05");
		sc->sc_dma_ok = 0;
	} else
		pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = sl82c105_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
		    (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
			aprint_normal("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		sl82c105_setup_channel(&cp->wdc_channel);
	}
}

void
sl82c105_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int pxdx_reg, drive;
	pcireg_t pxdx;

	/* Set up DMA if needed. */
	pciide_channel_dma_setup(cp);

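	/*
	 * Each drive has its own command timing register (P0D0CR..P1D1CR),
	 * 4 bytes apart in PCI configuration space; program command on/off
	 * times for either multi-word DMA or PIO, whichever is in use.
	 */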
	for (drive = 0; drive < 2; drive++) {
		pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
						: SYMPH_P1D0CR) + (drive * 4);

		pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);

		pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
		pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);

		drvp = &chp->ch_drive[drive];
		/* If no drive, skip. */
		if ((drvp->drive_flags & DRIVE) == 0) {
			pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
			continue;
		}

		if (drvp->drive_flags & DRIVE_DMA) {
			/*
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed.
			 */
			if (drvp->PIO_mode >= 3) {
				if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
					drvp->DMA_mode = drvp->PIO_mode - 2;
				if (drvp->DMA_mode < 1) {
					/*
					 * Can't mix both PIO and DMA.
					 * Disable DMA.
					 */
					drvp->drive_flags &= ~DRIVE_DMA;
				}
			} else {
				/*
				 * Can't mix both PIO and DMA. Disable
				 * DMA.
				 */
				drvp->drive_flags &= ~DRIVE_DMA;
			}
		}

		if (drvp->drive_flags & DRIVE_DMA) {
			/* Use multi-word DMA. */
			pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
			    PxDx_CMD_ON_SHIFT;
			pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
		} else {
			pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
			    PxDx_CMD_ON_SHIFT;
			pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
		}

		/* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */

		/* ...and set the mode for this drive. */
		pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
	}

	pciide_print_modes(cp);
}

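/*
 * ServerWorks OSB4/CSB5/CSB6 IDE.  All three are handled by the same
 * setup routine; they differ mainly in the highest Ultra/DMA mode
 * supported (OSB4: UDMA2, CSB5: UDMA4 or UDMA5 depending on revision,
 * CSB6: UDMA5), which is reflected in UDMA_cap below.
 */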
void
serverworks_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcitag_t pcib_tag;
	int channel;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
		sc->sc_wdcdev.UDMA_cap = 2;
		break;
	case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
		if (PCI_REVISION(pa->pa_class) < 0x92)
			sc->sc_wdcdev.UDMA_cap = 4;
		else
			sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	}

	sc->sc_wdcdev.set_modes = serverworks_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 2;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    serverworks_pci_intr);
		if (cp->hw_ok == 0)
			return;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			return;
		serverworks_setup_channel(&cp->wdc_channel);
	}

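	/*
	 * Finally, tweak a register at offset 0x64 of function 0 of the
	 * same PCI device: clear bit 13 and set bit 14.  The meaning of
	 * these bits is not documented here.
	 */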
	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
	    (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
}

void
serverworks_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	int drive, unit;
	u_int32_t pio_time, dma_time, pio_mode, udma_mode;
	u_int32_t idedma_ctl;
	static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
	static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
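	/*
	 * pio_modes[]/dma_modes[] are the raw per-mode timing bytes
	 * programmed into the 0x40 (PIO) and 0x44 (DMA) timing registers
	 * below; their exact bit encoding is not documented in this driver.
	 */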

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
	dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
	pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
	udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);

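	/*
	 * Clear this channel's fields before merging in the new settings:
	 * 16 bits of command timing per channel in 0x40/0x44, a 4-bit mode
	 * nibble per drive (starting at bit 16) in 0x48/0x54, and one
	 * UDMA-enable bit per drive in the low bits of 0x54.
	 */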
	pio_time &= ~(0xffff << (16 * channel));
	dma_time &= ~(0xffff << (16 * channel));
	pio_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(3 << (2 * channel));

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		unit = drive + 2 * channel;
		/* add timing values, setup DMA if needed */
		pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
		pio_mode |= drvp->PIO_mode << (4 * unit + 16);
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA, check for 80-pin cable */
			if (drvp->UDMA_mode > 2 &&
			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
				drvp->UDMA_mode = 2;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
			udma_mode |= 1 << unit;
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
		    (drvp->drive_flags & DRIVE_DMA)) {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
		}
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
		pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
	}
	pciide_print_modes(cp);
}

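/*
 * Interrupt handler for the ServerWorks controllers.  Unlike the Acard
 * handler above, a channel is only considered to have interrupted when
 * its DMA status shows the interrupt bit set and the active bit clear;
 * a bogus interrupt is acknowledged by writing the status back.
 */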
int
serverworks_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int rv = 0;
	int dmastat, i, crv;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
		    IDEDMA_CTL_INTR)
			continue;
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		crv = wdcintr(wdc_cp);
		if (crv == 0) {
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
		} else
			rv = 1;
	}
	return rv;
}

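/*
 * Intel i31244 ("Artisea") Serial ATA controller, driven here through
 * the generic pciide channel mapping and sata_setup_channel().
 * Bus-master DMA is left disabled on revision 0 parts unless the
 * kernel is built with PCIIDE_I31244_ENABLEDMA.
 */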
void
artisea_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
#ifndef PCIIDE_I31244_ENABLEDMA
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_31244 &&
	    PCI_REVISION(pa->pa_class) == 0) {
		aprint_normal(" but disabled due to rev. 0");
		sc->sc_dma_ok = 0;
	} else
#endif
		pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	/*
	 * XXX Configure LEDs to show activity.
	 */

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
		sc->sc_wdcdev.UDMA_cap = 6;
	}
	sc->sc_wdcdev.set_modes = sata_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		sata_setup_channel(&cp->wdc_channel);
	}
}
