1 /* $NetBSD: pciide.c,v 1.199 2003/09/17 16:55:20 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.199 2003/09/17 16:55:20 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
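/* Bits for the wdcdebug_pciide_mask debug mask defined below. */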
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
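/*
 * WDCDEBUG_PRINT takes its printf arguments in an extra set of
 * parentheses, e.g. WDCDEBUG_PRINT(("attach: x=%d\n", x), DEBUG_PROBE),
 * so that the whole argument list can be forwarded to printf when the
 * corresponding bit is set in wdcdebug_pciide_mask.
 */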
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/pciide_i31244_reg.h>
123 #include <dev/pci/pciide_sii3112_reg.h>
124 #include <dev/pci/cy82c693var.h>
125
126 #include "opt_pciide.h"
127
128 static const char dmaerrfmt[] =
129 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
130
131 /* inlines for reading/writing 8-bit PCI registers */
132 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
133 int));
134 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
135 int, u_int8_t));
136
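/*
 * pci_conf_read()/pci_conf_write() operate on aligned 32-bit registers,
 * so these helpers access a single byte by shifting/masking within the
 * containing 32-bit word (a read-modify-write for the write case).
 */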
137 static __inline u_int8_t
138 pciide_pci_read(pc, pa, reg)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 {
143
144 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
145 ((reg & 0x03) * 8) & 0xff);
146 }
147
148 static __inline void
149 pciide_pci_write(pc, pa, reg, val)
150 pci_chipset_tag_t pc;
151 pcitag_t pa;
152 int reg;
153 u_int8_t val;
154 {
155 pcireg_t pcival;
156
157 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
158 pcival &= ~(0xff << ((reg & 0x03) * 8));
159 pcival |= (val << ((reg & 0x03) * 8));
160 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
161 }
162
163 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164
165 void sata_setup_channel __P((struct channel_softc*));
166
167 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void piix_setup_channel __P((struct channel_softc*));
169 void piix3_4_setup_channel __P((struct channel_softc*));
170 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
171 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
172 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
173
174 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void amd7x6_setup_channel __P((struct channel_softc*));
176
177 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void apollo_sata_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void apollo_setup_channel __P((struct channel_softc*));
180
181 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void cmd0643_9_setup_channel __P((struct channel_softc*));
184 void cmd_channel_map __P((struct pci_attach_args *,
185 struct pciide_softc *, int));
186 int cmd_pci_intr __P((void *));
187 void cmd646_9_irqack __P((struct channel_softc *));
188 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void cmd680_setup_channel __P((struct channel_softc*));
190 void cmd680_channel_map __P((struct pci_attach_args *,
191 struct pciide_softc *, int));
192
193 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void cmd3112_setup_channel __P((struct channel_softc*));
195
196 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void cy693_setup_channel __P((struct channel_softc*));
198
199 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
200 void sis_setup_channel __P((struct channel_softc*));
201 void sis96x_setup_channel __P((struct channel_softc*));
202 static int sis_hostbr_match __P(( struct pci_attach_args *));
203 static int sis_south_match __P(( struct pci_attach_args *));
204
205 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void acer_setup_channel __P((struct channel_softc*));
207 int acer_pci_intr __P((void *));
208
209 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void pdc202xx_setup_channel __P((struct channel_softc*));
211 void pdc20268_setup_channel __P((struct channel_softc*));
212 int pdc202xx_pci_intr __P((void *));
213 int pdc20265_pci_intr __P((void *));
214 static void pdc20262_dma_start __P((void*, int, int));
215 static int pdc20262_dma_finish __P((void*, int, int, int));
216
217 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void opti_setup_channel __P((struct channel_softc*));
219
220 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
221 void hpt_setup_channel __P((struct channel_softc*));
222 int hpt_pci_intr __P((void *));
223
224 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
225 void acard_setup_channel __P((struct channel_softc*));
226 int acard_pci_intr __P((void *));
227
228 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
229 void serverworks_setup_channel __P((struct channel_softc*));
230 int serverworks_pci_intr __P((void *));
231
232 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
233 void sl82c105_setup_channel __P((struct channel_softc*));
234
235 void pciide_channel_dma_setup __P((struct pciide_channel *));
236 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
237 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
238 void pciide_dma_start __P((void*, int, int));
239 int pciide_dma_finish __P((void*, int, int, int));
240 void pciide_irqack __P((struct channel_softc *));
241 void pciide_print_modes __P((struct pciide_channel *));
242
243 void artisea_chip_map __P((struct pciide_softc*, struct pci_attach_args *));
244
245 struct pciide_product_desc {
246 u_int32_t ide_product;
247 int ide_flags;
248 const char *ide_name;
249 /* map and setup chip, probe drives */
250 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
251 };
252
253 /* Flags for ide_flags */
254 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
255 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
256
257 /* Default product description for devices not specifically known to this driver */
258 const struct pciide_product_desc default_product_desc = {
259 0,
260 0,
261 "Generic PCI IDE controller",
262 default_chip_map,
263 };
264
265 const struct pciide_product_desc pciide_intel_products[] = {
266 { PCI_PRODUCT_INTEL_82092AA,
267 0,
268 "Intel 82092AA IDE controller",
269 default_chip_map,
270 },
271 { PCI_PRODUCT_INTEL_82371FB_IDE,
272 0,
273 "Intel 82371FB IDE controller (PIIX)",
274 piix_chip_map,
275 },
276 { PCI_PRODUCT_INTEL_82371SB_IDE,
277 0,
278 "Intel 82371SB IDE Interface (PIIX3)",
279 piix_chip_map,
280 },
281 { PCI_PRODUCT_INTEL_82371AB_IDE,
282 0,
283 "Intel 82371AB IDE controller (PIIX4)",
284 piix_chip_map,
285 },
286 { PCI_PRODUCT_INTEL_82440MX_IDE,
287 0,
288 "Intel 82440MX IDE controller",
289 piix_chip_map
290 },
291 { PCI_PRODUCT_INTEL_82801AA_IDE,
292 0,
293 "Intel 82801AA IDE Controller (ICH)",
294 piix_chip_map,
295 },
296 { PCI_PRODUCT_INTEL_82801AB_IDE,
297 0,
298 "Intel 82801AB IDE Controller (ICH0)",
299 piix_chip_map,
300 },
301 { PCI_PRODUCT_INTEL_82801BA_IDE,
302 0,
303 "Intel 82801BA IDE Controller (ICH2)",
304 piix_chip_map,
305 },
306 { PCI_PRODUCT_INTEL_82801BAM_IDE,
307 0,
308 "Intel 82801BAM IDE Controller (ICH2-M)",
309 piix_chip_map,
310 },
311 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
312 0,
313 "Intel 82801CA IDE Controller (ICH3)",
314 piix_chip_map,
315 },
316 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
317 0,
318 "Intel 82801CA IDE Controller (ICH3)",
319 piix_chip_map,
320 },
321 { PCI_PRODUCT_INTEL_82801DB_IDE,
322 0,
323 "Intel 82801DB IDE Controller (ICH4)",
324 piix_chip_map,
325 },
326 { PCI_PRODUCT_INTEL_82801DBM_IDE,
327 0,
328 "Intel 82801DBM IDE Controller (ICH4-M)",
329 piix_chip_map,
330 },
331 { PCI_PRODUCT_INTEL_82801EB_IDE,
332 0,
333 "Intel 82801EB IDE Controller (ICH5)",
334 piix_chip_map,
335 },
336 { PCI_PRODUCT_INTEL_31244,
337 0,
338 "Intel 31244 Serial ATA Controller",
339 artisea_chip_map,
340 },
341 { PCI_PRODUCT_INTEL_82801EB_SATA,
342 0,
343 "Intel 82801EB Serial ATA Controller",
344 artisea_chip_map,
345 },
346 { 0,
347 0,
348 NULL,
349 NULL
350 }
351 };
352
353 const struct pciide_product_desc pciide_amd_products[] = {
354 { PCI_PRODUCT_AMD_PBC756_IDE,
355 0,
356 "Advanced Micro Devices AMD756 IDE Controller",
357 amd7x6_chip_map
358 },
359 { PCI_PRODUCT_AMD_PBC766_IDE,
360 0,
361 "Advanced Micro Devices AMD766 IDE Controller",
362 amd7x6_chip_map
363 },
364 { PCI_PRODUCT_AMD_PBC768_IDE,
365 0,
366 "Advanced Micro Devices AMD768 IDE Controller",
367 amd7x6_chip_map
368 },
369 { PCI_PRODUCT_AMD_PBC8111_IDE,
370 0,
371 "Advanced Micro Devices AMD8111 IDE Controller",
372 amd7x6_chip_map
373 },
374 { 0,
375 0,
376 NULL,
377 NULL
378 }
379 };
380
381 const struct pciide_product_desc pciide_nvidia_products[] = {
382 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
383 0,
384 "NVIDIA nForce IDE Controller",
385 amd7x6_chip_map
386 },
387 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
388 0,
389 "NVIDIA nForce2 IDE Controller",
390 amd7x6_chip_map
391 },
392 { 0,
393 0,
394 NULL,
395 NULL
396 }
397 };
398
399 const struct pciide_product_desc pciide_cmd_products[] = {
400 { PCI_PRODUCT_CMDTECH_640,
401 0,
402 "CMD Technology PCI0640",
403 cmd_chip_map
404 },
405 { PCI_PRODUCT_CMDTECH_643,
406 0,
407 "CMD Technology PCI0643",
408 cmd0643_9_chip_map,
409 },
410 { PCI_PRODUCT_CMDTECH_646,
411 0,
412 "CMD Technology PCI0646",
413 cmd0643_9_chip_map,
414 },
415 { PCI_PRODUCT_CMDTECH_648,
416 IDE_PCI_CLASS_OVERRIDE,
417 "CMD Technology PCI0648",
418 cmd0643_9_chip_map,
419 },
420 { PCI_PRODUCT_CMDTECH_649,
421 IDE_PCI_CLASS_OVERRIDE,
422 "CMD Technology PCI0649",
423 cmd0643_9_chip_map,
424 },
425 { PCI_PRODUCT_CMDTECH_680,
426 IDE_PCI_CLASS_OVERRIDE,
427 "Silicon Image 0680",
428 cmd680_chip_map,
429 },
430 { PCI_PRODUCT_CMDTECH_3112,
431 IDE_PCI_CLASS_OVERRIDE,
432 "Silicon Image SATALink 3112",
433 cmd3112_chip_map,
434 },
435 { 0,
436 0,
437 NULL,
438 NULL
439 }
440 };
441
442 const struct pciide_product_desc pciide_via_products[] = {
443 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
444 0,
445 NULL,
446 apollo_chip_map,
447 },
448 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
449 0,
450 NULL,
451 apollo_chip_map,
452 },
453 { PCI_PRODUCT_VIATECH_VT8237_SATA,
454 IDE_PCI_CLASS_OVERRIDE,
455 "VIA Technologies VT8237 SATA Controller",
456 apollo_sata_chip_map,
457 },
458 { 0,
459 0,
460 NULL,
461 NULL
462 }
463 };
464
465 const struct pciide_product_desc pciide_cypress_products[] = {
466 { PCI_PRODUCT_CONTAQ_82C693,
467 IDE_16BIT_IOSPACE,
468 "Cypress 82C693 IDE Controller",
469 cy693_chip_map,
470 },
471 { 0,
472 0,
473 NULL,
474 NULL
475 }
476 };
477
478 const struct pciide_product_desc pciide_sis_products[] = {
479 { PCI_PRODUCT_SIS_5597_IDE,
480 0,
481 NULL,
482 sis_chip_map,
483 },
484 { 0,
485 0,
486 NULL,
487 NULL
488 }
489 };
490
491 const struct pciide_product_desc pciide_acer_products[] = {
492 { PCI_PRODUCT_ALI_M5229,
493 0,
494 "Acer Labs M5229 UDMA IDE Controller",
495 acer_chip_map,
496 },
497 { 0,
498 0,
499 NULL,
500 NULL
501 }
502 };
503
504 const struct pciide_product_desc pciide_promise_products[] = {
505 { PCI_PRODUCT_PROMISE_ULTRA33,
506 IDE_PCI_CLASS_OVERRIDE,
507 "Promise Ultra33/ATA Bus Master IDE Accelerator",
508 pdc202xx_chip_map,
509 },
510 { PCI_PRODUCT_PROMISE_ULTRA66,
511 IDE_PCI_CLASS_OVERRIDE,
512 "Promise Ultra66/ATA Bus Master IDE Accelerator",
513 pdc202xx_chip_map,
514 },
515 { PCI_PRODUCT_PROMISE_ULTRA100,
516 IDE_PCI_CLASS_OVERRIDE,
517 "Promise Ultra100/ATA Bus Master IDE Accelerator",
518 pdc202xx_chip_map,
519 },
520 { PCI_PRODUCT_PROMISE_ULTRA100X,
521 IDE_PCI_CLASS_OVERRIDE,
522 "Promise Ultra100/ATA Bus Master IDE Accelerator",
523 pdc202xx_chip_map,
524 },
525 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
526 IDE_PCI_CLASS_OVERRIDE,
527 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
528 pdc202xx_chip_map,
529 },
530 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
531 IDE_PCI_CLASS_OVERRIDE,
532 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
533 pdc202xx_chip_map,
534 },
535 { PCI_PRODUCT_PROMISE_ULTRA133,
536 IDE_PCI_CLASS_OVERRIDE,
537 "Promise Ultra133/ATA Bus Master IDE Accelerator",
538 pdc202xx_chip_map,
539 },
540 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
541 IDE_PCI_CLASS_OVERRIDE,
542 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
543 pdc202xx_chip_map,
544 },
545 { PCI_PRODUCT_PROMISE_MBULTRA133,
546 IDE_PCI_CLASS_OVERRIDE,
547 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
548 pdc202xx_chip_map,
549 },
550 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
551 IDE_PCI_CLASS_OVERRIDE,
552 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
553 pdc202xx_chip_map,
554 },
555 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
556 IDE_PCI_CLASS_OVERRIDE,
557 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
558 pdc202xx_chip_map,
559 },
560 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
561 IDE_PCI_CLASS_OVERRIDE,
562 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
563 pdc202xx_chip_map,
564 },
565 { 0,
566 0,
567 NULL,
568 NULL
569 }
570 };
571
572 const struct pciide_product_desc pciide_opti_products[] = {
573 { PCI_PRODUCT_OPTI_82C621,
574 0,
575 "OPTi 82c621 PCI IDE controller",
576 opti_chip_map,
577 },
578 { PCI_PRODUCT_OPTI_82C568,
579 0,
580 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
581 opti_chip_map,
582 },
583 { PCI_PRODUCT_OPTI_82D568,
584 0,
585 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
586 opti_chip_map,
587 },
588 { 0,
589 0,
590 NULL,
591 NULL
592 }
593 };
594
595 const struct pciide_product_desc pciide_triones_products[] = {
596 { PCI_PRODUCT_TRIONES_HPT366,
597 IDE_PCI_CLASS_OVERRIDE,
598 NULL,
599 hpt_chip_map,
600 },
601 { PCI_PRODUCT_TRIONES_HPT372,
602 IDE_PCI_CLASS_OVERRIDE,
603 NULL,
604 hpt_chip_map
605 },
606 { PCI_PRODUCT_TRIONES_HPT374,
607 IDE_PCI_CLASS_OVERRIDE,
608 NULL,
609 hpt_chip_map
610 },
611 { 0,
612 0,
613 NULL,
614 NULL
615 }
616 };
617
618 const struct pciide_product_desc pciide_acard_products[] = {
619 { PCI_PRODUCT_ACARD_ATP850U,
620 IDE_PCI_CLASS_OVERRIDE,
621 "Acard ATP850U Ultra33 IDE Controller",
622 acard_chip_map,
623 },
624 { PCI_PRODUCT_ACARD_ATP860,
625 IDE_PCI_CLASS_OVERRIDE,
626 "Acard ATP860 Ultra66 IDE Controller",
627 acard_chip_map,
628 },
629 { PCI_PRODUCT_ACARD_ATP860A,
630 IDE_PCI_CLASS_OVERRIDE,
631 "Acard ATP860-A Ultra66 IDE Controller",
632 acard_chip_map,
633 },
634 { 0,
635 0,
636 NULL,
637 NULL
638 }
639 };
640
641 const struct pciide_product_desc pciide_serverworks_products[] = {
642 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
643 0,
644 "ServerWorks OSB4 IDE Controller",
645 serverworks_chip_map,
646 },
647 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
648 0,
649 "ServerWorks CSB5 IDE Controller",
650 serverworks_chip_map,
651 },
652 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
653 0,
654 "ServerWorks CSB6 RAID/IDE Controller",
655 serverworks_chip_map,
656 },
657 { 0,
658 0,
659 NULL,
660 }
661 };
662
663 const struct pciide_product_desc pciide_symphony_products[] = {
664 { PCI_PRODUCT_SYMPHONY_82C105,
665 0,
666 "Symphony Labs 82C105 IDE controller",
667 sl82c105_chip_map,
668 },
669 { 0,
670 0,
671 NULL,
672 }
673 };
674
675 const struct pciide_product_desc pciide_winbond_products[] = {
676 { PCI_PRODUCT_WINBOND_W83C553F_1,
677 0,
678 "Winbond W83C553F IDE controller",
679 sl82c105_chip_map,
680 },
681 { 0,
682 0,
683 NULL,
684 }
685 };
686
687 struct pciide_vendor_desc {
688 u_int32_t ide_vendor;
689 const struct pciide_product_desc *ide_products;
690 };
691
692 const struct pciide_vendor_desc pciide_vendors[] = {
693 { PCI_VENDOR_INTEL, pciide_intel_products },
694 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
695 { PCI_VENDOR_VIATECH, pciide_via_products },
696 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
697 { PCI_VENDOR_SIS, pciide_sis_products },
698 { PCI_VENDOR_ALI, pciide_acer_products },
699 { PCI_VENDOR_PROMISE, pciide_promise_products },
700 { PCI_VENDOR_AMD, pciide_amd_products },
701 { PCI_VENDOR_OPTI, pciide_opti_products },
702 { PCI_VENDOR_TRIONES, pciide_triones_products },
703 { PCI_VENDOR_ACARD, pciide_acard_products },
704 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
705 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
706 { PCI_VENDOR_WINBOND, pciide_winbond_products },
707 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
708 { 0, NULL }
709 };
710
711 /* options passed via the 'flags' config keyword */
712 #define PCIIDE_OPTIONS_DMA 0x01
713 #define PCIIDE_OPTIONS_NODMA 0x02
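/*
 * These correspond to the 'flags' value in the kernel config file,
 * e.g. (assuming the usual config(8) syntax)
 *	pciide* at pci? dev ? function ? flags 0x0002
 * to force bus-master DMA off, or flags 0x0001 to enable DMA on a
 * controller handled only by the generic (default) description.
 */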
714
715 int pciide_match __P((struct device *, struct cfdata *, void *));
716 void pciide_attach __P((struct device *, struct device *, void *));
717
718 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
719 pciide_match, pciide_attach, NULL, NULL);
720
721 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
722 int pciide_mapregs_compat __P(( struct pci_attach_args *,
723 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
724 int pciide_mapregs_native __P((struct pci_attach_args *,
725 struct pciide_channel *, bus_size_t *, bus_size_t *,
726 int (*pci_intr) __P((void *))));
727 void pciide_mapreg_dma __P((struct pciide_softc *,
728 struct pci_attach_args *));
729 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
730 void pciide_mapchan __P((struct pci_attach_args *,
731 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
732 int (*pci_intr) __P((void *))));
733 int pciide_chan_candisable __P((struct pciide_channel *));
734 void pciide_map_compat_intr __P(( struct pci_attach_args *,
735 struct pciide_channel *, int, int));
736 int pciide_compat_intr __P((void *));
737 int pciide_pci_intr __P((void *));
738 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
739
740 const struct pciide_product_desc *
741 pciide_lookup_product(id)
742 u_int32_t id;
743 {
744 const struct pciide_product_desc *pp;
745 const struct pciide_vendor_desc *vp;
746
747 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
748 if (PCI_VENDOR(id) == vp->ide_vendor)
749 break;
750
751 if ((pp = vp->ide_products) == NULL)
752 return NULL;
753
754 for (; pp->chip_map != NULL; pp++)
755 if (PCI_PRODUCT(id) == pp->ide_product)
756 break;
757
758 if (pp->chip_map == NULL)
759 return NULL;
760 return pp;
761 }
762
763 int
764 pciide_match(parent, match, aux)
765 struct device *parent;
766 struct cfdata *match;
767 void *aux;
768 {
769 struct pci_attach_args *pa = aux;
770 const struct pciide_product_desc *pp;
771
772 /*
773 * Check the class register to see that it's a PCI IDE controller.
774 * If it is, we assume that we can deal with it; it _should_
775 * work in a standardized way...
776 */
777 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
778 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
779 return (1);
780 }
781
782 /*
783 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
784 * controllers. Let's see if we can deal with them anyway.
785 */
786 pp = pciide_lookup_product(pa->pa_id);
787 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
788 return (1);
789 }
790
791 return (0);
792 }
793
794 void
795 pciide_attach(parent, self, aux)
796 struct device *parent, *self;
797 void *aux;
798 {
799 struct pci_attach_args *pa = aux;
800 pci_chipset_tag_t pc = pa->pa_pc;
801 pcitag_t tag = pa->pa_tag;
802 struct pciide_softc *sc = (struct pciide_softc *)self;
803 pcireg_t csr;
804 char devinfo[256];
805 const char *displaydev;
806
807 aprint_naive(": disk controller\n");
808
809 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
810 sc->sc_pp = pciide_lookup_product(pa->pa_id);
811 if (sc->sc_pp == NULL) {
812 sc->sc_pp = &default_product_desc;
813 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
814 displaydev = devinfo;
815 } else
816 displaydev = sc->sc_pp->ide_name;
817
818 /* if displaydev == NULL, printf is done in chip-specific map */
819 if (displaydev)
820 aprint_normal(": %s (rev. 0x%02x)\n", displaydev,
821 PCI_REVISION(pa->pa_class));
822
823 sc->sc_pc = pa->pa_pc;
824 sc->sc_tag = pa->pa_tag;
825
826 /* Set up DMA defaults; these might be adjusted by chip_map. */
827 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
828 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
829
830 #ifdef WDCDEBUG
831 if (wdcdebug_pciide_mask & DEBUG_PROBE)
832 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
833 #endif
834 sc->sc_pp->chip_map(sc, pa);
835
836 if (sc->sc_dma_ok) {
837 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
838 csr |= PCI_COMMAND_MASTER_ENABLE;
839 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
840 }
841 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
842 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
843 }
844
845 /* tell whether the chip is enabled or not */
846 int
847 pciide_chipen(sc, pa)
848 struct pciide_softc *sc;
849 struct pci_attach_args *pa;
850 {
851 pcireg_t csr;
852 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
853 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
854 PCI_COMMAND_STATUS_REG);
855 aprint_normal("%s: device disabled (at %s)\n",
856 sc->sc_wdcdev.sc_dev.dv_xname,
857 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
858 "device" : "bridge");
859 return 0;
860 }
861 return 1;
862 }
863
864 int
865 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
866 struct pci_attach_args *pa;
867 struct pciide_channel *cp;
868 int compatchan;
869 bus_size_t *cmdsizep, *ctlsizep;
870 {
871 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
872 struct channel_softc *wdc_cp = &cp->wdc_channel;
873
874 cp->compat = 1;
875 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
876 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
877
878 wdc_cp->cmd_iot = pa->pa_iot;
879 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
880 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
881 aprint_error("%s: couldn't map %s channel cmd regs\n",
882 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
883 return (0);
884 }
885
886 wdc_cp->ctl_iot = pa->pa_iot;
887 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
888 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
889 aprint_error("%s: couldn't map %s channel ctl regs\n",
890 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
891 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
892 PCIIDE_COMPAT_CMD_SIZE);
893 return (0);
894 }
895
896 return (1);
897 }
898
899 int
900 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
901 struct pci_attach_args * pa;
902 struct pciide_channel *cp;
903 bus_size_t *cmdsizep, *ctlsizep;
904 int (*pci_intr) __P((void *));
905 {
906 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
907 struct channel_softc *wdc_cp = &cp->wdc_channel;
908 const char *intrstr;
909 pci_intr_handle_t intrhandle;
910
911 cp->compat = 0;
912
913 if (sc->sc_pci_ih == NULL) {
914 if (pci_intr_map(pa, &intrhandle) != 0) {
915 aprint_error("%s: couldn't map native-PCI interrupt\n",
916 sc->sc_wdcdev.sc_dev.dv_xname);
917 return 0;
918 }
919 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
920 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
921 intrhandle, IPL_BIO, pci_intr, sc);
922 if (sc->sc_pci_ih != NULL) {
923 aprint_normal("%s: using %s for native-PCI interrupt\n",
924 sc->sc_wdcdev.sc_dev.dv_xname,
925 intrstr ? intrstr : "unknown interrupt");
926 } else {
927 aprint_error(
928 "%s: couldn't establish native-PCI interrupt",
929 sc->sc_wdcdev.sc_dev.dv_xname);
930 if (intrstr != NULL)
931 aprint_normal(" at %s", intrstr);
932 aprint_normal("\n");
933 return 0;
934 }
935 }
936 cp->ih = sc->sc_pci_ih;
937 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
938 PCI_MAPREG_TYPE_IO, 0,
939 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
940 aprint_error("%s: couldn't map %s channel cmd regs\n",
941 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
942 return 0;
943 }
944
945 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
946 PCI_MAPREG_TYPE_IO, 0,
947 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
948 aprint_error("%s: couldn't map %s channel ctl regs\n",
949 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
950 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
951 return 0;
952 }
953 /*
954 * In native mode, 4 bytes of I/O space are mapped for the control
955 * register, the control register is at offset 2. Pass the generic
956 * code a handle for only one byte at the right offset.
957 */
958 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
959 &wdc_cp->ctl_ioh) != 0) {
960 aprint_error("%s: unable to subregion %s channel ctl regs\n",
961 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
962 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
963 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
964 return 0;
965 }
966 return (1);
967 }
968
969 void
970 pciide_mapreg_dma(sc, pa)
971 struct pciide_softc *sc;
972 struct pci_attach_args *pa;
973 {
974 pcireg_t maptype;
975 bus_addr_t addr;
976
977 /*
978 * Map DMA registers
979 *
980 * Note that sc_dma_ok is the right variable to test to see if
981 * DMA can be done. If the interface doesn't support DMA,
982 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
983 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
984 * non-zero if the interface supports DMA and the registers
985 * could be mapped.
986 *
987 * XXX Note that despite the fact that the Bus Master IDE specs
988 * XXX say that "The bus master IDE function uses 16 bytes of IO
989 * XXX space," some controllers (at least the United
990 * XXX Microelectronics UM8886BF) place it in memory space.
991 */
992 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
993 PCIIDE_REG_BUS_MASTER_DMA);
994
995 switch (maptype) {
996 case PCI_MAPREG_TYPE_IO:
997 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
998 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
999 &addr, NULL, NULL) == 0);
1000 if (sc->sc_dma_ok == 0) {
1001 aprint_normal(
1002 ", but unused (couldn't query registers)");
1003 break;
1004 }
1005 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
1006 && addr >= 0x10000) {
1007 sc->sc_dma_ok = 0;
1008 aprint_normal(
1009 ", but unused (registers at unsafe address "
1010 "%#lx)", (unsigned long)addr);
1011 break;
1012 }
1013 /* FALLTHROUGH */
1014
1015 case PCI_MAPREG_MEM_TYPE_32BIT:
1016 sc->sc_dma_ok = (pci_mapreg_map(pa,
1017 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
1018 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
1019 sc->sc_dmat = pa->pa_dmat;
1020 if (sc->sc_dma_ok == 0) {
1021 aprint_normal(", but unused (couldn't map registers)");
1022 } else {
1023 sc->sc_wdcdev.dma_arg = sc;
1024 sc->sc_wdcdev.dma_init = pciide_dma_init;
1025 sc->sc_wdcdev.dma_start = pciide_dma_start;
1026 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
1027 }
1028
1029 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1030 PCIIDE_OPTIONS_NODMA) {
1031 aprint_normal(
1032 ", but unused (forced off by config file)");
1033 sc->sc_dma_ok = 0;
1034 }
1035 break;
1036
1037 default:
1038 sc->sc_dma_ok = 0;
1039 aprint_normal(
1040 ", but unsupported register maptype (0x%x)", maptype);
1041 }
1042 }
1043
1044 int
1045 pciide_compat_intr(arg)
1046 void *arg;
1047 {
1048 struct pciide_channel *cp = arg;
1049
1050 #ifdef DIAGNOSTIC
1051 /* should only be called for a compat channel */
1052 if (cp->compat == 0)
1053 panic("pciide compat intr called for non-compat chan %p", cp);
1054 #endif
1055 return (wdcintr(&cp->wdc_channel));
1056 }
1057
1058 int
1059 pciide_pci_intr(arg)
1060 void *arg;
1061 {
1062 struct pciide_softc *sc = arg;
1063 struct pciide_channel *cp;
1064 struct channel_softc *wdc_cp;
1065 int i, rv, crv;
1066
1067 rv = 0;
1068 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1069 cp = &sc->pciide_channels[i];
1070 wdc_cp = &cp->wdc_channel;
1071
1072 /* If a compat channel, skip. */
1073 if (cp->compat)
1074 continue;
1075 /* if this channel is not waiting for an interrupt, skip */
1076 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1077 continue;
1078
1079 crv = wdcintr(wdc_cp);
1080 if (crv == 0)
1081 ; /* leave rv alone */
1082 else if (crv == 1)
1083 rv = 1; /* claim the intr */
1084 else if (rv == 0) /* crv should be -1 in this case */
1085 rv = crv; /* if we've done no better, take it */
1086 }
1087 return (rv);
1088 }
1089
1090 void
1091 pciide_channel_dma_setup(cp)
1092 struct pciide_channel *cp;
1093 {
1094 int drive;
1095 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1096 struct ata_drive_datas *drvp;
1097
1098 for (drive = 0; drive < 2; drive++) {
1099 drvp = &cp->wdc_channel.ch_drive[drive];
1100 /* If no drive, skip */
1101 if ((drvp->drive_flags & DRIVE) == 0)
1102 continue;
1103 /* setup DMA if needed */
1104 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1105 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1106 sc->sc_dma_ok == 0) {
1107 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1108 continue;
1109 }
1110 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1111 != 0) {
1112 /* Abort DMA setup */
1113 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1114 continue;
1115 }
1116 }
1117 }
1118
1119 int
1120 pciide_dma_table_setup(sc, channel, drive)
1121 struct pciide_softc *sc;
1122 int channel, drive;
1123 {
1124 bus_dma_segment_t seg;
1125 int error, rseg;
1126 const bus_size_t dma_table_size =
1127 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1128 struct pciide_dma_maps *dma_maps =
1129 &sc->pciide_channels[channel].dma_maps[drive];
1130
1131 /* If table was already allocated, just return */
1132 if (dma_maps->dma_table)
1133 return 0;
1134
1135 /* Allocate memory for the DMA tables and map it */
1136 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1137 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1138 BUS_DMA_NOWAIT)) != 0) {
1139 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1140 "allocate", drive, error);
1141 return error;
1142 }
1143 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1144 dma_table_size,
1145 (caddr_t *)&dma_maps->dma_table,
1146 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1147 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1148 "map", drive, error);
1149 return error;
1150 }
1151 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1152 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1153 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1154 /* Create and load table DMA map for this disk */
1155 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1156 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1157 &dma_maps->dmamap_table)) != 0) {
1158 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1159 "create", drive, error);
1160 return error;
1161 }
1162 if ((error = bus_dmamap_load(sc->sc_dmat,
1163 dma_maps->dmamap_table,
1164 dma_maps->dma_table,
1165 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1166 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1167 "load", drive, error);
1168 return error;
1169 }
1170 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1171 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1172 DEBUG_PROBE);
1173 /* Create a xfer DMA map for this drive */
1174 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1175 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1176 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1177 &dma_maps->dmamap_xfer)) != 0) {
1178 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1179 "create xfer", drive, error);
1180 return error;
1181 }
1182 return 0;
1183 }
1184
1185 int
1186 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1187 void *v;
1188 int channel, drive;
1189 void *databuf;
1190 size_t datalen;
1191 int flags;
1192 {
1193 struct pciide_softc *sc = v;
1194 int error, seg;
1195 struct pciide_dma_maps *dma_maps =
1196 &sc->pciide_channels[channel].dma_maps[drive];
1197
1198 error = bus_dmamap_load(sc->sc_dmat,
1199 dma_maps->dmamap_xfer,
1200 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1201 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1202 if (error) {
1203 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1204 "load xfer", drive, error);
1205 return error;
1206 }
1207
1208 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1209 dma_maps->dmamap_xfer->dm_mapsize,
1210 (flags & WDC_DMA_READ) ?
1211 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1212
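	/*
	 * Fill the bus-master PRD table: each entry carries the 32-bit
	 * physical base address and byte count of one segment, and
	 * IDEDMA_BYTE_COUNT_EOT marks the last entry (see the Bus Master
	 * IDE spec referenced at the top of this file).
	 */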
1213 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1214 #ifdef DIAGNOSTIC
1215 /* A segment must not cross a 64k boundary */
1216 {
1217 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1218 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1219 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1220 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1221 printf("pciide_dma: segment %d physical addr 0x%lx"
1222 " len 0x%lx not properly aligned\n",
1223 seg, phys, len);
1224 panic("pciide_dma: buf align");
1225 }
1226 }
1227 #endif
1228 dma_maps->dma_table[seg].base_addr =
1229 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1230 dma_maps->dma_table[seg].byte_count =
1231 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1232 IDEDMA_BYTE_COUNT_MASK);
1233 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1234 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1235 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1236
1237 }
1238 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
1239 htole32(IDEDMA_BYTE_COUNT_EOT);
1240
1241 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1242 dma_maps->dmamap_table->dm_mapsize,
1243 BUS_DMASYNC_PREWRITE);
1244
1245 /* Maps are ready. Start DMA function */
1246 #ifdef DIAGNOSTIC
1247 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1248 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1249 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1250 panic("pciide_dma_init: table align");
1251 }
1252 #endif
1253
1254 /* Clear status bits */
1255 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1256 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1257 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1258 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1259 /* Write table addr */
1260 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1261 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1262 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1263 /* set read/write */
1264 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1265 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1266 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1267 /* remember flags */
1268 dma_maps->dma_flags = flags;
1269 return 0;
1270 }
1271
1272 void
1273 pciide_dma_start(v, channel, drive)
1274 void *v;
1275 int channel, drive;
1276 {
1277 struct pciide_softc *sc = v;
1278
1279 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1280 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1281 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1282 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1283 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1284 }
1285
1286 int
1287 pciide_dma_finish(v, channel, drive, force)
1288 void *v;
1289 int channel, drive;
1290 int force;
1291 {
1292 struct pciide_softc *sc = v;
1293 u_int8_t status;
1294 int error = 0;
1295 struct pciide_dma_maps *dma_maps =
1296 &sc->pciide_channels[channel].dma_maps[drive];
1297
1298 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1299 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1300 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1301 DEBUG_XFERS);
1302
1303 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1304 return WDC_DMAST_NOIRQ;
1305
1306 /* stop DMA channel */
1307 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1308 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1309 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1310 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1311
1312 /* Unload the map of the data buffer */
1313 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1314 dma_maps->dmamap_xfer->dm_mapsize,
1315 (dma_maps->dma_flags & WDC_DMA_READ) ?
1316 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1317 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1318
1319 if ((status & IDEDMA_CTL_ERR) != 0) {
1320 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1321 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1322 error |= WDC_DMAST_ERR;
1323 }
1324
1325 if ((status & IDEDMA_CTL_INTR) == 0) {
1326 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1327 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1328 drive, status);
1329 error |= WDC_DMAST_NOIRQ;
1330 }
1331
1332 if ((status & IDEDMA_CTL_ACT) != 0) {
1333 /* data underrun, may be a valid condition for ATAPI */
1334 error |= WDC_DMAST_UNDER;
1335 }
1336 return error;
1337 }
1338
1339 void
1340 pciide_irqack(chp)
1341 struct channel_softc *chp;
1342 {
1343 struct pciide_channel *cp = (struct pciide_channel*)chp;
1344 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1345
1346 /* clear status bits in IDE DMA registers */
1347 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1348 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1349 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1350 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1351 }
1352
1353 /* some common code used by several chip_map */
1354 int
1355 pciide_chansetup(sc, channel, interface)
1356 struct pciide_softc *sc;
1357 int channel;
1358 pcireg_t interface;
1359 {
1360 struct pciide_channel *cp = &sc->pciide_channels[channel];
1361 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1362 cp->name = PCIIDE_CHANNEL_NAME(channel);
1363 cp->wdc_channel.channel = channel;
1364 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1365 cp->wdc_channel.ch_queue =
1366 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1367 if (cp->wdc_channel.ch_queue == NULL) {
1368 aprint_error("%s %s channel: "
1369 "can't allocate memory for command queue",
1370 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1371 return 0;
1372 }
1373 aprint_normal("%s: %s channel %s to %s mode\n",
1374 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1375 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1376 "configured" : "wired",
1377 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1378 "native-PCI" : "compatibility");
1379 return 1;
1380 }
1381
1382 /* some common code used by several chip channel_map */
1383 void
1384 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1385 struct pci_attach_args *pa;
1386 struct pciide_channel *cp;
1387 pcireg_t interface;
1388 bus_size_t *cmdsizep, *ctlsizep;
1389 int (*pci_intr) __P((void *));
1390 {
1391 struct channel_softc *wdc_cp = &cp->wdc_channel;
1392
1393 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1394 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1395 pci_intr);
1396 else
1397 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1398 wdc_cp->channel, cmdsizep, ctlsizep);
1399
1400 if (cp->hw_ok == 0)
1401 return;
1402 wdc_cp->data32iot = wdc_cp->cmd_iot;
1403 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1404 wdcattach(wdc_cp);
1405 }
1406
1407 /*
1408 * Generic code to determine whether a channel can be disabled. Return 1
1409 * if the channel can be disabled, 0 if not.
1410 */
1411 int
1412 pciide_chan_candisable(cp)
1413 struct pciide_channel *cp;
1414 {
1415 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1416 struct channel_softc *wdc_cp = &cp->wdc_channel;
1417
1418 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1419 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1420 aprint_normal("%s: disabling %s channel (no drives)\n",
1421 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1422 cp->hw_ok = 0;
1423 return 1;
1424 }
1425 return 0;
1426 }
1427
1428 /*
1429 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1430 * Set hw_ok=0 on failure
1431 */
1432 void
1433 pciide_map_compat_intr(pa, cp, compatchan, interface)
1434 struct pci_attach_args *pa;
1435 struct pciide_channel *cp;
1436 int compatchan, interface;
1437 {
1438 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1439 struct channel_softc *wdc_cp = &cp->wdc_channel;
1440
1441 if (cp->hw_ok == 0)
1442 return;
1443 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1444 return;
1445
1446 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1447 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1448 pa, compatchan, pciide_compat_intr, cp);
1449 if (cp->ih == NULL) {
1450 #endif
1451 aprint_error("%s: no compatibility interrupt for use by %s "
1452 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1453 cp->hw_ok = 0;
1454 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1455 }
1456 #endif
1457 }
1458
1459 void
1460 pciide_print_modes(cp)
1461 struct pciide_channel *cp;
1462 {
1463 wdc_print_modes(&cp->wdc_channel);
1464 }
1465
1466 void
1467 default_chip_map(sc, pa)
1468 struct pciide_softc *sc;
1469 struct pci_attach_args *pa;
1470 {
1471 struct pciide_channel *cp;
1472 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1473 pcireg_t csr;
1474 int channel, drive;
1475 struct ata_drive_datas *drvp;
1476 u_int8_t idedma_ctl;
1477 bus_size_t cmdsize, ctlsize;
1478 char *failreason;
1479
1480 if (pciide_chipen(sc, pa) == 0)
1481 return;
1482
1483 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1484 aprint_normal("%s: bus-master DMA support present",
1485 sc->sc_wdcdev.sc_dev.dv_xname);
1486 if (sc->sc_pp == &default_product_desc &&
1487 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1488 PCIIDE_OPTIONS_DMA) == 0) {
1489 aprint_normal(", but unused (no driver support)");
1490 sc->sc_dma_ok = 0;
1491 } else {
1492 pciide_mapreg_dma(sc, pa);
1493 if (sc->sc_dma_ok != 0)
1494 aprint_normal(", used without full driver "
1495 "support");
1496 }
1497 } else {
1498 aprint_normal("%s: hardware does not support DMA",
1499 sc->sc_wdcdev.sc_dev.dv_xname);
1500 sc->sc_dma_ok = 0;
1501 }
1502 aprint_normal("\n");
1503 if (sc->sc_dma_ok) {
1504 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1505 sc->sc_wdcdev.irqack = pciide_irqack;
1506 }
1507 sc->sc_wdcdev.PIO_cap = 0;
1508 sc->sc_wdcdev.DMA_cap = 0;
1509
1510 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1511 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1512 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1513
1514 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1515 cp = &sc->pciide_channels[channel];
1516 if (pciide_chansetup(sc, channel, interface) == 0)
1517 continue;
1518 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1519 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1520 &ctlsize, pciide_pci_intr);
1521 } else {
1522 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1523 channel, &cmdsize, &ctlsize);
1524 }
1525 if (cp->hw_ok == 0)
1526 continue;
1527 /*
1528 * Check to see if something appears to be there.
1529 */
1530 failreason = NULL;
1531 if (!wdcprobe(&cp->wdc_channel)) {
1532 failreason = "not responding; disabled or no drives?";
1533 goto next;
1534 }
1535 /*
1536 * Now, make sure it's actually attributable to this PCI IDE
1537 * channel by trying to access the channel again while the
1538 * PCI IDE controller's I/O space is disabled. (If the
1539 * channel no longer appears to be there, it belongs to
1540 * this controller.) YUCK!
1541 */
1542 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1543 PCI_COMMAND_STATUS_REG);
1544 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1545 csr & ~PCI_COMMAND_IO_ENABLE);
1546 if (wdcprobe(&cp->wdc_channel))
1547 failreason = "other hardware responding at addresses";
1548 pci_conf_write(sc->sc_pc, sc->sc_tag,
1549 PCI_COMMAND_STATUS_REG, csr);
1550 next:
1551 if (failreason) {
1552 aprint_error("%s: %s channel ignored (%s)\n",
1553 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1554 failreason);
1555 cp->hw_ok = 0;
1556 bus_space_unmap(cp->wdc_channel.cmd_iot,
1557 cp->wdc_channel.cmd_ioh, cmdsize);
1558 if (interface & PCIIDE_INTERFACE_PCI(channel))
1559 bus_space_unmap(cp->wdc_channel.ctl_iot,
1560 cp->ctl_baseioh, ctlsize);
1561 else
1562 bus_space_unmap(cp->wdc_channel.ctl_iot,
1563 cp->wdc_channel.ctl_ioh, ctlsize);
1564 } else {
1565 pciide_map_compat_intr(pa, cp, channel, interface);
1566 }
1567 if (cp->hw_ok) {
1568 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1569 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1570 wdcattach(&cp->wdc_channel);
1571 }
1572 }
1573
1574 if (sc->sc_dma_ok == 0)
1575 return;
1576
1577 /* Allocate DMA maps */
1578 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1579 idedma_ctl = 0;
1580 cp = &sc->pciide_channels[channel];
1581 for (drive = 0; drive < 2; drive++) {
1582 drvp = &cp->wdc_channel.ch_drive[drive];
1583 /* If no drive, skip */
1584 if ((drvp->drive_flags & DRIVE) == 0)
1585 continue;
1586 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1587 continue;
1588 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1589 /* Abort DMA setup */
1590 aprint_error(
1591 "%s:%d:%d: can't allocate DMA maps, "
1592 "using PIO transfers\n",
1593 sc->sc_wdcdev.sc_dev.dv_xname,
1594 channel, drive);
1595 drvp->drive_flags &= ~DRIVE_DMA;
1596 }
1597 aprint_normal("%s:%d:%d: using DMA data transfers\n",
1598 sc->sc_wdcdev.sc_dev.dv_xname,
1599 channel, drive);
1600 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1601 }
1602 if (idedma_ctl != 0) {
1603 /* Add software bits in status register */
1604 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1605 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1606 idedma_ctl);
1607 }
1608 }
1609 }
1610
1611 void
1612 sata_setup_channel(chp)
1613 struct channel_softc *chp;
1614 {
1615 struct ata_drive_datas *drvp;
1616 int drive;
1617 u_int32_t idedma_ctl;
1618 struct pciide_channel *cp = (struct pciide_channel*)chp;
1619 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
1620
1621 /* setup DMA if needed */
1622 pciide_channel_dma_setup(cp);
1623
1624 idedma_ctl = 0;
1625
1626 for (drive = 0; drive < 2; drive++) {
1627 drvp = &chp->ch_drive[drive];
1628 /* If no drive, skip */
1629 if ((drvp->drive_flags & DRIVE) == 0)
1630 continue;
1631 if (drvp->drive_flags & DRIVE_UDMA) {
1632 /* use Ultra/DMA */
1633 drvp->drive_flags &= ~DRIVE_DMA;
1634 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1635 } else if (drvp->drive_flags & DRIVE_DMA) {
1636 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1637 }
1638 }
1639
1640 /*
1641 * Nothing to do to setup modes; it is meaningless in S-ATA
1642 * (but many S-ATA drives still want to get the SET_FEATURE
1643 * command).
1644 */
1645 if (idedma_ctl != 0) {
1646 /* Add software bits in status register */
1647 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1648 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1649 idedma_ctl);
1650 }
1651 pciide_print_modes(cp);
1652 }
1653
1654 void
1655 piix_chip_map(sc, pa)
1656 struct pciide_softc *sc;
1657 struct pci_attach_args *pa;
1658 {
1659 struct pciide_channel *cp;
1660 int channel;
1661 u_int32_t idetim;
1662 bus_size_t cmdsize, ctlsize;
1663
1664 if (pciide_chipen(sc, pa) == 0)
1665 return;
1666
1667 aprint_normal("%s: bus-master DMA support present",
1668 sc->sc_wdcdev.sc_dev.dv_xname);
1669 pciide_mapreg_dma(sc, pa);
1670 aprint_normal("\n");
1671 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1672 WDC_CAPABILITY_MODE;
1673 if (sc->sc_dma_ok) {
1674 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1675 sc->sc_wdcdev.irqack = pciide_irqack;
1676 switch(sc->sc_pp->ide_product) {
1677 case PCI_PRODUCT_INTEL_82371AB_IDE:
1678 case PCI_PRODUCT_INTEL_82440MX_IDE:
1679 case PCI_PRODUCT_INTEL_82801AA_IDE:
1680 case PCI_PRODUCT_INTEL_82801AB_IDE:
1681 case PCI_PRODUCT_INTEL_82801BA_IDE:
1682 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1683 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1684 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1685 case PCI_PRODUCT_INTEL_82801DB_IDE:
1686 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1687 case PCI_PRODUCT_INTEL_82801EB_IDE:
1688 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1689 }
1690 }
1691 sc->sc_wdcdev.PIO_cap = 4;
1692 sc->sc_wdcdev.DMA_cap = 2;
1693 switch(sc->sc_pp->ide_product) {
1694 case PCI_PRODUCT_INTEL_82801AA_IDE:
1695 sc->sc_wdcdev.UDMA_cap = 4;
1696 break;
1697 case PCI_PRODUCT_INTEL_82801BA_IDE:
1698 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1699 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1700 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1701 case PCI_PRODUCT_INTEL_82801DB_IDE:
1702 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1703 case PCI_PRODUCT_INTEL_82801EB_IDE:
1704 sc->sc_wdcdev.UDMA_cap = 5;
1705 break;
1706 default:
1707 sc->sc_wdcdev.UDMA_cap = 2;
1708 }
1709 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1710 sc->sc_wdcdev.set_modes = piix_setup_channel;
1711 else
1712 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1713 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1714 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1715
1716 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1717 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1718 DEBUG_PROBE);
1719 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1720 WDCDEBUG_PRINT((", sidetim=0x%x",
1721 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1722 DEBUG_PROBE);
1723 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1724 WDCDEBUG_PRINT((", udmareg 0x%x",
1725 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1726 DEBUG_PROBE);
1727 }
1728 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1729 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1730 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1731 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1732 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1733 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1734 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1735 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1736 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1737 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1738 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1739 DEBUG_PROBE);
1740 }
1741
1742 }
1743 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1744
1745 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1746 cp = &sc->pciide_channels[channel];
1747 /* PIIX is compat-only */
1748 if (pciide_chansetup(sc, channel, 0) == 0)
1749 continue;
1750 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1751 if ((PIIX_IDETIM_READ(idetim, channel) &
1752 PIIX_IDETIM_IDE) == 0) {
1753 aprint_normal("%s: %s channel ignored (disabled)\n",
1754 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1755 continue;
1756 }
1757 /* PIIX are compat-only pciide devices */
1758 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1759 if (cp->hw_ok == 0)
1760 continue;
1761 if (pciide_chan_candisable(cp)) {
1762 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1763 channel);
1764 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1765 idetim);
1766 }
1767 pciide_map_compat_intr(pa, cp, channel, 0);
1768 if (cp->hw_ok == 0)
1769 continue;
1770 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1771 }
1772
1773 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1774 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1775 DEBUG_PROBE);
1776 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1777 WDCDEBUG_PRINT((", sidetim=0x%x",
1778 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1779 DEBUG_PROBE);
1780 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1781 WDCDEBUG_PRINT((", udmareg 0x%x",
1782 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1783 DEBUG_PROBE);
1784 }
1785 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1786 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1787 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1788 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1789 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1790 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1791 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1792 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1793 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1794 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1795 DEBUG_PROBE);
1796 }
1797 }
1798 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1799 }
1800
1801 void
1802 piix_setup_channel(chp)
1803 struct channel_softc *chp;
1804 {
1805 u_int8_t mode[2], drive;
1806 u_int32_t oidetim, idetim, idedma_ctl;
1807 struct pciide_channel *cp = (struct pciide_channel*)chp;
1808 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1809 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1810
1811 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1812 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1813 idedma_ctl = 0;
1814
1815 	/* set up new idetim: enable IDE register decode */
1816 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1817 chp->channel);
1818
1819 /* setup DMA */
1820 pciide_channel_dma_setup(cp);
1821
1822 /*
1823 	 * Here we have to mess with the drive modes: the PIIX can't use
1824 	 * different timings for the master and slave drives, so we need
1825 	 * to find the best combination.
1826 */
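	/*
	 * For example, with the master at DMA mode 2 and the slave at
	 * DMA mode 1, both drives end up programmed for DMA mode 1.
	 */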
1827
1828 	/* If both drives support DMA, take the lower mode */
1829 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1830 (drvp[1].drive_flags & DRIVE_DMA)) {
1831 mode[0] = mode[1] =
1832 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1833 drvp[0].DMA_mode = mode[0];
1834 drvp[1].DMA_mode = mode[1];
1835 goto ok;
1836 }
1837 /*
1838 	 * If only one drive supports DMA, use its mode, and put the other
1839 	 * one in PIO mode 0 if its timings are not compatible
1840 */
1841 if (drvp[0].drive_flags & DRIVE_DMA) {
1842 mode[0] = drvp[0].DMA_mode;
1843 mode[1] = drvp[1].PIO_mode;
1844 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1845 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1846 mode[1] = drvp[1].PIO_mode = 0;
1847 goto ok;
1848 }
1849 if (drvp[1].drive_flags & DRIVE_DMA) {
1850 mode[1] = drvp[1].DMA_mode;
1851 mode[0] = drvp[0].PIO_mode;
1852 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1853 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1854 mode[0] = drvp[0].PIO_mode = 0;
1855 goto ok;
1856 }
1857 /*
1858 	 * If neither drive uses DMA, take the lower mode, unless
1859 	 * one of them is below PIO mode 2
1860 */
1861 if (drvp[0].PIO_mode < 2) {
1862 mode[0] = drvp[0].PIO_mode = 0;
1863 mode[1] = drvp[1].PIO_mode;
1864 } else if (drvp[1].PIO_mode < 2) {
1865 mode[1] = drvp[1].PIO_mode = 0;
1866 mode[0] = drvp[0].PIO_mode;
1867 } else {
1868 mode[0] = mode[1] =
1869 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1870 drvp[0].PIO_mode = mode[0];
1871 drvp[1].PIO_mode = mode[1];
1872 }
1873 ok: /* The modes are setup */
1874 for (drive = 0; drive < 2; drive++) {
1875 if (drvp[drive].drive_flags & DRIVE_DMA) {
1876 idetim |= piix_setup_idetim_timings(
1877 mode[drive], 1, chp->channel);
1878 goto end;
1879 }
1880 }
1881 	/* If we get here, neither drive uses DMA */
1882 if (mode[0] >= 2)
1883 idetim |= piix_setup_idetim_timings(
1884 mode[0], 0, chp->channel);
1885 else
1886 idetim |= piix_setup_idetim_timings(
1887 mode[1], 0, chp->channel);
1888 end: /*
1889 * timing mode is now set up in the controller. Enable
1890 * it per-drive
1891 */
1892 for (drive = 0; drive < 2; drive++) {
1893 /* If no drive, skip */
1894 if ((drvp[drive].drive_flags & DRIVE) == 0)
1895 continue;
1896 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1897 if (drvp[drive].drive_flags & DRIVE_DMA)
1898 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1899 }
1900 if (idedma_ctl != 0) {
1901 /* Add software bits in status register */
1902 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1903 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1904 idedma_ctl);
1905 }
1906 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1907 pciide_print_modes(cp);
1908 }
1909
1910 void
1911 piix3_4_setup_channel(chp)
1912 struct channel_softc *chp;
1913 {
1914 struct ata_drive_datas *drvp;
1915 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1916 struct pciide_channel *cp = (struct pciide_channel*)chp;
1917 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1918 int drive;
1919 int channel = chp->channel;
1920
1921 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1922 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1923 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1924 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1925 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1926 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1927 PIIX_SIDETIM_RTC_MASK(channel));
1928
1929 idedma_ctl = 0;
1930 /* If channel disabled, no need to go further */
1931 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1932 return;
1933 	/* set up new idetim: enable IDE register decode */
1934 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1935
1936 /* setup DMA if needed */
1937 pciide_channel_dma_setup(cp);
1938
1939 for (drive = 0; drive < 2; drive++) {
1940 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1941 PIIX_UDMATIM_SET(0x3, channel, drive));
1942 drvp = &chp->ch_drive[drive];
1943 /* If no drive, skip */
1944 if ((drvp->drive_flags & DRIVE) == 0)
1945 continue;
1946 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1947 (drvp->drive_flags & DRIVE_UDMA) == 0))
1948 goto pio;
1949
1950 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1951 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1952 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1953 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1954 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1955 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1956 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1957 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1958 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
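			/*
			 * On the ICH-family chips listed above, enable the
			 * ping-pong data buffer (PIIX_CONFIG_PINGPONG);
			 * presumably this improves back-to-back transfer
			 * performance.
			 */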
1959 ideconf |= PIIX_CONFIG_PINGPONG;
1960 }
1961 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1962 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1963 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1964 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1965 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1966 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1967 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1968 /* setup Ultra/100 */
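			/*
			 * PIIX_CONFIG_CR appears to report an 80-conductor
			 * cable for this drive; without it, anything above
			 * UDMA2 (Ultra/33) is unsafe, so clamp the mode.
			 */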
1969 if (drvp->UDMA_mode > 2 &&
1970 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1971 drvp->UDMA_mode = 2;
1972 if (drvp->UDMA_mode > 4) {
1973 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1974 } else {
1975 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1976 if (drvp->UDMA_mode > 2) {
1977 ideconf |= PIIX_CONFIG_UDMA66(channel,
1978 drive);
1979 } else {
1980 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1981 drive);
1982 }
1983 }
1984 }
1985 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1986 /* setup Ultra/66 */
1987 if (drvp->UDMA_mode > 2 &&
1988 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1989 drvp->UDMA_mode = 2;
1990 if (drvp->UDMA_mode > 2)
1991 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1992 else
1993 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1994 }
1995 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1996 (drvp->drive_flags & DRIVE_UDMA)) {
1997 /* use Ultra/DMA */
1998 drvp->drive_flags &= ~DRIVE_DMA;
1999 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
2000 udmareg |= PIIX_UDMATIM_SET(
2001 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
2002 } else {
2003 /* use Multiword DMA */
2004 drvp->drive_flags &= ~DRIVE_UDMA;
2005 if (drive == 0) {
2006 idetim |= piix_setup_idetim_timings(
2007 drvp->DMA_mode, 1, channel);
2008 } else {
2009 sidetim |= piix_setup_sidetim_timings(
2010 drvp->DMA_mode, 1, channel);
2011 				idetim = PIIX_IDETIM_SET(idetim,
2012 PIIX_IDETIM_SITRE, channel);
2013 }
2014 }
2015 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2016
2017 pio: /* use PIO mode */
2018 idetim |= piix_setup_idetim_drvs(drvp);
2019 if (drive == 0) {
2020 idetim |= piix_setup_idetim_timings(
2021 drvp->PIO_mode, 0, channel);
2022 } else {
2023 sidetim |= piix_setup_sidetim_timings(
2024 drvp->PIO_mode, 0, channel);
2025 			idetim = PIIX_IDETIM_SET(idetim,
2026 PIIX_IDETIM_SITRE, channel);
2027 }
2028 }
2029 if (idedma_ctl != 0) {
2030 /* Add software bits in status register */
2031 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2032 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
2033 idedma_ctl);
2034 }
2035 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
2036 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
2037 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
2038 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
2039 pciide_print_modes(cp);
2040 }
2041
2042
2043 /* setup ISP and RTC fields, based on mode */
2044 static u_int32_t
2045 piix_setup_idetim_timings(mode, dma, channel)
2046 u_int8_t mode;
2047 u_int8_t dma;
2048 u_int8_t channel;
2049 {
2050
2051 if (dma)
2052 return PIIX_IDETIM_SET(0,
2053 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
2054 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
2055 channel);
2056 else
2057 return PIIX_IDETIM_SET(0,
2058 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
2059 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
2060 channel);
2061 }
2062
2063 /* setup DTE, PPE, IE and TIME field based on PIO mode */
2064 static u_int32_t
2065 piix_setup_idetim_drvs(drvp)
2066 struct ata_drive_datas *drvp;
2067 {
2068 u_int32_t ret = 0;
2069 struct channel_softc *chp = drvp->chnl_softc;
2070 u_int8_t channel = chp->channel;
2071 u_int8_t drive = drvp->drive;
2072
2073 /*
2074 	 * If the drive is using UDMA, the timing setup is independent,
2075 	 * so just check DMA and PIO here.
2076 */
2077 if (drvp->drive_flags & DRIVE_DMA) {
2078 /* if mode = DMA mode 0, use compatible timings */
2079 if ((drvp->drive_flags & DRIVE_DMA) &&
2080 drvp->DMA_mode == 0) {
2081 drvp->PIO_mode = 0;
2082 return ret;
2083 }
2084 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2085 /*
2086 		 * If the PIO and DMA timings are the same, use fast timings
2087 		 * for PIO too; otherwise force PIO back to compat timings.
2088 */
2089 if ((piix_isp_pio[drvp->PIO_mode] !=
2090 piix_isp_dma[drvp->DMA_mode]) ||
2091 (piix_rtc_pio[drvp->PIO_mode] !=
2092 piix_rtc_dma[drvp->DMA_mode]))
2093 drvp->PIO_mode = 0;
2094 /* if PIO mode <= 2, use compat timings for PIO */
2095 if (drvp->PIO_mode <= 2) {
2096 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2097 channel);
2098 return ret;
2099 }
2100 }
2101
2102 /*
2103 	 * Now set up the PIO mode. If the mode is below 2, use compat
2104 	 * timings; otherwise enable fast timings. Also enable IORDY and
2105 	 * prefetch/posting if the PIO mode is >= 3.
2106 */
2107
2108 if (drvp->PIO_mode < 2)
2109 return ret;
2110
2111 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2112 if (drvp->PIO_mode >= 3) {
2113 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2114 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2115 }
2116 return ret;
2117 }
2118
2119 /* setup values in SIDETIM registers, based on mode */
2120 static u_int32_t
2121 piix_setup_sidetim_timings(mode, dma, channel)
2122 u_int8_t mode;
2123 u_int8_t dma;
2124 u_int8_t channel;
2125 {
2126 if (dma)
2127 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2128 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2129 else
2130 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2131 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2132 }
2133
2134 void
2135 amd7x6_chip_map(sc, pa)
2136 struct pciide_softc *sc;
2137 struct pci_attach_args *pa;
2138 {
2139 struct pciide_channel *cp;
2140 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2141 int channel;
2142 pcireg_t chanenable;
2143 bus_size_t cmdsize, ctlsize;
2144
2145 if (pciide_chipen(sc, pa) == 0)
2146 return;
2147 aprint_normal("%s: bus-master DMA support present",
2148 sc->sc_wdcdev.sc_dev.dv_xname);
2149 pciide_mapreg_dma(sc, pa);
2150 aprint_normal("\n");
2151 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2152 WDC_CAPABILITY_MODE;
2153 if (sc->sc_dma_ok) {
2154 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2155 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2156 sc->sc_wdcdev.irqack = pciide_irqack;
2157 }
2158 sc->sc_wdcdev.PIO_cap = 4;
2159 sc->sc_wdcdev.DMA_cap = 2;
2160
2161 switch (sc->sc_pci_vendor) {
2162 case PCI_VENDOR_AMD:
2163 switch (sc->sc_pp->ide_product) {
2164 case PCI_PRODUCT_AMD_PBC766_IDE:
2165 case PCI_PRODUCT_AMD_PBC768_IDE:
2166 case PCI_PRODUCT_AMD_PBC8111_IDE:
2167 sc->sc_wdcdev.UDMA_cap = 5;
2168 break;
2169 default:
2170 sc->sc_wdcdev.UDMA_cap = 4;
2171 }
2172 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2173 break;
2174
2175 case PCI_VENDOR_NVIDIA:
2176 switch (sc->sc_pp->ide_product) {
2177 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2178 sc->sc_wdcdev.UDMA_cap = 5;
2179 break;
2180 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2181 sc->sc_wdcdev.UDMA_cap = 6;
2182 break;
2183 }
2184 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2185 break;
2186
2187 default:
2188 panic("amd7x6_chip_map: unknown vendor");
2189 }
2190 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2191 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2192 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2193 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2194 AMD7X6_CHANSTATUS_EN(sc));
2195
2196 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2197 DEBUG_PROBE);
2198 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2199 cp = &sc->pciide_channels[channel];
2200 if (pciide_chansetup(sc, channel, interface) == 0)
2201 continue;
2202
2203 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2204 aprint_normal("%s: %s channel ignored (disabled)\n",
2205 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2206 continue;
2207 }
2208 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2209 pciide_pci_intr);
2210
2211 if (pciide_chan_candisable(cp))
2212 chanenable &= ~AMD7X6_CHAN_EN(channel);
2213 pciide_map_compat_intr(pa, cp, channel, interface);
2214 if (cp->hw_ok == 0)
2215 continue;
2216
2217 amd7x6_setup_channel(&cp->wdc_channel);
2218 }
2219 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2220 chanenable);
2221 return;
2222 }
2223
2224 void
2225 amd7x6_setup_channel(chp)
2226 struct channel_softc *chp;
2227 {
2228 u_int32_t udmatim_reg, datatim_reg;
2229 u_int8_t idedma_ctl;
2230 int mode, drive;
2231 struct ata_drive_datas *drvp;
2232 struct pciide_channel *cp = (struct pciide_channel*)chp;
2233 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2234 #ifndef PCIIDE_AMD756_ENABLEDMA
2235 int rev = PCI_REVISION(
2236 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2237 #endif
2238
2239 idedma_ctl = 0;
2240 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2241 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2242 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2243 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2244
2245 /* setup DMA if needed */
2246 pciide_channel_dma_setup(cp);
2247
2248 for (drive = 0; drive < 2; drive++) {
2249 drvp = &chp->ch_drive[drive];
2250 /* If no drive, skip */
2251 if ((drvp->drive_flags & DRIVE) == 0)
2252 continue;
2253 /* add timing values, setup DMA if needed */
2254 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2255 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2256 mode = drvp->PIO_mode;
2257 goto pio;
2258 }
2259 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2260 (drvp->drive_flags & DRIVE_UDMA)) {
2261 /* use Ultra/DMA */
2262 drvp->drive_flags &= ~DRIVE_DMA;
2263 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2264 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2265 AMD7X6_UDMA_TIME(chp->channel, drive,
2266 amd7x6_udma_tim[drvp->UDMA_mode]);
2267 /* can use PIO timings, MW DMA unused */
2268 mode = drvp->PIO_mode;
2269 } else {
2270 /* use Multiword DMA, but only if revision is OK */
2271 drvp->drive_flags &= ~DRIVE_UDMA;
2272 #ifndef PCIIDE_AMD756_ENABLEDMA
2273 /*
2274 			 * The workaround doesn't seem to be necessary with
2275 			 * all drives, so it can be disabled via
2276 			 * PCIIDE_AMD756_ENABLEDMA. The erratum causes a hard
2277 			 * hang if triggered.
2278 */
2279 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2280 sc->sc_pp->ide_product ==
2281 PCI_PRODUCT_AMD_PBC756_IDE &&
2282 AMD756_CHIPREV_DISABLEDMA(rev)) {
2283 aprint_normal(
2284 "%s:%d:%d: multi-word DMA disabled due "
2285 "to chip revision\n",
2286 sc->sc_wdcdev.sc_dev.dv_xname,
2287 chp->channel, drive);
2288 mode = drvp->PIO_mode;
2289 drvp->drive_flags &= ~DRIVE_DMA;
2290 goto pio;
2291 }
2292 #endif
2293 /* mode = min(pio, dma+2) */
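			/*
			 * A single data-timing value per drive serves both
			 * PIO and multiword DMA; DMA mode n is paired with
			 * PIO mode n + 2 below, so pick the slower of the
			 * drive's two settings.
			 */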
2294 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2295 mode = drvp->PIO_mode;
2296 else
2297 mode = drvp->DMA_mode + 2;
2298 }
2299 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2300
2301 pio: /* setup PIO mode */
2302 if (mode <= 2) {
2303 drvp->DMA_mode = 0;
2304 drvp->PIO_mode = 0;
2305 mode = 0;
2306 } else {
2307 drvp->PIO_mode = mode;
2308 drvp->DMA_mode = mode - 2;
2309 }
2310 datatim_reg |=
2311 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2312 amd7x6_pio_set[mode]) |
2313 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2314 amd7x6_pio_rec[mode]);
2315 }
2316 if (idedma_ctl != 0) {
2317 /* Add software bits in status register */
2318 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2319 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2320 idedma_ctl);
2321 }
2322 pciide_print_modes(cp);
2323 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2324 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2325 }
2326
2327 void
2328 apollo_chip_map(sc, pa)
2329 struct pciide_softc *sc;
2330 struct pci_attach_args *pa;
2331 {
2332 struct pciide_channel *cp;
2333 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2334 int channel;
2335 u_int32_t ideconf;
2336 bus_size_t cmdsize, ctlsize;
2337 pcitag_t pcib_tag;
2338 pcireg_t pcib_id, pcib_class;
2339
2340 if (pciide_chipen(sc, pa) == 0)
2341 return;
2342 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2343 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2344 /* and read ID and rev of the ISA bridge */
2345 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2346 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2347 aprint_normal(": VIA Technologies ");
2348 switch (PCI_PRODUCT(pcib_id)) {
2349 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2350 aprint_normal("VT82C586 (Apollo VP) ");
2351 if(PCI_REVISION(pcib_class) >= 0x02) {
2352 aprint_normal("ATA33 controller\n");
2353 sc->sc_wdcdev.UDMA_cap = 2;
2354 } else {
2355 aprint_normal("controller\n");
2356 sc->sc_wdcdev.UDMA_cap = 0;
2357 }
2358 break;
2359 case PCI_PRODUCT_VIATECH_VT82C596A:
2360 aprint_normal("VT82C596A (Apollo Pro) ");
2361 if (PCI_REVISION(pcib_class) >= 0x12) {
2362 aprint_normal("ATA66 controller\n");
2363 sc->sc_wdcdev.UDMA_cap = 4;
2364 } else {
2365 aprint_normal("ATA33 controller\n");
2366 sc->sc_wdcdev.UDMA_cap = 2;
2367 }
2368 break;
2369 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2370 aprint_normal("VT82C686A (Apollo KX133) ");
2371 if (PCI_REVISION(pcib_class) >= 0x40) {
2372 aprint_normal("ATA100 controller\n");
2373 sc->sc_wdcdev.UDMA_cap = 5;
2374 } else {
2375 aprint_normal("ATA66 controller\n");
2376 sc->sc_wdcdev.UDMA_cap = 4;
2377 }
2378 break;
2379 case PCI_PRODUCT_VIATECH_VT8231:
2380 aprint_normal("VT8231 ATA100 controller\n");
2381 sc->sc_wdcdev.UDMA_cap = 5;
2382 break;
2383 case PCI_PRODUCT_VIATECH_VT8233:
2384 aprint_normal("VT8233 ATA100 controller\n");
2385 sc->sc_wdcdev.UDMA_cap = 5;
2386 break;
2387 case PCI_PRODUCT_VIATECH_VT8233A:
2388 aprint_normal("VT8233A ATA133 controller\n");
2389 sc->sc_wdcdev.UDMA_cap = 6;
2390 break;
2391 case PCI_PRODUCT_VIATECH_VT8235:
2392 aprint_normal("VT8235 ATA133 controller\n");
2393 sc->sc_wdcdev.UDMA_cap = 6;
2394 break;
2395 case PCI_PRODUCT_VIATECH_VT8237_SATA:
2396 aprint_normal("VT8237 ATA133 controller\n");
2397 sc->sc_wdcdev.UDMA_cap = 6;
2398 break;
2399 default:
2400 aprint_normal("unknown ATA controller\n");
2401 sc->sc_wdcdev.UDMA_cap = 0;
2402 }
2403
2404 aprint_normal("%s: bus-master DMA support present",
2405 sc->sc_wdcdev.sc_dev.dv_xname);
2406 pciide_mapreg_dma(sc, pa);
2407 aprint_normal("\n");
2408 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2409 WDC_CAPABILITY_MODE;
2410 if (sc->sc_dma_ok) {
2411 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2412 sc->sc_wdcdev.irqack = pciide_irqack;
2413 if (sc->sc_wdcdev.UDMA_cap > 0)
2414 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2415 }
2416 sc->sc_wdcdev.PIO_cap = 4;
2417 sc->sc_wdcdev.DMA_cap = 2;
2418 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2419 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2420 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2421
2422 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2423 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2424 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2425 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2426 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2427 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2428 DEBUG_PROBE);
2429
2430 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2431 cp = &sc->pciide_channels[channel];
2432 if (pciide_chansetup(sc, channel, interface) == 0)
2433 continue;
2434
2435 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2436 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2437 aprint_normal("%s: %s channel ignored (disabled)\n",
2438 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2439 continue;
2440 }
2441 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2442 pciide_pci_intr);
2443 if (cp->hw_ok == 0)
2444 continue;
2445 if (pciide_chan_candisable(cp)) {
2446 ideconf &= ~APO_IDECONF_EN(channel);
2447 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2448 ideconf);
2449 }
2450 pciide_map_compat_intr(pa, cp, channel, interface);
2451
2452 if (cp->hw_ok == 0)
2453 continue;
2454 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2455 }
2456 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2457 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2458 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2459 }
2460
2461 void
2462 apollo_setup_channel(chp)
2463 struct channel_softc *chp;
2464 {
2465 u_int32_t udmatim_reg, datatim_reg;
2466 u_int8_t idedma_ctl;
2467 int mode, drive;
2468 struct ata_drive_datas *drvp;
2469 struct pciide_channel *cp = (struct pciide_channel*)chp;
2470 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2471
2472 idedma_ctl = 0;
2473 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2474 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2475 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2476 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2477
2478 /* setup DMA if needed */
2479 pciide_channel_dma_setup(cp);
2480
2481 for (drive = 0; drive < 2; drive++) {
2482 drvp = &chp->ch_drive[drive];
2483 /* If no drive, skip */
2484 if ((drvp->drive_flags & DRIVE) == 0)
2485 continue;
2486 /* add timing values, setup DMA if needed */
2487 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2488 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2489 mode = drvp->PIO_mode;
2490 goto pio;
2491 }
2492 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2493 (drvp->drive_flags & DRIVE_UDMA)) {
2494 /* use Ultra/DMA */
2495 drvp->drive_flags &= ~DRIVE_DMA;
2496 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2497 APO_UDMA_EN_MTH(chp->channel, drive);
2498 if (sc->sc_wdcdev.UDMA_cap == 6) {
2499 /* 8233a */
2500 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2501 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2502 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2503 /* 686b */
2504 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2505 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2506 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2507 /* 596b or 686a */
2508 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2509 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2510 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2511 } else {
2512 /* 596a or 586b */
2513 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2514 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2515 }
2516 /* can use PIO timings, MW DMA unused */
2517 mode = drvp->PIO_mode;
2518 } else {
2519 /* use Multiword DMA */
2520 drvp->drive_flags &= ~DRIVE_UDMA;
2521 /* mode = min(pio, dma+2) */
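			/*
			 * A single data-timing value per drive serves both
			 * PIO and multiword DMA here as well; DMA mode n is
			 * paired with PIO mode n + 2 below, so pick the
			 * slower of the drive's two settings.
			 */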
2522 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2523 mode = drvp->PIO_mode;
2524 else
2525 mode = drvp->DMA_mode + 2;
2526 }
2527 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2528
2529 pio: /* setup PIO mode */
2530 if (mode <= 2) {
2531 drvp->DMA_mode = 0;
2532 drvp->PIO_mode = 0;
2533 mode = 0;
2534 } else {
2535 drvp->PIO_mode = mode;
2536 drvp->DMA_mode = mode - 2;
2537 }
2538 datatim_reg |=
2539 APO_DATATIM_PULSE(chp->channel, drive,
2540 apollo_pio_set[mode]) |
2541 APO_DATATIM_RECOV(chp->channel, drive,
2542 apollo_pio_rec[mode]);
2543 }
2544 if (idedma_ctl != 0) {
2545 /* Add software bits in status register */
2546 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2547 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2548 idedma_ctl);
2549 }
2550 pciide_print_modes(cp);
2551 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2552 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2553 }
2554
2555 void
2556 apollo_sata_chip_map(sc, pa)
2557 struct pciide_softc *sc;
2558 struct pci_attach_args *pa;
2559 {
2560 struct pciide_channel *cp;
2561 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2562 int channel;
2563 bus_size_t cmdsize, ctlsize;
2564
2565 if (pciide_chipen(sc, pa) == 0)
2566 return;
2567
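	/*
	 * Some of these controllers report a programming interface of 0,
	 * presumably left unset by the firmware; treat that as native-PCI
	 * mode on both channels with bus-master DMA.
	 */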
2568 	if (interface == 0) {
2569 WDCDEBUG_PRINT(("apollo_sata_chip_map interface == 0\n"),
2570 DEBUG_PROBE);
2571 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2572 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2573 }
2574
2575 aprint_normal("%s: bus-master DMA support present",
2576 sc->sc_wdcdev.sc_dev.dv_xname);
2577 pciide_mapreg_dma(sc, pa);
2578 aprint_normal("\n");
2579
2580 if (sc->sc_dma_ok) {
2581 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | WDC_CAPABILITY_DMA |
		    WDC_CAPABILITY_IRQACK;
2582 sc->sc_wdcdev.irqack = pciide_irqack;
2583 }
2584 sc->sc_wdcdev.PIO_cap = 4;
2585 sc->sc_wdcdev.DMA_cap = 2;
2586 sc->sc_wdcdev.UDMA_cap = 6;
2587
2588 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2589 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2590 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2591 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SINGLE_DRIVE;
2592 sc->sc_wdcdev.set_modes = sata_setup_channel;
2593
2594 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2595 cp = &sc->pciide_channels[channel];
2596 if (pciide_chansetup(sc, channel, interface) == 0)
2597 continue;
2598 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2599 pciide_pci_intr);
2600
2601 pciide_map_compat_intr(pa, cp, channel, interface);
2602 sata_setup_channel(&cp->wdc_channel);
2603 }
2604 }
2605
2606 void
2607 cmd_channel_map(pa, sc, channel)
2608 struct pci_attach_args *pa;
2609 struct pciide_softc *sc;
2610 int channel;
2611 {
2612 struct pciide_channel *cp = &sc->pciide_channels[channel];
2613 bus_size_t cmdsize, ctlsize;
2614 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2615 int interface, one_channel;
2616
2617 /*
2618 * The 0648/0649 can be told to identify as a RAID controller.
2619 	 * In this case, we have to fake the programming interface
2620 */
2621 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2622 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2623 PCIIDE_INTERFACE_SETTABLE(1);
2624 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2625 CMD_CONF_DSA1)
2626 interface |= PCIIDE_INTERFACE_PCI(0) |
2627 PCIIDE_INTERFACE_PCI(1);
2628 } else {
2629 interface = PCI_INTERFACE(pa->pa_class);
2630 }
2631
2632 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2633 cp->name = PCIIDE_CHANNEL_NAME(channel);
2634 cp->wdc_channel.channel = channel;
2635 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2636
2637 /*
2638 	 * Older CMD64x chips don't have independent channels
2639 */
2640 switch (sc->sc_pp->ide_product) {
2641 case PCI_PRODUCT_CMDTECH_649:
2642 one_channel = 0;
2643 break;
2644 default:
2645 one_channel = 1;
2646 break;
2647 }
2648
2649 if (channel > 0 && one_channel) {
2650 cp->wdc_channel.ch_queue =
2651 sc->pciide_channels[0].wdc_channel.ch_queue;
2652 } else {
2653 cp->wdc_channel.ch_queue =
2654 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2655 }
2656 if (cp->wdc_channel.ch_queue == NULL) {
2657 aprint_error("%s %s channel: "
2658 		    "can't allocate memory for command queue\n",
2659 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2660 return;
2661 }
2662
2663 aprint_normal("%s: %s channel %s to %s mode\n",
2664 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2665 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2666 "configured" : "wired",
2667 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2668 "native-PCI" : "compatibility");
2669
2670 /*
2671 * with a CMD PCI64x, if we get here, the first channel is enabled:
2672 * there's no way to disable the first channel without disabling
2673 * the whole device
2674 */
2675 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2676 aprint_normal("%s: %s channel ignored (disabled)\n",
2677 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2678 return;
2679 }
2680
2681 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2682 if (cp->hw_ok == 0)
2683 return;
2684 if (channel == 1) {
2685 if (pciide_chan_candisable(cp)) {
2686 ctrl &= ~CMD_CTRL_2PORT;
2687 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2688 CMD_CTRL, ctrl);
2689 }
2690 }
2691 pciide_map_compat_intr(pa, cp, channel, interface);
2692 }
2693
2694 int
2695 cmd_pci_intr(arg)
2696 void *arg;
2697 {
2698 struct pciide_softc *sc = arg;
2699 struct pciide_channel *cp;
2700 struct channel_softc *wdc_cp;
2701 int i, rv, crv;
2702 u_int32_t priirq, secirq;
2703
2704 rv = 0;
2705 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2706 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2707 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2708 cp = &sc->pciide_channels[i];
2709 wdc_cp = &cp->wdc_channel;
2710 		/* If a compat channel, skip it. */
2711 if (cp->compat)
2712 continue;
2713 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2714 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2715 crv = wdcintr(wdc_cp);
2716 if (crv == 0)
2717 printf("%s:%d: bogus intr\n",
2718 sc->sc_wdcdev.sc_dev.dv_xname, i);
2719 else
2720 rv = 1;
2721 }
2722 }
2723 return rv;
2724 }
2725
2726 void
2727 cmd_chip_map(sc, pa)
2728 struct pciide_softc *sc;
2729 struct pci_attach_args *pa;
2730 {
2731 int channel;
2732
2733 /*
2734 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2735 	 * and base address registers can be disabled at the
2736 	 * hardware level. In this case, the device is wired
2737 * in compat mode and its first channel is always enabled,
2738 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2739 * In fact, it seems that the first channel of the CMD PCI0640
2740 * can't be disabled.
2741 */
2742
2743 #ifdef PCIIDE_CMD064x_DISABLE
2744 if (pciide_chipen(sc, pa) == 0)
2745 return;
2746 #endif
2747
2748 aprint_normal("%s: hardware does not support DMA\n",
2749 sc->sc_wdcdev.sc_dev.dv_xname);
2750 sc->sc_dma_ok = 0;
2751
2752 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2753 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2754 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2755
2756 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2757 cmd_channel_map(pa, sc, channel);
2758 }
2759 }
2760
2761 void
2762 cmd0643_9_chip_map(sc, pa)
2763 struct pciide_softc *sc;
2764 struct pci_attach_args *pa;
2765 {
2766 struct pciide_channel *cp;
2767 int channel;
2768 pcireg_t rev = PCI_REVISION(pa->pa_class);
2769
2770 /*
2771 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2772 	 * and base address registers can be disabled at the
2773 	 * hardware level. In this case, the device is wired
2774 * in compat mode and its first channel is always enabled,
2775 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2776 * In fact, it seems that the first channel of the CMD PCI0640
2777 * can't be disabled.
2778 */
2779
2780 #ifdef PCIIDE_CMD064x_DISABLE
2781 if (pciide_chipen(sc, pa) == 0)
2782 return;
2783 #endif
2784 aprint_normal("%s: bus-master DMA support present",
2785 sc->sc_wdcdev.sc_dev.dv_xname);
2786 pciide_mapreg_dma(sc, pa);
2787 aprint_normal("\n");
2788 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2789 WDC_CAPABILITY_MODE;
2790 if (sc->sc_dma_ok) {
2791 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2792 switch (sc->sc_pp->ide_product) {
2793 case PCI_PRODUCT_CMDTECH_649:
2794 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2795 sc->sc_wdcdev.UDMA_cap = 5;
2796 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2797 break;
2798 case PCI_PRODUCT_CMDTECH_648:
2799 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2800 sc->sc_wdcdev.UDMA_cap = 4;
2801 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2802 break;
2803 case PCI_PRODUCT_CMDTECH_646:
2804 if (rev >= CMD0646U2_REV) {
2805 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2806 sc->sc_wdcdev.UDMA_cap = 2;
2807 } else if (rev >= CMD0646U_REV) {
2808 /*
2809 * Linux's driver claims that the 646U is broken
2810 * with UDMA. Only enable it if we know what we're
2811 * doing
2812 */
2813 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2814 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2815 sc->sc_wdcdev.UDMA_cap = 2;
2816 #endif
2817 /* explicitly disable UDMA */
2818 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2819 CMD_UDMATIM(0), 0);
2820 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2821 CMD_UDMATIM(1), 0);
2822 }
2823 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2824 break;
2825 default:
2826 sc->sc_wdcdev.irqack = pciide_irqack;
2827 }
2828 }
2829
2830 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2831 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2832 sc->sc_wdcdev.PIO_cap = 4;
2833 sc->sc_wdcdev.DMA_cap = 2;
2834 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2835
2836 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2837 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2838 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2839 DEBUG_PROBE);
2840
2841 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2842 cp = &sc->pciide_channels[channel];
2843 cmd_channel_map(pa, sc, channel);
2844 if (cp->hw_ok == 0)
2845 continue;
2846 cmd0643_9_setup_channel(&cp->wdc_channel);
2847 }
2848 /*
2849 * note - this also makes sure we clear the irq disable and reset
2850 * bits
2851 */
2852 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2853 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2854 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2855 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2856 DEBUG_PROBE);
2857 }
2858
2859 void
2860 cmd0643_9_setup_channel(chp)
2861 struct channel_softc *chp;
2862 {
2863 struct ata_drive_datas *drvp;
2864 u_int8_t tim;
2865 u_int32_t idedma_ctl, udma_reg;
2866 int drive;
2867 struct pciide_channel *cp = (struct pciide_channel*)chp;
2868 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2869
2870 idedma_ctl = 0;
2871 /* setup DMA if needed */
2872 pciide_channel_dma_setup(cp);
2873
2874 for (drive = 0; drive < 2; drive++) {
2875 drvp = &chp->ch_drive[drive];
2876 /* If no drive, skip */
2877 if ((drvp->drive_flags & DRIVE) == 0)
2878 continue;
2879 /* add timing values, setup DMA if needed */
2880 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2881 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2882 if (drvp->drive_flags & DRIVE_UDMA) {
2883 /* UltraDMA on a 646U2, 0648 or 0649 */
2884 drvp->drive_flags &= ~DRIVE_DMA;
2885 udma_reg = pciide_pci_read(sc->sc_pc,
2886 sc->sc_tag, CMD_UDMATIM(chp->channel));
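				/*
				 * CMD_BICSR_80 appears to report an
				 * 80-conductor cable on this channel; without
				 * it, limit the drive to UDMA2 (Ultra/33).
				 */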
2887 if (drvp->UDMA_mode > 2 &&
2888 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2889 CMD_BICSR) &
2890 CMD_BICSR_80(chp->channel)) == 0)
2891 drvp->UDMA_mode = 2;
2892 if (drvp->UDMA_mode > 2)
2893 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2894 else if (sc->sc_wdcdev.UDMA_cap > 2)
2895 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2896 udma_reg |= CMD_UDMATIM_UDMA(drive);
2897 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2898 CMD_UDMATIM_TIM_OFF(drive));
2899 udma_reg |=
2900 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2901 CMD_UDMATIM_TIM_OFF(drive));
2902 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2903 CMD_UDMATIM(chp->channel), udma_reg);
2904 } else {
2905 /*
2906 				 * Use multiword DMA.
2907 				 * The timings will be used for both PIO and
2908 				 * DMA, so adjust the DMA mode if needed.
2909 				 * If we have a 0646U2/8/9, turn off UDMA.
2910 */
2911 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2912 udma_reg = pciide_pci_read(sc->sc_pc,
2913 sc->sc_tag,
2914 CMD_UDMATIM(chp->channel));
2915 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2916 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2917 CMD_UDMATIM(chp->channel),
2918 udma_reg);
2919 }
2920 if (drvp->PIO_mode >= 3 &&
2921 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2922 drvp->DMA_mode = drvp->PIO_mode - 2;
2923 }
2924 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2925 }
2926 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2927 }
2928 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2929 CMD_DATA_TIM(chp->channel, drive), tim);
2930 }
2931 if (idedma_ctl != 0) {
2932 /* Add software bits in status register */
2933 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2934 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2935 idedma_ctl);
2936 }
2937 pciide_print_modes(cp);
2938 }
2939
2940 void
2941 cmd646_9_irqack(chp)
2942 struct channel_softc *chp;
2943 {
2944 u_int32_t priirq, secirq;
2945 struct pciide_channel *cp = (struct pciide_channel*)chp;
2946 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2947
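	/*
	 * Read the channel's interrupt status register and write the same
	 * value back; the IRQ status bits are presumably write-one-to-clear,
	 * so this acks the pending interrupt before the generic ack below.
	 */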
2948 if (chp->channel == 0) {
2949 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2950 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2951 } else {
2952 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2953 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2954 }
2955 pciide_irqack(chp);
2956 }
2957
2958 void
2959 cmd680_chip_map(sc, pa)
2960 struct pciide_softc *sc;
2961 struct pci_attach_args *pa;
2962 {
2963 struct pciide_channel *cp;
2964 int channel;
2965
2966 if (pciide_chipen(sc, pa) == 0)
2967 return;
2968 aprint_normal("%s: bus-master DMA support present",
2969 sc->sc_wdcdev.sc_dev.dv_xname);
2970 pciide_mapreg_dma(sc, pa);
2971 aprint_normal("\n");
2972 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2973 WDC_CAPABILITY_MODE;
2974 if (sc->sc_dma_ok) {
2975 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2976 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2977 sc->sc_wdcdev.UDMA_cap = 6;
2978 sc->sc_wdcdev.irqack = pciide_irqack;
2979 }
2980
2981 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2982 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2983 sc->sc_wdcdev.PIO_cap = 4;
2984 sc->sc_wdcdev.DMA_cap = 2;
2985 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2986
2987 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2988 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2989 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2990 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2991 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2992 cp = &sc->pciide_channels[channel];
2993 cmd680_channel_map(pa, sc, channel);
2994 if (cp->hw_ok == 0)
2995 continue;
2996 cmd680_setup_channel(&cp->wdc_channel);
2997 }
2998 }
2999
3000 void
3001 cmd680_channel_map(pa, sc, channel)
3002 struct pci_attach_args *pa;
3003 struct pciide_softc *sc;
3004 int channel;
3005 {
3006 struct pciide_channel *cp = &sc->pciide_channels[channel];
3007 bus_size_t cmdsize, ctlsize;
3008 int interface, i, reg;
3009 static const u_int8_t init_val[] =
3010 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
3011 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
3012
3013 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
3014 interface = PCIIDE_INTERFACE_SETTABLE(0) |
3015 PCIIDE_INTERFACE_SETTABLE(1);
3016 interface |= PCIIDE_INTERFACE_PCI(0) |
3017 PCIIDE_INTERFACE_PCI(1);
3018 } else {
3019 interface = PCI_INTERFACE(pa->pa_class);
3020 }
3021
3022 sc->wdc_chanarray[channel] = &cp->wdc_channel;
3023 cp->name = PCIIDE_CHANNEL_NAME(channel);
3024 cp->wdc_channel.channel = channel;
3025 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3026
3027 cp->wdc_channel.ch_queue =
3028 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3029 if (cp->wdc_channel.ch_queue == NULL) {
3030 aprint_error("%s %s channel: "
3031 		    "can't allocate memory for command queue\n",
3032 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3033 return;
3034 }
3035
3036 /* XXX */
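	/*
	 * Load default values into this channel's timing registers
	 * (starting at 0xa2/0xb2); the init_val table is presumably taken
	 * from the chip vendor's reference settings.
	 */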
3037 reg = 0xa2 + channel * 16;
3038 for (i = 0; i < sizeof(init_val); i++)
3039 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
3040
3041 aprint_normal("%s: %s channel %s to %s mode\n",
3042 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
3043 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
3044 "configured" : "wired",
3045 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
3046 "native-PCI" : "compatibility");
3047
3048 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
3049 if (cp->hw_ok == 0)
3050 return;
3051 pciide_map_compat_intr(pa, cp, channel, interface);
3052 }
3053
3054 void
3055 cmd680_setup_channel(chp)
3056 struct channel_softc *chp;
3057 {
3058 struct ata_drive_datas *drvp;
3059 u_int8_t mode, off, scsc;
3060 u_int16_t val;
3061 u_int32_t idedma_ctl;
3062 int drive;
3063 struct pciide_channel *cp = (struct pciide_channel*)chp;
3064 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3065 pci_chipset_tag_t pc = sc->sc_pc;
3066 pcitag_t pa = sc->sc_tag;
3067 static const u_int8_t udma2_tbl[] =
3068 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
3069 static const u_int8_t udma_tbl[] =
3070 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
3071 static const u_int16_t dma_tbl[] =
3072 { 0x2208, 0x10c2, 0x10c1 };
3073 static const u_int16_t pio_tbl[] =
3074 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
3075
3076 idedma_ctl = 0;
3077 pciide_channel_dma_setup(cp);
3078 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
3079
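	/*
	 * The register at 0x80/0x84 holds a 2-bit transfer-class field per
	 * drive (bits 0-1 and 4-5): 01 selects PIO, 10 multiword DMA and
	 * 11 UDMA, as programmed in the loop below.
	 */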
3080 for (drive = 0; drive < 2; drive++) {
3081 drvp = &chp->ch_drive[drive];
3082 /* If no drive, skip */
3083 if ((drvp->drive_flags & DRIVE) == 0)
3084 continue;
3085 mode &= ~(0x03 << (drive * 4));
3086 if (drvp->drive_flags & DRIVE_UDMA) {
3087 drvp->drive_flags &= ~DRIVE_DMA;
3088 off = 0xa0 + chp->channel * 16;
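			/*
			 * Bit 0x01 of the register at 0xa0/0xb0 appears to
			 * indicate an 80-conductor cable; without it, cap
			 * the drive at UDMA2.
			 */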
3089 if (drvp->UDMA_mode > 2 &&
3090 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
3091 drvp->UDMA_mode = 2;
3092 scsc = pciide_pci_read(pc, pa, 0x8a);
3093 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
3094 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
3095 scsc = pciide_pci_read(pc, pa, 0x8a);
3096 if ((scsc & 0x30) == 0)
3097 drvp->UDMA_mode = 5;
3098 }
3099 mode |= 0x03 << (drive * 4);
3100 off = 0xac + chp->channel * 16 + drive * 2;
3101 val = pciide_pci_read(pc, pa, off) & ~0x3f;
3102 if (scsc & 0x30)
3103 val |= udma2_tbl[drvp->UDMA_mode];
3104 else
3105 val |= udma_tbl[drvp->UDMA_mode];
3106 pciide_pci_write(pc, pa, off, val);
3107 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3108 } else if (drvp->drive_flags & DRIVE_DMA) {
3109 mode |= 0x02 << (drive * 4);
3110 off = 0xa8 + chp->channel * 16 + drive * 2;
3111 val = dma_tbl[drvp->DMA_mode];
3112 pciide_pci_write(pc, pa, off, val & 0xff);
3113 pciide_pci_write(pc, pa, off, val >> 8);
3114 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3115 } else {
3116 mode |= 0x01 << (drive * 4);
3117 off = 0xa4 + chp->channel * 16 + drive * 2;
3118 val = pio_tbl[drvp->PIO_mode];
3119 pciide_pci_write(pc, pa, off, val & 0xff);
3120 pciide_pci_write(pc, pa, off, val >> 8);
3121 }
3122 }
3123
3124 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3125 if (idedma_ctl != 0) {
3126 /* Add software bits in status register */
3127 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3128 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3129 idedma_ctl);
3130 }
3131 pciide_print_modes(cp);
3132 }
3133
3134 void
3135 cmd3112_chip_map(sc, pa)
3136 struct pciide_softc *sc;
3137 struct pci_attach_args *pa;
3138 {
3139 struct pciide_channel *cp;
3140 bus_size_t cmdsize, ctlsize;
3141 pcireg_t interface;
3142 int channel;
3143
3144 if (pciide_chipen(sc, pa) == 0)
3145 return;
3146
3147 aprint_normal("%s: bus-master DMA support present",
3148 sc->sc_wdcdev.sc_dev.dv_xname);
3149 pciide_mapreg_dma(sc, pa);
3150 aprint_normal("\n");
3151
3152 /*
3153 	 * Revisions <= 0x01 of the 3112 have a bug that can cause data
3154 * corruption if DMA transfers cross an 8K boundary. This is
3155 * apparently hard to tickle, but we'll go ahead and play it
3156 * safe.
3157 */
3158 if (PCI_REVISION(pa->pa_class) <= 0x01) {
3159 sc->sc_dma_maxsegsz = 8192;
3160 sc->sc_dma_boundary = 8192;
3161 }
3162
3163 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3164 WDC_CAPABILITY_MODE;
3165 sc->sc_wdcdev.PIO_cap = 4;
3166 if (sc->sc_dma_ok) {
3167 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3168 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3169 sc->sc_wdcdev.irqack = pciide_irqack;
3170 sc->sc_wdcdev.DMA_cap = 2;
3171 sc->sc_wdcdev.UDMA_cap = 6;
3172 }
3173 sc->sc_wdcdev.set_modes = cmd3112_setup_channel;
3174
3175 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3176 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3177
3178 /*
3179 * The 3112 can be told to identify as a RAID controller.
3180 	 * In this case, we have to fake the programming interface
3181 */
3182 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3183 interface = PCI_INTERFACE(pa->pa_class);
3184 } else {
3185 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3186 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3187 }
3188
3189 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3190 cp = &sc->pciide_channels[channel];
3191 if (pciide_chansetup(sc, channel, interface) == 0)
3192 continue;
3193 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3194 pciide_pci_intr);
3195 if (cp->hw_ok == 0)
3196 continue;
3197 pciide_map_compat_intr(pa, cp, channel, interface);
3198 cmd3112_setup_channel(&cp->wdc_channel);
3199 }
3200 }
3201
3202 void
3203 cmd3112_setup_channel(chp)
3204 struct channel_softc *chp;
3205 {
3206 struct ata_drive_datas *drvp;
3207 int drive;
3208 u_int32_t idedma_ctl, dtm;
3209 struct pciide_channel *cp = (struct pciide_channel*)chp;
3210 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
3211
3212 /* setup DMA if needed */
3213 pciide_channel_dma_setup(cp);
3214
3215 idedma_ctl = 0;
3216 dtm = 0;
3217
3218 for (drive = 0; drive < 2; drive++) {
3219 drvp = &chp->ch_drive[drive];
3220 /* If no drive, skip */
3221 if ((drvp->drive_flags & DRIVE) == 0)
3222 continue;
3223 if (drvp->drive_flags & DRIVE_UDMA) {
3224 /* use Ultra/DMA */
3225 drvp->drive_flags &= ~DRIVE_DMA;
3226 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3227 dtm |= DTM_IDEx_DMA;
3228 } else if (drvp->drive_flags & DRIVE_DMA) {
3229 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3230 dtm |= DTM_IDEx_DMA;
3231 } else {
3232 dtm |= DTM_IDEx_PIO;
3233 }
3234 }
3235
3236 /*
3237 	 * Nothing to do to set up modes; mode selection is meaningless on
3238 	 * S-ATA (but many S-ATA drives still expect the SET FEATURES
3239 	 * command).
3240 */
3241 if (idedma_ctl != 0) {
3242 /* Add software bits in status register */
3243 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3244 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3245 idedma_ctl);
3246 }
3247 pci_conf_write(sc->sc_pc, sc->sc_tag,
3248 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
3249 pciide_print_modes(cp);
3250 }
3251
3252 void
3253 cy693_chip_map(sc, pa)
3254 struct pciide_softc *sc;
3255 struct pci_attach_args *pa;
3256 {
3257 struct pciide_channel *cp;
3258 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3259 bus_size_t cmdsize, ctlsize;
3260
3261 if (pciide_chipen(sc, pa) == 0)
3262 return;
3263 /*
3264 	 * This chip has two PCI IDE functions, one for the primary and one
3265 	 * for the secondary channel, so we need to call
3266 	 * pciide_mapregs_compat() with the real channel.
3267 */
3268 if (pa->pa_function == 1) {
3269 sc->sc_cy_compatchan = 0;
3270 } else if (pa->pa_function == 2) {
3271 sc->sc_cy_compatchan = 1;
3272 } else {
3273 aprint_error("%s: unexpected PCI function %d\n",
3274 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3275 return;
3276 }
3277 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
3278 aprint_normal("%s: bus-master DMA support present",
3279 sc->sc_wdcdev.sc_dev.dv_xname);
3280 pciide_mapreg_dma(sc, pa);
3281 } else {
3282 aprint_normal("%s: hardware does not support DMA",
3283 sc->sc_wdcdev.sc_dev.dv_xname);
3284 sc->sc_dma_ok = 0;
3285 }
3286 aprint_normal("\n");
3287
3288 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3289 if (sc->sc_cy_handle == NULL) {
3290 aprint_error("%s: unable to map hyperCache control registers\n",
3291 sc->sc_wdcdev.sc_dev.dv_xname);
3292 sc->sc_dma_ok = 0;
3293 }
3294
3295 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3296 WDC_CAPABILITY_MODE;
3297 if (sc->sc_dma_ok) {
3298 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3299 sc->sc_wdcdev.irqack = pciide_irqack;
3300 }
3301 sc->sc_wdcdev.PIO_cap = 4;
3302 sc->sc_wdcdev.DMA_cap = 2;
3303 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3304
3305 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3306 sc->sc_wdcdev.nchannels = 1;
3307
3308 /* Only one channel for this chip; if we are here it's enabled */
3309 cp = &sc->pciide_channels[0];
3310 sc->wdc_chanarray[0] = &cp->wdc_channel;
3311 cp->name = PCIIDE_CHANNEL_NAME(0);
3312 cp->wdc_channel.channel = 0;
3313 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3314 cp->wdc_channel.ch_queue =
3315 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3316 if (cp->wdc_channel.ch_queue == NULL) {
3317 aprint_error("%s primary channel: "
3318 		    "can't allocate memory for command queue\n",
3319 sc->sc_wdcdev.sc_dev.dv_xname);
3320 return;
3321 }
3322 aprint_normal("%s: primary channel %s to ",
3323 sc->sc_wdcdev.sc_dev.dv_xname,
3324 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3325 "configured" : "wired");
3326 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3327 aprint_normal("native-PCI");
3328 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3329 pciide_pci_intr);
3330 } else {
3331 aprint_normal("compatibility");
3332 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3333 &cmdsize, &ctlsize);
3334 }
3335 aprint_normal(" mode\n");
3336 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3337 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3338 wdcattach(&cp->wdc_channel);
3339 if (pciide_chan_candisable(cp)) {
3340 pci_conf_write(sc->sc_pc, sc->sc_tag,
3341 PCI_COMMAND_STATUS_REG, 0);
3342 }
3343 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3344 if (cp->hw_ok == 0)
3345 return;
3346 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3347 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3348 cy693_setup_channel(&cp->wdc_channel);
3349 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3350 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3351 }
3352
3353 void
3354 cy693_setup_channel(chp)
3355 struct channel_softc *chp;
3356 {
3357 struct ata_drive_datas *drvp;
3358 int drive;
3359 u_int32_t cy_cmd_ctrl;
3360 u_int32_t idedma_ctl;
3361 struct pciide_channel *cp = (struct pciide_channel*)chp;
3362 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3363 int dma_mode = -1;
3364
3365 cy_cmd_ctrl = idedma_ctl = 0;
3366
3367 /* setup DMA if needed */
3368 pciide_channel_dma_setup(cp);
3369
3370 for (drive = 0; drive < 2; drive++) {
3371 drvp = &chp->ch_drive[drive];
3372 /* If no drive, skip */
3373 if ((drvp->drive_flags & DRIVE) == 0)
3374 continue;
3375 /* add timing values, setup DMA if needed */
3376 if (drvp->drive_flags & DRIVE_DMA) {
3377 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3378 /* use Multiword DMA */
3379 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3380 dma_mode = drvp->DMA_mode;
3381 }
3382 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3383 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3384 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3385 CY_CMD_CTRL_IOW_REC_OFF(drive));
3386 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3387 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3388 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3389 CY_CMD_CTRL_IOR_REC_OFF(drive));
3390 }
3391 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
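	/*
	 * The cy82c693 has a single multiword DMA setting shared by both
	 * drives, so record the negotiated common mode for both of them.
	 */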
3392 chp->ch_drive[0].DMA_mode = dma_mode;
3393 chp->ch_drive[1].DMA_mode = dma_mode;
3394
3395 if (dma_mode == -1)
3396 dma_mode = 0;
3397
3398 if (sc->sc_cy_handle != NULL) {
3399 /* Note: `multiple' is implied. */
3400 cy82c693_write(sc->sc_cy_handle,
3401 (sc->sc_cy_compatchan == 0) ?
3402 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3403 }
3404
3405 pciide_print_modes(cp);
3406
3407 if (idedma_ctl != 0) {
3408 /* Add software bits in status register */
3409 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3410 IDEDMA_CTL, idedma_ctl);
3411 }
3412 }
3413
3414 static struct sis_hostbr_type {
3415 u_int16_t id;
3416 u_int8_t rev;
3417 u_int8_t udma_mode;
3418 char *name;
3419 u_int8_t type;
3420 #define SIS_TYPE_NOUDMA 0
3421 #define SIS_TYPE_66 1
3422 #define SIS_TYPE_100OLD 2
3423 #define SIS_TYPE_100NEW 3
3424 #define SIS_TYPE_133OLD 4
3425 #define SIS_TYPE_133NEW 5
3426 #define SIS_TYPE_SOUTH 6
3427 } sis_hostbr_type[] = {
3428 	/* Most of the info here is from sos (at) freebsd.org */
3429 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
3430 #if 0
3431 /*
3432 	 * controllers associated with a rev 0x2 530 Host to PCI Bridge
3433 * have problems with UDMA (info provided by Christos)
3434 */
3435 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
3436 #endif
3437 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
3438 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
3439 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
3440 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
3441 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
3442 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
3443 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
3444 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
3445 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
3446 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
3447 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
3448 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
3449 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
3450 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
3451 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
3452 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
3453 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
3454 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
3455 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
3456 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
3457 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
3458 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
3459 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
3460 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
3461 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
3462 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
3463 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
3464 /*
3465 	 * From sos (at) freebsd.org: the 0x961 ID will never be found in the real world
3466 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
3467 */
3468 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
3469 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
3470 };
3471
3472 static struct sis_hostbr_type *sis_hostbr_type_match;
3473
3474 static int
3475 sis_hostbr_match(pa)
3476 struct pci_attach_args *pa;
3477 {
3478 int i;
3479 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3480 return 0;
3481 sis_hostbr_type_match = NULL;
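	/*
	 * No break: keep scanning so that a later table entry with a higher
	 * matching revision requirement (e.g. the 630 rev 0x30 "630S" line)
	 * overrides an earlier, more generic match.
	 */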
3482 for (i = 0;
3483 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3484 i++) {
3485 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3486 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3487 sis_hostbr_type_match = &sis_hostbr_type[i];
3488 }
3489 return (sis_hostbr_type_match != NULL);
3490 }
3491
3492 static int
sis_south_match(pa)
3493 struct pci_attach_args *pa;
3494 {
3495 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3496 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3497 PCI_REVISION(pa->pa_class) >= 0x10);
3498 }
3499
3500 void
3501 sis_chip_map(sc, pa)
3502 struct pciide_softc *sc;
3503 struct pci_attach_args *pa;
3504 {
3505 struct pciide_channel *cp;
3506 int channel;
3507 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3508 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3509 pcireg_t rev = PCI_REVISION(pa->pa_class);
3510 bus_size_t cmdsize, ctlsize;
3511
3512 if (pciide_chipen(sc, pa) == 0)
3513 return;
3514 aprint_normal(": Silicon Integrated System ");
3515 pci_find_device(NULL, sis_hostbr_match);
3516 if (sis_hostbr_type_match) {
3517 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
3518 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3519 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3520 SIS_REG_57) & 0x7f);
3521 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3522 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3523 aprint_normal("96X UDMA%d",
3524 sis_hostbr_type_match->udma_mode);
3525 sc->sis_type = SIS_TYPE_133NEW;
3526 sc->sc_wdcdev.UDMA_cap =
3527 sis_hostbr_type_match->udma_mode;
3528 } else {
3529 if (pci_find_device(NULL, sis_south_match)) {
3530 sc->sis_type = SIS_TYPE_133OLD;
3531 sc->sc_wdcdev.UDMA_cap =
3532 sis_hostbr_type_match->udma_mode;
3533 } else {
3534 sc->sis_type = SIS_TYPE_100NEW;
3535 sc->sc_wdcdev.UDMA_cap =
3536 sis_hostbr_type_match->udma_mode;
3537 }
3538 }
3539 } else {
3540 sc->sis_type = sis_hostbr_type_match->type;
3541 sc->sc_wdcdev.UDMA_cap =
3542 sis_hostbr_type_match->udma_mode;
3543 }
3544 aprint_normal("%s", sis_hostbr_type_match->name);
3545 } else {
3546 aprint_normal("5597/5598");
3547 if (rev >= 0xd0) {
3548 sc->sc_wdcdev.UDMA_cap = 2;
3549 sc->sis_type = SIS_TYPE_66;
3550 } else {
3551 sc->sc_wdcdev.UDMA_cap = 0;
3552 sc->sis_type = SIS_TYPE_NOUDMA;
3553 }
3554 }
3555 aprint_normal(" IDE controller (rev. 0x%02x)\n",
3556 PCI_REVISION(pa->pa_class));
3557 aprint_normal("%s: bus-master DMA support present",
3558 sc->sc_wdcdev.sc_dev.dv_xname);
3559 pciide_mapreg_dma(sc, pa);
3560 aprint_normal("\n");
3561
3562 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3563 WDC_CAPABILITY_MODE;
3564 if (sc->sc_dma_ok) {
3565 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3566 sc->sc_wdcdev.irqack = pciide_irqack;
3567 if (sc->sis_type >= SIS_TYPE_66)
3568 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3569 }
3570
3571 sc->sc_wdcdev.PIO_cap = 4;
3572 sc->sc_wdcdev.DMA_cap = 2;
3573
3574 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3575 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3576 switch(sc->sis_type) {
3577 case SIS_TYPE_NOUDMA:
3578 case SIS_TYPE_66:
3579 case SIS_TYPE_100OLD:
3580 sc->sc_wdcdev.set_modes = sis_setup_channel;
3581 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3582 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3583 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3584 break;
3585 case SIS_TYPE_100NEW:
3586 case SIS_TYPE_133OLD:
3587 sc->sc_wdcdev.set_modes = sis_setup_channel;
3588 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3589 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3590 break;
3591 case SIS_TYPE_133NEW:
3592 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3593 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3594 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3595 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3596 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3597 break;
3598 }
3599
3600
3601 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3602 cp = &sc->pciide_channels[channel];
3603 if (pciide_chansetup(sc, channel, interface) == 0)
3604 continue;
3605 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3606 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3607 aprint_normal("%s: %s channel ignored (disabled)\n",
3608 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3609 continue;
3610 }
3611 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3612 pciide_pci_intr);
3613 if (cp->hw_ok == 0)
3614 continue;
3615 if (pciide_chan_candisable(cp)) {
3616 if (channel == 0)
3617 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3618 else
3619 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3620 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3621 sis_ctr0);
3622 }
3623 pciide_map_compat_intr(pa, cp, channel, interface);
3624 if (cp->hw_ok == 0)
3625 continue;
3626 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3627 }
3628 }
3629
3630 void
3631 sis96x_setup_channel(chp)
3632 struct channel_softc *chp;
3633 {
3634 struct ata_drive_datas *drvp;
3635 int drive;
3636 u_int32_t sis_tim;
3637 u_int32_t idedma_ctl;
3638 int regtim;
3639 struct pciide_channel *cp = (struct pciide_channel*)chp;
3640 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3641
3642 sis_tim = 0;
3643 idedma_ctl = 0;
3644 /* setup DMA if needed */
3645 pciide_channel_dma_setup(cp);
3646
3647 for (drive = 0; drive < 2; drive++) {
3648 regtim = SIS_TIM133(
3649 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3650 chp->channel, drive);
3651 drvp = &chp->ch_drive[drive];
3652 /* If no drive, skip */
3653 if ((drvp->drive_flags & DRIVE) == 0)
3654 continue;
3655 /* add timing values, setup DMA if needed */
3656 if (drvp->drive_flags & DRIVE_UDMA) {
3657 /* use Ultra/DMA */
3658 drvp->drive_flags &= ~DRIVE_DMA;
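/*
 * Cable check: if the cable-detect register flags an ATA/33
 * (40-conductor) cable, cap the drive at UDMA mode 2.
 */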
3659 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3660 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3661 if (drvp->UDMA_mode > 2)
3662 drvp->UDMA_mode = 2;
3663 }
3664 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3665 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3666 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3667 } else if (drvp->drive_flags & DRIVE_DMA) {
3668 /*
3669 * use Multiword DMA
3670 * Timings will be used for both PIO and DMA,
3671 * so adjust DMA mode if needed
3672 */
3673 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3674 drvp->PIO_mode = drvp->DMA_mode + 2;
3675 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3676 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3677 drvp->PIO_mode - 2 : 0;
3678 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3679 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3680 } else {
3681 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3682 }
3683 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3684 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3685 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3686 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3687 }
3688 if (idedma_ctl != 0) {
3689 /* Add software bits in status register */
3690 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3691 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3692 idedma_ctl);
3693 }
3694 pciide_print_modes(cp);
3695 }
3696
3697 void
3698 sis_setup_channel(chp)
3699 struct channel_softc *chp;
3700 {
3701 struct ata_drive_datas *drvp;
3702 int drive;
3703 u_int32_t sis_tim;
3704 u_int32_t idedma_ctl;
3705 struct pciide_channel *cp = (struct pciide_channel*)chp;
3706 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3707
3708 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3709 "channel %d 0x%x\n", chp->channel,
3710 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3711 DEBUG_PROBE);
3712 sis_tim = 0;
3713 idedma_ctl = 0;
3714 /* setup DMA if needed */
3715 pciide_channel_dma_setup(cp);
3716
3717 for (drive = 0; drive < 2; drive++) {
3718 drvp = &chp->ch_drive[drive];
3719 /* If no drive, skip */
3720 if ((drvp->drive_flags & DRIVE) == 0)
3721 continue;
3722 /* add timing values, setup DMA if needed */
3723 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3724 (drvp->drive_flags & DRIVE_UDMA) == 0)
3725 goto pio;
3726
3727 if (drvp->drive_flags & DRIVE_UDMA) {
3728 /* use Ultra/DMA */
3729 drvp->drive_flags &= ~DRIVE_DMA;
3730 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3731 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3732 if (drvp->UDMA_mode > 2)
3733 drvp->UDMA_mode = 2;
3734 }
3735 switch (sc->sis_type) {
3736 case SIS_TYPE_66:
3737 case SIS_TYPE_100OLD:
3738 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3739 SIS_TIM66_UDMA_TIME_OFF(drive);
3740 break;
3741 case SIS_TYPE_100NEW:
3742 sis_tim |=
3743 sis_udma100new_tim[drvp->UDMA_mode] <<
3744 SIS_TIM100_UDMA_TIME_OFF(drive);
break;
3745 case SIS_TYPE_133OLD:
3746 sis_tim |=
3747 sis_udma133old_tim[drvp->UDMA_mode] <<
3748 SIS_TIM100_UDMA_TIME_OFF(drive);
3749 break;
3750 default:
3751 aprint_error("unknown SiS IDE type %d\n",
3752 sc->sis_type);
3753 }
3754 } else {
3755 /*
3756 * use Multiword DMA
3757 * Timings will be used for both PIO and DMA,
3758 * so adjust DMA mode if needed
3759 */
3760 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3761 drvp->PIO_mode = drvp->DMA_mode + 2;
3762 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3763 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3764 drvp->PIO_mode - 2 : 0;
3765 if (drvp->DMA_mode == 0)
3766 drvp->PIO_mode = 0;
3767 }
3768 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3769 pio: switch (sc->sis_type) {
3770 case SIS_TYPE_NOUDMA:
3771 case SIS_TYPE_66:
3772 case SIS_TYPE_100OLD:
3773 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3774 SIS_TIM66_ACT_OFF(drive);
3775 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3776 SIS_TIM66_REC_OFF(drive);
3777 break;
3778 case SIS_TYPE_100NEW:
3779 case SIS_TYPE_133OLD:
3780 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3781 SIS_TIM100_ACT_OFF(drive);
3782 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3783 SIS_TIM100_REC_OFF(drive);
3784 break;
3785 default:
3786 aprint_error("unknown SiS IDE type %d\n",
3787 sc->sis_type);
3788 }
3789 }
3790 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3791 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3792 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3793 if (idedma_ctl != 0) {
3794 /* Add software bits in status register */
3795 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3796 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3797 idedma_ctl);
3798 }
3799 pciide_print_modes(cp);
3800 }
3801
3802 void
3803 acer_chip_map(sc, pa)
3804 struct pciide_softc *sc;
3805 struct pci_attach_args *pa;
3806 {
3807 struct pciide_channel *cp;
3808 int channel;
3809 pcireg_t cr, interface;
3810 bus_size_t cmdsize, ctlsize;
3811 pcireg_t rev = PCI_REVISION(pa->pa_class);
3812
3813 if (pciide_chipen(sc, pa) == 0)
3814 return;
3815 aprint_normal("%s: bus-master DMA support present",
3816 sc->sc_wdcdev.sc_dev.dv_xname);
3817 pciide_mapreg_dma(sc, pa);
3818 aprint_normal("\n");
3819 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3820 WDC_CAPABILITY_MODE;
3821 if (sc->sc_dma_ok) {
3822 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3823 if (rev >= 0x20) {
3824 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3825 if (rev >= 0xC4)
3826 sc->sc_wdcdev.UDMA_cap = 5;
3827 else if (rev >= 0xC2)
3828 sc->sc_wdcdev.UDMA_cap = 4;
3829 else
3830 sc->sc_wdcdev.UDMA_cap = 2;
3831 }
3832 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3833 sc->sc_wdcdev.irqack = pciide_irqack;
3834 }
3835
3836 sc->sc_wdcdev.PIO_cap = 4;
3837 sc->sc_wdcdev.DMA_cap = 2;
3838 sc->sc_wdcdev.set_modes = acer_setup_channel;
3839 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3840 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3841
3842 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3843 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3844 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3845
3846 /* Enable "microsoft register bits" R/W. */
3847 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3848 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3849 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3850 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3851 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3852 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3853 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3854 ~ACER_CHANSTATUSREGS_RO);
3855 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3856 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3857 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3858 /* Don't use cr, re-read the real register content instead */
3859 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3860 PCI_CLASS_REG));
3861
3862 /* From linux: enable "Cable Detection" */
3863 if (rev >= 0xC2) {
3864 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3865 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3866 | ACER_0x4B_CDETECT);
3867 }
3868
3869 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3870 cp = &sc->pciide_channels[channel];
3871 if (pciide_chansetup(sc, channel, interface) == 0)
3872 continue;
3873 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3874 aprint_normal("%s: %s channel ignored (disabled)\n",
3875 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3876 continue;
3877 }
3878 /* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3879 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3880 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3881 if (cp->hw_ok == 0)
3882 continue;
3883 if (pciide_chan_candisable(cp)) {
3884 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3885 pci_conf_write(sc->sc_pc, sc->sc_tag,
3886 PCI_CLASS_REG, cr);
3887 }
3888 pciide_map_compat_intr(pa, cp, channel, interface);
3889 acer_setup_channel(&cp->wdc_channel);
3890 }
3891 }
3892
3893 void
3894 acer_setup_channel(chp)
3895 struct channel_softc *chp;
3896 {
3897 struct ata_drive_datas *drvp;
3898 int drive;
3899 u_int32_t acer_fifo_udma;
3900 u_int32_t idedma_ctl;
3901 struct pciide_channel *cp = (struct pciide_channel*)chp;
3902 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3903
3904 idedma_ctl = 0;
3905 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3906 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3907 acer_fifo_udma), DEBUG_PROBE);
3908 /* setup DMA if needed */
3909 pciide_channel_dma_setup(cp);
3910
3911 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3912 DRIVE_UDMA) { /* check for 80-pin cable */
3913 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3914 ACER_0x4A_80PIN(chp->channel)) {
3915 if (chp->ch_drive[0].UDMA_mode > 2)
3916 chp->ch_drive[0].UDMA_mode = 2;
3917 if (chp->ch_drive[1].UDMA_mode > 2)
3918 chp->ch_drive[1].UDMA_mode = 2;
3919 }
3920 }
3921
3922 for (drive = 0; drive < 2; drive++) {
3923 drvp = &chp->ch_drive[drive];
3924 /* If no drive, skip */
3925 if ((drvp->drive_flags & DRIVE) == 0)
3926 continue;
3927 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3928 "channel %d drive %d 0x%x\n", chp->channel, drive,
3929 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3930 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3931 /* clear FIFO/DMA mode */
3932 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3933 ACER_UDMA_EN(chp->channel, drive) |
3934 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3935
3936 /* add timing values, setup DMA if needed */
3937 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3938 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3939 acer_fifo_udma |=
3940 ACER_FTH_OPL(chp->channel, drive, 0x1);
3941 goto pio;
3942 }
3943
3944 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3945 if (drvp->drive_flags & DRIVE_UDMA) {
3946 /* use Ultra/DMA */
3947 drvp->drive_flags &= ~DRIVE_DMA;
3948 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3949 acer_fifo_udma |=
3950 ACER_UDMA_TIM(chp->channel, drive,
3951 acer_udma[drvp->UDMA_mode]);
3952 /* XXX disable if one drive < UDMA3 ? */
3953 if (drvp->UDMA_mode >= 3) {
3954 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3955 ACER_0x4B,
3956 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3957 ACER_0x4B) | ACER_0x4B_UDMA66);
3958 }
3959 } else {
3960 /*
3961 * use Multiword DMA
3962 * Timings will be used for both PIO and DMA,
3963 * so adjust DMA mode if needed
3964 */
3965 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3966 drvp->PIO_mode = drvp->DMA_mode + 2;
3967 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3968 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3969 drvp->PIO_mode - 2 : 0;
3970 if (drvp->DMA_mode == 0)
3971 drvp->PIO_mode = 0;
3972 }
3973 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3974 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3975 ACER_IDETIM(chp->channel, drive),
3976 acer_pio[drvp->PIO_mode]);
3977 }
3978 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3979 acer_fifo_udma), DEBUG_PROBE);
3980 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3981 if (idedma_ctl != 0) {
3982 /* Add software bits in status register */
3983 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3984 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3985 idedma_ctl);
3986 }
3987 pciide_print_modes(cp);
3988 }
3989
3990 int
3991 acer_pci_intr(arg)
3992 void *arg;
3993 {
3994 struct pciide_softc *sc = arg;
3995 struct pciide_channel *cp;
3996 struct channel_softc *wdc_cp;
3997 int i, rv, crv;
3998 u_int32_t chids;
3999
4000 rv = 0;
4001 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
4002 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4003 cp = &sc->pciide_channels[i];
4004 wdc_cp = &cp->wdc_channel;
4005 /* If a compat channel, skip. */
4006 if (cp->compat)
4007 continue;
4008 if (chids & ACER_CHIDS_INT(i)) {
4009 crv = wdcintr(wdc_cp);
4010 if (crv == 0)
4011 printf("%s:%d: bogus intr\n",
4012 sc->sc_wdcdev.sc_dev.dv_xname, i);
4013 else
4014 rv = 1;
4015 }
4016 }
4017 return rv;
4018 }
4019
4020 void
4021 hpt_chip_map(sc, pa)
4022 struct pciide_softc *sc;
4023 struct pci_attach_args *pa;
4024 {
4025 struct pciide_channel *cp;
4026 int i, compatchan, revision;
4027 pcireg_t interface;
4028 bus_size_t cmdsize, ctlsize;
4029
4030 if (pciide_chipen(sc, pa) == 0)
4031 return;
4032 revision = PCI_REVISION(pa->pa_class);
4033 aprint_normal(": Triones/Highpoint ");
4034 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4035 aprint_normal("HPT374 IDE Controller\n");
4036 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
4037 aprint_normal("HPT372 IDE Controller\n");
4038 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
4039 if (revision == HPT372_REV)
4040 aprint_normal("HPT372 IDE Controller\n");
4041 else if (revision == HPT370_REV)
4042 aprint_normal("HPT370 IDE Controller\n");
4043 else if (revision == HPT370A_REV)
4044 aprint_normal("HPT370A IDE Controller\n");
4045 else if (revision == HPT366_REV)
4046 aprint_normal("HPT366 IDE Controller\n");
4047 else
4048 aprint_normal("unknown HPT IDE controller rev %d\n",
4049 revision);
4050 } else
4051 aprint_normal("unknown HPT IDE controller 0x%x\n",
4052 sc->sc_pp->ide_product);
4053
4054 /*
4055 * when the chip is in native mode it identifies itself as a
4056 * 'misc mass storage'. Fake the interface in this case.
4057 */
4058 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4059 interface = PCI_INTERFACE(pa->pa_class);
4060 } else {
4061 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4062 PCIIDE_INTERFACE_PCI(0);
4063 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4064 (revision == HPT370_REV || revision == HPT370A_REV ||
4065 revision == HPT372_REV)) ||
4066 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4067 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4068 interface |= PCIIDE_INTERFACE_PCI(1);
4069 }
4070
4071 aprint_normal("%s: bus-master DMA support present",
4072 sc->sc_wdcdev.sc_dev.dv_xname);
4073 pciide_mapreg_dma(sc, pa);
4074 aprint_normal("\n");
4075 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4076 WDC_CAPABILITY_MODE;
4077 if (sc->sc_dma_ok) {
4078 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4079 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4080 sc->sc_wdcdev.irqack = pciide_irqack;
4081 }
4082 sc->sc_wdcdev.PIO_cap = 4;
4083 sc->sc_wdcdev.DMA_cap = 2;
4084
4085 sc->sc_wdcdev.set_modes = hpt_setup_channel;
4086 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4087 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4088 revision == HPT366_REV) {
4089 sc->sc_wdcdev.UDMA_cap = 4;
4090 /*
4091 * The 366 has 2 PCI IDE functions, one for primary and one
4092 * for secondary. So we need to call pciide_mapregs_compat()
4093 * with the real channel
4094 */
4095 if (pa->pa_function == 0) {
4096 compatchan = 0;
4097 } else if (pa->pa_function == 1) {
4098 compatchan = 1;
4099 } else {
4100 aprint_error("%s: unexpected PCI function %d\n",
4101 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
4102 return;
4103 }
4104 sc->sc_wdcdev.nchannels = 1;
4105 } else {
4106 sc->sc_wdcdev.nchannels = 2;
4107 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
4108 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4109 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4110 revision == HPT372_REV))
4111 sc->sc_wdcdev.UDMA_cap = 6;
4112 else
4113 sc->sc_wdcdev.UDMA_cap = 5;
4114 }
4115 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4116 cp = &sc->pciide_channels[i];
4117 if (sc->sc_wdcdev.nchannels > 1) {
4118 compatchan = i;
4119 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
4120 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
4121 aprint_normal(
4122 "%s: %s channel ignored (disabled)\n",
4123 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4124 continue;
4125 }
4126 }
4127 if (pciide_chansetup(sc, i, interface) == 0)
4128 continue;
4129 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4130 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4131 &ctlsize, hpt_pci_intr);
4132 } else {
4133 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
4134 &cmdsize, &ctlsize);
4135 }
4136 if (cp->hw_ok == 0)
4137 return;
4138 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4139 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4140 wdcattach(&cp->wdc_channel);
4141 hpt_setup_channel(&cp->wdc_channel);
4142 }
4143 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4144 (revision == HPT370_REV || revision == HPT370A_REV ||
4145 revision == HPT372_REV)) ||
4146 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4147 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
4148 /*
4149 * HPT370_REV and higher have a bit to disable interrupts;
4150 * make sure to clear it
4151 */
4152 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
4153 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
4154 ~HPT_CSEL_IRQDIS);
4155 }
4156 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
4157 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4158 revision == HPT372_REV ) ||
4159 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4160 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4161 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
4162 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
4163 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
4164 return;
4165 }
4166
4167 void
4168 hpt_setup_channel(chp)
4169 struct channel_softc *chp;
4170 {
4171 struct ata_drive_datas *drvp;
4172 int drive;
4173 int cable;
4174 u_int32_t before, after;
4175 u_int32_t idedma_ctl;
4176 struct pciide_channel *cp = (struct pciide_channel*)chp;
4177 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4178 int revision =
4179 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
4180
4181 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
4182
4183 /* setup DMA if needed */
4184 pciide_channel_dma_setup(cp);
4185
4186 idedma_ctl = 0;
4187
4188 /* Per drive settings */
4189 for (drive = 0; drive < 2; drive++) {
4190 drvp = &chp->ch_drive[drive];
4191 /* If no drive, skip */
4192 if ((drvp->drive_flags & DRIVE) == 0)
4193 continue;
4194 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
4195 HPT_IDETIM(chp->channel, drive));
4196
4197 /* add timing values, setup DMA if needed */
4198 if (drvp->drive_flags & DRIVE_UDMA) {
4199 /* use Ultra/DMA */
4200 drvp->drive_flags &= ~DRIVE_DMA;
4201 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
4202 drvp->UDMA_mode > 2)
4203 drvp->UDMA_mode = 2;
4204 switch (sc->sc_pp->ide_product) {
4205 case PCI_PRODUCT_TRIONES_HPT374:
4206 after = hpt374_udma[drvp->UDMA_mode];
4207 break;
4208 case PCI_PRODUCT_TRIONES_HPT372:
4209 after = hpt372_udma[drvp->UDMA_mode];
4210 break;
4211 case PCI_PRODUCT_TRIONES_HPT366:
4212 default:
4213 switch(revision) {
4214 case HPT372_REV:
4215 after = hpt372_udma[drvp->UDMA_mode];
4216 break;
4217 case HPT370_REV:
4218 case HPT370A_REV:
4219 after = hpt370_udma[drvp->UDMA_mode];
4220 break;
4221 case HPT366_REV:
4222 default:
4223 after = hpt366_udma[drvp->UDMA_mode];
4224 break;
4225 }
4226 }
4227 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4228 } else if (drvp->drive_flags & DRIVE_DMA) {
4229 /*
4230 * use Multiword DMA.
4231 * Timings will be used for both PIO and DMA, so adjust
4232 * DMA mode if needed
4233 */
4234 if (drvp->PIO_mode >= 3 &&
4235 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
4236 drvp->DMA_mode = drvp->PIO_mode - 2;
4237 }
4238 switch (sc->sc_pp->ide_product) {
4239 case PCI_PRODUCT_TRIONES_HPT374:
4240 after = hpt374_dma[drvp->DMA_mode];
4241 break;
4242 case PCI_PRODUCT_TRIONES_HPT372:
4243 after = hpt372_dma[drvp->DMA_mode];
4244 break;
4245 case PCI_PRODUCT_TRIONES_HPT366:
4246 default:
4247 switch(revision) {
4248 case HPT372_REV:
4249 after = hpt372_dma[drvp->DMA_mode];
4250 break;
4251 case HPT370_REV:
4252 case HPT370A_REV:
4253 after = hpt370_dma[drvp->DMA_mode];
4254 break;
4255 case HPT366_REV:
4256 default:
4257 after = hpt366_dma[drvp->DMA_mode];
4258 break;
4259 }
4260 }
4261 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4262 } else {
4263 /* PIO only */
4264 switch (sc->sc_pp->ide_product) {
4265 case PCI_PRODUCT_TRIONES_HPT374:
4266 after = hpt374_pio[drvp->PIO_mode];
4267 break;
4268 case PCI_PRODUCT_TRIONES_HPT372:
4269 after = hpt372_pio[drvp->PIO_mode];
4270 break;
4271 case PCI_PRODUCT_TRIONES_HPT366:
4272 default:
4273 switch(revision) {
4274 case HPT372_REV:
4275 after = hpt372_pio[drvp->PIO_mode];
4276 break;
4277 case HPT370_REV:
4278 case HPT370A_REV:
4279 after = hpt370_pio[drvp->PIO_mode];
4280 break;
4281 case HPT366_REV:
4282 default:
4283 after = hpt366_pio[drvp->PIO_mode];
4284 break;
4285 }
4286 }
4287 }
4288 pci_conf_write(sc->sc_pc, sc->sc_tag,
4289 HPT_IDETIM(chp->channel, drive), after);
4290 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
4291 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
4292 after, before), DEBUG_PROBE);
4293 }
4294 if (idedma_ctl != 0) {
4295 /* Add software bits in status register */
4296 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4297 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4298 idedma_ctl);
4299 }
4300 pciide_print_modes(cp);
4301 }
4302
4303 int
4304 hpt_pci_intr(arg)
4305 void *arg;
4306 {
4307 struct pciide_softc *sc = arg;
4308 struct pciide_channel *cp;
4309 struct channel_softc *wdc_cp;
4310 int rv = 0;
4311 int dmastat, i, crv;
4312
4313 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4314 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4315 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4316 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4317 IDEDMA_CTL_INTR)
4318 continue;
4319 cp = &sc->pciide_channels[i];
4320 wdc_cp = &cp->wdc_channel;
4321 crv = wdcintr(wdc_cp);
4322 if (crv == 0) {
4323 printf("%s:%d: bogus intr\n",
4324 sc->sc_wdcdev.sc_dev.dv_xname, i);
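/* acknowledge the interrupt by writing the status bits back */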
4325 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4326 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4327 } else
4328 rv = 1;
4329 }
4330 return rv;
4331 }
4332
4333
4334 /* Macros to test product */
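/*
 * These group the Promise controllers by generation: PDC_IS_262 parts do
 * UDMA/66 or better, PDC_IS_265 parts UDMA/100 or better, PDC_IS_276
 * parts UDMA/133, and PDC_IS_268 parts use the simplified setup in
 * pdc20268_setup_channel() instead of the PDC2xx timing registers.
 */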
4335 #define PDC_IS_262(sc) \
4336 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
4337 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4338 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4339 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4340 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4341 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4342 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4343 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4344 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4345 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4346 #define PDC_IS_265(sc) \
4347 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4348 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4349 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4350 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4351 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4352 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4353 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4354 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4355 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4356 #define PDC_IS_268(sc) \
4357 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4358 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4359 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4360 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4361 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4362 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4363 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4364 #define PDC_IS_276(sc) \
4365 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4366 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4367 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4368 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4369 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4370
4371 void
4372 pdc202xx_chip_map(sc, pa)
4373 struct pciide_softc *sc;
4374 struct pci_attach_args *pa;
4375 {
4376 struct pciide_channel *cp;
4377 int channel;
4378 pcireg_t interface, st, mode;
4379 bus_size_t cmdsize, ctlsize;
4380
4381 if (!PDC_IS_268(sc)) {
4382 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4383 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
4384 st), DEBUG_PROBE);
4385 }
4386 if (pciide_chipen(sc, pa) == 0)
4387 return;
4388
4389 /* turn off RAID mode */
4390 if (!PDC_IS_268(sc))
4391 st &= ~PDC2xx_STATE_IDERAID;
4392
4393 /*
4394 * can't rely on the PCI_CLASS_REG content if the chip was in RAID
4395 * mode. We have to fake the interface.
4396 */
4397 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
4398 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
4399 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4400
4401 aprint_normal("%s: bus-master DMA support present",
4402 sc->sc_wdcdev.sc_dev.dv_xname);
4403 pciide_mapreg_dma(sc, pa);
4404 aprint_normal("\n");
4405 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4406 WDC_CAPABILITY_MODE;
4407 if (sc->sc_dma_ok) {
4408 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4409 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4410 sc->sc_wdcdev.irqack = pciide_irqack;
4411 }
4412 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
4413 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
4414 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
4415 sc->sc_wdcdev.PIO_cap = 4;
4416 sc->sc_wdcdev.DMA_cap = 2;
4417 if (PDC_IS_276(sc))
4418 sc->sc_wdcdev.UDMA_cap = 6;
4419 else if (PDC_IS_265(sc))
4420 sc->sc_wdcdev.UDMA_cap = 5;
4421 else if (PDC_IS_262(sc))
4422 sc->sc_wdcdev.UDMA_cap = 4;
4423 else
4424 sc->sc_wdcdev.UDMA_cap = 2;
4425 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
4426 pdc20268_setup_channel : pdc202xx_setup_channel;
4427 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4428 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4429
4430 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
4431 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
4432 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
4433 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
4434 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
4435 }
4436
4437 if (!PDC_IS_268(sc)) {
4438 /* setup failsafe defaults */
4439 mode = 0;
4440 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
4441 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
4442 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
4443 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
4444 for (channel = 0;
4445 channel < sc->sc_wdcdev.nchannels;
4446 channel++) {
4447 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4448 "drive 0 initial timings 0x%x, now 0x%x\n",
4449 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4450 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
4451 DEBUG_PROBE);
4452 pci_conf_write(sc->sc_pc, sc->sc_tag,
4453 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
4454 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4455 "drive 1 initial timings 0x%x, now 0x%x\n",
4456 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4457 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
4458 pci_conf_write(sc->sc_pc, sc->sc_tag,
4459 PDC2xx_TIM(channel, 1), mode);
4460 }
4461
4462 mode = PDC2xx_SCR_DMA;
4463 if (PDC_IS_265(sc)) {
4464 mode = PDC2xx_SCR_SET_GEN(mode, PDC265_SCR_GEN_LAT);
4465 } else if (PDC_IS_262(sc)) {
4466 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
4467 } else {
4468 /* the BIOS set it up this way */
4469 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
4470 }
4471 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
4472 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
4473 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
4474 "now 0x%x\n",
4475 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4476 PDC2xx_SCR),
4477 mode), DEBUG_PROBE);
4478 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4479 PDC2xx_SCR, mode);
4480
4481 /* controller initial state register is OK even without BIOS */
4482 /* Set DMA mode to IDE DMA compatibility */
4483 mode =
4484 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
4485 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
4486 DEBUG_PROBE);
4487 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
4488 mode | 0x1);
4489 mode =
4490 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
4491 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
4492 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
4493 mode | 0x1);
4494 }
4495
4496 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4497 cp = &sc->pciide_channels[channel];
4498 if (pciide_chansetup(sc, channel, interface) == 0)
4499 continue;
4500 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
4501 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
4502 aprint_normal("%s: %s channel ignored (disabled)\n",
4503 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4504 continue;
4505 }
4506 if (PDC_IS_265(sc))
4507 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4508 pdc20265_pci_intr);
4509 else
4510 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4511 pdc202xx_pci_intr);
4512 if (cp->hw_ok == 0)
4513 continue;
4514 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
4515 st &= ~(PDC_IS_262(sc) ?
4516 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
4517 pciide_map_compat_intr(pa, cp, channel, interface);
4518 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
4519 }
4520 if (!PDC_IS_268(sc)) {
4521 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4522 "0x%x\n", st), DEBUG_PROBE);
4523 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4524 }
4525 return;
4526 }
4527
4528 void
4529 pdc202xx_setup_channel(chp)
4530 struct channel_softc *chp;
4531 {
4532 struct ata_drive_datas *drvp;
4533 int drive;
4534 pcireg_t mode, st;
4535 u_int32_t idedma_ctl, scr, atapi;
4536 struct pciide_channel *cp = (struct pciide_channel*)chp;
4537 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4538 int channel = chp->channel;
4539
4540 /* setup DMA if needed */
4541 pciide_channel_dma_setup(cp);
4542
4543 idedma_ctl = 0;
4544 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4545 sc->sc_wdcdev.sc_dev.dv_xname,
4546 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4547 DEBUG_PROBE);
4548
4549 /* Per channel settings */
4550 if (PDC_IS_262(sc)) {
4551 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4552 PDC262_U66);
4553 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4554 /* Trim UDMA mode */
4555 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4556 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4557 chp->ch_drive[0].UDMA_mode <= 2) ||
4558 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4559 chp->ch_drive[1].UDMA_mode <= 2)) {
4560 if (chp->ch_drive[0].UDMA_mode > 2)
4561 chp->ch_drive[0].UDMA_mode = 2;
4562 if (chp->ch_drive[1].UDMA_mode > 2)
4563 chp->ch_drive[1].UDMA_mode = 2;
4564 }
4565 /* Set U66 if needed */
4566 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4567 chp->ch_drive[0].UDMA_mode > 2) ||
4568 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4569 chp->ch_drive[1].UDMA_mode > 2))
4570 scr |= PDC262_U66_EN(channel);
4571 else
4572 scr &= ~PDC262_U66_EN(channel);
4573 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4574 PDC262_U66, scr);
4575 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4576 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4577 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4578 PDC262_ATAPI(channel))), DEBUG_PROBE);
4579 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4580 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
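/*
 * With an ATAPI device on the channel, turn off ATAPI UDMA only in
 * the mixed case where one drive uses UDMA and the other plain
 * multiword DMA; otherwise leave it enabled.
 */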
4581 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4582 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4583 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4584 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4585 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4586 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4587 atapi = 0;
4588 else
4589 atapi = PDC262_ATAPI_UDMA;
4590 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4591 PDC262_ATAPI(channel), atapi);
4592 }
4593 }
4594 for (drive = 0; drive < 2; drive++) {
4595 drvp = &chp->ch_drive[drive];
4596 /* If no drive, skip */
4597 if ((drvp->drive_flags & DRIVE) == 0)
4598 continue;
4599 mode = 0;
4600 if (drvp->drive_flags & DRIVE_UDMA) {
4601 /* use Ultra/DMA */
4602 drvp->drive_flags &= ~DRIVE_DMA;
4603 mode = PDC2xx_TIM_SET_MB(mode,
4604 pdc2xx_udma_mb[drvp->UDMA_mode]);
4605 mode = PDC2xx_TIM_SET_MC(mode,
4606 pdc2xx_udma_mc[drvp->UDMA_mode]);
4607 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4608 } else if (drvp->drive_flags & DRIVE_DMA) {
4609 mode = PDC2xx_TIM_SET_MB(mode,
4610 pdc2xx_dma_mb[drvp->DMA_mode]);
4611 mode = PDC2xx_TIM_SET_MC(mode,
4612 pdc2xx_dma_mc[drvp->DMA_mode]);
4613 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4614 } else {
4615 mode = PDC2xx_TIM_SET_MB(mode,
4616 pdc2xx_dma_mb[0]);
4617 mode = PDC2xx_TIM_SET_MC(mode,
4618 pdc2xx_dma_mc[0]);
4619 }
4620 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4621 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4622 if (drvp->drive_flags & DRIVE_ATA)
4623 mode |= PDC2xx_TIM_PRE;
4624 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4625 if (drvp->PIO_mode >= 3) {
4626 mode |= PDC2xx_TIM_IORDY;
4627 if (drive == 0)
4628 mode |= PDC2xx_TIM_IORDYp;
4629 }
4630 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4631 "timings 0x%x\n",
4632 sc->sc_wdcdev.sc_dev.dv_xname,
4633 chp->channel, drive, mode), DEBUG_PROBE);
4634 pci_conf_write(sc->sc_pc, sc->sc_tag,
4635 PDC2xx_TIM(chp->channel, drive), mode);
4636 }
4637 if (idedma_ctl != 0) {
4638 /* Add software bits in status register */
4639 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4640 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4641 idedma_ctl);
4642 }
4643 pciide_print_modes(cp);
4644 }
4645
4646 void
4647 pdc20268_setup_channel(chp)
4648 struct channel_softc *chp;
4649 {
4650 struct ata_drive_datas *drvp;
4651 int drive;
4652 u_int32_t idedma_ctl;
4653 struct pciide_channel *cp = (struct pciide_channel*)chp;
4654 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4655 int u100;
4656
4657 /* setup DMA if needed */
4658 pciide_channel_dma_setup(cp);
4659
4660 idedma_ctl = 0;
4661
4662 /* I don't know what this is for, FreeBSD does it ... */
4663 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4664 IDEDMA_CMD + 0x1 + IDEDMA_SCH_OFFSET * chp->channel, 0x0b);
4665
4666 /*
4667 * cable type detect, from FreeBSD
4668 */
4669 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4670 IDEDMA_CMD + 0x3 + IDEDMA_SCH_OFFSET * chp->channel) & 0x04) ?
4671 0 : 1;
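/* u100 != 0 apparently means an 80-conductor cable; otherwise drives are capped at UDMA2 below */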
4672
4673 for (drive = 0; drive < 2; drive++) {
4674 drvp = &chp->ch_drive[drive];
4675 /* If no drive, skip */
4676 if ((drvp->drive_flags & DRIVE) == 0)
4677 continue;
4678 if (drvp->drive_flags & DRIVE_UDMA) {
4679 /* use Ultra/DMA */
4680 drvp->drive_flags &= ~DRIVE_DMA;
4681 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4682 if (drvp->UDMA_mode > 2 && u100 == 0)
4683 drvp->UDMA_mode = 2;
4684 } else if (drvp->drive_flags & DRIVE_DMA) {
4685 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4686 }
4687 }
4688 /* nothing to do to set up modes; the controller snoops the SET_FEATURES command */
4689 if (idedma_ctl != 0) {
4690 /* Add software bits in status register */
4691 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4692 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4693 idedma_ctl);
4694 }
4695 pciide_print_modes(cp);
4696 }
4697
4698 int
4699 pdc202xx_pci_intr(arg)
4700 void *arg;
4701 {
4702 struct pciide_softc *sc = arg;
4703 struct pciide_channel *cp;
4704 struct channel_softc *wdc_cp;
4705 int i, rv, crv;
4706 u_int32_t scr;
4707
4708 rv = 0;
4709 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4710 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4711 cp = &sc->pciide_channels[i];
4712 wdc_cp = &cp->wdc_channel;
4713 /* If a compat channel, skip. */
4714 if (cp->compat)
4715 continue;
4716 if (scr & PDC2xx_SCR_INT(i)) {
4717 crv = wdcintr(wdc_cp);
4718 if (crv == 0)
4719 printf("%s:%d: bogus intr (reg 0x%x)\n",
4720 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4721 else
4722 rv = 1;
4723 }
4724 }
4725 return rv;
4726 }
4727
4728 int
4729 pdc20265_pci_intr(arg)
4730 void *arg;
4731 {
4732 struct pciide_softc *sc = arg;
4733 struct pciide_channel *cp;
4734 struct channel_softc *wdc_cp;
4735 int i, rv, crv;
4736 u_int32_t dmastat;
4737
4738 rv = 0;
4739 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4740 cp = &sc->pciide_channels[i];
4741 wdc_cp = &cp->wdc_channel;
4742 /* If a compat channel, skip. */
4743 if (cp->compat)
4744 continue;
4745 /*
4746 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4747 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops.
4748 * So use that instead (requires 2 register reads instead of 1,
4749 * but we can't do it another way).
4750 */
4751 dmastat = bus_space_read_1(sc->sc_dma_iot,
4752 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4753 if((dmastat & IDEDMA_CTL_INTR) == 0)
4754 continue;
4755 crv = wdcintr(wdc_cp);
4756 if (crv == 0)
4757 printf("%s:%d: bogus intr\n",
4758 sc->sc_wdcdev.sc_dev.dv_xname, i);
4759 else
4760 rv = 1;
4761 }
4762 return rv;
4763 }
4764
4765 static void
4766 pdc20262_dma_start(v, channel, drive)
4767 void *v;
4768 int channel, drive;
4769 {
4770 struct pciide_softc *sc = v;
4771 struct pciide_dma_maps *dma_maps =
4772 &sc->pciide_channels[channel].dma_maps[drive];
4773 int atapi;
4774
4775 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4776 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4777 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
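/* transfer size in 16-bit words */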
4778 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4779 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4780 PDC262_ATAPI(channel), atapi);
4781 }
4782
4783 pciide_dma_start(v, channel, drive);
4784 }
4785
4786 int
4787 pdc20262_dma_finish(v, channel, drive, force)
4788 void *v;
4789 int channel, drive;
4790 int force;
4791 {
4792 struct pciide_softc *sc = v;
4793 struct pciide_dma_maps *dma_maps =
4794 &sc->pciide_channels[channel].dma_maps[drive];
4795 struct channel_softc *chp;
4796 int atapi, error;
4797
4798 error = pciide_dma_finish(v, channel, drive, force);
4799
4800 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4801 chp = sc->wdc_chanarray[channel];
4802 atapi = 0;
4803 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4804 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4805 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4806 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4807 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4808 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4809 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4810 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4811 atapi = PDC262_ATAPI_UDMA;
4812 }
4813 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4814 PDC262_ATAPI(channel), atapi);
4815 }
4816
4817 return error;
4818 }
4819
4820 void
4821 opti_chip_map(sc, pa)
4822 struct pciide_softc *sc;
4823 struct pci_attach_args *pa;
4824 {
4825 struct pciide_channel *cp;
4826 bus_size_t cmdsize, ctlsize;
4827 pcireg_t interface;
4828 u_int8_t init_ctrl;
4829 int channel;
4830
4831 if (pciide_chipen(sc, pa) == 0)
4832 return;
4833 aprint_normal("%s: bus-master DMA support present",
4834 sc->sc_wdcdev.sc_dev.dv_xname);
4835
4836 /*
4837 * XXXSCW:
4838 * There seem to be a couple of buggy revisions/implementations
4839 * of the OPTi pciide chipset. This kludge seems to fix one of
4840 * the reported problems (PR/11644) but still fails for the
4841 * other (PR/13151), although the latter may be due to other
4842 * issues too...
4843 */
4844 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4845 aprint_normal(" but disabled due to chip rev. <= 0x12");
4846 sc->sc_dma_ok = 0;
4847 } else
4848 pciide_mapreg_dma(sc, pa);
4849
4850 aprint_normal("\n");
4851
4852 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4853 WDC_CAPABILITY_MODE;
4854 sc->sc_wdcdev.PIO_cap = 4;
4855 if (sc->sc_dma_ok) {
4856 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4857 sc->sc_wdcdev.irqack = pciide_irqack;
4858 sc->sc_wdcdev.DMA_cap = 2;
4859 }
4860 sc->sc_wdcdev.set_modes = opti_setup_channel;
4861
4862 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4863 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4864
4865 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4866 OPTI_REG_INIT_CONTROL);
4867
4868 interface = PCI_INTERFACE(pa->pa_class);
4869
4870 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4871 cp = &sc->pciide_channels[channel];
4872 if (pciide_chansetup(sc, channel, interface) == 0)
4873 continue;
4874 if (channel == 1 &&
4875 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4876 aprint_normal("%s: %s channel ignored (disabled)\n",
4877 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4878 continue;
4879 }
4880 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4881 pciide_pci_intr);
4882 if (cp->hw_ok == 0)
4883 continue;
4884 pciide_map_compat_intr(pa, cp, channel, interface);
4885 if (cp->hw_ok == 0)
4886 continue;
4887 opti_setup_channel(&cp->wdc_channel);
4888 }
4889 }
4890
4891 void
4892 opti_setup_channel(chp)
4893 struct channel_softc *chp;
4894 {
4895 struct ata_drive_datas *drvp;
4896 struct pciide_channel *cp = (struct pciide_channel*)chp;
4897 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4898 int drive, spd;
4899 int mode[2];
4900 u_int8_t rv, mr;
4901
4902 /*
4903 * The `Delay' and `Address Setup Time' fields of the
4904 * Miscellaneous Register are always zero initially.
4905 */
4906 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4907 mr &= ~(OPTI_MISC_DELAY_MASK |
4908 OPTI_MISC_ADDR_SETUP_MASK |
4909 OPTI_MISC_INDEX_MASK);
4910
4911 /* Prime the control register before setting timing values */
4912 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4913
4914 /* Determine the clock rate of the PCI bus the chip is attached to */
4915 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4916 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4917
4918 /* setup DMA if needed */
4919 pciide_channel_dma_setup(cp);
4920
4921 for (drive = 0; drive < 2; drive++) {
4922 drvp = &chp->ch_drive[drive];
4923 /* If no drive, skip */
4924 if ((drvp->drive_flags & DRIVE) == 0) {
4925 mode[drive] = -1;
4926 continue;
4927 }
4928
4929 if ((drvp->drive_flags & DRIVE_DMA)) {
4930 /*
4931 * Timings will be used for both PIO and DMA,
4932 * so adjust DMA mode if needed
4933 */
4934 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4935 drvp->PIO_mode = drvp->DMA_mode + 2;
4936 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4937 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4938 drvp->PIO_mode - 2 : 0;
4939 if (drvp->DMA_mode == 0)
4940 drvp->PIO_mode = 0;
4941
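/* DMA modes index timing-table entries 5..7, after PIO modes 0..4 */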
4942 mode[drive] = drvp->DMA_mode + 5;
4943 } else
4944 mode[drive] = drvp->PIO_mode;
4945
4946 if (drive && mode[0] >= 0 &&
4947 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4948 /*
4949 * Can't have two drives using different values
4950 * for `Address Setup Time'.
4951 * Slow down the faster drive to compensate.
4952 */
4953 int d = (opti_tim_as[spd][mode[0]] >
4954 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4955
4956 mode[d] = mode[1-d];
4957 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4958 chp->ch_drive[d].DMA_mode = 0;
4959 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4960 }
4961 }
4962
4963 for (drive = 0; drive < 2; drive++) {
4964 int m;
4965 if ((m = mode[drive]) < 0)
4966 continue;
4967
4968 /* Set the Address Setup Time and select appropriate index */
4969 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4970 rv |= OPTI_MISC_INDEX(drive);
4971 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4972
4973 /* Set the pulse width and recovery timing parameters */
4974 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4975 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4976 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4977 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4978
4979 /* Set the Enhanced Mode register appropriately */
4980 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4981 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4982 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4983 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4984 }
4985
4986 /* Finally, enable the timings */
4987 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4988
4989 pciide_print_modes(cp);
4990 }
4991
4992 #define ACARD_IS_850(sc) \
4993 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4994
4995 void
4996 acard_chip_map(sc, pa)
4997 struct pciide_softc *sc;
4998 struct pci_attach_args *pa;
4999 {
5000 struct pciide_channel *cp;
5001 int i;
5002 pcireg_t interface;
5003 bus_size_t cmdsize, ctlsize;
5004
5005 if (pciide_chipen(sc, pa) == 0)
5006 return;
5007
5008 /*
5009 * when the chip is in native mode it identifies itself as a
5010 * 'misc mass storage'. Fake the interface in this case.
5011 */
5012 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
5013 interface = PCI_INTERFACE(pa->pa_class);
5014 } else {
5015 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
5016 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
5017 }
5018
5019 aprint_normal("%s: bus-master DMA support present",
5020 sc->sc_wdcdev.sc_dev.dv_xname);
5021 pciide_mapreg_dma(sc, pa);
5022 aprint_normal("\n");
5023 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5024 WDC_CAPABILITY_MODE;
5025
5026 if (sc->sc_dma_ok) {
5027 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5028 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5029 sc->sc_wdcdev.irqack = pciide_irqack;
5030 }
5031 sc->sc_wdcdev.PIO_cap = 4;
5032 sc->sc_wdcdev.DMA_cap = 2;
5033 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
5034
5035 sc->sc_wdcdev.set_modes = acard_setup_channel;
5036 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5037 sc->sc_wdcdev.nchannels = 2;
5038
5039 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5040 cp = &sc->pciide_channels[i];
5041 if (pciide_chansetup(sc, i, interface) == 0)
5042 continue;
5043 if (interface & PCIIDE_INTERFACE_PCI(i)) {
5044 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
5045 &ctlsize, pciide_pci_intr);
5046 } else {
5047 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
5048 &cmdsize, &ctlsize);
5049 }
5050 if (cp->hw_ok == 0)
5051 return;
5052 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
5053 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
5054 wdcattach(&cp->wdc_channel);
5055 acard_setup_channel(&cp->wdc_channel);
5056 }
5057 if (!ACARD_IS_850(sc)) {
5058 u_int32_t reg;
5059 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
5060 reg &= ~ATP860_CTRL_INT;
5061 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
5062 }
5063 }
5064
5065 void
5066 acard_setup_channel(chp)
5067 struct channel_softc *chp;
5068 {
5069 struct ata_drive_datas *drvp;
5070 struct pciide_channel *cp = (struct pciide_channel*)chp;
5071 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5072 int channel = chp->channel;
5073 int drive;
5074 u_int32_t idetime, udma_mode;
5075 u_int32_t idedma_ctl;
5076
5077 /* setup DMA if needed */
5078 pciide_channel_dma_setup(cp);
5079
5080 if (ACARD_IS_850(sc)) {
5081 idetime = 0;
5082 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
5083 udma_mode &= ~ATP850_UDMA_MASK(channel);
5084 } else {
5085 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
5086 idetime &= ~ATP860_SETTIME_MASK(channel);
5087 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
5088 udma_mode &= ~ATP860_UDMA_MASK(channel);
5089
5090 /* check for 80-pin cable */
5091 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
5092 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
5093 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5094 & ATP860_CTRL_80P(chp->channel)) {
5095 if (chp->ch_drive[0].UDMA_mode > 2)
5096 chp->ch_drive[0].UDMA_mode = 2;
5097 if (chp->ch_drive[1].UDMA_mode > 2)
5098 chp->ch_drive[1].UDMA_mode = 2;
5099 }
5100 }
5101 }
5102
5103 idedma_ctl = 0;
5104
5105 /* Per drive settings */
5106 for (drive = 0; drive < 2; drive++) {
5107 drvp = &chp->ch_drive[drive];
5108 /* If no drive, skip */
5109 if ((drvp->drive_flags & DRIVE) == 0)
5110 continue;
5111 /* add timing values, setup DMA if needed */
5112 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5113 (drvp->drive_flags & DRIVE_UDMA)) {
5114 /* use Ultra/DMA */
5115 if (ACARD_IS_850(sc)) {
5116 idetime |= ATP850_SETTIME(drive,
5117 acard_act_udma[drvp->UDMA_mode],
5118 acard_rec_udma[drvp->UDMA_mode]);
5119 udma_mode |= ATP850_UDMA_MODE(channel, drive,
5120 acard_udma_conf[drvp->UDMA_mode]);
5121 } else {
5122 idetime |= ATP860_SETTIME(channel, drive,
5123 acard_act_udma[drvp->UDMA_mode],
5124 acard_rec_udma[drvp->UDMA_mode]);
5125 udma_mode |= ATP860_UDMA_MODE(channel, drive,
5126 acard_udma_conf[drvp->UDMA_mode]);
5127 }
5128 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5129 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5130 (drvp->drive_flags & DRIVE_DMA)) {
5131 /* use Multiword DMA */
5132 drvp->drive_flags &= ~DRIVE_UDMA;
5133 if (ACARD_IS_850(sc)) {
5134 idetime |= ATP850_SETTIME(drive,
5135 acard_act_dma[drvp->DMA_mode],
5136 acard_rec_dma[drvp->DMA_mode]);
5137 } else {
5138 idetime |= ATP860_SETTIME(channel, drive,
5139 acard_act_dma[drvp->DMA_mode],
5140 acard_rec_dma[drvp->DMA_mode]);
5141 }
5142 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5143 } else {
5144 /* PIO only */
5145 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5146 if (ACARD_IS_850(sc)) {
5147 idetime |= ATP850_SETTIME(drive,
5148 acard_act_pio[drvp->PIO_mode],
5149 acard_rec_pio[drvp->PIO_mode]);
5150 } else {
5151 idetime |= ATP860_SETTIME(channel, drive,
5152 acard_act_pio[drvp->PIO_mode],
5153 acard_rec_pio[drvp->PIO_mode]);
5154 }
5155 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
5156 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5157 | ATP8x0_CTRL_EN(channel));
5158 }
5159 }
5160
5161 if (idedma_ctl != 0) {
5162 /* Add software bits in status register */
5163 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5164 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5165 }
5166 pciide_print_modes(cp);
5167
5168 if (ACARD_IS_850(sc)) {
5169 pci_conf_write(sc->sc_pc, sc->sc_tag,
5170 ATP850_IDETIME(channel), idetime);
5171 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
5172 } else {
5173 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
5174 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
5175 }
5176 }
5177
5178 int
5179 acard_pci_intr(arg)
5180 void *arg;
5181 {
5182 struct pciide_softc *sc = arg;
5183 struct pciide_channel *cp;
5184 struct channel_softc *wdc_cp;
5185 int rv = 0;
5186 int dmastat, i, crv;
5187
5188 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5189 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5190 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5191 if ((dmastat & IDEDMA_CTL_INTR) == 0)
5192 continue;
5193 cp = &sc->pciide_channels[i];
5194 wdc_cp = &cp->wdc_channel;
5195 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
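/*
 * No command is waiting for an interrupt on this channel; call
 * wdcintr() anyway and acknowledge the bus-master status.
 */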
5196 (void)wdcintr(wdc_cp);
5197 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5198 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5199 continue;
5200 }
5201 crv = wdcintr(wdc_cp);
5202 if (crv == 0)
5203 printf("%s:%d: bogus intr\n",
5204 sc->sc_wdcdev.sc_dev.dv_xname, i);
5205 else if (crv == 1)
5206 rv = 1;
5207 else if (rv == 0)
5208 rv = crv;
5209 }
5210 return rv;
5211 }
5212
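/*
 * Match helper for pci_find_device(): returns non-zero for function 0
 * of a Winbond W83C553F (83c553 southbridge) at revision 0x05 or below,
 * i.e. the revisions on which IDE DMA must be disabled (see
 * sl82c105_chip_map() below).
 */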
static int
sl82c105_bugchk(struct pci_attach_args *pa)
{

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
	    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
		return (0);

	if (PCI_REVISION(pa->pa_class) <= 0x05)
		return (1);

	return (0);
}

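/*
 * Map and configure a Symphony Labs SL82C105 IDE controller, as found
 * e.g. together with the Winbond 83c553 southbridge.  DMA is only
 * advertised when no buggy 83c553 rev. <= 5 bridge is present on the
 * bus; channels disabled in the IDECSR register are skipped.
 */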
void
sl82c105_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface, idecr;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);

	/*
	 * Check to see if we're part of the Winbond 83c553 Southbridge.
	 * If so, we need to disable DMA on rev. <= 5 of that chip.
	 */
	if (pci_find_device(pa, sl82c105_bugchk)) {
		aprint_normal(" but disabled due to 83c553 rev. <= 0x05");
		sc->sc_dma_ok = 0;
	} else
		pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = sl82c105_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
		    (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
			aprint_normal("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		sl82c105_setup_channel(&cp->wdc_channel);
	}
}

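/*
 * The uncompiled sketch below restates the timing constraint that
 * sl82c105_setup_channel() enforces: command timings are shared between
 * PIO and multi-word DMA on this chip, so MW DMA mode x is only usable
 * together with PIO mode x+2, and drives slower than PIO 3 (or only
 * capable of MW DMA 0) fall back to PIO.  The helper name is purely
 * illustrative and not part of the driver.
 */
#if 0
static int
symph_dma_for_pio_sketch(int pio_mode, int dma_mode)
{
	if (pio_mode < 3)
		return (-1);			/* PIO only */
	if (dma_mode + 2 > pio_mode)
		dma_mode = pio_mode - 2;	/* clamp to shared timing */
	return (dma_mode >= 1 ? dma_mode : -1);	/* MW DMA 0 not usable */
}
#endif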
void
sl82c105_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int pxdx_reg, drive;
	pcireg_t pxdx;

	/* Set up DMA if needed. */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
						: SYMPH_P1D0CR) + (drive * 4);

		pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);

		pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
		pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);

		drvp = &chp->ch_drive[drive];
		/* If no drive, skip. */
		if ((drvp->drive_flags & DRIVE) == 0) {
			pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
			continue;
		}

		if (drvp->drive_flags & DRIVE_DMA) {
			/*
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed.
			 */
			if (drvp->PIO_mode >= 3) {
				if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
					drvp->DMA_mode = drvp->PIO_mode - 2;
				if (drvp->DMA_mode < 1) {
					/*
					 * Can't mix both PIO and DMA.
					 * Disable DMA.
					 */
					drvp->drive_flags &= ~DRIVE_DMA;
				}
			} else {
				/*
				 * Can't mix both PIO and DMA. Disable
				 * DMA.
				 */
				drvp->drive_flags &= ~DRIVE_DMA;
			}
		}

		if (drvp->drive_flags & DRIVE_DMA) {
			/* Use multi-word DMA. */
			pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
			    PxDx_CMD_ON_SHIFT;
			pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
		} else {
			pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
			    PxDx_CMD_ON_SHIFT;
			pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
		}

		/* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */

		/* ...and set the mode for this drive. */
		pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
	}

	pciide_print_modes(cp);
}

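/*
 * ServerWorks OSB4/CSB5/CSB6 IDE.  The Ultra-DMA ceiling depends on the
 * chip: OSB4 tops out at UDMA2, CSB5 at UDMA4 before revision 0x92 and
 * at UDMA5 from that revision on, CSB6 at UDMA5.  Once the channels are
 * set up, bit 14 is set and bit 13 cleared in configuration register
 * 0x64 of function 0 of the same device.
 */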
void
serverworks_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcitag_t pcib_tag;
	int channel;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
		sc->sc_wdcdev.UDMA_cap = 2;
		break;
	case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
		if (PCI_REVISION(pa->pa_class) < 0x92)
			sc->sc_wdcdev.UDMA_cap = 4;
		else
			sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	}

	sc->sc_wdcdev.set_modes = serverworks_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 2;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    serverworks_pci_intr);
		if (cp->hw_ok == 0)
			return;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			return;
		serverworks_setup_channel(&cp->wdc_channel);
	}

	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
	    (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
}

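/*
 * Register layout used by serverworks_setup_channel() below, shown as an
 * uncompiled sketch for reference (inferred from the shifts in the code,
 * not from a datasheet): with unit = drive + 2 * channel, the PIO and MW
 * DMA timing registers at 0x40/0x44 hold one byte per unit, drive 0 of a
 * channel in the upper byte of that channel's 16-bit half (hence the
 * unit^1), while the mode registers at 0x48/0x54 hold a nibble per unit
 * starting at bit 16, plus per-unit UDMA enable bits in the low bits of
 * 0x54.  The helper name is illustrative only.
 */
#if 0
static u_int32_t
svwks_set_timing_sketch(u_int32_t reg, int channel, int drive,
    u_int8_t timing)
{
	int unit = drive + 2 * channel;

	/* replace this unit's timing byte, leaving the other units alone */
	reg &= ~(0xff << (8 * (unit ^ 1)));
	return (reg | ((u_int32_t)timing << (8 * (unit ^ 1))));
}
#endif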
void
serverworks_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	int drive, unit;
	u_int32_t pio_time, dma_time, pio_mode, udma_mode;
	u_int32_t idedma_ctl;
	static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
	static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
	dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
	pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
	udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);

	pio_time &= ~(0xffff << (16 * channel));
	dma_time &= ~(0xffff << (16 * channel));
	pio_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(3 << (2 * channel));

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		unit = drive + 2 * channel;
		/* add timing values, setup DMA if needed */
		pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
		pio_mode |= drvp->PIO_mode << (4 * unit + 16);
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA, check for 80-pin cable */
			if (drvp->UDMA_mode > 2 &&
			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
				drvp->UDMA_mode = 2;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
			udma_mode |= 1 << unit;
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
		    (drvp->drive_flags & DRIVE_DMA)) {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
		}
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
		pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
	}
	pciide_print_modes(cp);
}

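/*
 * Interrupt handler for the ServerWorks channels.  Unlike the Acard
 * handler above, a channel is only serviced when its bus-master status
 * shows the interrupt bit set with the active bit clear; a bogus
 * interrupt is reported and acknowledged by writing the status back.
 */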
int
serverworks_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int rv = 0;
	int dmastat, i, crv;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
		    IDEDMA_CTL_INTR)
			continue;
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		crv = wdcintr(wdc_cp);
		if (crv == 0) {
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
		} else
			rv = 1;
	}
	return rv;
}

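/*
 * Intel i31244 ("Artisea") Serial ATA.  Unless PCIIDE_I31244_ENABLEDMA
 * is defined, DMA is left disabled on revision 0 parts.  The channels
 * are set up through the generic SATA path (sata_setup_channel()),
 * advertising up to Ultra-DMA 6 when DMA is usable.
 */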
void
artisea_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
#ifndef PCIIDE_I31244_ENABLEDMA
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_31244 &&
	    PCI_REVISION(pa->pa_class) == 0) {
		aprint_normal(" but disabled due to rev. 0");
		sc->sc_dma_ok = 0;
	} else
#endif
		pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	/*
	 * XXX Configure LEDs to show activity.
	 */

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
		sc->sc_wdcdev.UDMA_cap = 6;
	}
	sc->sc_wdcdev.set_modes = sata_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		sata_setup_channel(&cp->wdc_channel);
	}
}

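/*
 * Uncompiled sketch of the "Add software bits in status register" step
 * shared by the setup_channel routines above: each drive that ends up in
 * a DMA mode has its IDEDMA_CTL_DRV_DMA bit accumulated, and the result,
 * if non-zero, is written once to that channel's bus-master status
 * register.  The function name is illustrative only.
 */
#if 0
static void
pciide_post_dma_flags_sketch(struct pciide_softc *sc, int channel,
    u_int8_t idedma_ctl)
{
	if (idedma_ctl != 0)
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
}
#endif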