1 /* $NetBSD: pciide.c,v 1.208 2003/10/08 10:58:12 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
34
35 /*
36 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by Christopher G. Demetriou
49 * for the NetBSD Project.
50 * 4. The name of the author may not be used to endorse or promote products
51 * derived from this software without specific prior written permission
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
56 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
57 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
58 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
62 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 */
64
65 /*
66 * PCI IDE controller driver.
67 *
68 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
69 * sys/dev/pci/ppb.c, revision 1.16).
70 *
71 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
72 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
73 * 5/16/94" from the PCI SIG.
74 *
75 */
76
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.208 2003/10/08 10:58:12 bouyer Exp $");
79
80 #ifndef WDCDEBUG
81 #define WDCDEBUG
82 #endif
83
84 #define DEBUG_DMA 0x01
85 #define DEBUG_XFERS 0x02
86 #define DEBUG_FUNCS 0x08
87 #define DEBUG_PROBE 0x10
88 #ifdef WDCDEBUG
89 int wdcdebug_pciide_mask = 0;
90 #define WDCDEBUG_PRINT(args, level) \
91 if (wdcdebug_pciide_mask & (level)) printf args
92 #else
93 #define WDCDEBUG_PRINT(args, level)
94 #endif
95 #include <sys/param.h>
96 #include <sys/systm.h>
97 #include <sys/device.h>
98 #include <sys/malloc.h>
99
100 #include <uvm/uvm_extern.h>
101
102 #include <machine/endian.h>
103
104 #include <dev/pci/pcireg.h>
105 #include <dev/pci/pcivar.h>
106 #include <dev/pci/pcidevs.h>
107 #include <dev/pci/pciidereg.h>
108 #include <dev/pci/pciidevar.h>
109 #include <dev/pci/pciide_piix_reg.h>
110 #include <dev/pci/pciide_amd_reg.h>
111 #include <dev/pci/pciide_apollo_reg.h>
112 #include <dev/pci/pciide_cmd_reg.h>
113 #include <dev/pci/pciide_cy693_reg.h>
114 #include <dev/pci/pciide_sis_reg.h>
115 #include <dev/pci/pciide_acer_reg.h>
116 #include <dev/pci/pciide_pdc202xx_reg.h>
117 #include <dev/pci/pciide_opti_reg.h>
118 #include <dev/pci/pciide_hpt_reg.h>
119 #include <dev/pci/pciide_acard_reg.h>
120 #include <dev/pci/pciide_sl82c105_reg.h>
121 #include <dev/pci/pciide_i31244_reg.h>
122 #include <dev/pci/pciide_sii3112_reg.h>
123 #include <dev/pci/cy82c693var.h>
124
125 #include "opt_pciide.h"
126
127 static const char dmaerrfmt[] =
128 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
129
130 /* inlines for reading/writing 8-bit PCI registers */
131 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
132 int));
133 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
134 int, u_int8_t));
135
136 static __inline u_int8_t
137 pciide_pci_read(pc, pa, reg)
138 pci_chipset_tag_t pc;
139 pcitag_t pa;
140 int reg;
141 {
142
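	/* Config space is read one aligned 32-bit dword at a time: shift the requested byte lane down and mask to 8 bits. */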
143 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
144 ((reg & 0x03) * 8) & 0xff);
145 }
146
147 static __inline void
148 pciide_pci_write(pc, pa, reg, val)
149 pci_chipset_tag_t pc;
150 pcitag_t pa;
151 int reg;
152 u_int8_t val;
153 {
154 pcireg_t pcival;
155
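	/* Read-modify-write the aligned 32-bit dword so that only the addressed byte changes. */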
156 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
157 pcival &= ~(0xff << ((reg & 0x03) * 8));
158 pcival |= (val << ((reg & 0x03) * 8));
159 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
160 }
161
162 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
163
164 void sata_setup_channel __P((struct channel_softc*));
165
166 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167 void piix_setup_channel __P((struct channel_softc*));
168 void piix3_4_setup_channel __P((struct channel_softc*));
169 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
170 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
171 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
172
173 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void amd7x6_setup_channel __P((struct channel_softc*));
175
176 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
177 void apollo_sata_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void apollo_setup_channel __P((struct channel_softc*));
179
180 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cmd0643_9_setup_channel __P((struct channel_softc*));
183 void cmd_channel_map __P((struct pci_attach_args *,
184 struct pciide_softc *, int));
185 int cmd_pci_intr __P((void *));
186 void cmd646_9_irqack __P((struct channel_softc *));
187 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
188 void cmd680_setup_channel __P((struct channel_softc*));
189 void cmd680_channel_map __P((struct pci_attach_args *,
190 struct pciide_softc *, int));
191
192 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void cmd3112_setup_channel __P((struct channel_softc*));
194
195 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void cy693_setup_channel __P((struct channel_softc*));
197
198 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void sis_setup_channel __P((struct channel_softc*));
200 void sis96x_setup_channel __P((struct channel_softc*));
201 static int sis_hostbr_match __P(( struct pci_attach_args *));
202 static int sis_south_match __P(( struct pci_attach_args *));
203
204 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
205 void acer_setup_channel __P((struct channel_softc*));
206 int acer_pci_intr __P((void *));
207
208 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
209 void pdc202xx_setup_channel __P((struct channel_softc*));
210 void pdc20268_setup_channel __P((struct channel_softc*));
211 int pdc202xx_pci_intr __P((void *));
212 int pdc20265_pci_intr __P((void *));
213 static void pdc20262_dma_start __P((void*, int, int));
214 static int pdc20262_dma_finish __P((void*, int, int, int));
215
216 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
217 void opti_setup_channel __P((struct channel_softc*));
218
219 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
220 void hpt_setup_channel __P((struct channel_softc*));
221 int hpt_pci_intr __P((void *));
222
223 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
224 void acard_setup_channel __P((struct channel_softc*));
225 int acard_pci_intr __P((void *));
226
227 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
228 void serverworks_setup_channel __P((struct channel_softc*));
229 int serverworks_pci_intr __P((void *));
230
231 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
232 void sl82c105_setup_channel __P((struct channel_softc*));
233
234 void pciide_channel_dma_setup __P((struct pciide_channel *));
235 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
236 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
237 void pciide_dma_start __P((void*, int, int));
238 int pciide_dma_finish __P((void*, int, int, int));
239 void pciide_irqack __P((struct channel_softc *));
240
241 void artisea_chip_map __P((struct pciide_softc*, struct pci_attach_args *));
242
243 struct pciide_product_desc {
244 u_int32_t ide_product;
245 int ide_flags;
246 const char *ide_name;
247 /* map and setup chip, probe drives */
248 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
249 };
250
251 /* Flags for ide_flags */
252 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
253 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARs ignore upper word */
254
255 /* Default product description for devices not specifically known to this driver */
256 const struct pciide_product_desc default_product_desc = {
257 0,
258 0,
259 "Generic PCI IDE controller",
260 default_chip_map,
261 };
262
263 const struct pciide_product_desc pciide_intel_products[] = {
264 { PCI_PRODUCT_INTEL_82092AA,
265 0,
266 "Intel 82092AA IDE controller",
267 default_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82371FB_IDE,
270 0,
271 "Intel 82371FB IDE controller (PIIX)",
272 piix_chip_map,
273 },
274 { PCI_PRODUCT_INTEL_82371SB_IDE,
275 0,
276 "Intel 82371SB IDE Interface (PIIX3)",
277 piix_chip_map,
278 },
279 { PCI_PRODUCT_INTEL_82371AB_IDE,
280 0,
281 "Intel 82371AB IDE controller (PIIX4)",
282 piix_chip_map,
283 },
284 { PCI_PRODUCT_INTEL_82440MX_IDE,
285 0,
286 "Intel 82440MX IDE controller",
287 piix_chip_map
288 },
289 { PCI_PRODUCT_INTEL_82801AA_IDE,
290 0,
291 "Intel 82801AA IDE Controller (ICH)",
292 piix_chip_map,
293 },
294 { PCI_PRODUCT_INTEL_82801AB_IDE,
295 0,
296 "Intel 82801AB IDE Controller (ICH0)",
297 piix_chip_map,
298 },
299 { PCI_PRODUCT_INTEL_82801BA_IDE,
300 0,
301 "Intel 82801BA IDE Controller (ICH2)",
302 piix_chip_map,
303 },
304 { PCI_PRODUCT_INTEL_82801BAM_IDE,
305 0,
306 "Intel 82801BAM IDE Controller (ICH2-M)",
307 piix_chip_map,
308 },
309 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
310 0,
311 "Intel 82801CA IDE Controller (ICH3)",
312 piix_chip_map,
313 },
314 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
315 0,
316 "Intel 82801CA IDE Controller (ICH3)",
317 piix_chip_map,
318 },
319 { PCI_PRODUCT_INTEL_82801DB_IDE,
320 0,
321 "Intel 82801DB IDE Controller (ICH4)",
322 piix_chip_map,
323 },
324 { PCI_PRODUCT_INTEL_82801DBM_IDE,
325 0,
326 "Intel 82801DBM IDE Controller (ICH4-M)",
327 piix_chip_map,
328 },
329 { PCI_PRODUCT_INTEL_82801EB_IDE,
330 0,
331 "Intel 82801EB IDE Controller (ICH5)",
332 piix_chip_map,
333 },
334 { PCI_PRODUCT_INTEL_31244,
335 0,
336 "Intel 31244 Serial ATA Controller",
337 artisea_chip_map,
338 },
339 { PCI_PRODUCT_INTEL_82801EB_SATA,
340 0,
341 "Intel 82801EB Serial ATA Controller",
342 artisea_chip_map,
343 },
344 { 0,
345 0,
346 NULL,
347 NULL
348 }
349 };
350
351 const struct pciide_product_desc pciide_amd_products[] = {
352 { PCI_PRODUCT_AMD_PBC756_IDE,
353 0,
354 "Advanced Micro Devices AMD756 IDE Controller",
355 amd7x6_chip_map
356 },
357 { PCI_PRODUCT_AMD_PBC766_IDE,
358 0,
359 "Advanced Micro Devices AMD766 IDE Controller",
360 amd7x6_chip_map
361 },
362 { PCI_PRODUCT_AMD_PBC768_IDE,
363 0,
364 "Advanced Micro Devices AMD768 IDE Controller",
365 amd7x6_chip_map
366 },
367 { PCI_PRODUCT_AMD_PBC8111_IDE,
368 0,
369 "Advanced Micro Devices AMD8111 IDE Controller",
370 amd7x6_chip_map
371 },
372 { 0,
373 0,
374 NULL,
375 NULL
376 }
377 };
378
379 const struct pciide_product_desc pciide_nvidia_products[] = {
380 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
381 0,
382 "NVIDIA nForce IDE Controller",
383 amd7x6_chip_map
384 },
385 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
386 0,
387 "NVIDIA nForce2 IDE Controller",
388 amd7x6_chip_map
389 },
390 { 0,
391 0,
392 NULL,
393 NULL
394 }
395 };
396
397 const struct pciide_product_desc pciide_cmd_products[] = {
398 { PCI_PRODUCT_CMDTECH_640,
399 0,
400 "CMD Technology PCI0640",
401 cmd_chip_map
402 },
403 { PCI_PRODUCT_CMDTECH_643,
404 0,
405 "CMD Technology PCI0643",
406 cmd0643_9_chip_map,
407 },
408 { PCI_PRODUCT_CMDTECH_646,
409 0,
410 "CMD Technology PCI0646",
411 cmd0643_9_chip_map,
412 },
413 { PCI_PRODUCT_CMDTECH_648,
414 IDE_PCI_CLASS_OVERRIDE,
415 "CMD Technology PCI0648",
416 cmd0643_9_chip_map,
417 },
418 { PCI_PRODUCT_CMDTECH_649,
419 IDE_PCI_CLASS_OVERRIDE,
420 "CMD Technology PCI0649",
421 cmd0643_9_chip_map,
422 },
423 { PCI_PRODUCT_CMDTECH_680,
424 IDE_PCI_CLASS_OVERRIDE,
425 "Silicon Image 0680",
426 cmd680_chip_map,
427 },
428 { PCI_PRODUCT_CMDTECH_3112,
429 IDE_PCI_CLASS_OVERRIDE,
430 "Silicon Image SATALink 3112",
431 cmd3112_chip_map,
432 },
433 { 0,
434 0,
435 NULL,
436 NULL
437 }
438 };
439
440 const struct pciide_product_desc pciide_via_products[] = {
441 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
442 0,
443 NULL,
444 apollo_chip_map,
445 },
446 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
447 0,
448 NULL,
449 apollo_chip_map,
450 },
451 { PCI_PRODUCT_VIATECH_VT8237_SATA,
452 IDE_PCI_CLASS_OVERRIDE,
453 "VIA Technologies VT8237 SATA Controller",
454 apollo_sata_chip_map,
455 },
456 { 0,
457 0,
458 NULL,
459 NULL
460 }
461 };
462
463 const struct pciide_product_desc pciide_cypress_products[] = {
464 { PCI_PRODUCT_CONTAQ_82C693,
465 IDE_16BIT_IOSPACE,
466 "Cypress 82C693 IDE Controller",
467 cy693_chip_map,
468 },
469 { 0,
470 0,
471 NULL,
472 NULL
473 }
474 };
475
476 const struct pciide_product_desc pciide_sis_products[] = {
477 { PCI_PRODUCT_SIS_5597_IDE,
478 0,
479 NULL,
480 sis_chip_map,
481 },
482 { 0,
483 0,
484 NULL,
485 NULL
486 }
487 };
488
489 const struct pciide_product_desc pciide_acer_products[] = {
490 { PCI_PRODUCT_ALI_M5229,
491 0,
492 "Acer Labs M5229 UDMA IDE Controller",
493 acer_chip_map,
494 },
495 { 0,
496 0,
497 NULL,
498 NULL
499 }
500 };
501
502 const struct pciide_product_desc pciide_promise_products[] = {
503 { PCI_PRODUCT_PROMISE_ULTRA33,
504 IDE_PCI_CLASS_OVERRIDE,
505 "Promise Ultra33/ATA Bus Master IDE Accelerator",
506 pdc202xx_chip_map,
507 },
508 { PCI_PRODUCT_PROMISE_ULTRA66,
509 IDE_PCI_CLASS_OVERRIDE,
510 "Promise Ultra66/ATA Bus Master IDE Accelerator",
511 pdc202xx_chip_map,
512 },
513 { PCI_PRODUCT_PROMISE_ULTRA100,
514 IDE_PCI_CLASS_OVERRIDE,
515 "Promise Ultra100/ATA Bus Master IDE Accelerator",
516 pdc202xx_chip_map,
517 },
518 { PCI_PRODUCT_PROMISE_ULTRA100X,
519 IDE_PCI_CLASS_OVERRIDE,
520 "Promise Ultra100/ATA Bus Master IDE Accelerator",
521 pdc202xx_chip_map,
522 },
523 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
524 IDE_PCI_CLASS_OVERRIDE,
525 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
526 pdc202xx_chip_map,
527 },
528 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
529 IDE_PCI_CLASS_OVERRIDE,
530 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
531 pdc202xx_chip_map,
532 },
533 { PCI_PRODUCT_PROMISE_ULTRA133,
534 IDE_PCI_CLASS_OVERRIDE,
535 "Promise Ultra133/ATA Bus Master IDE Accelerator",
536 pdc202xx_chip_map,
537 },
538 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
539 IDE_PCI_CLASS_OVERRIDE,
540 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
541 pdc202xx_chip_map,
542 },
543 { PCI_PRODUCT_PROMISE_MBULTRA133,
544 IDE_PCI_CLASS_OVERRIDE,
545 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
546 pdc202xx_chip_map,
547 },
548 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
549 IDE_PCI_CLASS_OVERRIDE,
550 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
551 pdc202xx_chip_map,
552 },
553 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
554 IDE_PCI_CLASS_OVERRIDE,
555 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
556 pdc202xx_chip_map,
557 },
558 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
559 IDE_PCI_CLASS_OVERRIDE,
560 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
561 pdc202xx_chip_map,
562 },
563 { 0,
564 0,
565 NULL,
566 NULL
567 }
568 };
569
570 const struct pciide_product_desc pciide_opti_products[] = {
571 { PCI_PRODUCT_OPTI_82C621,
572 0,
573 "OPTi 82c621 PCI IDE controller",
574 opti_chip_map,
575 },
576 { PCI_PRODUCT_OPTI_82C568,
577 0,
578 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
579 opti_chip_map,
580 },
581 { PCI_PRODUCT_OPTI_82D568,
582 0,
583 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
584 opti_chip_map,
585 },
586 { 0,
587 0,
588 NULL,
589 NULL
590 }
591 };
592
593 const struct pciide_product_desc pciide_triones_products[] = {
594 { PCI_PRODUCT_TRIONES_HPT366,
595 IDE_PCI_CLASS_OVERRIDE,
596 NULL,
597 hpt_chip_map,
598 },
599 { PCI_PRODUCT_TRIONES_HPT372,
600 IDE_PCI_CLASS_OVERRIDE,
601 NULL,
602 hpt_chip_map
603 },
604 { PCI_PRODUCT_TRIONES_HPT374,
605 IDE_PCI_CLASS_OVERRIDE,
606 NULL,
607 hpt_chip_map
608 },
609 { 0,
610 0,
611 NULL,
612 NULL
613 }
614 };
615
616 const struct pciide_product_desc pciide_acard_products[] = {
617 { PCI_PRODUCT_ACARD_ATP850U,
618 IDE_PCI_CLASS_OVERRIDE,
619 "Acard ATP850U Ultra33 IDE Controller",
620 acard_chip_map,
621 },
622 { PCI_PRODUCT_ACARD_ATP860,
623 IDE_PCI_CLASS_OVERRIDE,
624 "Acard ATP860 Ultra66 IDE Controller",
625 acard_chip_map,
626 },
627 { PCI_PRODUCT_ACARD_ATP860A,
628 IDE_PCI_CLASS_OVERRIDE,
629 "Acard ATP860-A Ultra66 IDE Controller",
630 acard_chip_map,
631 },
632 { 0,
633 0,
634 NULL,
635 NULL
636 }
637 };
638
639 const struct pciide_product_desc pciide_serverworks_products[] = {
640 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
641 0,
642 "ServerWorks OSB4 IDE Controller",
643 serverworks_chip_map,
644 },
645 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
646 0,
647 "ServerWorks CSB5 IDE Controller",
648 serverworks_chip_map,
649 },
650 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
651 0,
652 "ServerWorks CSB6 RAID/IDE Controller",
653 serverworks_chip_map,
654 },
655 { 0,
656 0,
657 NULL,
658 }
659 };
660
661 const struct pciide_product_desc pciide_symphony_products[] = {
662 { PCI_PRODUCT_SYMPHONY_82C105,
663 0,
664 "Symphony Labs 82C105 IDE controller",
665 sl82c105_chip_map,
666 },
667 { 0,
668 0,
669 NULL,
670 }
671 };
672
673 const struct pciide_product_desc pciide_winbond_products[] = {
674 { PCI_PRODUCT_WINBOND_W83C553F_1,
675 0,
676 "Winbond W83C553F IDE controller",
677 sl82c105_chip_map,
678 },
679 { 0,
680 0,
681 NULL,
682 }
683 };
684
685 struct pciide_vendor_desc {
686 u_int32_t ide_vendor;
687 const struct pciide_product_desc *ide_products;
688 };
689
690 const struct pciide_vendor_desc pciide_vendors[] = {
691 { PCI_VENDOR_INTEL, pciide_intel_products },
692 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
693 { PCI_VENDOR_VIATECH, pciide_via_products },
694 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
695 { PCI_VENDOR_SIS, pciide_sis_products },
696 { PCI_VENDOR_ALI, pciide_acer_products },
697 { PCI_VENDOR_PROMISE, pciide_promise_products },
698 { PCI_VENDOR_AMD, pciide_amd_products },
699 { PCI_VENDOR_OPTI, pciide_opti_products },
700 { PCI_VENDOR_TRIONES, pciide_triones_products },
701 { PCI_VENDOR_ACARD, pciide_acard_products },
702 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
703 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
704 { PCI_VENDOR_WINBOND, pciide_winbond_products },
705 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
706 { 0, NULL }
707 };
708
709 /* options passed via the 'flags' config keyword */
710 #define PCIIDE_OPTIONS_DMA 0x01
711 #define PCIIDE_OPTIONS_NODMA 0x02
712
713 int pciide_match __P((struct device *, struct cfdata *, void *));
714 void pciide_attach __P((struct device *, struct device *, void *));
715
716 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
717 pciide_match, pciide_attach, NULL, NULL);
718
719 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
720 void pciide_mapregs_compat __P(( struct pci_attach_args *,
721 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
722 void pciide_mapregs_native __P((struct pci_attach_args *,
723 struct pciide_channel *, bus_size_t *, bus_size_t *,
724 int (*pci_intr) __P((void *))));
725 void pciide_mapreg_dma __P((struct pciide_softc *,
726 struct pci_attach_args *));
727 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
728 void pciide_mapchan __P((struct pci_attach_args *,
729 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
730 int (*pci_intr) __P((void *))));
731 void pciide_map_compat_intr __P(( struct pci_attach_args *,
732 struct pciide_channel *, int));
733 int pciide_compat_intr __P((void *));
734 int pciide_pci_intr __P((void *));
735 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
736
737 const struct pciide_product_desc *
738 pciide_lookup_product(id)
739 u_int32_t id;
740 {
741 const struct pciide_product_desc *pp;
742 const struct pciide_vendor_desc *vp;
743
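	/* Find the vendor's product table first (the vendor list ends with a NULL ide_products pointer), then scan that table, which ends with a NULL chip_map entry. */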
744 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
745 if (PCI_VENDOR(id) == vp->ide_vendor)
746 break;
747
748 if ((pp = vp->ide_products) == NULL)
749 return NULL;
750
751 for (; pp->chip_map != NULL; pp++)
752 if (PCI_PRODUCT(id) == pp->ide_product)
753 break;
754
755 if (pp->chip_map == NULL)
756 return NULL;
757 return pp;
758 }
759
760 int
761 pciide_match(parent, match, aux)
762 struct device *parent;
763 struct cfdata *match;
764 void *aux;
765 {
766 struct pci_attach_args *pa = aux;
767 const struct pciide_product_desc *pp;
768
769 /*
770 * Check the ID register to see that it's a PCI IDE controller.
771 * If it is, we assume that we can deal with it; it _should_
772 * work in a standardized way...
773 */
774 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
775 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
776 return (1);
777 }
778
779 /*
780 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
781 * controllers. Let's see if we can deal with them anyway.
782 */
783 pp = pciide_lookup_product(pa->pa_id);
784 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
785 return (1);
786 }
787
788 return (0);
789 }
790
791 void
792 pciide_attach(parent, self, aux)
793 struct device *parent, *self;
794 void *aux;
795 {
796 struct pci_attach_args *pa = aux;
797 pci_chipset_tag_t pc = pa->pa_pc;
798 pcitag_t tag = pa->pa_tag;
799 struct pciide_softc *sc = (struct pciide_softc *)self;
800 pcireg_t csr;
801 char devinfo[256];
802 const char *displaydev;
803
804 aprint_naive(": disk controller\n");
805 aprint_normal("\n");
806
807 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
808 sc->sc_pp = pciide_lookup_product(pa->pa_id);
809 if (sc->sc_pp == NULL) {
810 sc->sc_pp = &default_product_desc;
811 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
812 displaydev = devinfo;
813 } else
814 displaydev = sc->sc_pp->ide_name;
815
816 /* if displaydev == NULL, printf is done in chip-specific map */
817 if (displaydev)
818 aprint_normal("%s: %s (rev. 0x%02x)\n",
819 sc->sc_wdcdev.sc_dev.dv_xname, displaydev,
820 PCI_REVISION(pa->pa_class));
821
822 sc->sc_pc = pa->pa_pc;
823 sc->sc_tag = pa->pa_tag;
824
825 /* Set up DMA defaults; these might be adjusted by chip_map. */
826 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
827 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
828
829 #ifdef WDCDEBUG
830 if (wdcdebug_pciide_mask & DEBUG_PROBE)
831 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
832 #endif
833 sc->sc_pp->chip_map(sc, pa);
834
835 if (sc->sc_dma_ok) {
836 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
837 csr |= PCI_COMMAND_MASTER_ENABLE;
838 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
839 }
840 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
841 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
842 }
843
844 /* tell whether the chip is enabled or not */
845 int
846 pciide_chipen(sc, pa)
847 struct pciide_softc *sc;
848 struct pci_attach_args *pa;
849 {
850 pcireg_t csr;
851
852 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
853 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
854 PCI_COMMAND_STATUS_REG);
855 aprint_normal("%s: device disabled (at %s)\n",
856 sc->sc_wdcdev.sc_dev.dv_xname,
857 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
858 "device" : "bridge");
859 return 0;
860 }
861 return 1;
862 }
863
864 void
865 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
866 struct pci_attach_args *pa;
867 struct pciide_channel *cp;
868 int compatchan;
869 bus_size_t *cmdsizep, *ctlsizep;
870 {
871 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
872 struct channel_softc *wdc_cp = &cp->wdc_channel;
873
874 cp->compat = 1;
875 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
876 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
877
878 wdc_cp->cmd_iot = pa->pa_iot;
879 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
880 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
881 aprint_error("%s: couldn't map %s channel cmd regs\n",
882 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
883 goto bad;
884 }
885
886 wdc_cp->ctl_iot = pa->pa_iot;
887 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
888 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
889 aprint_error("%s: couldn't map %s channel ctl regs\n",
890 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
891 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
892 PCIIDE_COMPAT_CMD_SIZE);
893 goto bad;
894 }
895
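	/* 32-bit data port accesses go through the same window as the command registers. */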
896 wdc_cp->data32iot = wdc_cp->cmd_iot;
897 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
898 pciide_map_compat_intr(pa, cp, compatchan);
899 return;
900
901 bad:
902 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
903 return;
904 }
905
906 void
907 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
908 struct pci_attach_args * pa;
909 struct pciide_channel *cp;
910 bus_size_t *cmdsizep, *ctlsizep;
911 int (*pci_intr) __P((void *));
912 {
913 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
914 struct channel_softc *wdc_cp = &cp->wdc_channel;
915 const char *intrstr;
916 pci_intr_handle_t intrhandle;
917
918 cp->compat = 0;
919
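	/* All native channels of a controller share one PCI interrupt; establish the handler only when the first channel is mapped. */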
920 if (sc->sc_pci_ih == NULL) {
921 if (pci_intr_map(pa, &intrhandle) != 0) {
922 aprint_error("%s: couldn't map native-PCI interrupt\n",
923 sc->sc_wdcdev.sc_dev.dv_xname);
924 goto bad;
925 }
926 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
927 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
928 intrhandle, IPL_BIO, pci_intr, sc);
929 if (sc->sc_pci_ih != NULL) {
930 aprint_normal("%s: using %s for native-PCI interrupt\n",
931 sc->sc_wdcdev.sc_dev.dv_xname,
932 intrstr ? intrstr : "unknown interrupt");
933 } else {
934 aprint_error(
935 "%s: couldn't establish native-PCI interrupt",
936 sc->sc_wdcdev.sc_dev.dv_xname);
937 if (intrstr != NULL)
938 aprint_normal(" at %s", intrstr);
939 aprint_normal("\n");
940 goto bad;
941 }
942 }
943 cp->ih = sc->sc_pci_ih;
944 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
945 PCI_MAPREG_TYPE_IO, 0,
946 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
947 aprint_error("%s: couldn't map %s channel cmd regs\n",
948 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
949 goto bad;
950 }
951
952 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
953 PCI_MAPREG_TYPE_IO, 0,
954 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
955 aprint_error("%s: couldn't map %s channel ctl regs\n",
956 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
957 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
958 goto bad;
959 }
960 /*
961 * In native mode, 4 bytes of I/O space are mapped for the control
962 * register; the control register itself is at offset 2. Pass the
963 * generic code a handle for only that one byte at the right offset.
964 */
965 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
966 &wdc_cp->ctl_ioh) != 0) {
967 aprint_error("%s: unable to subregion %s channel ctl regs\n",
968 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
969 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
970 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
971 goto bad;
972 }
973
974 wdc_cp->data32iot = wdc_cp->cmd_iot;
975 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
976 return;
977
978 bad:
979 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
980 return;
981 }
982
983 void
984 pciide_mapreg_dma(sc, pa)
985 struct pciide_softc *sc;
986 struct pci_attach_args *pa;
987 {
988 pcireg_t maptype;
989 bus_addr_t addr;
990
991 /*
992 * Map DMA registers
993 *
994 * Note that sc_dma_ok is the right variable to test to see if
995 * DMA can be done. If the interface doesn't support DMA,
996 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
997 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
998 * non-zero if the interface supports DMA and the registers
999 * could be mapped.
1000 *
1001 * XXX Note that despite the fact that the Bus Master IDE specs
1002 * XXX say that "The bus master IDE function uses 16 bytes of IO
1003 * XXX space," some controllers (at least the United
1004 * XXX Microelectronics UM8886BF) place it in memory space.
1005 */
1006 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
1007 PCIIDE_REG_BUS_MASTER_DMA);
1008
1009 switch (maptype) {
1010 case PCI_MAPREG_TYPE_IO:
1011 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
1012 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
1013 &addr, NULL, NULL) == 0);
1014 if (sc->sc_dma_ok == 0) {
1015 aprint_normal(
1016 ", but unused (couldn't query registers)");
1017 break;
1018 }
1019 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
1020 && addr >= 0x10000) {
1021 sc->sc_dma_ok = 0;
1022 aprint_normal(
1023 ", but unused (registers at unsafe address "
1024 "%#lx)", (unsigned long)addr);
1025 break;
1026 }
1027 /* FALLTHROUGH */
1028
1029 case PCI_MAPREG_MEM_TYPE_32BIT:
1030 sc->sc_dma_ok = (pci_mapreg_map(pa,
1031 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
1032 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
1033 sc->sc_dmat = pa->pa_dmat;
1034 if (sc->sc_dma_ok == 0) {
1035 aprint_normal(", but unused (couldn't map registers)");
1036 } else {
1037 sc->sc_wdcdev.dma_arg = sc;
1038 sc->sc_wdcdev.dma_init = pciide_dma_init;
1039 sc->sc_wdcdev.dma_start = pciide_dma_start;
1040 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
1041 }
1042
1043 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1044 PCIIDE_OPTIONS_NODMA) {
1045 aprint_normal(
1046 ", but unused (forced off by config file)");
1047 sc->sc_dma_ok = 0;
1048 }
1049 break;
1050
1051 default:
1052 sc->sc_dma_ok = 0;
1053 aprint_normal(
1054 ", but unsupported register maptype (0x%x)", maptype);
1055 }
1056 }
1057
1058 int
1059 pciide_compat_intr(arg)
1060 void *arg;
1061 {
1062 struct pciide_channel *cp = arg;
1063
1064 #ifdef DIAGNOSTIC
1065 /* should only be called for a compat channel */
1066 if (cp->compat == 0)
1067 panic("pciide compat intr called for non-compat chan %p", cp);
1068 #endif
1069 return (wdcintr(&cp->wdc_channel));
1070 }
1071
1072 int
1073 pciide_pci_intr(arg)
1074 void *arg;
1075 {
1076 struct pciide_softc *sc = arg;
1077 struct pciide_channel *cp;
1078 struct channel_softc *wdc_cp;
1079 int i, rv, crv;
1080
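	/* One shared handler services all native channels: poll each channel that is expecting an interrupt and let wdcintr() decide whether the device actually asserted it. */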
1081 rv = 0;
1082 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1083 cp = &sc->pciide_channels[i];
1084 wdc_cp = &cp->wdc_channel;
1085
1086 /* If a compat channel, skip. */
1087 if (cp->compat)
1088 continue;
1089 /* if this channel is not waiting for an interrupt, skip */
1090 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1091 continue;
1092
1093 crv = wdcintr(wdc_cp);
1094 if (crv == 0)
1095 ; /* leave rv alone */
1096 else if (crv == 1)
1097 rv = 1; /* claim the intr */
1098 else if (rv == 0) /* crv should be -1 in this case */
1099 rv = crv; /* if we've done no better, take it */
1100 }
1101 return (rv);
1102 }
1103
1104 void
1105 pciide_channel_dma_setup(cp)
1106 struct pciide_channel *cp;
1107 {
1108 int drive;
1109 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1110 struct ata_drive_datas *drvp;
1111
1112 for (drive = 0; drive < 2; drive++) {
1113 drvp = &cp->wdc_channel.ch_drive[drive];
1114 /* If no drive, skip */
1115 if ((drvp->drive_flags & DRIVE) == 0)
1116 continue;
1117 /* setup DMA if needed */
1118 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1119 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1120 sc->sc_dma_ok == 0) {
1121 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1122 continue;
1123 }
1124 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1125 != 0) {
1126 /* Abort DMA setup */
1127 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1128 continue;
1129 }
1130 }
1131 }
1132
1133 int
1134 pciide_dma_table_setup(sc, channel, drive)
1135 struct pciide_softc *sc;
1136 int channel, drive;
1137 {
1138 bus_dma_segment_t seg;
1139 int error, rseg;
1140 const bus_size_t dma_table_size =
1141 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1142 struct pciide_dma_maps *dma_maps =
1143 &sc->pciide_channels[channel].dma_maps[drive];
1144
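	/* Per-drive DMA state: a table of physical region descriptors (base address + byte count pairs) plus bus_dma maps for the table itself and for data transfers. */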
1145 /* If table was already allocated, just return */
1146 if (dma_maps->dma_table)
1147 return 0;
1148
1149 /* Allocate memory for the DMA tables and map it */
1150 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1151 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1152 BUS_DMA_NOWAIT)) != 0) {
1153 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1154 "allocate", drive, error);
1155 return error;
1156 }
1157 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1158 dma_table_size,
1159 (caddr_t *)&dma_maps->dma_table,
1160 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1161 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1162 "map", drive, error);
1163 return error;
1164 }
1165 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1166 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1167 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1168 /* Create and load table DMA map for this disk */
1169 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1170 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1171 &dma_maps->dmamap_table)) != 0) {
1172 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1173 "create", drive, error);
1174 return error;
1175 }
1176 if ((error = bus_dmamap_load(sc->sc_dmat,
1177 dma_maps->dmamap_table,
1178 dma_maps->dma_table,
1179 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1180 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1181 "load", drive, error);
1182 return error;
1183 }
1184 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1185 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1186 DEBUG_PROBE);
1187 /* Create a xfer DMA map for this drive */
1188 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1189 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1190 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1191 &dma_maps->dmamap_xfer)) != 0) {
1192 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1193 "create xfer", drive, error);
1194 return error;
1195 }
1196 return 0;
1197 }
1198
1199 int
1200 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1201 void *v;
1202 int channel, drive;
1203 void *databuf;
1204 size_t datalen;
1205 int flags;
1206 {
1207 struct pciide_softc *sc = v;
1208 int error, seg;
1209 struct pciide_dma_maps *dma_maps =
1210 &sc->pciide_channels[channel].dma_maps[drive];
1211
1212 error = bus_dmamap_load(sc->sc_dmat,
1213 dma_maps->dmamap_xfer,
1214 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1215 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1216 if (error) {
1217 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1218 "load xfer", drive, error);
1219 return error;
1220 }
1221
1222 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1223 dma_maps->dmamap_xfer->dm_mapsize,
1224 (flags & WDC_DMA_READ) ?
1225 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1226
1227 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1228 #ifdef DIAGNOSTIC
1229 /* A segment must not cross a 64k boundary */
1230 {
1231 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1232 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1233 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1234 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1235 printf("pciide_dma: segment %d physical addr 0x%lx"
1236 " len 0x%lx not properly aligned\n",
1237 seg, phys, len);
1238 panic("pciide_dma: buf align");
1239 }
1240 }
1241 #endif
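	/* Fill in this segment's descriptor: physical base address and byte count, stored little-endian; the EOT flag is OR'ed into the last entry below. */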
1242 dma_maps->dma_table[seg].base_addr =
1243 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1244 dma_maps->dma_table[seg].byte_count =
1245 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1246 IDEDMA_BYTE_COUNT_MASK);
1247 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1248 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1249 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1250
1251 }
1252 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1253 htole32(IDEDMA_BYTE_COUNT_EOT);
1254
1255 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1256 dma_maps->dmamap_table->dm_mapsize,
1257 BUS_DMASYNC_PREWRITE);
1258
1259 /* Maps are ready. Start DMA function */
1260 #ifdef DIAGNOSTIC
1261 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1262 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1263 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1264 panic("pciide_dma_init: table align");
1265 }
1266 #endif
1267
1268 /* Clear status bits */
1269 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1270 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1271 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1272 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1273 /* Write table addr */
1274 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1275 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1276 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1277 /* set read/write */
1278 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1279 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1280 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1281 /* remember flags */
1282 dma_maps->dma_flags = flags;
1283 return 0;
1284 }
1285
1286 void
1287 pciide_dma_start(v, channel, drive)
1288 void *v;
1289 int channel, drive;
1290 {
1291 struct pciide_softc *sc = v;
1292
1293 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1294 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1295 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1296 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1297 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1298 }
1299
1300 int
1301 pciide_dma_finish(v, channel, drive, force)
1302 void *v;
1303 int channel, drive;
1304 int force;
1305 {
1306 struct pciide_softc *sc = v;
1307 u_int8_t status;
1308 int error = 0;
1309 struct pciide_dma_maps *dma_maps =
1310 &sc->pciide_channels[channel].dma_maps[drive];
1311
1312 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1313 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1314 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1315 DEBUG_XFERS);
1316
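	/* Unless forced, only complete the transfer once the controller has latched its interrupt bit; otherwise the interrupt was not for this channel. */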
1317 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1318 return WDC_DMAST_NOIRQ;
1319
1320 /* stop DMA channel */
1321 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1322 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1323 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1324 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1325
1326 /* Unload the map of the data buffer */
1327 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1328 dma_maps->dmamap_xfer->dm_mapsize,
1329 (dma_maps->dma_flags & WDC_DMA_READ) ?
1330 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1331 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1332
1333 if ((status & IDEDMA_CTL_ERR) != 0) {
1334 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1335 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1336 error |= WDC_DMAST_ERR;
1337 }
1338
1339 if ((status & IDEDMA_CTL_INTR) == 0) {
1340 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1341 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1342 drive, status);
1343 error |= WDC_DMAST_NOIRQ;
1344 }
1345
1346 if ((status & IDEDMA_CTL_ACT) != 0) {
1347 /* data underrun, may be a valid condition for ATAPI */
1348 error |= WDC_DMAST_UNDER;
1349 }
1350 return error;
1351 }
1352
1353 void
1354 pciide_irqack(chp)
1355 struct channel_softc *chp;
1356 {
1357 struct pciide_channel *cp = (struct pciide_channel*)chp;
1358 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1359
1360 /* clear status bits in IDE DMA registers */
1361 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1362 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1363 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1364 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1365 }
1366
1367 /* some common code used by several chip_map */
1368 int
1369 pciide_chansetup(sc, channel, interface)
1370 struct pciide_softc *sc;
1371 int channel;
1372 pcireg_t interface;
1373 {
1374 struct pciide_channel *cp = &sc->pciide_channels[channel];
1375 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1376 cp->name = PCIIDE_CHANNEL_NAME(channel);
1377 cp->wdc_channel.channel = channel;
1378 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1379 cp->wdc_channel.ch_queue =
1380 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1381 if (cp->wdc_channel.ch_queue == NULL) {
1382 aprint_error("%s %s channel: "
1383 "can't allocate memory for command queue",
1384 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1385 return 0;
1386 }
1387 aprint_normal("%s: %s channel %s to %s mode\n",
1388 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1389 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1390 "configured" : "wired",
1391 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1392 "native-PCI" : "compatibility");
1393 return 1;
1394 }
1395
1396 /* some common code used by several chip channel_map */
1397 void
1398 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1399 struct pci_attach_args *pa;
1400 struct pciide_channel *cp;
1401 pcireg_t interface;
1402 bus_size_t *cmdsizep, *ctlsizep;
1403 int (*pci_intr) __P((void *));
1404 {
1405 struct channel_softc *wdc_cp = &cp->wdc_channel;
1406
1407 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1408 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr);
1409 else
1410 pciide_mapregs_compat(pa, cp, wdc_cp->channel, cmdsizep,
1411 ctlsizep);
1412 wdcattach(wdc_cp);
1413 }
1414
1415 /*
1416 * generic code to map the compat intr.
1417 */
1418 void
1419 pciide_map_compat_intr(pa, cp, compatchan)
1420 struct pci_attach_args *pa;
1421 struct pciide_channel *cp;
1422 int compatchan;
1423 {
1424 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1425
1426 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1427 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1428 pa, compatchan, pciide_compat_intr, cp);
1429 if (cp->ih == NULL) {
1430 #endif
1431 aprint_error("%s: no compatibility interrupt for use by %s "
1432 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1433 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
1434 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1435 }
1436 #endif
1437 }
1438
1439 void
1440 default_chip_map(sc, pa)
1441 struct pciide_softc *sc;
1442 struct pci_attach_args *pa;
1443 {
1444 struct pciide_channel *cp;
1445 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1446 pcireg_t csr;
1447 int channel, drive;
1448 struct ata_drive_datas *drvp;
1449 u_int8_t idedma_ctl;
1450 bus_size_t cmdsize, ctlsize;
1451 char *failreason;
1452
1453 if (pciide_chipen(sc, pa) == 0)
1454 return;
1455
1456 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1457 aprint_normal("%s: bus-master DMA support present",
1458 sc->sc_wdcdev.sc_dev.dv_xname);
1459 if (sc->sc_pp == &default_product_desc &&
1460 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1461 PCIIDE_OPTIONS_DMA) == 0) {
1462 aprint_normal(", but unused (no driver support)");
1463 sc->sc_dma_ok = 0;
1464 } else {
1465 pciide_mapreg_dma(sc, pa);
1466 if (sc->sc_dma_ok != 0)
1467 aprint_normal(", used without full driver "
1468 "support");
1469 }
1470 } else {
1471 aprint_normal("%s: hardware does not support DMA",
1472 sc->sc_wdcdev.sc_dev.dv_xname);
1473 sc->sc_dma_ok = 0;
1474 }
1475 aprint_normal("\n");
1476 if (sc->sc_dma_ok) {
1477 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1478 sc->sc_wdcdev.irqack = pciide_irqack;
1479 }
1480 sc->sc_wdcdev.PIO_cap = 0;
1481 sc->sc_wdcdev.DMA_cap = 0;
1482
1483 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1484 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1485 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1486
1487 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1488 cp = &sc->pciide_channels[channel];
1489 if (pciide_chansetup(sc, channel, interface) == 0)
1490 continue;
1491 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1492 pciide_pci_intr);
1493 if (cp->wdc_channel.ch_flags & WDCF_DISABLED)
1494 continue;
1495 /*
1496 * Check to see if something appears to be there.
1497 */
1498 failreason = NULL;
1499 /*
1500 * In native mode, always enable the controller. It's
1501 * not possible to have an ISA board using the same address
1502 * anyway.
1503 */
1504 if (interface & PCIIDE_INTERFACE_PCI(channel))
1505 goto next;
1506 if (!wdcprobe(&cp->wdc_channel)) {
1507 failreason = "not responding; disabled or no drives?";
1508 goto next;
1509 }
1510 /*
1511 * Now, make sure it's actually attributable to this PCI IDE
1512 * channel by trying to access the channel again while the
1513 * PCI IDE controller's I/O space is disabled. (If the
1514 * channel no longer appears to be there, it belongs to
1515 * this controller.) YUCK!
1516 */
1517 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1518 PCI_COMMAND_STATUS_REG);
1519 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1520 csr & ~PCI_COMMAND_IO_ENABLE);
1521 if (wdcprobe(&cp->wdc_channel))
1522 failreason = "other hardware responding at addresses";
1523 pci_conf_write(sc->sc_pc, sc->sc_tag,
1524 PCI_COMMAND_STATUS_REG, csr);
1525 next:
1526 if (failreason) {
1527 aprint_error("%s: %s channel ignored (%s)\n",
1528 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1529 failreason);
1530 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
1531 }
1532 wdcattach(&cp->wdc_channel);
1533 }
1534
1535 if (sc->sc_dma_ok == 0)
1536 return;
1537
1538 /* Allocate DMA maps */
1539 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1540 idedma_ctl = 0;
1541 cp = &sc->pciide_channels[channel];
1542 for (drive = 0; drive < 2; drive++) {
1543 drvp = &cp->wdc_channel.ch_drive[drive];
1544 /* If no drive, skip */
1545 if ((drvp->drive_flags & DRIVE) == 0)
1546 continue;
1547 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1548 continue;
1549 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1550 /* Abort DMA setup */
1551 aprint_error(
1552 "%s:%d:%d: can't allocate DMA maps, "
1553 "using PIO transfers\n",
1554 sc->sc_wdcdev.sc_dev.dv_xname,
1555 channel, drive);
1556 drvp->drive_flags &= ~DRIVE_DMA;
 continue;
1557 }
1558 aprint_normal("%s:%d:%d: using DMA data transfers\n",
1559 sc->sc_wdcdev.sc_dev.dv_xname,
1560 channel, drive);
1561 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1562 }
1563 if (idedma_ctl != 0) {
1564 /* Add software bits in status register */
1565 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1566 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1567 idedma_ctl);
1568 }
1569 }
1570 }
1571
1572 void
1573 sata_setup_channel(chp)
1574 struct channel_softc *chp;
1575 {
1576 struct ata_drive_datas *drvp;
1577 int drive;
1578 u_int32_t idedma_ctl;
1579 struct pciide_channel *cp = (struct pciide_channel*)chp;
1580 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
1581
1582 /* setup DMA if needed */
1583 pciide_channel_dma_setup(cp);
1584
1585 idedma_ctl = 0;
1586
1587 for (drive = 0; drive < 2; drive++) {
1588 drvp = &chp->ch_drive[drive];
1589 /* If no drive, skip */
1590 if ((drvp->drive_flags & DRIVE) == 0)
1591 continue;
1592 if (drvp->drive_flags & DRIVE_UDMA) {
1593 /* use Ultra/DMA */
1594 drvp->drive_flags &= ~DRIVE_DMA;
1595 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1596 } else if (drvp->drive_flags & DRIVE_DMA) {
1597 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1598 }
1599 }
1600
1601 /*
1602 * Nothing to do to set up modes; mode selection is meaningless for
1603 * S-ATA (but many S-ATA drives still want to get the SET_FEATURES
1604 * command).
1605 */
1606 if (idedma_ctl != 0) {
1607 /* Add software bits in status register */
1608 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1609 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1610 idedma_ctl);
1611 }
1612 }
1613
1614 void
1615 piix_chip_map(sc, pa)
1616 struct pciide_softc *sc;
1617 struct pci_attach_args *pa;
1618 {
1619 struct pciide_channel *cp;
1620 int channel;
1621 u_int32_t idetim;
1622 bus_size_t cmdsize, ctlsize;
1623
1624 if (pciide_chipen(sc, pa) == 0)
1625 return;
1626
1627 aprint_normal("%s: bus-master DMA support present",
1628 sc->sc_wdcdev.sc_dev.dv_xname);
1629 pciide_mapreg_dma(sc, pa);
1630 aprint_normal("\n");
1631 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1632 WDC_CAPABILITY_MODE;
1633 if (sc->sc_dma_ok) {
1634 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1635 sc->sc_wdcdev.irqack = pciide_irqack;
1636 switch(sc->sc_pp->ide_product) {
1637 case PCI_PRODUCT_INTEL_82371AB_IDE:
1638 case PCI_PRODUCT_INTEL_82440MX_IDE:
1639 case PCI_PRODUCT_INTEL_82801AA_IDE:
1640 case PCI_PRODUCT_INTEL_82801AB_IDE:
1641 case PCI_PRODUCT_INTEL_82801BA_IDE:
1642 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1643 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1644 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1645 case PCI_PRODUCT_INTEL_82801DB_IDE:
1646 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1647 case PCI_PRODUCT_INTEL_82801EB_IDE:
1648 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1649 }
1650 }
1651 sc->sc_wdcdev.PIO_cap = 4;
1652 sc->sc_wdcdev.DMA_cap = 2;
1653 switch(sc->sc_pp->ide_product) {
1654 case PCI_PRODUCT_INTEL_82801AA_IDE:
1655 sc->sc_wdcdev.UDMA_cap = 4;
1656 break;
1657 case PCI_PRODUCT_INTEL_82801BA_IDE:
1658 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1659 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1660 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1661 case PCI_PRODUCT_INTEL_82801DB_IDE:
1662 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1663 case PCI_PRODUCT_INTEL_82801EB_IDE:
1664 sc->sc_wdcdev.UDMA_cap = 5;
1665 break;
1666 default:
1667 sc->sc_wdcdev.UDMA_cap = 2;
1668 }
1669 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1670 sc->sc_wdcdev.set_modes = piix_setup_channel;
1671 else
1672 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1673 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1674 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1675
1676 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1677 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1678 DEBUG_PROBE);
1679 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1680 WDCDEBUG_PRINT((", sidetim=0x%x",
1681 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1682 DEBUG_PROBE);
1683 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1684 WDCDEBUG_PRINT((", udmareg 0x%x",
1685 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1686 DEBUG_PROBE);
1687 }
1688 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1689 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1690 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1691 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1692 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1693 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1694 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1695 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1696 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1697 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1698 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1699 DEBUG_PROBE);
1700 }
1701
1702 }
1703 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1704
1705 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1706 cp = &sc->pciide_channels[channel];
1707 /* PIIX is compat-only */
1708 if (pciide_chansetup(sc, channel, 0) == 0)
1709 continue;
1710 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1711 if ((PIIX_IDETIM_READ(idetim, channel) &
1712 PIIX_IDETIM_IDE) == 0) {
1713 #if 1
1714 aprint_normal("%s: %s channel ignored (disabled)\n",
1715 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1716 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
1717 continue;
1718 #else
1719 pcireg_t interface;
1720
1721 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1722 channel);
1723 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1724 idetim);
1725 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1726 sc->sc_tag, PCI_CLASS_REG));
1727 aprint_normal("channel %d idetim=%08x interface=%02x\n",
1728 channel, idetim, interface);
1729 #endif
1730 }
1731 /* PIIX controllers are compat-only pciide devices */
1732 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1733 }
1734
1735 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1736 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1737 DEBUG_PROBE);
1738 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1739 WDCDEBUG_PRINT((", sidetim=0x%x",
1740 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1741 DEBUG_PROBE);
1742 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1743 WDCDEBUG_PRINT((", udmareg 0x%x",
1744 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1745 DEBUG_PROBE);
1746 }
1747 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1748 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1749 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1750 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1751 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1752 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1753 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1754 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1755 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1756 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1757 DEBUG_PROBE);
1758 }
1759 }
1760 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1761 }
1762
1763 void
1764 piix_setup_channel(chp)
1765 struct channel_softc *chp;
1766 {
1767 u_int8_t mode[2], drive;
1768 u_int32_t oidetim, idetim, idedma_ctl;
1769 struct pciide_channel *cp = (struct pciide_channel*)chp;
1770 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1771 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1772
1773 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1774 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1775 idedma_ctl = 0;
1776
1777 /* set up new idetim: Enable IDE registers decode */
1778 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1779 chp->channel);
1780
1781 /* setup DMA */
1782 pciide_channel_dma_setup(cp);
1783
1784 /*
1785 * Here we have to mess with the drive modes: the PIIX can't use
1786 * different timings for the master and slave drives, so we need
1787 * to find the best combination.
1788 */
1789
1790 /* If both drives support DMA, take the lower mode */
1791 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1792 (drvp[1].drive_flags & DRIVE_DMA)) {
1793 mode[0] = mode[1] =
1794 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1795 drvp[0].DMA_mode = mode[0];
1796 drvp[1].DMA_mode = mode[1];
1797 goto ok;
1798 }
1799 	/*
1800 	 * If only one drive supports DMA, use its mode, and
1801 	 * put the other one in PIO mode 0 if its mode is not compatible.
1802 	 */
1803 if (drvp[0].drive_flags & DRIVE_DMA) {
1804 mode[0] = drvp[0].DMA_mode;
1805 mode[1] = drvp[1].PIO_mode;
1806 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1807 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1808 mode[1] = drvp[1].PIO_mode = 0;
1809 goto ok;
1810 }
1811 if (drvp[1].drive_flags & DRIVE_DMA) {
1812 mode[1] = drvp[1].DMA_mode;
1813 mode[0] = drvp[0].PIO_mode;
1814 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1815 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1816 mode[0] = drvp[0].PIO_mode = 0;
1817 goto ok;
1818 }
1819 	/*
1820 	 * If neither drive uses DMA, take the lower mode, unless
1821 	 * one of them is below PIO mode 2.
1822 	 */
1823 if (drvp[0].PIO_mode < 2) {
1824 mode[0] = drvp[0].PIO_mode = 0;
1825 mode[1] = drvp[1].PIO_mode;
1826 } else if (drvp[1].PIO_mode < 2) {
1827 mode[1] = drvp[1].PIO_mode = 0;
1828 mode[0] = drvp[0].PIO_mode;
1829 } else {
1830 mode[0] = mode[1] =
1831 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1832 drvp[0].PIO_mode = mode[0];
1833 drvp[1].PIO_mode = mode[1];
1834 }
1835 ok:	/* The modes are set up */
1836 for (drive = 0; drive < 2; drive++) {
1837 if (drvp[drive].drive_flags & DRIVE_DMA) {
1838 idetim |= piix_setup_idetim_timings(
1839 mode[drive], 1, chp->channel);
1840 goto end;
1841 }
1842 }
1843 	/* If we get here, neither drive uses DMA */
1844 if (mode[0] >= 2)
1845 idetim |= piix_setup_idetim_timings(
1846 mode[0], 0, chp->channel);
1847 else
1848 idetim |= piix_setup_idetim_timings(
1849 mode[1], 0, chp->channel);
1850 end:	/*
1851 	 * The timing mode is now set up in the controller. Enable
1852 	 * it per drive.
1853 	 */
1854 for (drive = 0; drive < 2; drive++) {
1855 /* If no drive, skip */
1856 if ((drvp[drive].drive_flags & DRIVE) == 0)
1857 continue;
1858 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1859 if (drvp[drive].drive_flags & DRIVE_DMA)
1860 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1861 }
1862 if (idedma_ctl != 0) {
1863 /* Add software bits in status register */
1864 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1865 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1866 idedma_ctl);
1867 }
1868 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1869 }
1870
1871 void
1872 piix3_4_setup_channel(chp)
1873 struct channel_softc *chp;
1874 {
1875 struct ata_drive_datas *drvp;
1876 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1877 struct pciide_channel *cp = (struct pciide_channel*)chp;
1878 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1879 int drive;
1880 int channel = chp->channel;
1881
1882 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1883 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1884 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1885 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1886 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1887 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1888 PIIX_SIDETIM_RTC_MASK(channel));
1889 idedma_ctl = 0;
1890
1891 /* set up new idetim: Enable IDE registers decode */
1892 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1893
1894 /* setup DMA if needed */
1895 pciide_channel_dma_setup(cp);
1896
1897 for (drive = 0; drive < 2; drive++) {
1898 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1899 PIIX_UDMATIM_SET(0x3, channel, drive));
1900 drvp = &chp->ch_drive[drive];
1901 /* If no drive, skip */
1902 if ((drvp->drive_flags & DRIVE) == 0)
1903 continue;
1904 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1905 (drvp->drive_flags & DRIVE_UDMA) == 0))
1906 goto pio;
1907
1908 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1909 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1910 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1911 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1912 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1913 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1914 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1915 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1916 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1917 ideconf |= PIIX_CONFIG_PINGPONG;
1918 }
1919 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1920 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1921 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1922 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1923 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1924 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
1925 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1926 /* setup Ultra/100 */
1927 if (drvp->UDMA_mode > 2 &&
1928 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1929 drvp->UDMA_mode = 2;
1930 if (drvp->UDMA_mode > 4) {
1931 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1932 } else {
1933 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1934 if (drvp->UDMA_mode > 2) {
1935 ideconf |= PIIX_CONFIG_UDMA66(channel,
1936 drive);
1937 } else {
1938 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1939 drive);
1940 }
1941 }
1942 }
1943 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1944 /* setup Ultra/66 */
1945 if (drvp->UDMA_mode > 2 &&
1946 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1947 drvp->UDMA_mode = 2;
1948 if (drvp->UDMA_mode > 2)
1949 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1950 else
1951 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1952 }
1953 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1954 (drvp->drive_flags & DRIVE_UDMA)) {
1955 /* use Ultra/DMA */
1956 drvp->drive_flags &= ~DRIVE_DMA;
1957 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1958 udmareg |= PIIX_UDMATIM_SET(
1959 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1960 } else {
1961 /* use Multiword DMA */
1962 drvp->drive_flags &= ~DRIVE_UDMA;
1963 if (drive == 0) {
1964 idetim |= piix_setup_idetim_timings(
1965 drvp->DMA_mode, 1, channel);
1966 } else {
1967 sidetim |= piix_setup_sidetim_timings(
1968 drvp->DMA_mode, 1, channel);
1969 				idetim = PIIX_IDETIM_SET(idetim,
1970 PIIX_IDETIM_SITRE, channel);
1971 }
1972 }
1973 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1974
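		/*
		 * PIO timings are always programmed below, whether we fell
		 * through from the DMA setup above or jumped here directly
		 * for drives without DMA.
		 */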
1975 pio: /* use PIO mode */
1976 idetim |= piix_setup_idetim_drvs(drvp);
1977 if (drive == 0) {
1978 idetim |= piix_setup_idetim_timings(
1979 drvp->PIO_mode, 0, channel);
1980 } else {
1981 sidetim |= piix_setup_sidetim_timings(
1982 drvp->PIO_mode, 0, channel);
1983 			idetim = PIIX_IDETIM_SET(idetim,
1984 PIIX_IDETIM_SITRE, channel);
1985 }
1986 }
1987 if (idedma_ctl != 0) {
1988 /* Add software bits in status register */
1989 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1990 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1991 idedma_ctl);
1992 }
1993 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1994 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1995 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1996 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1997 }
1998
1999
2000 /* setup ISP and RTC fields, based on mode */
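/*
 * (ISP is the IORDY sample point and RTC the recovery time in the IDETIM
 *  register; the piix_isp_pio/piix_isp_dma and piix_rtc_pio/piix_rtc_dma
 *  tables hold the per-mode register encodings.)
 */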
2001 static u_int32_t
2002 piix_setup_idetim_timings(mode, dma, channel)
2003 u_int8_t mode;
2004 u_int8_t dma;
2005 u_int8_t channel;
2006 {
2007
2008 if (dma)
2009 return PIIX_IDETIM_SET(0,
2010 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
2011 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
2012 channel);
2013 else
2014 return PIIX_IDETIM_SET(0,
2015 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
2016 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
2017 channel);
2018 }
2019
2020 /* setup DTE, PPE, IE and TIME field based on PIO mode */
2021 static u_int32_t
2022 piix_setup_idetim_drvs(drvp)
2023 struct ata_drive_datas *drvp;
2024 {
2025 u_int32_t ret = 0;
2026 struct channel_softc *chp = drvp->chnl_softc;
2027 u_int8_t channel = chp->channel;
2028 u_int8_t drive = drvp->drive;
2029
2030 	/*
2031 	 * If the drive is using UDMA, the timing setup is independent,
2032 	 * so just check DMA and PIO here.
2033 	 */
2034 if (drvp->drive_flags & DRIVE_DMA) {
2035 /* if mode = DMA mode 0, use compatible timings */
2036 if ((drvp->drive_flags & DRIVE_DMA) &&
2037 drvp->DMA_mode == 0) {
2038 drvp->PIO_mode = 0;
2039 return ret;
2040 }
2041 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2042 		/*
2043 		 * If the PIO and DMA timings are the same, use fast timings
2044 		 * for PIO too; otherwise fall back to compat timings.
2045 		 */
2046 if ((piix_isp_pio[drvp->PIO_mode] !=
2047 piix_isp_dma[drvp->DMA_mode]) ||
2048 (piix_rtc_pio[drvp->PIO_mode] !=
2049 piix_rtc_dma[drvp->DMA_mode]))
2050 drvp->PIO_mode = 0;
2051 /* if PIO mode <= 2, use compat timings for PIO */
2052 if (drvp->PIO_mode <= 2) {
2053 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2054 channel);
2055 return ret;
2056 }
2057 }
2058
2059 	/*
2060 	 * Now set up the PIO mode. If the mode is < 2, use compat timings;
2061 	 * else enable fast timings. Enable IORDY and prefetch/post
2062 	 * if PIO mode >= 3.
2063 	 */
2064
2065 if (drvp->PIO_mode < 2)
2066 return ret;
2067
2068 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2069 if (drvp->PIO_mode >= 3) {
2070 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2071 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2072 }
2073 return ret;
2074 }
2075
2076 /* setup values in SIDETIM registers, based on mode */
2077 static u_int32_t
2078 piix_setup_sidetim_timings(mode, dma, channel)
2079 u_int8_t mode;
2080 u_int8_t dma;
2081 u_int8_t channel;
2082 {
2083 if (dma)
2084 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2085 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2086 else
2087 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2088 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2089 }
2090
2091 void
2092 amd7x6_chip_map(sc, pa)
2093 struct pciide_softc *sc;
2094 struct pci_attach_args *pa;
2095 {
2096 struct pciide_channel *cp;
2097 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2098 int channel;
2099 pcireg_t chanenable;
2100 bus_size_t cmdsize, ctlsize;
2101
2102 if (pciide_chipen(sc, pa) == 0)
2103 return;
2104
2105 aprint_normal("%s: bus-master DMA support present",
2106 sc->sc_wdcdev.sc_dev.dv_xname);
2107 pciide_mapreg_dma(sc, pa);
2108 aprint_normal("\n");
2109 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2110 WDC_CAPABILITY_MODE;
2111 if (sc->sc_dma_ok) {
2112 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2113 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2114 sc->sc_wdcdev.irqack = pciide_irqack;
2115 }
2116 sc->sc_wdcdev.PIO_cap = 4;
2117 sc->sc_wdcdev.DMA_cap = 2;
2118
2119 switch (sc->sc_pci_vendor) {
2120 case PCI_VENDOR_AMD:
2121 switch (sc->sc_pp->ide_product) {
2122 case PCI_PRODUCT_AMD_PBC766_IDE:
2123 case PCI_PRODUCT_AMD_PBC768_IDE:
2124 case PCI_PRODUCT_AMD_PBC8111_IDE:
2125 sc->sc_wdcdev.UDMA_cap = 5;
2126 break;
2127 default:
2128 sc->sc_wdcdev.UDMA_cap = 4;
2129 }
2130 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2131 break;
2132
2133 case PCI_VENDOR_NVIDIA:
2134 switch (sc->sc_pp->ide_product) {
2135 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2136 sc->sc_wdcdev.UDMA_cap = 5;
2137 break;
2138 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2139 sc->sc_wdcdev.UDMA_cap = 6;
2140 break;
2141 }
2142 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2143 break;
2144
2145 default:
2146 panic("amd7x6_chip_map: unknown vendor");
2147 }
2148 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2149 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2150 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2151 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2152 AMD7X6_CHANSTATUS_EN(sc));
2153
2154 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2155 DEBUG_PROBE);
2156 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2157 cp = &sc->pciide_channels[channel];
2158 if (pciide_chansetup(sc, channel, interface) == 0)
2159 continue;
2160
2161 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2162 aprint_normal("%s: %s channel ignored (disabled)\n",
2163 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2164 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
2165 continue;
2166 }
2167 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2168 pciide_pci_intr);
2169 }
2170 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2171 chanenable);
2172 return;
2173 }
2174
2175 void
2176 amd7x6_setup_channel(chp)
2177 struct channel_softc *chp;
2178 {
2179 u_int32_t udmatim_reg, datatim_reg;
2180 u_int8_t idedma_ctl;
2181 int mode, drive;
2182 struct ata_drive_datas *drvp;
2183 struct pciide_channel *cp = (struct pciide_channel*)chp;
2184 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2185 #ifndef PCIIDE_AMD756_ENABLEDMA
2186 int rev = PCI_REVISION(
2187 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2188 #endif
2189
2190 idedma_ctl = 0;
2191 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2192 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2193 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2194 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2195
2196 /* setup DMA if needed */
2197 pciide_channel_dma_setup(cp);
2198
2199 for (drive = 0; drive < 2; drive++) {
2200 drvp = &chp->ch_drive[drive];
2201 /* If no drive, skip */
2202 if ((drvp->drive_flags & DRIVE) == 0)
2203 continue;
2204 /* add timing values, setup DMA if needed */
2205 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2206 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2207 mode = drvp->PIO_mode;
2208 goto pio;
2209 }
2210 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2211 (drvp->drive_flags & DRIVE_UDMA)) {
2212 /* use Ultra/DMA */
2213 drvp->drive_flags &= ~DRIVE_DMA;
2214 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2215 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2216 AMD7X6_UDMA_TIME(chp->channel, drive,
2217 amd7x6_udma_tim[drvp->UDMA_mode]);
2218 /* can use PIO timings, MW DMA unused */
2219 mode = drvp->PIO_mode;
2220 } else {
2221 /* use Multiword DMA, but only if revision is OK */
2222 drvp->drive_flags &= ~DRIVE_UDMA;
2223 #ifndef PCIIDE_AMD756_ENABLEDMA
2224 			/*
2225 			 * The workaround doesn't seem to be necessary
2226 			 * with all drives, so it can be disabled with
2227 			 * PCIIDE_AMD756_ENABLEDMA; the underlying bug
2228 			 * causes a hard hang if it is triggered.
2229 			 */
2230 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2231 sc->sc_pp->ide_product ==
2232 PCI_PRODUCT_AMD_PBC756_IDE &&
2233 AMD756_CHIPREV_DISABLEDMA(rev)) {
2234 aprint_normal(
2235 "%s:%d:%d: multi-word DMA disabled due "
2236 "to chip revision\n",
2237 sc->sc_wdcdev.sc_dev.dv_xname,
2238 chp->channel, drive);
2239 mode = drvp->PIO_mode;
2240 drvp->drive_flags &= ~DRIVE_DMA;
2241 goto pio;
2242 }
2243 #endif
2244 /* mode = min(pio, dma+2) */
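			/* (e.g. PIO 4 / MW DMA 2 gives 4, PIO 4 / MW DMA 1 gives 3) */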
2245 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2246 mode = drvp->PIO_mode;
2247 else
2248 mode = drvp->DMA_mode + 2;
2249 }
2250 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2251
2252 pio: /* setup PIO mode */
2253 if (mode <= 2) {
2254 drvp->DMA_mode = 0;
2255 drvp->PIO_mode = 0;
2256 mode = 0;
2257 } else {
2258 drvp->PIO_mode = mode;
2259 drvp->DMA_mode = mode - 2;
2260 }
2261 datatim_reg |=
2262 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2263 amd7x6_pio_set[mode]) |
2264 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2265 amd7x6_pio_rec[mode]);
2266 }
2267 if (idedma_ctl != 0) {
2268 /* Add software bits in status register */
2269 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2270 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2271 idedma_ctl);
2272 }
2273 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2274 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2275 }
2276
2277 void
2278 apollo_chip_map(sc, pa)
2279 struct pciide_softc *sc;
2280 struct pci_attach_args *pa;
2281 {
2282 struct pciide_channel *cp;
2283 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2284 int channel;
2285 u_int32_t ideconf;
2286 bus_size_t cmdsize, ctlsize;
2287 pcitag_t pcib_tag;
2288 pcireg_t pcib_id, pcib_class;
2289
2290 if (pciide_chipen(sc, pa) == 0)
2291 return;
2292
2293 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2294 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2295 /* and read ID and rev of the ISA bridge */
2296 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2297 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2298 aprint_normal("%s: VIA Technologies ", sc->sc_wdcdev.sc_dev.dv_xname);
2299 switch (PCI_PRODUCT(pcib_id)) {
2300 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2301 aprint_normal("VT82C586 (Apollo VP) ");
2302 		if (PCI_REVISION(pcib_class) >= 0x02) {
2303 aprint_normal("ATA33 controller\n");
2304 sc->sc_wdcdev.UDMA_cap = 2;
2305 } else {
2306 aprint_normal("controller\n");
2307 sc->sc_wdcdev.UDMA_cap = 0;
2308 }
2309 break;
2310 case PCI_PRODUCT_VIATECH_VT82C596A:
2311 aprint_normal("VT82C596A (Apollo Pro) ");
2312 if (PCI_REVISION(pcib_class) >= 0x12) {
2313 aprint_normal("ATA66 controller\n");
2314 sc->sc_wdcdev.UDMA_cap = 4;
2315 } else {
2316 aprint_normal("ATA33 controller\n");
2317 sc->sc_wdcdev.UDMA_cap = 2;
2318 }
2319 break;
2320 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2321 aprint_normal("VT82C686A (Apollo KX133) ");
2322 if (PCI_REVISION(pcib_class) >= 0x40) {
2323 aprint_normal("ATA100 controller\n");
2324 sc->sc_wdcdev.UDMA_cap = 5;
2325 } else {
2326 aprint_normal("ATA66 controller\n");
2327 sc->sc_wdcdev.UDMA_cap = 4;
2328 }
2329 break;
2330 case PCI_PRODUCT_VIATECH_VT8231:
2331 aprint_normal("VT8231 ATA100 controller\n");
2332 sc->sc_wdcdev.UDMA_cap = 5;
2333 break;
2334 case PCI_PRODUCT_VIATECH_VT8233:
2335 aprint_normal("VT8233 ATA100 controller\n");
2336 sc->sc_wdcdev.UDMA_cap = 5;
2337 break;
2338 case PCI_PRODUCT_VIATECH_VT8233A:
2339 aprint_normal("VT8233A ATA133 controller\n");
2340 sc->sc_wdcdev.UDMA_cap = 6;
2341 break;
2342 case PCI_PRODUCT_VIATECH_VT8235:
2343 aprint_normal("VT8235 ATA133 controller\n");
2344 sc->sc_wdcdev.UDMA_cap = 6;
2345 break;
2346 case PCI_PRODUCT_VIATECH_VT8237_SATA:
2347 aprint_normal("VT8237 ATA133 controller\n");
2348 sc->sc_wdcdev.UDMA_cap = 6;
2349 break;
2350 default:
2351 aprint_normal("unknown ATA controller\n");
2352 sc->sc_wdcdev.UDMA_cap = 0;
2353 }
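	/*
	 * The UDMA_cap value chosen above also selects which of the
	 * apollo_udma33/66/100/133_tim tables apollo_setup_channel() uses.
	 */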
2354
2355 aprint_normal("%s: bus-master DMA support present",
2356 sc->sc_wdcdev.sc_dev.dv_xname);
2357 pciide_mapreg_dma(sc, pa);
2358 aprint_normal("\n");
2359 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2360 WDC_CAPABILITY_MODE;
2361 if (sc->sc_dma_ok) {
2362 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2363 sc->sc_wdcdev.irqack = pciide_irqack;
2364 if (sc->sc_wdcdev.UDMA_cap > 0)
2365 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2366 }
2367 sc->sc_wdcdev.PIO_cap = 4;
2368 sc->sc_wdcdev.DMA_cap = 2;
2369 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2370 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2371 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2372
2373 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2374 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2375 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2376 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2377 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2378 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2379 DEBUG_PROBE);
2380
2381 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2382 cp = &sc->pciide_channels[channel];
2383 if (pciide_chansetup(sc, channel, interface) == 0)
2384 continue;
2385
2386 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2387 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2388 aprint_normal("%s: %s channel ignored (disabled)\n",
2389 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2390 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
2391 continue;
2392 }
2393 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2394 pciide_pci_intr);
2395 }
2396 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2397 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2398 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2399 }
2400
2401 void
2402 apollo_setup_channel(chp)
2403 struct channel_softc *chp;
2404 {
2405 u_int32_t udmatim_reg, datatim_reg;
2406 u_int8_t idedma_ctl;
2407 int mode, drive;
2408 struct ata_drive_datas *drvp;
2409 struct pciide_channel *cp = (struct pciide_channel*)chp;
2410 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2411
2412 idedma_ctl = 0;
2413 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2414 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2415 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2416 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2417
2418 /* setup DMA if needed */
2419 pciide_channel_dma_setup(cp);
2420
2421 for (drive = 0; drive < 2; drive++) {
2422 drvp = &chp->ch_drive[drive];
2423 /* If no drive, skip */
2424 if ((drvp->drive_flags & DRIVE) == 0)
2425 continue;
2426 /* add timing values, setup DMA if needed */
2427 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2428 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2429 mode = drvp->PIO_mode;
2430 goto pio;
2431 }
2432 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2433 (drvp->drive_flags & DRIVE_UDMA)) {
2434 /* use Ultra/DMA */
2435 drvp->drive_flags &= ~DRIVE_DMA;
2436 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2437 APO_UDMA_EN_MTH(chp->channel, drive);
2438 if (sc->sc_wdcdev.UDMA_cap == 6) {
2439 /* 8233a */
2440 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2441 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2442 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2443 /* 686b */
2444 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2445 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2446 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2447 /* 596b or 686a */
2448 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2449 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2450 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2451 } else {
2452 /* 596a or 586b */
2453 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2454 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2455 }
2456 /* can use PIO timings, MW DMA unused */
2457 mode = drvp->PIO_mode;
2458 } else {
2459 /* use Multiword DMA */
2460 drvp->drive_flags &= ~DRIVE_UDMA;
2461 /* mode = min(pio, dma+2) */
2462 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2463 mode = drvp->PIO_mode;
2464 else
2465 mode = drvp->DMA_mode + 2;
2466 }
2467 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2468
2469 pio: /* setup PIO mode */
2470 if (mode <= 2) {
2471 drvp->DMA_mode = 0;
2472 drvp->PIO_mode = 0;
2473 mode = 0;
2474 } else {
2475 drvp->PIO_mode = mode;
2476 drvp->DMA_mode = mode - 2;
2477 }
2478 datatim_reg |=
2479 APO_DATATIM_PULSE(chp->channel, drive,
2480 apollo_pio_set[mode]) |
2481 APO_DATATIM_RECOV(chp->channel, drive,
2482 apollo_pio_rec[mode]);
2483 }
2484 if (idedma_ctl != 0) {
2485 /* Add software bits in status register */
2486 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2487 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2488 idedma_ctl);
2489 }
2490 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2491 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2492 }
2493
2494 void
2495 apollo_sata_chip_map(sc, pa)
2496 struct pciide_softc *sc;
2497 struct pci_attach_args *pa;
2498 {
2499 struct pciide_channel *cp;
2500 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2501 int channel;
2502 bus_size_t cmdsize, ctlsize;
2503
2504 if (pciide_chipen(sc, pa) == 0)
2505 return;
2506
2507 	if (interface == 0) {
2508 WDCDEBUG_PRINT(("apollo_sata_chip_map interface == 0\n"),
2509 DEBUG_PROBE);
2510 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2511 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2512 }
2513
2514 aprint_normal("%s: bus-master DMA support present",
2515 sc->sc_wdcdev.sc_dev.dv_xname);
2516 pciide_mapreg_dma(sc, pa);
2517 aprint_normal("\n");
2518
2519 if (sc->sc_dma_ok) {
2520 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2521 sc->sc_wdcdev.irqack = pciide_irqack;
2522 }
2523 sc->sc_wdcdev.PIO_cap = 4;
2524 sc->sc_wdcdev.DMA_cap = 2;
2525 sc->sc_wdcdev.UDMA_cap = 6;
2526
2527 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2528 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2529 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2530 WDC_CAPABILITY_MODE;
2531 sc->sc_wdcdev.set_modes = sata_setup_channel;
2532
2533 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2534 cp = &sc->pciide_channels[channel];
2535 if (pciide_chansetup(sc, channel, interface) == 0)
2536 continue;
2537 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2538 pciide_pci_intr);
2539 }
2540 }
2541
2542 void
2543 cmd_channel_map(pa, sc, channel)
2544 struct pci_attach_args *pa;
2545 struct pciide_softc *sc;
2546 int channel;
2547 {
2548 struct pciide_channel *cp = &sc->pciide_channels[channel];
2549 bus_size_t cmdsize, ctlsize;
2550 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2551 int interface, one_channel;
2552
2553 	/*
2554 	 * The 0648/0649 can be told to identify as a RAID controller.
2555 	 * In this case, we have to fake the interface byte.
2556 	 */
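	/*
	 * (We mark both channels as settable, and additionally as
	 *  native-PCI when the CMD_CONF_DSA1 bit is set.)
	 */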
2557 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2558 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2559 PCIIDE_INTERFACE_SETTABLE(1);
2560 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2561 CMD_CONF_DSA1)
2562 interface |= PCIIDE_INTERFACE_PCI(0) |
2563 PCIIDE_INTERFACE_PCI(1);
2564 } else {
2565 interface = PCI_INTERFACE(pa->pa_class);
2566 }
2567
2568 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2569 cp->name = PCIIDE_CHANNEL_NAME(channel);
2570 cp->wdc_channel.channel = channel;
2571 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2572
2573 	/*
2574 	 * Older CMD64x chips don't have independent channels.
2575 	 */
2576 switch (sc->sc_pp->ide_product) {
2577 case PCI_PRODUCT_CMDTECH_649:
2578 one_channel = 0;
2579 break;
2580 default:
2581 one_channel = 1;
2582 break;
2583 }
2584
2585 if (channel > 0 && one_channel) {
2586 cp->wdc_channel.ch_queue =
2587 sc->pciide_channels[0].wdc_channel.ch_queue;
2588 } else {
2589 cp->wdc_channel.ch_queue =
2590 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2591 }
2592 if (cp->wdc_channel.ch_queue == NULL) {
2593 aprint_error("%s %s channel: "
2594 "can't allocate memory for command queue",
2595 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2596 return;
2597 }
2598
2599 aprint_normal("%s: %s channel %s to %s mode\n",
2600 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2601 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2602 "configured" : "wired",
2603 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2604 "native-PCI" : "compatibility");
2605
2606 	/*
2607 	 * With a CMD PCI064x, if we get here the first channel is enabled:
2608 	 * there's no way to disable the first channel without disabling
2609 	 * the whole device.
2610 	 */
2611 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2612 aprint_normal("%s: %s channel ignored (disabled)\n",
2613 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2614 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
2615 return;
2616 }
2617
2618 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2619 }
2620
2621 int
2622 cmd_pci_intr(arg)
2623 void *arg;
2624 {
2625 struct pciide_softc *sc = arg;
2626 struct pciide_channel *cp;
2627 struct channel_softc *wdc_cp;
2628 int i, rv, crv;
2629 u_int32_t priirq, secirq;
2630
2631 rv = 0;
2632 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2633 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2634 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2635 cp = &sc->pciide_channels[i];
2636 wdc_cp = &cp->wdc_channel;
2637 /* If a compat channel skip. */
2638 if (cp->compat)
2639 continue;
2640 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2641 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2642 crv = wdcintr(wdc_cp);
2643 if (crv == 0)
2644 printf("%s:%d: bogus intr\n",
2645 sc->sc_wdcdev.sc_dev.dv_xname, i);
2646 else
2647 rv = 1;
2648 }
2649 }
2650 return rv;
2651 }
2652
2653 void
2654 cmd_chip_map(sc, pa)
2655 struct pciide_softc *sc;
2656 struct pci_attach_args *pa;
2657 {
2658 int channel;
2659
2660 	/*
2661 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2662 	 * and the base address registers can be disabled at the
2663 	 * hardware level. In this case, the device is wired
2664 	 * in compat mode and its first channel is always enabled,
2665 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2666 	 * In fact, it seems that the first channel of the CMD PCI0640
2667 	 * can't be disabled.
2668 	 */
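	/*
	 * (With PCIIDE_CMD064x_DISABLE defined we still honour the
	 *  chip-enable check below; otherwise we attach regardless.)
	 */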
2669
2670 #ifdef PCIIDE_CMD064x_DISABLE
2671 if (pciide_chipen(sc, pa) == 0)
2672 return;
2673 #endif
2674
2675 aprint_normal("%s: hardware does not support DMA\n",
2676 sc->sc_wdcdev.sc_dev.dv_xname);
2677 sc->sc_dma_ok = 0;
2678
2679 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2680 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2681 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2682
2683 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2684 cmd_channel_map(pa, sc, channel);
2685 }
2686 }
2687
2688 void
2689 cmd0643_9_chip_map(sc, pa)
2690 struct pciide_softc *sc;
2691 struct pci_attach_args *pa;
2692 {
2693 struct pciide_channel *cp;
2694 int channel;
2695 pcireg_t rev = PCI_REVISION(pa->pa_class);
2696
2697 	/*
2698 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2699 	 * and the base address registers can be disabled at the
2700 	 * hardware level. In this case, the device is wired
2701 	 * in compat mode and its first channel is always enabled,
2702 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2703 	 * In fact, it seems that the first channel of the CMD PCI0640
2704 	 * can't be disabled.
2705 	 */
2706
2707 #ifdef PCIIDE_CMD064x_DISABLE
2708 if (pciide_chipen(sc, pa) == 0)
2709 return;
2710 #endif
2711
2712 aprint_normal("%s: bus-master DMA support present",
2713 sc->sc_wdcdev.sc_dev.dv_xname);
2714 pciide_mapreg_dma(sc, pa);
2715 aprint_normal("\n");
2716 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2717 WDC_CAPABILITY_MODE;
2718 if (sc->sc_dma_ok) {
2719 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2720 switch (sc->sc_pp->ide_product) {
2721 case PCI_PRODUCT_CMDTECH_649:
2722 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2723 sc->sc_wdcdev.UDMA_cap = 5;
2724 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2725 break;
2726 case PCI_PRODUCT_CMDTECH_648:
2727 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2728 sc->sc_wdcdev.UDMA_cap = 4;
2729 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2730 break;
2731 case PCI_PRODUCT_CMDTECH_646:
2732 if (rev >= CMD0646U2_REV) {
2733 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2734 sc->sc_wdcdev.UDMA_cap = 2;
2735 } else if (rev >= CMD0646U_REV) {
2736 				/*
2737 				 * Linux's driver claims that the 646U is
2738 				 * broken with UDMA. Only enable it if we
2739 				 * know what we're doing.
2740 				 */
2741 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2742 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2743 sc->sc_wdcdev.UDMA_cap = 2;
2744 #endif
2745 /* explicitly disable UDMA */
2746 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2747 CMD_UDMATIM(0), 0);
2748 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2749 CMD_UDMATIM(1), 0);
2750 }
2751 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2752 break;
2753 default:
2754 sc->sc_wdcdev.irqack = pciide_irqack;
2755 }
2756 }
2757
2758 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2759 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2760 sc->sc_wdcdev.PIO_cap = 4;
2761 sc->sc_wdcdev.DMA_cap = 2;
2762 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2763
2764 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2765 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2766 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2767 DEBUG_PROBE);
2768
2769 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2770 cp = &sc->pciide_channels[channel];
2771 cmd_channel_map(pa, sc, channel);
2772 }
2773 /*
2774 * note - this also makes sure we clear the irq disable and reset
2775 * bits
2776 */
2777 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2778 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2779 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2780 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2781 DEBUG_PROBE);
2782 }
2783
2784 void
2785 cmd0643_9_setup_channel(chp)
2786 struct channel_softc *chp;
2787 {
2788 struct ata_drive_datas *drvp;
2789 u_int8_t tim;
2790 u_int32_t idedma_ctl, udma_reg;
2791 int drive;
2792 struct pciide_channel *cp = (struct pciide_channel*)chp;
2793 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2794
2795 idedma_ctl = 0;
2796 /* setup DMA if needed */
2797 pciide_channel_dma_setup(cp);
2798
2799 for (drive = 0; drive < 2; drive++) {
2800 drvp = &chp->ch_drive[drive];
2801 /* If no drive, skip */
2802 if ((drvp->drive_flags & DRIVE) == 0)
2803 continue;
2804 /* add timing values, setup DMA if needed */
2805 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2806 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2807 if (drvp->drive_flags & DRIVE_UDMA) {
2808 /* UltraDMA on a 646U2, 0648 or 0649 */
2809 drvp->drive_flags &= ~DRIVE_DMA;
2810 udma_reg = pciide_pci_read(sc->sc_pc,
2811 sc->sc_tag, CMD_UDMATIM(chp->channel));
2812 if (drvp->UDMA_mode > 2 &&
2813 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2814 CMD_BICSR) &
2815 CMD_BICSR_80(chp->channel)) == 0)
2816 drvp->UDMA_mode = 2;
2817 if (drvp->UDMA_mode > 2)
2818 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2819 else if (sc->sc_wdcdev.UDMA_cap > 2)
2820 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2821 udma_reg |= CMD_UDMATIM_UDMA(drive);
2822 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2823 CMD_UDMATIM_TIM_OFF(drive));
2824 udma_reg |=
2825 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2826 CMD_UDMATIM_TIM_OFF(drive));
2827 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2828 CMD_UDMATIM(chp->channel), udma_reg);
2829 } else {
2830 				/*
2831 				 * Use multiword DMA.
2832 				 * Timings will be used for both PIO and DMA,
2833 				 * so adjust the DMA mode if needed.
2834 				 * If we have a 0646U2/8/9, turn off UDMA.
2835 				 */
2836 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2837 udma_reg = pciide_pci_read(sc->sc_pc,
2838 sc->sc_tag,
2839 CMD_UDMATIM(chp->channel));
2840 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2841 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2842 CMD_UDMATIM(chp->channel),
2843 udma_reg);
2844 }
2845 if (drvp->PIO_mode >= 3 &&
2846 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2847 drvp->DMA_mode = drvp->PIO_mode - 2;
2848 }
2849 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2850 }
2851 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2852 }
2853 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2854 CMD_DATA_TIM(chp->channel, drive), tim);
2855 }
2856 if (idedma_ctl != 0) {
2857 /* Add software bits in status register */
2858 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2859 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2860 idedma_ctl);
2861 }
2862 }
2863
2864 void
2865 cmd646_9_irqack(chp)
2866 struct channel_softc *chp;
2867 {
2868 u_int32_t priirq, secirq;
2869 struct pciide_channel *cp = (struct pciide_channel*)chp;
2870 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2871
2872 if (chp->channel == 0) {
2873 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2874 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2875 } else {
2876 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2877 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2878 }
2879 pciide_irqack(chp);
2880 }
2881
2882 void
2883 cmd680_chip_map(sc, pa)
2884 struct pciide_softc *sc;
2885 struct pci_attach_args *pa;
2886 {
2887 struct pciide_channel *cp;
2888 int channel;
2889
2890 if (pciide_chipen(sc, pa) == 0)
2891 return;
2892
2893 aprint_normal("%s: bus-master DMA support present",
2894 sc->sc_wdcdev.sc_dev.dv_xname);
2895 pciide_mapreg_dma(sc, pa);
2896 aprint_normal("\n");
2897 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2898 WDC_CAPABILITY_MODE;
2899 if (sc->sc_dma_ok) {
2900 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2901 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2902 sc->sc_wdcdev.UDMA_cap = 6;
2903 sc->sc_wdcdev.irqack = pciide_irqack;
2904 }
2905
2906 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2907 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2908 sc->sc_wdcdev.PIO_cap = 4;
2909 sc->sc_wdcdev.DMA_cap = 2;
2910 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2911
2912 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2913 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2914 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2915 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2916 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2917 cp = &sc->pciide_channels[channel];
2918 cmd680_channel_map(pa, sc, channel);
2919 }
2920 }
2921
2922 void
2923 cmd680_channel_map(pa, sc, channel)
2924 struct pci_attach_args *pa;
2925 struct pciide_softc *sc;
2926 int channel;
2927 {
2928 struct pciide_channel *cp = &sc->pciide_channels[channel];
2929 bus_size_t cmdsize, ctlsize;
2930 int interface, i, reg;
2931 static const u_int8_t init_val[] =
2932 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2933 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2934
2935 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2936 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2937 PCIIDE_INTERFACE_SETTABLE(1);
2938 interface |= PCIIDE_INTERFACE_PCI(0) |
2939 PCIIDE_INTERFACE_PCI(1);
2940 } else {
2941 interface = PCI_INTERFACE(pa->pa_class);
2942 }
2943
2944 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2945 cp->name = PCIIDE_CHANNEL_NAME(channel);
2946 cp->wdc_channel.channel = channel;
2947 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2948
2949 cp->wdc_channel.ch_queue =
2950 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2951 if (cp->wdc_channel.ch_queue == NULL) {
2952 aprint_error("%s %s channel: "
2953 "can't allocate memory for command queue",
2954 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2955 return;
2956 }
2957
2958 /* XXX */
2959 reg = 0xa2 + channel * 16;
2960 for (i = 0; i < sizeof(init_val); i++)
2961 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2962
2963 aprint_normal("%s: %s channel %s to %s mode\n",
2964 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2965 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2966 "configured" : "wired",
2967 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2968 "native-PCI" : "compatibility");
2969
2970 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2971 }
2972
2973 void
2974 cmd680_setup_channel(chp)
2975 struct channel_softc *chp;
2976 {
2977 struct ata_drive_datas *drvp;
2978 u_int8_t mode, off, scsc;
2979 u_int16_t val;
2980 u_int32_t idedma_ctl;
2981 int drive;
2982 struct pciide_channel *cp = (struct pciide_channel*)chp;
2983 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2984 pci_chipset_tag_t pc = sc->sc_pc;
2985 pcitag_t pa = sc->sc_tag;
2986 static const u_int8_t udma2_tbl[] =
2987 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2988 static const u_int8_t udma_tbl[] =
2989 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2990 static const u_int16_t dma_tbl[] =
2991 { 0x2208, 0x10c2, 0x10c1 };
2992 static const u_int16_t pio_tbl[] =
2993 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
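	/*
	 * (dma_tbl and pio_tbl hold 16-bit timing words written byte-wise
	 *  to the per-drive registers below; udma2_tbl is used when the
	 *  clock-select bits (0x30) in register 0x8a are set.)
	 */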
2994
2995 idedma_ctl = 0;
2996 pciide_channel_dma_setup(cp);
2997 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
2998
2999 for (drive = 0; drive < 2; drive++) {
3000 drvp = &chp->ch_drive[drive];
3001 /* If no drive, skip */
3002 if ((drvp->drive_flags & DRIVE) == 0)
3003 continue;
3004 mode &= ~(0x03 << (drive * 4));
3005 if (drvp->drive_flags & DRIVE_UDMA) {
3006 drvp->drive_flags &= ~DRIVE_DMA;
3007 off = 0xa0 + chp->channel * 16;
3008 if (drvp->UDMA_mode > 2 &&
3009 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
3010 drvp->UDMA_mode = 2;
3011 scsc = pciide_pci_read(pc, pa, 0x8a);
3012 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
3013 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
3014 scsc = pciide_pci_read(pc, pa, 0x8a);
3015 if ((scsc & 0x30) == 0)
3016 drvp->UDMA_mode = 5;
3017 }
3018 mode |= 0x03 << (drive * 4);
3019 off = 0xac + chp->channel * 16 + drive * 2;
3020 val = pciide_pci_read(pc, pa, off) & ~0x3f;
3021 if (scsc & 0x30)
3022 val |= udma2_tbl[drvp->UDMA_mode];
3023 else
3024 val |= udma_tbl[drvp->UDMA_mode];
3025 pciide_pci_write(pc, pa, off, val);
3026 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3027 } else if (drvp->drive_flags & DRIVE_DMA) {
3028 mode |= 0x02 << (drive * 4);
3029 off = 0xa8 + chp->channel * 16 + drive * 2;
3030 val = dma_tbl[drvp->DMA_mode];
3031 pciide_pci_write(pc, pa, off, val & 0xff);
3032 			pciide_pci_write(pc, pa, off + 1, val >> 8);
3033 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3034 } else {
3035 mode |= 0x01 << (drive * 4);
3036 off = 0xa4 + chp->channel * 16 + drive * 2;
3037 val = pio_tbl[drvp->PIO_mode];
3038 pciide_pci_write(pc, pa, off, val & 0xff);
3039 			pciide_pci_write(pc, pa, off + 1, val >> 8);
3040 }
3041 }
3042
3043 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3044 if (idedma_ctl != 0) {
3045 /* Add software bits in status register */
3046 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3047 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3048 idedma_ctl);
3049 }
3050 }
3051
3052 void
3053 cmd3112_chip_map(sc, pa)
3054 struct pciide_softc *sc;
3055 struct pci_attach_args *pa;
3056 {
3057 struct pciide_channel *cp;
3058 bus_size_t cmdsize, ctlsize;
3059 pcireg_t interface;
3060 int channel;
3061
3062 if (pciide_chipen(sc, pa) == 0)
3063 return;
3064
3065 aprint_normal("%s: bus-master DMA support present",
3066 sc->sc_wdcdev.sc_dev.dv_xname);
3067 pciide_mapreg_dma(sc, pa);
3068 aprint_normal("\n");
3069
3070 /*
3071 	 * Revisions <= 0x01 of the 3112 have a bug that can cause data
3072 * corruption if DMA transfers cross an 8K boundary. This is
3073 * apparently hard to tickle, but we'll go ahead and play it
3074 * safe.
3075 */
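	/*
	 * (sc_dma_maxsegsz and sc_dma_boundary are presumably fed to
	 *  bus_dmamap_create() as the maximum segment size and boundary,
	 *  so no DMA segment will cross an 8K boundary.)
	 */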
3076 if (PCI_REVISION(pa->pa_class) <= 0x01) {
3077 sc->sc_dma_maxsegsz = 8192;
3078 sc->sc_dma_boundary = 8192;
3079 }
3080
3081 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3082 WDC_CAPABILITY_MODE;
3083 sc->sc_wdcdev.PIO_cap = 4;
3084 if (sc->sc_dma_ok) {
3085 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3086 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3087 sc->sc_wdcdev.irqack = pciide_irqack;
3088 sc->sc_wdcdev.DMA_cap = 2;
3089 sc->sc_wdcdev.UDMA_cap = 6;
3090 }
3091 sc->sc_wdcdev.set_modes = cmd3112_setup_channel;
3092
3093 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3094 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3095
3096 	/*
3097 	 * The 3112 can be told to identify as a RAID controller.
3098 	 * In this case, we have to fake the interface byte.
3099 	 */
3100 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3101 interface = PCI_INTERFACE(pa->pa_class);
3102 } else {
3103 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3104 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3105 }
3106
3107 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3108 cp = &sc->pciide_channels[channel];
3109 if (pciide_chansetup(sc, channel, interface) == 0)
3110 continue;
3111 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3112 pciide_pci_intr);
3113 }
3114 }
3115
3116 void
3117 cmd3112_setup_channel(chp)
3118 struct channel_softc *chp;
3119 {
3120 struct ata_drive_datas *drvp;
3121 int drive;
3122 u_int32_t idedma_ctl, dtm;
3123 struct pciide_channel *cp = (struct pciide_channel*)chp;
3124 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
3125
3126 /* setup DMA if needed */
3127 pciide_channel_dma_setup(cp);
3128
3129 idedma_ctl = 0;
3130 dtm = 0;
3131
3132 for (drive = 0; drive < 2; drive++) {
3133 drvp = &chp->ch_drive[drive];
3134 /* If no drive, skip */
3135 if ((drvp->drive_flags & DRIVE) == 0)
3136 continue;
3137 if (drvp->drive_flags & DRIVE_UDMA) {
3138 /* use Ultra/DMA */
3139 drvp->drive_flags &= ~DRIVE_DMA;
3140 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3141 dtm |= DTM_IDEx_DMA;
3142 } else if (drvp->drive_flags & DRIVE_DMA) {
3143 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3144 dtm |= DTM_IDEx_DMA;
3145 } else {
3146 dtm |= DTM_IDEx_PIO;
3147 }
3148 }
3149
3150 /*
3151 	 * Nothing to do to set up modes; it is meaningless in S-ATA
3152 * (but many S-ATA drives still want to get the SET_FEATURE
3153 * command).
3154 */
3155 if (idedma_ctl != 0) {
3156 /* Add software bits in status register */
3157 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3158 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3159 idedma_ctl);
3160 }
3161 pci_conf_write(sc->sc_pc, sc->sc_tag,
3162 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
3163 }
3164
3165 void
3166 cy693_chip_map(sc, pa)
3167 struct pciide_softc *sc;
3168 struct pci_attach_args *pa;
3169 {
3170 struct pciide_channel *cp;
3171 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3172 bus_size_t cmdsize, ctlsize;
3173
3174 if (pciide_chipen(sc, pa) == 0)
3175 return;
3176
3177 	/*
3178 	 * This chip has 2 PCI IDE functions, one for the primary and one
3179 	 * for the secondary channel, so we need to call
3180 	 * pciide_mapregs_compat() with the real channel.
3181 	 */
3182 if (pa->pa_function == 1) {
3183 sc->sc_cy_compatchan = 0;
3184 } else if (pa->pa_function == 2) {
3185 sc->sc_cy_compatchan = 1;
3186 } else {
3187 aprint_error("%s: unexpected PCI function %d\n",
3188 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3189 return;
3190 }
3191 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
3192 aprint_normal("%s: bus-master DMA support present",
3193 sc->sc_wdcdev.sc_dev.dv_xname);
3194 pciide_mapreg_dma(sc, pa);
3195 } else {
3196 aprint_normal("%s: hardware does not support DMA",
3197 sc->sc_wdcdev.sc_dev.dv_xname);
3198 sc->sc_dma_ok = 0;
3199 }
3200 aprint_normal("\n");
3201
3202 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3203 if (sc->sc_cy_handle == NULL) {
3204 aprint_error("%s: unable to map hyperCache control registers\n",
3205 sc->sc_wdcdev.sc_dev.dv_xname);
3206 sc->sc_dma_ok = 0;
3207 }
3208
3209 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3210 WDC_CAPABILITY_MODE;
3211 if (sc->sc_dma_ok) {
3212 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3213 sc->sc_wdcdev.irqack = pciide_irqack;
3214 }
3215 sc->sc_wdcdev.PIO_cap = 4;
3216 sc->sc_wdcdev.DMA_cap = 2;
3217 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3218
3219 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3220 sc->sc_wdcdev.nchannels = 1;
3221
3222 /* Only one channel for this chip; if we are here it's enabled */
3223 cp = &sc->pciide_channels[0];
3224 sc->wdc_chanarray[0] = &cp->wdc_channel;
3225 cp->name = PCIIDE_CHANNEL_NAME(0);
3226 cp->wdc_channel.channel = 0;
3227 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3228 cp->wdc_channel.ch_queue =
3229 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3230 if (cp->wdc_channel.ch_queue == NULL) {
3231 aprint_error("%s primary channel: "
3232 "can't allocate memory for command queue",
3233 sc->sc_wdcdev.sc_dev.dv_xname);
3234 return;
3235 }
3236 aprint_normal("%s: primary channel %s to ",
3237 sc->sc_wdcdev.sc_dev.dv_xname,
3238 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3239 "configured" : "wired");
3240 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3241 aprint_normal("native-PCI mode\n");
3242 pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3243 pciide_pci_intr);
3244 } else {
3245 aprint_normal("compatibility mode\n");
3246 pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, &cmdsize,
3247 &ctlsize);
3248 }
3249 wdcattach(&cp->wdc_channel);
3250 }
3251
3252 void
3253 cy693_setup_channel(chp)
3254 struct channel_softc *chp;
3255 {
3256 struct ata_drive_datas *drvp;
3257 int drive;
3258 u_int32_t cy_cmd_ctrl;
3259 u_int32_t idedma_ctl;
3260 struct pciide_channel *cp = (struct pciide_channel*)chp;
3261 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3262 int dma_mode = -1;
3263
3264 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3265 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3266
3267 cy_cmd_ctrl = idedma_ctl = 0;
3268
3269 /* setup DMA if needed */
3270 pciide_channel_dma_setup(cp);
3271
3272 for (drive = 0; drive < 2; drive++) {
3273 drvp = &chp->ch_drive[drive];
3274 /* If no drive, skip */
3275 if ((drvp->drive_flags & DRIVE) == 0)
3276 continue;
3277 /* add timing values, setup DMA if needed */
3278 if (drvp->drive_flags & DRIVE_DMA) {
3279 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3280 /* use Multiword DMA */
3281 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3282 dma_mode = drvp->DMA_mode;
3283 }
3284 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3285 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3286 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3287 CY_CMD_CTRL_IOW_REC_OFF(drive));
3288 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3289 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3290 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3291 CY_CMD_CTRL_IOR_REC_OFF(drive));
3292 }
3293 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
3294 chp->ch_drive[0].DMA_mode = dma_mode;
3295 chp->ch_drive[1].DMA_mode = dma_mode;
3296
3297 if (dma_mode == -1)
3298 dma_mode = 0;
3299
3300 if (sc->sc_cy_handle != NULL) {
3301 /* Note: `multiple' is implied. */
3302 cy82c693_write(sc->sc_cy_handle,
3303 (sc->sc_cy_compatchan == 0) ?
3304 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3305 }
3306
3307 if (idedma_ctl != 0) {
3308 /* Add software bits in status register */
3309 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3310 IDEDMA_CTL, idedma_ctl);
3311 }
3312 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3313 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3314 }
3315
3316 static struct sis_hostbr_type {
3317 u_int16_t id;
3318 u_int8_t rev;
3319 u_int8_t udma_mode;
3320 char *name;
3321 u_int8_t type;
3322 #define SIS_TYPE_NOUDMA 0
3323 #define SIS_TYPE_66 1
3324 #define SIS_TYPE_100OLD 2
3325 #define SIS_TYPE_100NEW 3
3326 #define SIS_TYPE_133OLD 4
3327 #define SIS_TYPE_133NEW 5
3328 #define SIS_TYPE_SOUTH 6
3329 } sis_hostbr_type[] = {
3330 	/* Most of the info here is from sos (at) freebsd.org */
3331 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
3332 #if 0
3333 /*
3334 	 * Controllers associated with a rev 0x02 530 Host-to-PCI bridge
3335 	 * have problems with UDMA (info provided by Christos).
3336 */
3337 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
3338 #endif
3339 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
3340 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
3341 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
3342 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
3343 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
3344 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
3345 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
3346 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
3347 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
3348 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
3349 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
3350 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
3351 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
3352 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
3353 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
3354 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
3355 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
3356 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
3357 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
3358 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
3359 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
3360 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
3361 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
3362 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
3363 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
3364 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
3365 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
3366 /*
3367 	 * From sos (at) freebsd.org: the 0x961 ID will never be found in the real world.
3368 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
3369 */
3370 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
3371 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
3372 };
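/*
 * Note: sis_hostbr_match() below keeps the last matching entry, so an entry
 * with a higher minimum revision (e.g. the 630S) must come after the base
 * entry for the same product ID.
 */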
3373
3374 static struct sis_hostbr_type *sis_hostbr_type_match;
3375
3376 static int
3377 sis_hostbr_match(pa)
3378 struct pci_attach_args *pa;
3379 {
3380 int i;
3381 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3382 return 0;
3383 sis_hostbr_type_match = NULL;
3384 for (i = 0;
3385 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3386 i++) {
3387 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3388 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3389 sis_hostbr_type_match = &sis_hostbr_type[i];
3390 }
3391 return (sis_hostbr_type_match != NULL);
3392 }
3393
3394 static int
sis_south_match(pa)
3395 struct pci_attach_args *pa;
3396 {
3397 	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3398 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3399 PCI_REVISION(pa->pa_class) >= 0x10);
3400 }
3401
3402 void
3403 sis_chip_map(sc, pa)
3404 struct pciide_softc *sc;
3405 struct pci_attach_args *pa;
3406 {
3407 struct pciide_channel *cp;
3408 int channel;
3409 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3410 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3411 pcireg_t rev = PCI_REVISION(pa->pa_class);
3412 bus_size_t cmdsize, ctlsize;
3413
3414 if (pciide_chipen(sc, pa) == 0)
3415 return;
3416
3417 aprint_normal("%s: Silicon Integrated System ",
3418 sc->sc_wdcdev.sc_dev.dv_xname);
3419 pci_find_device(NULL, sis_hostbr_match);
3420 if (sis_hostbr_type_match) {
3421 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
3422 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3423 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3424 SIS_REG_57) & 0x7f);
3425 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3426 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3427 aprint_normal("96X UDMA%d",
3428 sis_hostbr_type_match->udma_mode);
3429 sc->sis_type = SIS_TYPE_133NEW;
3430 sc->sc_wdcdev.UDMA_cap =
3431 sis_hostbr_type_match->udma_mode;
3432 } else {
3433 if (pci_find_device(NULL, sis_south_match)) {
3434 sc->sis_type = SIS_TYPE_133OLD;
3435 sc->sc_wdcdev.UDMA_cap =
3436 sis_hostbr_type_match->udma_mode;
3437 } else {
3438 sc->sis_type = SIS_TYPE_100NEW;
3439 sc->sc_wdcdev.UDMA_cap =
3440 sis_hostbr_type_match->udma_mode;
3441 }
3442 }
3443 } else {
3444 sc->sis_type = sis_hostbr_type_match->type;
3445 sc->sc_wdcdev.UDMA_cap =
3446 sis_hostbr_type_match->udma_mode;
3447 }
3448 		aprint_normal("%s", sis_hostbr_type_match->name);
3449 } else {
3450 aprint_normal("5597/5598");
3451 if (rev >= 0xd0) {
3452 sc->sc_wdcdev.UDMA_cap = 2;
3453 sc->sis_type = SIS_TYPE_66;
3454 } else {
3455 sc->sc_wdcdev.UDMA_cap = 0;
3456 sc->sis_type = SIS_TYPE_NOUDMA;
3457 }
3458 }
3459 aprint_normal(" IDE controller (rev. 0x%02x)\n",
3460 PCI_REVISION(pa->pa_class));
3461 aprint_normal("%s: bus-master DMA support present",
3462 sc->sc_wdcdev.sc_dev.dv_xname);
3463 pciide_mapreg_dma(sc, pa);
3464 aprint_normal("\n");
3465
3466 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3467 WDC_CAPABILITY_MODE;
3468 if (sc->sc_dma_ok) {
3469 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3470 sc->sc_wdcdev.irqack = pciide_irqack;
3471 if (sc->sis_type >= SIS_TYPE_66)
3472 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3473 }
3474
3475 sc->sc_wdcdev.PIO_cap = 4;
3476 sc->sc_wdcdev.DMA_cap = 2;
3477
3478 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3479 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3480 switch(sc->sis_type) {
3481 case SIS_TYPE_NOUDMA:
3482 case SIS_TYPE_66:
3483 case SIS_TYPE_100OLD:
3484 sc->sc_wdcdev.set_modes = sis_setup_channel;
3485 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3486 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3487 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3488 break;
3489 case SIS_TYPE_100NEW:
3490 case SIS_TYPE_133OLD:
3491 sc->sc_wdcdev.set_modes = sis_setup_channel;
3492 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3493 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3494 break;
3495 case SIS_TYPE_133NEW:
3496 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3497 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3498 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3499 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3500 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3501 break;
3502 }
3503
3504
3505 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3506 cp = &sc->pciide_channels[channel];
3507 if (pciide_chansetup(sc, channel, interface) == 0)
3508 continue;
3509 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3510 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3511 aprint_normal("%s: %s channel ignored (disabled)\n",
3512 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3513 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
3514 continue;
3515 }
3516 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3517 pciide_pci_intr);
3518 }
3519 }
3520
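/*
 * Per-channel mode setup used for the SIS_TYPE_133NEW (96x-family)
 * controllers.  The per-drive timing register is located via SIS_TIM133()
 * from the current value of SIS_REG_57, and UDMA modes above 2 are
 * clamped when the SIS96x_REG_CBL_33 cable-detect bit is set for the
 * channel.
 */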
3521 void
3522 sis96x_setup_channel(chp)
3523 struct channel_softc *chp;
3524 {
3525 struct ata_drive_datas *drvp;
3526 int drive;
3527 u_int32_t sis_tim;
3528 u_int32_t idedma_ctl;
3529 int regtim;
3530 struct pciide_channel *cp = (struct pciide_channel*)chp;
3531 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3532
3533 sis_tim = 0;
3534 idedma_ctl = 0;
3535 /* setup DMA if needed */
3536 pciide_channel_dma_setup(cp);
3537
3538 for (drive = 0; drive < 2; drive++) {
3539 regtim = SIS_TIM133(
3540 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3541 chp->channel, drive);
3542 drvp = &chp->ch_drive[drive];
3543 /* If no drive, skip */
3544 if ((drvp->drive_flags & DRIVE) == 0)
3545 continue;
3546 /* add timing values, setup DMA if needed */
3547 if (drvp->drive_flags & DRIVE_UDMA) {
3548 /* use Ultra/DMA */
3549 drvp->drive_flags &= ~DRIVE_DMA;
3550 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3551 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3552 if (drvp->UDMA_mode > 2)
3553 drvp->UDMA_mode = 2;
3554 }
3555 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3556 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3557 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3558 } else if (drvp->drive_flags & DRIVE_DMA) {
3559 /*
3560 * use Multiword DMA
3561 * Timings will be used for both PIO and DMA,
3562 * so adjust DMA mode if needed
3563 */
3564 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3565 drvp->PIO_mode = drvp->DMA_mode + 2;
3566 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3567 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3568 drvp->PIO_mode - 2 : 0;
3569 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3570 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3571 } else {
3572 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3573 }
3574 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3575 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3576 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3577 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3578 }
3579 if (idedma_ctl != 0) {
3580 /* Add software bits in status register */
3581 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3582 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3583 idedma_ctl);
3584 }
3585 }
3586
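/*
 * Per-channel mode setup for the older SiS flavours.  Timings for both
 * drives are accumulated in sis_tim and written back in a single
 * pci_conf_write() to the per-channel SIS_TIM() register.
 */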
3587 void
3588 sis_setup_channel(chp)
3589 struct channel_softc *chp;
3590 {
3591 struct ata_drive_datas *drvp;
3592 int drive;
3593 u_int32_t sis_tim;
3594 u_int32_t idedma_ctl;
3595 struct pciide_channel *cp = (struct pciide_channel*)chp;
3596 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3597
3598 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3599 "channel %d 0x%x\n", chp->channel,
3600 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3601 DEBUG_PROBE);
3602 sis_tim = 0;
3603 idedma_ctl = 0;
3604 /* setup DMA if needed */
3605 pciide_channel_dma_setup(cp);
3606
3607 for (drive = 0; drive < 2; drive++) {
3608 drvp = &chp->ch_drive[drive];
3609 /* If no drive, skip */
3610 if ((drvp->drive_flags & DRIVE) == 0)
3611 continue;
3612 /* add timing values, setup DMA if needed */
3613 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3614 (drvp->drive_flags & DRIVE_UDMA) == 0)
3615 goto pio;
3616
3617 if (drvp->drive_flags & DRIVE_UDMA) {
3618 /* use Ultra/DMA */
3619 drvp->drive_flags &= ~DRIVE_DMA;
3620 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3621 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3622 if (drvp->UDMA_mode > 2)
3623 drvp->UDMA_mode = 2;
3624 }
3625 switch (sc->sis_type) {
3626 case SIS_TYPE_66:
3627 case SIS_TYPE_100OLD:
3628 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3629 SIS_TIM66_UDMA_TIME_OFF(drive);
3630 break;
3631 case SIS_TYPE_100NEW:
3632 sis_tim |=
3633 sis_udma100new_tim[drvp->UDMA_mode] <<
3634 				    SIS_TIM100_UDMA_TIME_OFF(drive);
				break;
3635 case SIS_TYPE_133OLD:
3636 sis_tim |=
3637 sis_udma133old_tim[drvp->UDMA_mode] <<
3638 SIS_TIM100_UDMA_TIME_OFF(drive);
3639 break;
3640 default:
3641 aprint_error("unknown SiS IDE type %d\n",
3642 sc->sis_type);
3643 }
3644 } else {
3645 /*
3646 * use Multiword DMA
3647 * Timings will be used for both PIO and DMA,
3648 * so adjust DMA mode if needed
3649 */
3650 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3651 drvp->PIO_mode = drvp->DMA_mode + 2;
3652 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3653 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3654 drvp->PIO_mode - 2 : 0;
3655 if (drvp->DMA_mode == 0)
3656 drvp->PIO_mode = 0;
3657 }
3658 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3659 pio: switch (sc->sis_type) {
3660 case SIS_TYPE_NOUDMA:
3661 case SIS_TYPE_66:
3662 case SIS_TYPE_100OLD:
3663 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3664 SIS_TIM66_ACT_OFF(drive);
3665 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3666 SIS_TIM66_REC_OFF(drive);
3667 break;
3668 case SIS_TYPE_100NEW:
3669 case SIS_TYPE_133OLD:
3670 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3671 SIS_TIM100_ACT_OFF(drive);
3672 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3673 SIS_TIM100_REC_OFF(drive);
3674 break;
3675 default:
3676 aprint_error("unknown SiS IDE type %d\n",
3677 sc->sis_type);
3678 }
3679 }
3680 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3681 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3682 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3683 if (idedma_ctl != 0) {
3684 /* Add software bits in status register */
3685 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3686 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3687 idedma_ctl);
3688 }
3689 }
3690
3691 void
3692 acer_chip_map(sc, pa)
3693 struct pciide_softc *sc;
3694 struct pci_attach_args *pa;
3695 {
3696 struct pciide_channel *cp;
3697 int channel;
3698 pcireg_t cr, interface;
3699 bus_size_t cmdsize, ctlsize;
3700 pcireg_t rev = PCI_REVISION(pa->pa_class);
3701
3702 if (pciide_chipen(sc, pa) == 0)
3703 return;
3704
3705 aprint_normal("%s: bus-master DMA support present",
3706 sc->sc_wdcdev.sc_dev.dv_xname);
3707 pciide_mapreg_dma(sc, pa);
3708 aprint_normal("\n");
3709 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3710 WDC_CAPABILITY_MODE;
3711 if (sc->sc_dma_ok) {
3712 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3713 if (rev >= 0x20) {
3714 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3715 if (rev >= 0xC4)
3716 sc->sc_wdcdev.UDMA_cap = 5;
3717 else if (rev >= 0xC2)
3718 sc->sc_wdcdev.UDMA_cap = 4;
3719 else
3720 sc->sc_wdcdev.UDMA_cap = 2;
3721 }
3722 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3723 sc->sc_wdcdev.irqack = pciide_irqack;
3724 }
3725
3726 sc->sc_wdcdev.PIO_cap = 4;
3727 sc->sc_wdcdev.DMA_cap = 2;
3728 sc->sc_wdcdev.set_modes = acer_setup_channel;
3729 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3730 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3731
3732 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3733 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3734 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3735
3736 /* Enable "microsoft register bits" R/W. */
3737 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3738 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3739 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3740 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3741 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3742 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3743 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3744 ~ACER_CHANSTATUSREGS_RO);
3745 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3746 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3747 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3748 /* Don't use cr, re-read the real register content instead */
3749 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3750 PCI_CLASS_REG));
3751
3752 /* From linux: enable "Cable Detection" */
3753 if (rev >= 0xC2) {
3754 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3755 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3756 | ACER_0x4B_CDETECT);
3757 }
3758
3759 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3760 cp = &sc->pciide_channels[channel];
3761 if (pciide_chansetup(sc, channel, interface) == 0)
3762 continue;
3763 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3764 aprint_normal("%s: %s channel ignored (disabled)\n",
3765 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3766 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
3767 continue;
3768 }
3769 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh. */
3770 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3771 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3772 }
3773 }
3774
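/*
 * Per-channel mode setup for the Acer Labs (ALi) controllers.  UDMA
 * enable/timing bits live in the ACER_FTH_UDMA dword and PIO timings in
 * the per-drive ACER_IDETIM registers; UDMA modes above 2 are clamped
 * for the channel when the ACER_0x4A_80PIN() cable-detect bit is set.
 */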
3775 void
3776 acer_setup_channel(chp)
3777 struct channel_softc *chp;
3778 {
3779 struct ata_drive_datas *drvp;
3780 int drive;
3781 u_int32_t acer_fifo_udma;
3782 u_int32_t idedma_ctl;
3783 struct pciide_channel *cp = (struct pciide_channel*)chp;
3784 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3785
3786 idedma_ctl = 0;
3787 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3788 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3789 acer_fifo_udma), DEBUG_PROBE);
3790 /* setup DMA if needed */
3791 pciide_channel_dma_setup(cp);
3792
3793 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3794 	    DRIVE_UDMA) { /* check for 80-pin cable */
3795 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3796 ACER_0x4A_80PIN(chp->channel)) {
3797 if (chp->ch_drive[0].UDMA_mode > 2)
3798 chp->ch_drive[0].UDMA_mode = 2;
3799 if (chp->ch_drive[1].UDMA_mode > 2)
3800 chp->ch_drive[1].UDMA_mode = 2;
3801 }
3802 }
3803
3804 for (drive = 0; drive < 2; drive++) {
3805 drvp = &chp->ch_drive[drive];
3806 /* If no drive, skip */
3807 if ((drvp->drive_flags & DRIVE) == 0)
3808 continue;
3809 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3810 "channel %d drive %d 0x%x\n", chp->channel, drive,
3811 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3812 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3813 /* clear FIFO/DMA mode */
3814 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3815 ACER_UDMA_EN(chp->channel, drive) |
3816 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3817
3818 /* add timing values, setup DMA if needed */
3819 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3820 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3821 acer_fifo_udma |=
3822 ACER_FTH_OPL(chp->channel, drive, 0x1);
3823 goto pio;
3824 }
3825
3826 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3827 if (drvp->drive_flags & DRIVE_UDMA) {
3828 /* use Ultra/DMA */
3829 drvp->drive_flags &= ~DRIVE_DMA;
3830 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3831 acer_fifo_udma |=
3832 ACER_UDMA_TIM(chp->channel, drive,
3833 acer_udma[drvp->UDMA_mode]);
3834 /* XXX disable if one drive < UDMA3 ? */
3835 if (drvp->UDMA_mode >= 3) {
3836 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3837 ACER_0x4B,
3838 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3839 ACER_0x4B) | ACER_0x4B_UDMA66);
3840 }
3841 } else {
3842 /*
3843 * use Multiword DMA
3844 * Timings will be used for both PIO and DMA,
3845 * so adjust DMA mode if needed
3846 */
3847 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3848 drvp->PIO_mode = drvp->DMA_mode + 2;
3849 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3850 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3851 drvp->PIO_mode - 2 : 0;
3852 if (drvp->DMA_mode == 0)
3853 drvp->PIO_mode = 0;
3854 }
3855 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3856 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3857 ACER_IDETIM(chp->channel, drive),
3858 acer_pio[drvp->PIO_mode]);
3859 }
3860 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3861 acer_fifo_udma), DEBUG_PROBE);
3862 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3863 if (idedma_ctl != 0) {
3864 /* Add software bits in status register */
3865 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3866 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3867 idedma_ctl);
3868 }
3869 }
3870
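/*
 * Interrupt handler for the Acer Labs controllers: ACER_CHIDS reports
 * which channel(s) raised the interrupt, so only those channels are
 * passed on to wdcintr().  Compatibility channels are skipped here.
 */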
3871 int
3872 acer_pci_intr(arg)
3873 void *arg;
3874 {
3875 struct pciide_softc *sc = arg;
3876 struct pciide_channel *cp;
3877 struct channel_softc *wdc_cp;
3878 int i, rv, crv;
3879 u_int32_t chids;
3880
3881 rv = 0;
3882 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3883 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3884 cp = &sc->pciide_channels[i];
3885 wdc_cp = &cp->wdc_channel;
3886 		/* If a compat channel, skip. */
3887 if (cp->compat)
3888 continue;
3889 if (chids & ACER_CHIDS_INT(i)) {
3890 crv = wdcintr(wdc_cp);
3891 if (crv == 0)
3892 printf("%s:%d: bogus intr\n",
3893 sc->sc_wdcdev.sc_dev.dv_xname, i);
3894 else
3895 rv = 1;
3896 }
3897 }
3898 return rv;
3899 }
3900
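/*
 * Attach-time setup for the Triones/HighPoint controllers.  The HPT366
 * appears as two single-channel PCI functions (one per channel), while
 * the later parts are one function with two channels; the revision of
 * the HPT366 product ID is used to tell the HPT370/370A/372 variants
 * apart.
 */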
3901 void
3902 hpt_chip_map(sc, pa)
3903 struct pciide_softc *sc;
3904 struct pci_attach_args *pa;
3905 {
3906 struct pciide_channel *cp;
3907 int i, compatchan, revision;
3908 pcireg_t interface;
3909 bus_size_t cmdsize, ctlsize;
3910
3911 if (pciide_chipen(sc, pa) == 0)
3912 return;
3913
3914 revision = PCI_REVISION(pa->pa_class);
3915 aprint_normal("%s: Triones/Highpoint ",
3916 sc->sc_wdcdev.sc_dev.dv_xname);
3917 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3918 aprint_normal("HPT374 IDE Controller\n");
3919 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3920 aprint_normal("HPT372 IDE Controller\n");
3921 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3922 if (revision == HPT372_REV)
3923 aprint_normal("HPT372 IDE Controller\n");
3924 else if (revision == HPT370_REV)
3925 aprint_normal("HPT370 IDE Controller\n");
3926 else if (revision == HPT370A_REV)
3927 aprint_normal("HPT370A IDE Controller\n");
3928 else if (revision == HPT366_REV)
3929 aprint_normal("HPT366 IDE Controller\n");
3930 else
3931 aprint_normal("unknown HPT IDE controller rev %d\n",
3932 revision);
3933 } else
3934 aprint_normal("unknown HPT IDE controller 0x%x\n",
3935 sc->sc_pp->ide_product);
3936
3937 /*
3938 	 * When the chip is in native mode it identifies itself as
3939 	 * 'misc mass storage'. Fake the interface in this case.
3940 */
3941 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3942 interface = PCI_INTERFACE(pa->pa_class);
3943 } else {
3944 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3945 PCIIDE_INTERFACE_PCI(0);
3946 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3947 (revision == HPT370_REV || revision == HPT370A_REV ||
3948 revision == HPT372_REV)) ||
3949 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3950 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3951 interface |= PCIIDE_INTERFACE_PCI(1);
3952 }
3953
3954 aprint_normal("%s: bus-master DMA support present",
3955 sc->sc_wdcdev.sc_dev.dv_xname);
3956 pciide_mapreg_dma(sc, pa);
3957 aprint_normal("\n");
3958 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3959 WDC_CAPABILITY_MODE;
3960 if (sc->sc_dma_ok) {
3961 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3962 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3963 sc->sc_wdcdev.irqack = pciide_irqack;
3964 }
3965 sc->sc_wdcdev.PIO_cap = 4;
3966 sc->sc_wdcdev.DMA_cap = 2;
3967
3968 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3969 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3970 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3971 revision == HPT366_REV) {
3972 sc->sc_wdcdev.UDMA_cap = 4;
3973 /*
3974 * The 366 has 2 PCI IDE functions, one for primary and one
3975 * for secondary. So we need to call pciide_mapregs_compat()
3976 * with the real channel
3977 */
3978 if (pa->pa_function == 0) {
3979 compatchan = 0;
3980 } else if (pa->pa_function == 1) {
3981 compatchan = 1;
3982 } else {
3983 aprint_error("%s: unexpected PCI function %d\n",
3984 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3985 return;
3986 }
3987 sc->sc_wdcdev.nchannels = 1;
3988 } else {
3989 sc->sc_wdcdev.nchannels = 2;
3990 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3991 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3992 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3993 revision == HPT372_REV))
3994 sc->sc_wdcdev.UDMA_cap = 6;
3995 else
3996 sc->sc_wdcdev.UDMA_cap = 5;
3997 }
3998 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3999 cp = &sc->pciide_channels[i];
4000 if (sc->sc_wdcdev.nchannels > 1) {
4001 compatchan = i;
4002 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
4003 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
4004 aprint_normal(
4005 "%s: %s channel ignored (disabled)\n",
4006 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4007 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
4008 continue;
4009 }
4010 }
4011 if (pciide_chansetup(sc, i, interface) == 0)
4012 continue;
4013 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4014 pciide_mapregs_native(pa, cp, &cmdsize,
4015 &ctlsize, hpt_pci_intr);
4016 } else {
4017 pciide_mapregs_compat(pa, cp, compatchan,
4018 &cmdsize, &ctlsize);
4019 }
4020 wdcattach(&cp->wdc_channel);
4021 }
4022 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4023 (revision == HPT370_REV || revision == HPT370A_REV ||
4024 revision == HPT372_REV)) ||
4025 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4026 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
4027 /*
4028 		 * HPT370_REV and higher have a bit to disable interrupts;
4029 		 * make sure to clear it.
4030 */
4031 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
4032 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
4033 ~HPT_CSEL_IRQDIS);
4034 }
4035 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
4036 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4037 revision == HPT372_REV ) ||
4038 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4039 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4040 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
4041 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
4042 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
4043 return;
4044 }
4045
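/*
 * Per-channel mode setup for the HighPoint controllers.  The value
 * written to HPT_IDETIM() is taken from the per-chip hpt36x/370/372/374
 * pio/dma/udma lookup tables, selected by product and revision; UDMA
 * modes above 2 are clamped when the HPT_CSEL_CBLID() bit is set for
 * the channel.
 */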
4046 void
4047 hpt_setup_channel(chp)
4048 struct channel_softc *chp;
4049 {
4050 struct ata_drive_datas *drvp;
4051 int drive;
4052 int cable;
4053 u_int32_t before, after;
4054 u_int32_t idedma_ctl;
4055 struct pciide_channel *cp = (struct pciide_channel*)chp;
4056 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4057 int revision =
4058 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
4059
4060 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
4061
4062 /* setup DMA if needed */
4063 pciide_channel_dma_setup(cp);
4064
4065 idedma_ctl = 0;
4066
4067 /* Per drive settings */
4068 for (drive = 0; drive < 2; drive++) {
4069 drvp = &chp->ch_drive[drive];
4070 /* If no drive, skip */
4071 if ((drvp->drive_flags & DRIVE) == 0)
4072 continue;
4073 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
4074 HPT_IDETIM(chp->channel, drive));
4075
4076 /* add timing values, setup DMA if needed */
4077 if (drvp->drive_flags & DRIVE_UDMA) {
4078 /* use Ultra/DMA */
4079 drvp->drive_flags &= ~DRIVE_DMA;
4080 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
4081 drvp->UDMA_mode > 2)
4082 drvp->UDMA_mode = 2;
4083 switch (sc->sc_pp->ide_product) {
4084 case PCI_PRODUCT_TRIONES_HPT374:
4085 after = hpt374_udma[drvp->UDMA_mode];
4086 break;
4087 case PCI_PRODUCT_TRIONES_HPT372:
4088 after = hpt372_udma[drvp->UDMA_mode];
4089 break;
4090 case PCI_PRODUCT_TRIONES_HPT366:
4091 default:
4092 switch(revision) {
4093 case HPT372_REV:
4094 after = hpt372_udma[drvp->UDMA_mode];
4095 break;
4096 case HPT370_REV:
4097 case HPT370A_REV:
4098 after = hpt370_udma[drvp->UDMA_mode];
4099 break;
4100 case HPT366_REV:
4101 default:
4102 after = hpt366_udma[drvp->UDMA_mode];
4103 break;
4104 }
4105 }
4106 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4107 } else if (drvp->drive_flags & DRIVE_DMA) {
4108 /*
4109 * use Multiword DMA.
4110 * Timings will be used for both PIO and DMA, so adjust
4111 * DMA mode if needed
4112 */
4113 if (drvp->PIO_mode >= 3 &&
4114 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
4115 drvp->DMA_mode = drvp->PIO_mode - 2;
4116 }
4117 switch (sc->sc_pp->ide_product) {
4118 case PCI_PRODUCT_TRIONES_HPT374:
4119 after = hpt374_dma[drvp->DMA_mode];
4120 break;
4121 case PCI_PRODUCT_TRIONES_HPT372:
4122 after = hpt372_dma[drvp->DMA_mode];
4123 break;
4124 case PCI_PRODUCT_TRIONES_HPT366:
4125 default:
4126 switch(revision) {
4127 case HPT372_REV:
4128 after = hpt372_dma[drvp->DMA_mode];
4129 break;
4130 case HPT370_REV:
4131 case HPT370A_REV:
4132 after = hpt370_dma[drvp->DMA_mode];
4133 break;
4134 case HPT366_REV:
4135 default:
4136 after = hpt366_dma[drvp->DMA_mode];
4137 break;
4138 }
4139 }
4140 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4141 } else {
4142 /* PIO only */
4143 switch (sc->sc_pp->ide_product) {
4144 case PCI_PRODUCT_TRIONES_HPT374:
4145 after = hpt374_pio[drvp->PIO_mode];
4146 break;
4147 case PCI_PRODUCT_TRIONES_HPT372:
4148 after = hpt372_pio[drvp->PIO_mode];
4149 break;
4150 case PCI_PRODUCT_TRIONES_HPT366:
4151 default:
4152 switch(revision) {
4153 case HPT372_REV:
4154 after = hpt372_pio[drvp->PIO_mode];
4155 break;
4156 case HPT370_REV:
4157 case HPT370A_REV:
4158 after = hpt370_pio[drvp->PIO_mode];
4159 break;
4160 case HPT366_REV:
4161 default:
4162 after = hpt366_pio[drvp->PIO_mode];
4163 break;
4164 }
4165 }
4166 }
4167 pci_conf_write(sc->sc_pc, sc->sc_tag,
4168 HPT_IDETIM(chp->channel, drive), after);
4169 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
4170 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
4171 after, before), DEBUG_PROBE);
4172 }
4173 if (idedma_ctl != 0) {
4174 /* Add software bits in status register */
4175 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4176 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4177 idedma_ctl);
4178 }
4179 }
4180
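/*
 * Interrupt handler for the HighPoint controllers: the per-channel
 * IDEDMA_CTL status tells which channel interrupted.  If wdcintr()
 * doesn't claim the interrupt the status is written back anyway,
 * presumably to clear the INTR bit and avoid an interrupt storm.
 */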
4181 int
4182 hpt_pci_intr(arg)
4183 void *arg;
4184 {
4185 struct pciide_softc *sc = arg;
4186 struct pciide_channel *cp;
4187 struct channel_softc *wdc_cp;
4188 int rv = 0;
4189 int dmastat, i, crv;
4190
4191 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4192 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4193 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4194 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4195 IDEDMA_CTL_INTR)
4196 continue;
4197 cp = &sc->pciide_channels[i];
4198 wdc_cp = &cp->wdc_channel;
4199 crv = wdcintr(wdc_cp);
4200 if (crv == 0) {
4201 printf("%s:%d: bogus intr\n",
4202 sc->sc_wdcdev.sc_dev.dv_xname, i);
4203 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4204 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4205 } else
4206 rv = 1;
4207 }
4208 return rv;
4209 }
4210
4211
4212 /* Macros to test product */
4213 #define PDC_IS_262(sc) \
4214 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
4215 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4216 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4217 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4218 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4219 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4220 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4221 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4222 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4223 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4224 #define PDC_IS_265(sc) \
4225 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4226 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4227 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4228 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4229 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4230 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4231 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4232 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4233 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4234 #define PDC_IS_268(sc) \
4235 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4236 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4237 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4238 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4239 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4240 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4241 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4242 #define PDC_IS_276(sc) \
4243 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4244 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4245 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4246 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4247 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
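/*
 * The PDC_IS_xxx() macros form nested sets: 262 covers the Ultra/66 and
 * newer parts, 265 the Ultra/100 and newer, 268 the TX2-style parts that
 * need no explicit timing registers, and 276 the Ultra/133 generation.
 * They are used below to select the UDMA capability and the per-channel
 * setup routine.
 */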
4248
4249 void
4250 pdc202xx_chip_map(sc, pa)
4251 struct pciide_softc *sc;
4252 struct pci_attach_args *pa;
4253 {
4254 struct pciide_channel *cp;
4255 int channel;
4256 pcireg_t interface, st, mode;
4257 bus_size_t cmdsize, ctlsize;
4258
4259 if (!PDC_IS_268(sc)) {
4260 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4261 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
4262 st), DEBUG_PROBE);
4263 }
4264 if (pciide_chipen(sc, pa) == 0)
4265 return;
4266
4267 /* turn off RAID mode */
4268 if (!PDC_IS_268(sc))
4269 st &= ~PDC2xx_STATE_IDERAID;
4270
4271 /*
4272 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
4273 	 * RAID mode; we have to fake the interface.
4274 */
4275 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
4276 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
4277 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4278
4279 aprint_normal("%s: bus-master DMA support present",
4280 sc->sc_wdcdev.sc_dev.dv_xname);
4281 pciide_mapreg_dma(sc, pa);
4282 aprint_normal("\n");
4283 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4284 WDC_CAPABILITY_MODE;
4285 if (sc->sc_dma_ok) {
4286 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4287 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4288 sc->sc_wdcdev.irqack = pciide_irqack;
4289 }
4290 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
4291 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
4292 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
4293 sc->sc_wdcdev.PIO_cap = 4;
4294 sc->sc_wdcdev.DMA_cap = 2;
4295 if (PDC_IS_276(sc))
4296 sc->sc_wdcdev.UDMA_cap = 6;
4297 else if (PDC_IS_265(sc))
4298 sc->sc_wdcdev.UDMA_cap = 5;
4299 else if (PDC_IS_262(sc))
4300 sc->sc_wdcdev.UDMA_cap = 4;
4301 else
4302 sc->sc_wdcdev.UDMA_cap = 2;
4303 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
4304 pdc20268_setup_channel : pdc202xx_setup_channel;
4305 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4306 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4307
4308 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
4309 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
4310 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
4311 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
4312 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
4313 }
4314
4315 if (!PDC_IS_268(sc)) {
4316 /* setup failsafe defaults */
4317 mode = 0;
4318 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
4319 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
4320 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
4321 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
4322 for (channel = 0;
4323 channel < sc->sc_wdcdev.nchannels;
4324 channel++) {
4325 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4326 "drive 0 initial timings 0x%x, now 0x%x\n",
4327 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4328 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
4329 DEBUG_PROBE);
4330 pci_conf_write(sc->sc_pc, sc->sc_tag,
4331 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
4332 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4333 "drive 1 initial timings 0x%x, now 0x%x\n",
4334 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4335 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
4336 pci_conf_write(sc->sc_pc, sc->sc_tag,
4337 PDC2xx_TIM(channel, 1), mode);
4338 }
4339
4340 mode = PDC2xx_SCR_DMA;
4341 if (PDC_IS_265(sc)) {
4342 mode = PDC2xx_SCR_SET_GEN(mode, PDC265_SCR_GEN_LAT);
4343 } else if (PDC_IS_262(sc)) {
4344 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
4345 } else {
4346 /* the BIOS set it up this way */
4347 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
4348 }
4349 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
4350 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
4351 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
4352 "now 0x%x\n",
4353 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4354 PDC2xx_SCR),
4355 mode), DEBUG_PROBE);
4356 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4357 PDC2xx_SCR, mode);
4358
4359 /* controller initial state register is OK even without BIOS */
4360 /* Set DMA mode to IDE DMA compatibility */
4361 mode =
4362 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
4363 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
4364 DEBUG_PROBE);
4365 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
4366 mode | 0x1);
4367 mode =
4368 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
4369 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
4370 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
4371 mode | 0x1);
4372 }
4373
4374 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4375 cp = &sc->pciide_channels[channel];
4376 if (pciide_chansetup(sc, channel, interface) == 0)
4377 continue;
4378 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
4379 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
4380 aprint_normal("%s: %s channel ignored (disabled)\n",
4381 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4382 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
4383 continue;
4384 }
4385 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4386 PDC_IS_265(sc) ? pdc20265_pci_intr : pdc202xx_pci_intr);
4387 }
4388 if (!PDC_IS_268(sc)) {
4389 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4390 "0x%x\n", st), DEBUG_PROBE);
4391 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4392 }
4393 return;
4394 }
4395
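/*
 * Per-channel mode setup for the pre-TX2 Promise parts.  The per-drive
 * timing dword PDC2xx_TIM() is rebuilt from the pdc2xx_* lookup tables;
 * on PDC_IS_262() parts the U66 clock select and the per-channel ATAPI
 * register are adjusted as well.
 */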
4396 void
4397 pdc202xx_setup_channel(chp)
4398 struct channel_softc *chp;
4399 {
4400 struct ata_drive_datas *drvp;
4401 int drive;
4402 pcireg_t mode, st;
4403 u_int32_t idedma_ctl, scr, atapi;
4404 struct pciide_channel *cp = (struct pciide_channel*)chp;
4405 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4406 int channel = chp->channel;
4407
4408 /* setup DMA if needed */
4409 pciide_channel_dma_setup(cp);
4410
4411 idedma_ctl = 0;
4412 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4413 sc->sc_wdcdev.sc_dev.dv_xname,
4414 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4415 DEBUG_PROBE);
4416
4417 /* Per channel settings */
4418 if (PDC_IS_262(sc)) {
4419 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4420 PDC262_U66);
4421 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4422 /* Trim UDMA mode */
4423 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4424 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4425 chp->ch_drive[0].UDMA_mode <= 2) ||
4426 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4427 chp->ch_drive[1].UDMA_mode <= 2)) {
4428 if (chp->ch_drive[0].UDMA_mode > 2)
4429 chp->ch_drive[0].UDMA_mode = 2;
4430 if (chp->ch_drive[1].UDMA_mode > 2)
4431 chp->ch_drive[1].UDMA_mode = 2;
4432 }
4433 /* Set U66 if needed */
4434 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4435 chp->ch_drive[0].UDMA_mode > 2) ||
4436 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4437 chp->ch_drive[1].UDMA_mode > 2))
4438 scr |= PDC262_U66_EN(channel);
4439 else
4440 scr &= ~PDC262_U66_EN(channel);
4441 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4442 PDC262_U66, scr);
4443 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4444 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4445 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4446 PDC262_ATAPI(channel))), DEBUG_PROBE);
4447 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4448 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4449 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4450 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4451 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4452 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4453 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4454 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4455 atapi = 0;
4456 else
4457 atapi = PDC262_ATAPI_UDMA;
4458 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4459 PDC262_ATAPI(channel), atapi);
4460 }
4461 }
4462 for (drive = 0; drive < 2; drive++) {
4463 drvp = &chp->ch_drive[drive];
4464 /* If no drive, skip */
4465 if ((drvp->drive_flags & DRIVE) == 0)
4466 continue;
4467 mode = 0;
4468 if (drvp->drive_flags & DRIVE_UDMA) {
4469 /* use Ultra/DMA */
4470 drvp->drive_flags &= ~DRIVE_DMA;
4471 mode = PDC2xx_TIM_SET_MB(mode,
4472 pdc2xx_udma_mb[drvp->UDMA_mode]);
4473 mode = PDC2xx_TIM_SET_MC(mode,
4474 pdc2xx_udma_mc[drvp->UDMA_mode]);
4475 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4476 } else if (drvp->drive_flags & DRIVE_DMA) {
4477 mode = PDC2xx_TIM_SET_MB(mode,
4478 pdc2xx_dma_mb[drvp->DMA_mode]);
4479 mode = PDC2xx_TIM_SET_MC(mode,
4480 pdc2xx_dma_mc[drvp->DMA_mode]);
4481 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4482 } else {
4483 mode = PDC2xx_TIM_SET_MB(mode,
4484 pdc2xx_dma_mb[0]);
4485 mode = PDC2xx_TIM_SET_MC(mode,
4486 pdc2xx_dma_mc[0]);
4487 }
4488 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4489 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4490 if (drvp->drive_flags & DRIVE_ATA)
4491 mode |= PDC2xx_TIM_PRE;
4492 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4493 if (drvp->PIO_mode >= 3) {
4494 mode |= PDC2xx_TIM_IORDY;
4495 if (drive == 0)
4496 mode |= PDC2xx_TIM_IORDYp;
4497 }
4498 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4499 "timings 0x%x\n",
4500 sc->sc_wdcdev.sc_dev.dv_xname,
4501 chp->channel, drive, mode), DEBUG_PROBE);
4502 pci_conf_write(sc->sc_pc, sc->sc_tag,
4503 PDC2xx_TIM(chp->channel, drive), mode);
4504 }
4505 if (idedma_ctl != 0) {
4506 /* Add software bits in status register */
4507 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4508 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4509 idedma_ctl);
4510 }
4511 }
4512
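/*
 * Per-channel setup for the TX2-style (PDC_IS_268) parts: only the cable
 * check is done here (UDMA modes above 2 are clamped when the check
 * fails) and the IDEDMA_CTL software bits are set; the controller
 * itself snoops the SET_FEATURES command for the actual timings.
 */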
4513 void
4514 pdc20268_setup_channel(chp)
4515 struct channel_softc *chp;
4516 {
4517 struct ata_drive_datas *drvp;
4518 int drive;
4519 u_int32_t idedma_ctl;
4520 struct pciide_channel *cp = (struct pciide_channel*)chp;
4521 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4522 int u100;
4523
4524 /* setup DMA if needed */
4525 pciide_channel_dma_setup(cp);
4526
4527 idedma_ctl = 0;
4528
4529 /* I don't know what this is for, FreeBSD does it ... */
4530 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4531 IDEDMA_CMD + 0x1 + IDEDMA_SCH_OFFSET * chp->channel, 0x0b);
4532
4533 /*
4534 * cable type detect, from FreeBSD
4535 */
4536 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4537 IDEDMA_CMD + 0x3 + IDEDMA_SCH_OFFSET * chp->channel) & 0x04) ?
4538 0 : 1;
4539
4540 for (drive = 0; drive < 2; drive++) {
4541 drvp = &chp->ch_drive[drive];
4542 /* If no drive, skip */
4543 if ((drvp->drive_flags & DRIVE) == 0)
4544 continue;
4545 if (drvp->drive_flags & DRIVE_UDMA) {
4546 /* use Ultra/DMA */
4547 drvp->drive_flags &= ~DRIVE_DMA;
4548 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4549 if (drvp->UDMA_mode > 2 && u100 == 0)
4550 drvp->UDMA_mode = 2;
4551 } else if (drvp->drive_flags & DRIVE_DMA) {
4552 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4553 }
4554 }
4555 	/* nothing to do to set up modes; the controller snoops the SET_FEATURES cmd */
4556 if (idedma_ctl != 0) {
4557 /* Add software bits in status register */
4558 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4559 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4560 idedma_ctl);
4561 }
4562 }
4563
4564 int
4565 pdc202xx_pci_intr(arg)
4566 void *arg;
4567 {
4568 struct pciide_softc *sc = arg;
4569 struct pciide_channel *cp;
4570 struct channel_softc *wdc_cp;
4571 int i, rv, crv;
4572 u_int32_t scr;
4573
4574 rv = 0;
4575 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4576 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4577 cp = &sc->pciide_channels[i];
4578 wdc_cp = &cp->wdc_channel;
4579 		/* If a compat channel, skip. */
4580 if (cp->compat)
4581 continue;
4582 if (scr & PDC2xx_SCR_INT(i)) {
4583 crv = wdcintr(wdc_cp);
4584 if (crv == 0)
4585 printf("%s:%d: bogus intr (reg 0x%x)\n",
4586 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4587 else
4588 rv = 1;
4589 }
4590 }
4591 return rv;
4592 }
4593
4594 int
4595 pdc20265_pci_intr(arg)
4596 void *arg;
4597 {
4598 struct pciide_softc *sc = arg;
4599 struct pciide_channel *cp;
4600 struct channel_softc *wdc_cp;
4601 int i, rv, crv;
4602 u_int32_t dmastat;
4603
4604 rv = 0;
4605 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4606 cp = &sc->pciide_channels[i];
4607 wdc_cp = &cp->wdc_channel;
4608 		/* If a compat channel, skip. */
4609 if (cp->compat)
4610 continue;
4611 #if 0
4612 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, IDEDMA_CMD + 0x1 + IDEDMA_SCH_OFFSET * i, 0x0b);
4613 if ((bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, IDEDMA_CMD + 0x3 + IDEDMA_SCH_OFFSET * i) & 0x20) == 0)
4614 continue;
4615 #endif
4616 /*
4617 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4618 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops.
4619 		 * So use that instead (it requires 2 register reads instead
4620 		 * of 1, but we can't do it another way).
4621 */
4622 dmastat = bus_space_read_1(sc->sc_dma_iot,
4623 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4624 if((dmastat & IDEDMA_CTL_INTR) == 0)
4625 continue;
4626 crv = wdcintr(wdc_cp);
4627 if (crv == 0)
4628 printf("%s:%d: bogus intr\n",
4629 sc->sc_wdcdev.sc_dev.dv_xname, i);
4630 else
4631 rv = 1;
4632 }
4633 return rv;
4634 }
4635
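/*
 * LBA48 DMA transfers on the Ultra/66 and Ultra/100 parts need the
 * transfer size (the byte count divided by two) programmed into the
 * PDC262_ATAPI register; these dma_start/dma_finish hooks wrap the
 * generic pciide versions to set it up and restore it afterwards.
 */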
4636 static void
4637 pdc20262_dma_start(v, channel, drive)
4638 void *v;
4639 int channel, drive;
4640 {
4641 struct pciide_softc *sc = v;
4642 struct pciide_dma_maps *dma_maps =
4643 &sc->pciide_channels[channel].dma_maps[drive];
4644 int atapi;
4645
4646 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4647 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4648 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4649 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4650 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4651 PDC262_ATAPI(channel), atapi);
4652 }
4653
4654 pciide_dma_start(v, channel, drive);
4655 }
4656
4657 int
4658 pdc20262_dma_finish(v, channel, drive, force)
4659 void *v;
4660 int channel, drive;
4661 int force;
4662 {
4663 struct pciide_softc *sc = v;
4664 struct pciide_dma_maps *dma_maps =
4665 &sc->pciide_channels[channel].dma_maps[drive];
4666 struct channel_softc *chp;
4667 int atapi, error;
4668
4669 error = pciide_dma_finish(v, channel, drive, force);
4670
4671 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4672 chp = sc->wdc_chanarray[channel];
4673 atapi = 0;
4674 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4675 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4676 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4677 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4678 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4679 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4680 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4681 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4682 atapi = PDC262_ATAPI_UDMA;
4683 }
4684 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4685 PDC262_ATAPI(channel), atapi);
4686 }
4687
4688 return error;
4689 }
4690
4691 void
4692 opti_chip_map(sc, pa)
4693 struct pciide_softc *sc;
4694 struct pci_attach_args *pa;
4695 {
4696 struct pciide_channel *cp;
4697 bus_size_t cmdsize, ctlsize;
4698 pcireg_t interface;
4699 u_int8_t init_ctrl;
4700 int channel;
4701
4702 if (pciide_chipen(sc, pa) == 0)
4703 return;
4704
4705 aprint_normal("%s: bus-master DMA support present",
4706 sc->sc_wdcdev.sc_dev.dv_xname);
4707
4708 /*
4709 * XXXSCW:
4710 * There seem to be a couple of buggy revisions/implementations
4711 * of the OPTi pciide chipset. This kludge seems to fix one of
4712 * the reported problems (PR/11644) but still fails for the
4713 * other (PR/13151), although the latter may be due to other
4714 * issues too...
4715 */
4716 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4717 aprint_normal(" but disabled due to chip rev. <= 0x12");
4718 sc->sc_dma_ok = 0;
4719 } else
4720 pciide_mapreg_dma(sc, pa);
4721
4722 aprint_normal("\n");
4723
4724 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4725 WDC_CAPABILITY_MODE;
4726 sc->sc_wdcdev.PIO_cap = 4;
4727 if (sc->sc_dma_ok) {
4728 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4729 sc->sc_wdcdev.irqack = pciide_irqack;
4730 sc->sc_wdcdev.DMA_cap = 2;
4731 }
4732 sc->sc_wdcdev.set_modes = opti_setup_channel;
4733
4734 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4735 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4736
4737 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4738 OPTI_REG_INIT_CONTROL);
4739
4740 interface = PCI_INTERFACE(pa->pa_class);
4741
4742 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4743 cp = &sc->pciide_channels[channel];
4744 if (pciide_chansetup(sc, channel, interface) == 0)
4745 continue;
4746 if (channel == 1 &&
4747 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4748 aprint_normal("%s: %s channel ignored (disabled)\n",
4749 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4750 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
4751 continue;
4752 }
4753 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4754 pciide_pci_intr);
4755 }
4756 }
4757
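/*
 * Per-channel mode setup for the OPTi controllers.  Timings come from
 * the opti_tim_* tables indexed by the strap-detected PCI bus speed;
 * since both drives share a single Address Setup Time field, the faster
 * drive is slowed down when the two would disagree.
 */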
4758 void
4759 opti_setup_channel(chp)
4760 struct channel_softc *chp;
4761 {
4762 struct ata_drive_datas *drvp;
4763 struct pciide_channel *cp = (struct pciide_channel*)chp;
4764 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4765 int drive, spd;
4766 int mode[2];
4767 u_int8_t rv, mr;
4768
4769 /*
4770 * The `Delay' and `Address Setup Time' fields of the
4771 * Miscellaneous Register are always zero initially.
4772 */
4773 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4774 mr &= ~(OPTI_MISC_DELAY_MASK |
4775 OPTI_MISC_ADDR_SETUP_MASK |
4776 OPTI_MISC_INDEX_MASK);
4777
4778 /* Prime the control register before setting timing values */
4779 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4780
4781 /* Determine the clockrate of the PCIbus the chip is attached to */
4782 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4783 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4784
4785 /* setup DMA if needed */
4786 pciide_channel_dma_setup(cp);
4787
4788 for (drive = 0; drive < 2; drive++) {
4789 drvp = &chp->ch_drive[drive];
4790 /* If no drive, skip */
4791 if ((drvp->drive_flags & DRIVE) == 0) {
4792 mode[drive] = -1;
4793 continue;
4794 }
4795
4796 if ((drvp->drive_flags & DRIVE_DMA)) {
4797 /*
4798 * Timings will be used for both PIO and DMA,
4799 * so adjust DMA mode if needed
4800 */
4801 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4802 drvp->PIO_mode = drvp->DMA_mode + 2;
4803 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4804 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4805 drvp->PIO_mode - 2 : 0;
4806 if (drvp->DMA_mode == 0)
4807 drvp->PIO_mode = 0;
4808
4809 mode[drive] = drvp->DMA_mode + 5;
4810 } else
4811 mode[drive] = drvp->PIO_mode;
4812
4813 if (drive && mode[0] >= 0 &&
4814 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4815 /*
4816 * Can't have two drives using different values
4817 * for `Address Setup Time'.
4818 * Slow down the faster drive to compensate.
4819 */
4820 int d = (opti_tim_as[spd][mode[0]] >
4821 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4822
4823 mode[d] = mode[1-d];
4824 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4825 chp->ch_drive[d].DMA_mode = 0;
4826 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4827 }
4828 }
4829
4830 for (drive = 0; drive < 2; drive++) {
4831 int m;
4832 if ((m = mode[drive]) < 0)
4833 continue;
4834
4835 /* Set the Address Setup Time and select appropriate index */
4836 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4837 rv |= OPTI_MISC_INDEX(drive);
4838 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4839
4840 /* Set the pulse width and recovery timing parameters */
4841 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4842 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4843 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4844 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4845
4846 /* Set the Enhanced Mode register appropriately */
4847 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4848 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4849 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4850 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4851 }
4852
4853 /* Finally, enable the timings */
4854 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4855 }
4856
4857 #define ACARD_IS_850(sc) \
4858 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4859
4860 void
4861 acard_chip_map(sc, pa)
4862 struct pciide_softc *sc;
4863 struct pci_attach_args *pa;
4864 {
4865 struct pciide_channel *cp;
4866 int i;
4867 pcireg_t interface;
4868 bus_size_t cmdsize, ctlsize;
4869
4870 if (pciide_chipen(sc, pa) == 0)
4871 return;
4872
4873 /*
4874 	 * When the chip is in native mode it identifies itself as
4875 	 * 'misc mass storage'. Fake the interface in this case.
4876 */
4877 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4878 interface = PCI_INTERFACE(pa->pa_class);
4879 } else {
4880 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4881 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4882 }
4883
4884 aprint_normal("%s: bus-master DMA support present",
4885 sc->sc_wdcdev.sc_dev.dv_xname);
4886 pciide_mapreg_dma(sc, pa);
4887 aprint_normal("\n");
4888 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4889 WDC_CAPABILITY_MODE;
4890
4891 if (sc->sc_dma_ok) {
4892 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4893 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4894 sc->sc_wdcdev.irqack = pciide_irqack;
4895 }
4896 sc->sc_wdcdev.PIO_cap = 4;
4897 sc->sc_wdcdev.DMA_cap = 2;
4898 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4899
4900 sc->sc_wdcdev.set_modes = acard_setup_channel;
4901 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4902 sc->sc_wdcdev.nchannels = 2;
4903
4904 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4905 cp = &sc->pciide_channels[i];
4906 if (pciide_chansetup(sc, i, interface) == 0)
4907 continue;
4908 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4909 pciide_pci_intr);
4910 }
4911 if (!ACARD_IS_850(sc)) {
4912 u_int32_t reg;
4913 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4914 reg &= ~ATP860_CTRL_INT;
4915 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4916 }
4917 }
4918
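/*
 * Per-channel mode setup for the Acard controllers.  The ATP850 and the
 * newer ATP86x parts use different IDETIME/UDMA register layouts, hence
 * the ACARD_IS_850() branches; on the ATP86x, UDMA modes above 2 are
 * clamped when the ATP860_CTRL_80P() bit is set for the channel.
 */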
4919 void
4920 acard_setup_channel(chp)
4921 struct channel_softc *chp;
4922 {
4923 struct ata_drive_datas *drvp;
4924 struct pciide_channel *cp = (struct pciide_channel*)chp;
4925 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4926 int channel = chp->channel;
4927 int drive;
4928 u_int32_t idetime, udma_mode;
4929 u_int32_t idedma_ctl;
4930
4931 /* setup DMA if needed */
4932 pciide_channel_dma_setup(cp);
4933
4934 if (ACARD_IS_850(sc)) {
4935 idetime = 0;
4936 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4937 udma_mode &= ~ATP850_UDMA_MASK(channel);
4938 } else {
4939 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4940 idetime &= ~ATP860_SETTIME_MASK(channel);
4941 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4942 udma_mode &= ~ATP860_UDMA_MASK(channel);
4943
4944 		/* check for 80-pin cable */
4945 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4946 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4947 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4948 & ATP860_CTRL_80P(chp->channel)) {
4949 if (chp->ch_drive[0].UDMA_mode > 2)
4950 chp->ch_drive[0].UDMA_mode = 2;
4951 if (chp->ch_drive[1].UDMA_mode > 2)
4952 chp->ch_drive[1].UDMA_mode = 2;
4953 }
4954 }
4955 }
4956
4957 idedma_ctl = 0;
4958
4959 /* Per drive settings */
4960 for (drive = 0; drive < 2; drive++) {
4961 drvp = &chp->ch_drive[drive];
4962 /* If no drive, skip */
4963 if ((drvp->drive_flags & DRIVE) == 0)
4964 continue;
4965 /* add timing values, setup DMA if needed */
4966 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4967 (drvp->drive_flags & DRIVE_UDMA)) {
4968 /* use Ultra/DMA */
4969 if (ACARD_IS_850(sc)) {
4970 idetime |= ATP850_SETTIME(drive,
4971 acard_act_udma[drvp->UDMA_mode],
4972 acard_rec_udma[drvp->UDMA_mode]);
4973 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4974 acard_udma_conf[drvp->UDMA_mode]);
4975 } else {
4976 idetime |= ATP860_SETTIME(channel, drive,
4977 acard_act_udma[drvp->UDMA_mode],
4978 acard_rec_udma[drvp->UDMA_mode]);
4979 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4980 acard_udma_conf[drvp->UDMA_mode]);
4981 }
4982 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4983 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4984 (drvp->drive_flags & DRIVE_DMA)) {
4985 /* use Multiword DMA */
4986 drvp->drive_flags &= ~DRIVE_UDMA;
4987 if (ACARD_IS_850(sc)) {
4988 idetime |= ATP850_SETTIME(drive,
4989 acard_act_dma[drvp->DMA_mode],
4990 acard_rec_dma[drvp->DMA_mode]);
4991 } else {
4992 idetime |= ATP860_SETTIME(channel, drive,
4993 acard_act_dma[drvp->DMA_mode],
4994 acard_rec_dma[drvp->DMA_mode]);
4995 }
4996 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4997 } else {
4998 /* PIO only */
4999 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5000 if (ACARD_IS_850(sc)) {
5001 idetime |= ATP850_SETTIME(drive,
5002 acard_act_pio[drvp->PIO_mode],
5003 acard_rec_pio[drvp->PIO_mode]);
5004 } else {
5005 idetime |= ATP860_SETTIME(channel, drive,
5006 acard_act_pio[drvp->PIO_mode],
5007 acard_rec_pio[drvp->PIO_mode]);
5008 }
5009 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
5010 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5011 | ATP8x0_CTRL_EN(channel));
5012 }
5013 }
5014
5015 if (idedma_ctl != 0) {
5016 /* Add software bits in status register */
5017 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5018 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5019 }
5020
5021 if (ACARD_IS_850(sc)) {
5022 pci_conf_write(sc->sc_pc, sc->sc_tag,
5023 ATP850_IDETIME(channel), idetime);
5024 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
5025 } else {
5026 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
5027 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
5028 }
5029 }
5030
5031 int
5032 acard_pci_intr(arg)
5033 void *arg;
5034 {
5035 struct pciide_softc *sc = arg;
5036 struct pciide_channel *cp;
5037 struct channel_softc *wdc_cp;
5038 int rv = 0;
5039 int dmastat, i, crv;
5040
5041 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5042 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5043 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5044 if ((dmastat & IDEDMA_CTL_INTR) == 0)
5045 continue;
5046 cp = &sc->pciide_channels[i];
5047 wdc_cp = &cp->wdc_channel;
5048 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
5049 (void)wdcintr(wdc_cp);
5050 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5051 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5052 continue;
5053 }
5054 crv = wdcintr(wdc_cp);
5055 if (crv == 0)
5056 printf("%s:%d: bogus intr\n",
5057 sc->sc_wdcdev.sc_dev.dv_xname, i);
5058 else if (crv == 1)
5059 rv = 1;
5060 else if (rv == 0)
5061 rv = crv;
5062 }
5063 return rv;
5064 }
5065
5066 static int
5067 sl82c105_bugchk(struct pci_attach_args *pa)
5068 {
5069
5070 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
5071 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
5072 return (0);
5073
5074 if (PCI_REVISION(pa->pa_class) <= 0x05)
5075 return (1);
5076
5077 return (0);
5078 }
5079
5080 void
5081 sl82c105_chip_map(sc, pa)
5082 struct pciide_softc *sc;
5083 struct pci_attach_args *pa;
5084 {
5085 struct pciide_channel *cp;
5086 bus_size_t cmdsize, ctlsize;
5087 pcireg_t interface, idecr;
5088 int channel;
5089
5090 if (pciide_chipen(sc, pa) == 0)
5091 return;
5092
5093 aprint_normal("%s: bus-master DMA support present",
5094 sc->sc_wdcdev.sc_dev.dv_xname);
5095
5096 /*
5097 * Check to see if we're part of the Winbond 83c553 Southbridge.
5098 * If so, we need to disable DMA on rev. <= 5 of that chip.
5099 */
5100 if (pci_find_device(pa, sl82c105_bugchk)) {
5101 aprint_normal(" but disabled due to 83c553 rev. <= 0x05");
5102 sc->sc_dma_ok = 0;
5103 } else
5104 pciide_mapreg_dma(sc, pa);
5105 aprint_normal("\n");
5106
5107 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
5108 WDC_CAPABILITY_MODE;
5109 sc->sc_wdcdev.PIO_cap = 4;
5110 if (sc->sc_dma_ok) {
5111 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
5112 sc->sc_wdcdev.irqack = pciide_irqack;
5113 sc->sc_wdcdev.DMA_cap = 2;
5114 }
5115 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
5116
5117 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5118 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5119
5120 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
5121
5122 interface = PCI_INTERFACE(pa->pa_class);
5123
5124 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5125 cp = &sc->pciide_channels[channel];
5126 if (pciide_chansetup(sc, channel, interface) == 0)
5127 continue;
5128 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
5129 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
5130 aprint_normal("%s: %s channel ignored (disabled)\n",
5131 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
5132 cp->wdc_channel.ch_flags |= WDCF_DISABLED;
5133 continue;
5134 }
5135 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5136 pciide_pci_intr);
5137 }
5138 }
5139
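/*
 * Per-drive mode setup for the Symphony/Winbond SL82C105: each drive has
 * its own PxDxCR register holding the command on/off times, taken from
 * the symph_mw_dma_times[] or symph_pio_times[] tables.
 */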
5140 void
5141 sl82c105_setup_channel(chp)
5142 struct channel_softc *chp;
5143 {
5144 struct ata_drive_datas *drvp;
5145 struct pciide_channel *cp = (struct pciide_channel*)chp;
5146 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5147 int pxdx_reg, drive;
5148 pcireg_t pxdx;
5149
5150 /* Set up DMA if needed. */
5151 pciide_channel_dma_setup(cp);
5152
5153 for (drive = 0; drive < 2; drive++) {
5154 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
5155 : SYMPH_P1D0CR) + (drive * 4);
5156
5157 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
5158
5159 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
5160 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
5161
5162 drvp = &chp->ch_drive[drive];
5163 /* If no drive, skip. */
5164 if ((drvp->drive_flags & DRIVE) == 0) {
5165 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5166 continue;
5167 }
5168
5169 if (drvp->drive_flags & DRIVE_DMA) {
5170 /*
5171 * Timings will be used for both PIO and DMA,
5172 * so adjust DMA mode if needed.
5173 */
5174 if (drvp->PIO_mode >= 3) {
5175 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
5176 drvp->DMA_mode = drvp->PIO_mode - 2;
5177 if (drvp->DMA_mode < 1) {
5178 /*
5179 * Can't mix both PIO and DMA.
5180 * Disable DMA.
5181 */
5182 drvp->drive_flags &= ~DRIVE_DMA;
5183 }
5184 } else {
5185 /*
5186 * Can't mix both PIO and DMA. Disable
5187 * DMA.
5188 */
5189 drvp->drive_flags &= ~DRIVE_DMA;
5190 }
5191 }
5192
5193 if (drvp->drive_flags & DRIVE_DMA) {
5194 /* Use multi-word DMA. */
5195 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
5196 PxDx_CMD_ON_SHIFT;
5197 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
5198 } else {
5199 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
5200 PxDx_CMD_ON_SHIFT;
5201 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
5202 }
5203
5204 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
5205
5206 /* ...and set the mode for this drive. */
5207 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5208 }
5209 }

void
serverworks_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcitag_t pcib_tag;
	int channel;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
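	/*
	 * The Ultra/DMA ceiling depends on the chip: OSB4 does UDMA2,
	 * CSB5 does UDMA4 (UDMA5 from revision 0x92 on), CSB6 does UDMA5.
	 */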
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
		sc->sc_wdcdev.UDMA_cap = 2;
		break;
	case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
		if (PCI_REVISION(pa->pa_class) < 0x92)
			sc->sc_wdcdev.UDMA_cap = 4;
		else
			sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	}

	sc->sc_wdcdev.set_modes = serverworks_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 2;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    serverworks_pci_intr);
	}

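	/*
	 * Adjust config register 0x64 on function 0 of this device
	 * (clear bit 13, set bit 14); the meaning of these bits is
	 * chip-specific.
	 */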
	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
	    (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
}

void
serverworks_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	int drive, unit;
	u_int32_t pio_time, dma_time, pio_mode, udma_mode;
	u_int32_t idedma_ctl;
	static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
	static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

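	/*
	 * Timing and mode settings live in config registers 0x40 (PIO
	 * timing), 0x44 (DMA timing), 0x48 (PIO mode) and 0x54 (UDMA
	 * mode).  Clear this channel's fields before rebuilding them.
	 */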
	pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
	dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
	pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
	udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);

	pio_time &= ~(0xffff << (16 * channel));
	dma_time &= ~(0xffff << (16 * channel));
	pio_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(3 << (2 * channel));

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		unit = drive + 2 * channel;
		/* add timing values, setup DMA if needed */
		pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
		pio_mode |= drvp->PIO_mode << (4 * unit + 16);
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA, check for 80-pin cable */
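			/*
			 * Cable detection: if the corresponding bit in the
			 * subsystem device ID (bit 14 + channel) is clear,
			 * assume a 40-pin cable and cap Ultra/DMA at mode 2.
			 */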
			if (drvp->UDMA_mode > 2 &&
			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			     PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
				drvp->UDMA_mode = 2;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
			udma_mode |= 1 << unit;
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
		    (drvp->drive_flags & DRIVE_DMA)) {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
		}
	}

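	/*
	 * Write the rebuilt settings back; the PIO mode register at 0x48
	 * is skipped on the OSB4.
	 */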
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
		pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
	}
}

int
serverworks_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int rv = 0;
	int dmastat, i, crv;

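	/*
	 * Check each channel's bus-master DMA status; only channels that
	 * report an interrupt with no transfer still active are passed
	 * on to wdcintr().
	 */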
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
		    IDEDMA_CTL_INTR)
			continue;
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		crv = wdcintr(wdc_cp);
		if (crv == 0) {
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
		} else
			rv = 1;
	}
	return rv;
}

void
artisea_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
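	/*
	 * DMA is left disabled on revision 0 of the i31244 unless the
	 * kernel is built with PCIIDE_I31244_ENABLEDMA.
	 */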
#ifndef PCIIDE_I31244_ENABLEDMA
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_31244 &&
	    PCI_REVISION(pa->pa_class) == 0) {
		aprint_normal(" but disabled due to rev. 0");
		sc->sc_dma_ok = 0;
	} else
#endif
		pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	/*
	 * XXX Configure LEDs to show activity.
	 */

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
		sc->sc_wdcdev.UDMA_cap = 6;
	}
	sc->sc_wdcdev.set_modes = sata_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
	}
}
