1 /* $NetBSD: pciide.c,v 1.153.2.17 2004/07/12 21:24:55 he Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.153.2.17 2004/07/12 21:24:55 he Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/pciide_sii3112_reg.h>
123 #include <dev/pci/cy82c693var.h>
124
125 #include "opt_pciide.h"
126
127 /* inlines for reading/writing 8-bit PCI registers */
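/*
 * PCI configuration space can only be accessed one 32-bit word at a
 * time, so these 8-bit accessors work on the aligned dword containing
 * the register: the read shifts and masks the wanted byte out, and the
 * write does a read-modify-write of that dword.
 */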
128 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
129 int));
130 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
131 int, u_int8_t));
132
133 static __inline u_int8_t
134 pciide_pci_read(pc, pa, reg)
135 pci_chipset_tag_t pc;
136 pcitag_t pa;
137 int reg;
138 {
139
140 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
141 ((reg & 0x03) * 8) & 0xff);
142 }
143
144 static __inline void
145 pciide_pci_write(pc, pa, reg, val)
146 pci_chipset_tag_t pc;
147 pcitag_t pa;
148 int reg;
149 u_int8_t val;
150 {
151 pcireg_t pcival;
152
153 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
154 pcival &= ~(0xff << ((reg & 0x03) * 8));
155 pcival |= (val << ((reg & 0x03) * 8));
156 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
157 }
158
159 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
160
161 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
162 void piix_setup_channel __P((struct channel_softc*));
163 void piix3_4_setup_channel __P((struct channel_softc*));
164 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
165 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
166 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
167
168 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
169 void amd7x6_setup_channel __P((struct channel_softc*));
170
171 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void apollo_setup_channel __P((struct channel_softc*));
173
174 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
176 void cmd0643_9_setup_channel __P((struct channel_softc*));
177 void cmd_channel_map __P((struct pci_attach_args *,
178 struct pciide_softc *, int));
179 int cmd_pci_intr __P((void *));
180 void cmd646_9_irqack __P((struct channel_softc *));
181
182 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void cmd3112_setup_channel __P((struct channel_softc*));
184
185 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void cy693_setup_channel __P((struct channel_softc*));
187
188 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void sis_setup_channel __P((struct channel_softc*));
190 void sis96x_setup_channel __P((struct channel_softc*));
191 static int sis_hostbr_match __P(( struct pci_attach_args *));
192 static int sis_south_match __P(( struct pci_attach_args *));
193
194 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
195 void acer_setup_channel __P((struct channel_softc*));
196 int acer_pci_intr __P((void *));
197
198 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void pdc202xx_setup_channel __P((struct channel_softc*));
200 void pdc20268_setup_channel __P((struct channel_softc*));
201 int pdc202xx_pci_intr __P((void *));
202 int pdc20265_pci_intr __P((void *));
203 static void pdc20262_dma_start __P((void*, int, int));
204 static int pdc20262_dma_finish __P((void*, int, int, int));
205
206 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
207 void opti_setup_channel __P((struct channel_softc*));
208
209 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void hpt_setup_channel __P((struct channel_softc*));
211 int hpt_pci_intr __P((void *));
212
213 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void acard_setup_channel __P((struct channel_softc*));
215 int acard_pci_intr __P((void *));
216
217 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void serverworks_setup_channel __P((struct channel_softc*));
219 int serverworks_pci_intr __P((void *));
220
221 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
222 void sl82c105_setup_channel __P((struct channel_softc*));
223
224 void pciide_channel_dma_setup __P((struct pciide_channel *));
225 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
226 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
227 void pciide_dma_start __P((void*, int, int));
228 int pciide_dma_finish __P((void*, int, int, int));
229 void pciide_irqack __P((struct channel_softc *));
230 void pciide_print_modes __P((struct pciide_channel *));
231
232 struct pciide_product_desc {
233 u_int32_t ide_product;
234 int ide_flags;
235 const char *ide_name;
236 /* map and setup chip, probe drives */
237 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
238 };
239
240 /* Flags for ide_flags */
241 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
242 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
243
244 /* Default product description for devices not known to this driver */
245 const struct pciide_product_desc default_product_desc = {
246 0,
247 0,
248 "Generic PCI IDE controller",
249 default_chip_map,
250 };
251
252 const struct pciide_product_desc pciide_intel_products[] = {
253 { PCI_PRODUCT_INTEL_82092AA,
254 0,
255 "Intel 82092AA IDE controller",
256 default_chip_map,
257 },
258 { PCI_PRODUCT_INTEL_82371FB_IDE,
259 0,
260 "Intel 82371FB IDE controller (PIIX)",
261 piix_chip_map,
262 },
263 { PCI_PRODUCT_INTEL_82371SB_IDE,
264 0,
265 "Intel 82371SB IDE Interface (PIIX3)",
266 piix_chip_map,
267 },
268 { PCI_PRODUCT_INTEL_82371AB_IDE,
269 0,
270 "Intel 82371AB IDE controller (PIIX4)",
271 piix_chip_map,
272 },
273 { PCI_PRODUCT_INTEL_82440MX_IDE,
274 0,
275 "Intel 82440MX IDE controller",
276 piix_chip_map
277 },
278 { PCI_PRODUCT_INTEL_82801AA_IDE,
279 0,
280 "Intel 82801AA IDE Controller (ICH)",
281 piix_chip_map,
282 },
283 { PCI_PRODUCT_INTEL_82801AB_IDE,
284 0,
285 "Intel 82801AB IDE Controller (ICH0)",
286 piix_chip_map,
287 },
288 { PCI_PRODUCT_INTEL_82801BA_IDE,
289 0,
290 "Intel 82801BA IDE Controller (ICH2)",
291 piix_chip_map,
292 },
293 { PCI_PRODUCT_INTEL_82801BAM_IDE,
294 0,
295 "Intel 82801BAM IDE Controller (ICH2)",
296 piix_chip_map,
297 },
298 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
299 0,
300 "Intel 82801CA IDE Controller",
301 piix_chip_map,
302 },
303 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
304 0,
305 "Intel 82801CA IDE Controller",
306 piix_chip_map,
307 },
308 { PCI_PRODUCT_INTEL_82801DB_IDE,
309 0,
310 "Intel 82801DB IDE Controller (ICH4)",
311 piix_chip_map,
312 },
313 { PCI_PRODUCT_INTEL_82801EB_IDE,
314 0,
315 "Intel 82801EB IDE Controller (ICH5)",
316 piix_chip_map,
317 },
318 { 0,
319 0,
320 NULL,
321 NULL
322 }
323 };
324
325 const struct pciide_product_desc pciide_amd_products[] = {
326 { PCI_PRODUCT_AMD_PBC756_IDE,
327 0,
328 "Advanced Micro Devices AMD756 IDE Controller",
329 amd7x6_chip_map
330 },
331 { PCI_PRODUCT_AMD_PBC766_IDE,
332 0,
333 "Advanced Micro Devices AMD766 IDE Controller",
334 amd7x6_chip_map
335 },
336 { PCI_PRODUCT_AMD_PBC768_IDE,
337 0,
338 "Advanced Micro Devices AMD768 IDE Controller",
339 amd7x6_chip_map
340 },
341 { 0,
342 0,
343 NULL,
344 NULL
345 }
346 };
347
348 const struct pciide_product_desc pciide_cmd_products[] = {
349 { PCI_PRODUCT_CMDTECH_640,
350 0,
351 "CMD Technology PCI0640",
352 cmd_chip_map
353 },
354 { PCI_PRODUCT_CMDTECH_643,
355 0,
356 "CMD Technology PCI0643",
357 cmd0643_9_chip_map,
358 },
359 { PCI_PRODUCT_CMDTECH_646,
360 0,
361 "CMD Technology PCI0646",
362 cmd0643_9_chip_map,
363 },
364 { PCI_PRODUCT_CMDTECH_648,
365 IDE_PCI_CLASS_OVERRIDE,
366 "CMD Technology PCI0648",
367 cmd0643_9_chip_map,
368 },
369 { PCI_PRODUCT_CMDTECH_649,
370 IDE_PCI_CLASS_OVERRIDE,
371 "CMD Technology PCI0649",
372 cmd0643_9_chip_map,
373 },
374 { 0,
375 0,
376 NULL,
377 NULL
378 }
379 };
380
381 const struct pciide_product_desc pciide_via_products[] = {
382 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
383 0,
384 NULL,
385 apollo_chip_map,
386 },
387 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
388 0,
389 NULL,
390 apollo_chip_map,
391 },
392 { 0,
393 0,
394 NULL,
395 NULL
396 }
397 };
398
399 const struct pciide_product_desc pciide_cypress_products[] = {
400 { PCI_PRODUCT_CONTAQ_82C693,
401 IDE_16BIT_IOSPACE,
402 "Cypress 82C693 IDE Controller",
403 cy693_chip_map,
404 },
405 { PCI_PRODUCT_CMDTECH_3112,
406 IDE_PCI_CLASS_OVERRIDE,
407 "Silicon Image SATALink 3112",
408 cmd3112_chip_map,
409 },
410 { 0,
411 0,
412 NULL,
413 NULL
414 }
415 };
416
417 const struct pciide_product_desc pciide_sis_products[] = {
418 { PCI_PRODUCT_SIS_5597_IDE,
419 0,
420 NULL,
421 sis_chip_map,
422 },
423 { 0,
424 0,
425 NULL,
426 NULL
427 }
428 };
429
430 const struct pciide_product_desc pciide_acer_products[] = {
431 { PCI_PRODUCT_ALI_M5229,
432 0,
433 "Acer Labs M5229 UDMA IDE Controller",
434 acer_chip_map,
435 },
436 { 0,
437 0,
438 NULL,
439 NULL
440 }
441 };
442
443 const struct pciide_product_desc pciide_promise_products[] = {
444 { PCI_PRODUCT_PROMISE_ULTRA33,
445 IDE_PCI_CLASS_OVERRIDE,
446 "Promise Ultra33/ATA Bus Master IDE Accelerator",
447 pdc202xx_chip_map,
448 },
449 { PCI_PRODUCT_PROMISE_ULTRA66,
450 IDE_PCI_CLASS_OVERRIDE,
451 "Promise Ultra66/ATA Bus Master IDE Accelerator",
452 pdc202xx_chip_map,
453 },
454 { PCI_PRODUCT_PROMISE_ULTRA100,
455 IDE_PCI_CLASS_OVERRIDE,
456 "Promise Ultra100/ATA Bus Master IDE Accelerator",
457 pdc202xx_chip_map,
458 },
459 { PCI_PRODUCT_PROMISE_ULTRA100X,
460 IDE_PCI_CLASS_OVERRIDE,
461 "Promise Ultra100/ATA Bus Master IDE Accelerator",
462 pdc202xx_chip_map,
463 },
464 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
465 IDE_PCI_CLASS_OVERRIDE,
466 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
467 pdc202xx_chip_map,
468 },
469 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
470 IDE_PCI_CLASS_OVERRIDE,
471 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
472 pdc202xx_chip_map,
473 },
474 { PCI_PRODUCT_PROMISE_ULTRA133,
475 IDE_PCI_CLASS_OVERRIDE,
476 "Promise Ultra133/ATA Bus Master IDE Accelerator",
477 pdc202xx_chip_map,
478 },
479 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
480 IDE_PCI_CLASS_OVERRIDE,
481 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
482 pdc202xx_chip_map,
483 },
484 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
485 IDE_PCI_CLASS_OVERRIDE,
486 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
487 pdc202xx_chip_map,
488 },
489 { 0,
490 0,
491 NULL,
492 NULL
493 }
494 };
495
496 const struct pciide_product_desc pciide_opti_products[] = {
497 { PCI_PRODUCT_OPTI_82C621,
498 0,
499 "OPTi 82c621 PCI IDE controller",
500 opti_chip_map,
501 },
502 { PCI_PRODUCT_OPTI_82C568,
503 0,
504 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
505 opti_chip_map,
506 },
507 { PCI_PRODUCT_OPTI_82D568,
508 0,
509 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
510 opti_chip_map,
511 },
512 { 0,
513 0,
514 NULL,
515 NULL
516 }
517 };
518
519 const struct pciide_product_desc pciide_triones_products[] = {
520 { PCI_PRODUCT_TRIONES_HPT366,
521 IDE_PCI_CLASS_OVERRIDE,
522 NULL,
523 hpt_chip_map,
524 },
525 { PCI_PRODUCT_TRIONES_HPT372,
526 IDE_PCI_CLASS_OVERRIDE,
527 NULL,
528 hpt_chip_map
529 },
530 { PCI_PRODUCT_TRIONES_HPT374,
531 IDE_PCI_CLASS_OVERRIDE,
532 NULL,
533 hpt_chip_map
534 },
535 { 0,
536 0,
537 NULL,
538 NULL
539 }
540 };
541
542 const struct pciide_product_desc pciide_acard_products[] = {
543 { PCI_PRODUCT_ACARD_ATP850U,
544 IDE_PCI_CLASS_OVERRIDE,
545 "Acard ATP850U Ultra33 IDE Controller",
546 acard_chip_map,
547 },
548 { PCI_PRODUCT_ACARD_ATP860,
549 IDE_PCI_CLASS_OVERRIDE,
550 "Acard ATP860 Ultra66 IDE Controller",
551 acard_chip_map,
552 },
553 { PCI_PRODUCT_ACARD_ATP860A,
554 IDE_PCI_CLASS_OVERRIDE,
555 "Acard ATP860-A Ultra66 IDE Controller",
556 acard_chip_map,
557 },
558 { 0,
559 0,
560 NULL,
561 NULL
562 }
563 };
564
565 const struct pciide_product_desc pciide_serverworks_products[] = {
566 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
567 0,
568 "ServerWorks OSB4 IDE Controller",
569 serverworks_chip_map,
570 },
571 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
572 0,
573 "ServerWorks CSB5 IDE Controller",
574 serverworks_chip_map,
575 },
576 { 0,
577 0,
578 NULL,
579 }
580 };
581
582 const struct pciide_product_desc pciide_symphony_products[] = {
583 { PCI_PRODUCT_SYMPHONY_82C105,
584 0,
585 "Symphony Labs 82C105 IDE controller",
586 sl82c105_chip_map,
587 },
588 { 0,
589 0,
590 NULL,
591 }
592 };
593
594 const struct pciide_product_desc pciide_winbond_products[] = {
595 { PCI_PRODUCT_WINBOND_W83C553F_1,
596 0,
597 "Winbond W83C553F IDE controller",
598 sl82c105_chip_map,
599 },
600 { 0,
601 0,
602 NULL,
603 }
604 };
605
606 struct pciide_vendor_desc {
607 u_int32_t ide_vendor;
608 const struct pciide_product_desc *ide_products;
609 };
610
611 const struct pciide_vendor_desc pciide_vendors[] = {
612 { PCI_VENDOR_INTEL, pciide_intel_products },
613 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
614 { PCI_VENDOR_VIATECH, pciide_via_products },
615 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
616 { PCI_VENDOR_SIS, pciide_sis_products },
617 { PCI_VENDOR_ALI, pciide_acer_products },
618 { PCI_VENDOR_PROMISE, pciide_promise_products },
619 { PCI_VENDOR_AMD, pciide_amd_products },
620 { PCI_VENDOR_OPTI, pciide_opti_products },
621 { PCI_VENDOR_TRIONES, pciide_triones_products },
622 { PCI_VENDOR_ACARD, pciide_acard_products },
623 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
624 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
625 { PCI_VENDOR_WINBOND, pciide_winbond_products },
626 { 0, NULL }
627 };
628
629 /* options passed via the 'flags' config keyword */
630 #define PCIIDE_OPTIONS_DMA 0x01
631 #define PCIIDE_OPTIONS_NODMA 0x02
632
633 int pciide_match __P((struct device *, struct cfdata *, void *));
634 void pciide_attach __P((struct device *, struct device *, void *));
635
636 struct cfattach pciide_ca = {
637 sizeof(struct pciide_softc), pciide_match, pciide_attach
638 };
639 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
640 int pciide_mapregs_compat __P(( struct pci_attach_args *,
641 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
642 int pciide_mapregs_native __P((struct pci_attach_args *,
643 struct pciide_channel *, bus_size_t *, bus_size_t *,
644 int (*pci_intr) __P((void *))));
645 void pciide_mapreg_dma __P((struct pciide_softc *,
646 struct pci_attach_args *));
647 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
648 void pciide_mapchan __P((struct pci_attach_args *,
649 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
650 int (*pci_intr) __P((void *))));
651 int pciide_chan_candisable __P((struct pciide_channel *));
652 void pciide_map_compat_intr __P(( struct pci_attach_args *,
653 struct pciide_channel *, int, int));
654 int pciide_compat_intr __P((void *));
655 int pciide_pci_intr __P((void *));
656 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
657
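/*
 * Look up a product description by PCI ID: first find the vendor's
 * product table in pciide_vendors[], then scan that table for the
 * product.  Both tables end with a NULL sentinel entry; NULL is
 * returned if no match is found.
 */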
658 const struct pciide_product_desc *
659 pciide_lookup_product(id)
660 u_int32_t id;
661 {
662 const struct pciide_product_desc *pp;
663 const struct pciide_vendor_desc *vp;
664
665 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
666 if (PCI_VENDOR(id) == vp->ide_vendor)
667 break;
668
669 if ((pp = vp->ide_products) == NULL)
670 return NULL;
671
672 for (; pp->chip_map != NULL; pp++)
673 if (PCI_PRODUCT(id) == pp->ide_product)
674 break;
675
676 if (pp->chip_map == NULL)
677 return NULL;
678 return pp;
679 }
680
681 int
682 pciide_match(parent, match, aux)
683 struct device *parent;
684 struct cfdata *match;
685 void *aux;
686 {
687 struct pci_attach_args *pa = aux;
688 const struct pciide_product_desc *pp;
689
690 /*
691 * Check the class code to see that it's a PCI IDE controller.
692 * If it is, we assume that we can deal with it; it _should_
693 * work in a standardized way...
694 */
695 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
696 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
697 return (1);
698 }
699
700 /*
701 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
702 * controllers. Let's see if we can deal with them anyway.
703 */
704 pp = pciide_lookup_product(pa->pa_id);
705 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
706 return (1);
707 }
708
709 return (0);
710 }
711
712 void
713 pciide_attach(parent, self, aux)
714 struct device *parent, *self;
715 void *aux;
716 {
717 struct pci_attach_args *pa = aux;
718 pci_chipset_tag_t pc = pa->pa_pc;
719 pcitag_t tag = pa->pa_tag;
720 struct pciide_softc *sc = (struct pciide_softc *)self;
721 pcireg_t csr;
722 char devinfo[256];
723 const char *displaydev;
724
725 sc->sc_pp = pciide_lookup_product(pa->pa_id);
726 if (sc->sc_pp == NULL) {
727 sc->sc_pp = &default_product_desc;
728 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
729 displaydev = devinfo;
730 } else
731 displaydev = sc->sc_pp->ide_name;
732
733 /* if displaydev == NULL, printf is done in chip-specific map */
734 if (displaydev)
735 printf(": %s (rev. 0x%02x)\n", displaydev,
736 PCI_REVISION(pa->pa_class));
737
738 sc->sc_pc = pa->pa_pc;
739 sc->sc_tag = pa->pa_tag;
740
741 /* Set up DMA defaults; these might be adjusted by chip_map. */
742 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
743 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
744
745 #ifdef WDCDEBUG
746 if (wdcdebug_pciide_mask & DEBUG_PROBE)
747 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
748 #endif
749 sc->sc_pp->chip_map(sc, pa);
750
751 if (sc->sc_dma_ok) {
752 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
753 csr |= PCI_COMMAND_MASTER_ENABLE;
754 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
755 }
756 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
757 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
758 }
759
760 /* tell whether the chip is enabled or not */
761 int
762 pciide_chipen(sc, pa)
763 struct pciide_softc *sc;
764 struct pci_attach_args *pa;
765 {
766 pcireg_t csr;
767 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
768 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
769 PCI_COMMAND_STATUS_REG);
770 printf("%s: device disabled (at %s)\n",
771 sc->sc_wdcdev.sc_dev.dv_xname,
772 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
773 "device" : "bridge");
774 return 0;
775 }
776 return 1;
777 }
778
779 int
780 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
781 struct pci_attach_args *pa;
782 struct pciide_channel *cp;
783 int compatchan;
784 bus_size_t *cmdsizep, *ctlsizep;
785 {
786 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
787 struct channel_softc *wdc_cp = &cp->wdc_channel;
788
789 cp->compat = 1;
790 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
791 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
792
793 wdc_cp->cmd_iot = pa->pa_iot;
794 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
795 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
796 printf("%s: couldn't map %s channel cmd regs\n",
797 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
798 return (0);
799 }
800
801 wdc_cp->ctl_iot = pa->pa_iot;
802 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
803 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
804 printf("%s: couldn't map %s channel ctl regs\n",
805 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
806 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
807 PCIIDE_COMPAT_CMD_SIZE);
808 return (0);
809 }
810
811 return (1);
812 }
813
814 int
815 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
816 struct pci_attach_args * pa;
817 struct pciide_channel *cp;
818 bus_size_t *cmdsizep, *ctlsizep;
819 int (*pci_intr) __P((void *));
820 {
821 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
822 struct channel_softc *wdc_cp = &cp->wdc_channel;
823 const char *intrstr;
824 pci_intr_handle_t intrhandle;
825
826 cp->compat = 0;
827
828 if (sc->sc_pci_ih == NULL) {
829 if (pci_intr_map(pa, &intrhandle) != 0) {
830 printf("%s: couldn't map native-PCI interrupt\n",
831 sc->sc_wdcdev.sc_dev.dv_xname);
832 return 0;
833 }
834 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
835 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
836 intrhandle, IPL_BIO, pci_intr, sc);
837 if (sc->sc_pci_ih != NULL) {
838 printf("%s: using %s for native-PCI interrupt\n",
839 sc->sc_wdcdev.sc_dev.dv_xname,
840 intrstr ? intrstr : "unknown interrupt");
841 } else {
842 printf("%s: couldn't establish native-PCI interrupt",
843 sc->sc_wdcdev.sc_dev.dv_xname);
844 if (intrstr != NULL)
845 printf(" at %s", intrstr);
846 printf("\n");
847 return 0;
848 }
849 }
850 cp->ih = sc->sc_pci_ih;
851 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
852 PCI_MAPREG_TYPE_IO, 0,
853 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
854 printf("%s: couldn't map %s channel cmd regs\n",
855 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
856 return 0;
857 }
858
859 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
860 PCI_MAPREG_TYPE_IO, 0,
861 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
862 printf("%s: couldn't map %s channel ctl regs\n",
863 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
864 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
865 return 0;
866 }
867 /*
868 * In native mode, 4 bytes of I/O space are mapped for the control
869 * register; the control register itself is at offset 2. Pass the
870 * generic code a handle for only one byte at the right offset.
871 */
872 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
873 &wdc_cp->ctl_ioh) != 0) {
874 printf("%s: unable to subregion %s channel ctl regs\n",
875 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
876 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
877 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
878 return 0;
879 }
880 return (1);
881 }
882
883 void
884 pciide_mapreg_dma(sc, pa)
885 struct pciide_softc *sc;
886 struct pci_attach_args *pa;
887 {
888 pcireg_t maptype;
889 bus_addr_t addr;
890
891 /*
892 * Map DMA registers
893 *
894 * Note that sc_dma_ok is the right variable to test to see if
895 * DMA can be done. If the interface doesn't support DMA,
896 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
897 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
898 * non-zero if the interface supports DMA and the registers
899 * could be mapped.
900 *
901 * XXX Note that despite the fact that the Bus Master IDE specs
902 * XXX say that "The bus master IDE function uses 16 bytes of IO
903 * XXX space," some controllers (at least the United
904 * XXX Microelectronics UM8886BF) place it in memory space.
905 */
906 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
907 PCIIDE_REG_BUS_MASTER_DMA);
908
909 switch (maptype) {
910 case PCI_MAPREG_TYPE_IO:
911 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
912 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
913 &addr, NULL, NULL) == 0);
914 if (sc->sc_dma_ok == 0) {
915 printf(", but unused (couldn't query registers)");
916 break;
917 }
918 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
919 && addr >= 0x10000) {
920 sc->sc_dma_ok = 0;
921 printf(", but unused (registers at unsafe address "
922 "%#lx)", (unsigned long)addr);
923 break;
924 }
925 /* FALLTHROUGH */
926
927 case PCI_MAPREG_MEM_TYPE_32BIT:
928 sc->sc_dma_ok = (pci_mapreg_map(pa,
929 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
930 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
931 sc->sc_dmat = pa->pa_dmat;
932 if (sc->sc_dma_ok == 0) {
933 printf(", but unused (couldn't map registers)");
934 } else {
935 sc->sc_wdcdev.dma_arg = sc;
936 sc->sc_wdcdev.dma_init = pciide_dma_init;
937 sc->sc_wdcdev.dma_start = pciide_dma_start;
938 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
939 }
940
941 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
942 PCIIDE_OPTIONS_NODMA) {
943 printf(", but unused (forced off by config file)");
944 sc->sc_dma_ok = 0;
945 }
946 break;
947
948 default:
949 sc->sc_dma_ok = 0;
950 printf(", but unsupported register maptype (0x%x)", maptype);
951 }
952 }
953
954 int
955 pciide_compat_intr(arg)
956 void *arg;
957 {
958 struct pciide_channel *cp = arg;
959
960 #ifdef DIAGNOSTIC
961 /* should only be called for a compat channel */
962 if (cp->compat == 0)
963 panic("pciide compat intr called for non-compat chan %p", cp);
964 #endif
965 return (wdcintr(&cp->wdc_channel));
966 }
967
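/*
 * Native-PCI interrupt handler: both channels of a controller share a
 * single PCI interrupt (sc_pci_ih), so walk every non-compat channel
 * that is waiting for an interrupt and let wdcintr() decide which
 * one(s) the interrupt belongs to.
 */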
968 int
969 pciide_pci_intr(arg)
970 void *arg;
971 {
972 struct pciide_softc *sc = arg;
973 struct pciide_channel *cp;
974 struct channel_softc *wdc_cp;
975 int i, rv, crv;
976
977 rv = 0;
978 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
979 cp = &sc->pciide_channels[i];
980 wdc_cp = &cp->wdc_channel;
981
982 /* If a compat channel, skip. */
983 if (cp->compat)
984 continue;
985 /* if this channel is not waiting for an interrupt, skip */
986 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
987 continue;
988
989 crv = wdcintr(wdc_cp);
990 if (crv == 0)
991 ; /* leave rv alone */
992 else if (crv == 1)
993 rv = 1; /* claim the intr */
994 else if (rv == 0) /* crv should be -1 in this case */
995 rv = crv; /* if we've done no better, take it */
996 }
997 return (rv);
998 }
999
1000 void
1001 pciide_channel_dma_setup(cp)
1002 struct pciide_channel *cp;
1003 {
1004 int drive;
1005 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1006 struct ata_drive_datas *drvp;
1007
1008 for (drive = 0; drive < 2; drive++) {
1009 drvp = &cp->wdc_channel.ch_drive[drive];
1010 /* If no drive, skip */
1011 if ((drvp->drive_flags & DRIVE) == 0)
1012 continue;
1013 /* setup DMA if needed */
1014 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1015 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1016 sc->sc_dma_ok == 0) {
1017 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1018 continue;
1019 }
1020 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1021 != 0) {
1022 /* Abort DMA setup */
1023 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1024 continue;
1025 }
1026 }
1027 }
1028
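/*
 * Allocate and map the bus-master IDE descriptor table for one drive.
 * The table is an array of NIDEDMA_TABLES entries, each holding the
 * 32-bit physical address and byte count of one data segment; the last
 * entry of a transfer is flagged with IDEDMA_BYTE_COUNT_EOT.  The
 * table memory is allocated with IDEDMA_TBL_ALIGN alignment so its
 * physical address satisfies the controller's constraints.
 */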
1029 int
1030 pciide_dma_table_setup(sc, channel, drive)
1031 struct pciide_softc *sc;
1032 int channel, drive;
1033 {
1034 bus_dma_segment_t seg;
1035 int error, rseg;
1036 const bus_size_t dma_table_size =
1037 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1038 struct pciide_dma_maps *dma_maps =
1039 &sc->pciide_channels[channel].dma_maps[drive];
1040
1041 /* If table was already allocated, just return */
1042 if (dma_maps->dma_table)
1043 return 0;
1044
1045 /* Allocate memory for the DMA tables and map it */
1046 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1047 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1048 BUS_DMA_NOWAIT)) != 0) {
1049 printf("%s:%d: unable to allocate table DMA for "
1050 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1051 channel, drive, error);
1052 return error;
1053 }
1054 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1055 dma_table_size,
1056 (caddr_t *)&dma_maps->dma_table,
1057 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1058 printf("%s:%d: unable to map table DMA for "
1059 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1060 channel, drive, error);
1061 return error;
1062 }
1063 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1064 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1065 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1066
1067 /* Create and load table DMA map for this disk */
1068 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1069 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1070 &dma_maps->dmamap_table)) != 0) {
1071 printf("%s:%d: unable to create table DMA map for "
1072 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1073 channel, drive, error);
1074 return error;
1075 }
1076 if ((error = bus_dmamap_load(sc->sc_dmat,
1077 dma_maps->dmamap_table,
1078 dma_maps->dma_table,
1079 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1080 printf("%s:%d: unable to load table DMA map for "
1081 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1082 channel, drive, error);
1083 return error;
1084 }
1085 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1086 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1087 DEBUG_PROBE);
1088 /* Create a xfer DMA map for this drive */
1089 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1090 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1091 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1092 &dma_maps->dmamap_xfer)) != 0) {
1093 printf("%s:%d: unable to create xfer DMA map for "
1094 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1095 channel, drive, error);
1096 return error;
1097 }
1098 return 0;
1099 }
1100
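/*
 * Prepare a DMA transfer: load the data buffer into the xfer DMA map,
 * translate the resulting segments into descriptor-table entries, and
 * program the channel's bus-master registers (status, table address,
 * transfer direction).  The transfer itself is started later by
 * pciide_dma_start().
 */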
1101 int
1102 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1103 void *v;
1104 int channel, drive;
1105 void *databuf;
1106 size_t datalen;
1107 int flags;
1108 {
1109 struct pciide_softc *sc = v;
1110 int error, seg;
1111 struct pciide_dma_maps *dma_maps =
1112 &sc->pciide_channels[channel].dma_maps[drive];
1113
1114 error = bus_dmamap_load(sc->sc_dmat,
1115 dma_maps->dmamap_xfer,
1116 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1117 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1118 if (error) {
1119 printf("%s:%d: unable to load xfer DMA map for "
1120 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1121 channel, drive, error);
1122 return error;
1123 }
1124
1125 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1126 dma_maps->dmamap_xfer->dm_mapsize,
1127 (flags & WDC_DMA_READ) ?
1128 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1129
1130 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1131 #ifdef DIAGNOSTIC
1132 /* A segment must not cross a 64k boundary */
1133 {
1134 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1135 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1136 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1137 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1138 printf("pciide_dma: segment %d physical addr 0x%lx"
1139 " len 0x%lx not properly aligned\n",
1140 seg, phys, len);
1141 panic("pciide_dma: buf align");
1142 }
1143 }
1144 #endif
1145 dma_maps->dma_table[seg].base_addr =
1146 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1147 dma_maps->dma_table[seg].byte_count =
1148 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1149 IDEDMA_BYTE_COUNT_MASK);
1150 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1151 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1152 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1153
1154 }
1155 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1156 htole32(IDEDMA_BYTE_COUNT_EOT);
1157
1158 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1159 dma_maps->dmamap_table->dm_mapsize,
1160 BUS_DMASYNC_PREWRITE);
1161
1162 /* Maps are ready. Start DMA function */
1163 #ifdef DIAGNOSTIC
1164 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1165 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1166 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1167 panic("pciide_dma_init: table align");
1168 }
1169 #endif
1170
1171 /* Clear status bits */
1172 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1173 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1174 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1175 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1176 /* Write table addr */
1177 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1178 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1179 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1180 /* set read/write */
1181 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1182 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1183 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1184 /* remember flags */
1185 dma_maps->dma_flags = flags;
1186 return 0;
1187 }
1188
1189 void
1190 pciide_dma_start(v, channel, drive)
1191 void *v;
1192 int channel, drive;
1193 {
1194 struct pciide_softc *sc = v;
1195
1196 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1197 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1198 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1199 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1200 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1201 }
1202
1203 int
1204 pciide_dma_finish(v, channel, drive, force)
1205 void *v;
1206 int channel, drive;
1207 int force;
1208 {
1209 struct pciide_softc *sc = v;
1210 u_int8_t status;
1211 int error = 0;
1212 struct pciide_dma_maps *dma_maps =
1213 &sc->pciide_channels[channel].dma_maps[drive];
1214
1215 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1216 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1217 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1218 DEBUG_XFERS);
1219
1220 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1221 return WDC_DMAST_NOIRQ;
1222
1223 /* stop DMA channel */
1224 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1225 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1226 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1227 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1228
1229 /* Unload the map of the data buffer */
1230 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1231 dma_maps->dmamap_xfer->dm_mapsize,
1232 (dma_maps->dma_flags & WDC_DMA_READ) ?
1233 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1234 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1235
1236 if ((status & IDEDMA_CTL_ERR) != 0) {
1237 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1238 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1239 error |= WDC_DMAST_ERR;
1240 }
1241
1242 if ((status & IDEDMA_CTL_INTR) == 0) {
1243 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1244 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1245 drive, status);
1246 error |= WDC_DMAST_NOIRQ;
1247 }
1248
1249 if ((status & IDEDMA_CTL_ACT) != 0) {
1250 /* data underrun, may be a valid condition for ATAPI */
1251 error |= WDC_DMAST_UNDER;
1252 }
1253 return error;
1254 }
1255
1256 void
1257 pciide_irqack(chp)
1258 struct channel_softc *chp;
1259 {
1260 struct pciide_channel *cp = (struct pciide_channel*)chp;
1261 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1262
1263 /* clear status bits in IDE DMA registers */
1264 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1265 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1266 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1267 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1268 }
1269
1270 /* some common code used by several chip_map */
1271 int
1272 pciide_chansetup(sc, channel, interface)
1273 struct pciide_softc *sc;
1274 int channel;
1275 pcireg_t interface;
1276 {
1277 struct pciide_channel *cp = &sc->pciide_channels[channel];
1278 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1279 cp->name = PCIIDE_CHANNEL_NAME(channel);
1280 cp->wdc_channel.channel = channel;
1281 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1282 cp->wdc_channel.ch_queue =
1283 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1284 if (cp->wdc_channel.ch_queue == NULL) {
1285 printf("%s: %s channel: "
1286 "can't allocate memory for command queue\n",
1287 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1288 return 0;
1289 }
1290 printf("%s: %s channel %s to %s mode\n",
1291 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1292 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1293 "configured" : "wired",
1294 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1295 "native-PCI" : "compatibility");
1296 return 1;
1297 }
1298
1299 /* some common code used by several chip channel_map */
1300 void
1301 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1302 struct pci_attach_args *pa;
1303 struct pciide_channel *cp;
1304 pcireg_t interface;
1305 bus_size_t *cmdsizep, *ctlsizep;
1306 int (*pci_intr) __P((void *));
1307 {
1308 struct channel_softc *wdc_cp = &cp->wdc_channel;
1309
1310 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1311 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1312 pci_intr);
1313 else
1314 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1315 wdc_cp->channel, cmdsizep, ctlsizep);
1316
1317 if (cp->hw_ok == 0)
1318 return;
1319 wdc_cp->data32iot = wdc_cp->cmd_iot;
1320 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1321 wdcattach(wdc_cp);
1322 }
1323
1324 /*
1325 * Generic code to determine whether a channel can be disabled. Returns 1
1326 * if the channel can be disabled, 0 if not.
1327 */
1328 int
1329 pciide_chan_candisable(cp)
1330 struct pciide_channel *cp;
1331 {
1332 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1333 struct channel_softc *wdc_cp = &cp->wdc_channel;
1334
1335 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1336 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1337 printf("%s: disabling %s channel (no drives)\n",
1338 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1339 cp->hw_ok = 0;
1340 return 1;
1341 }
1342 return 0;
1343 }
1344
1345 /*
1346 * Generic code to map the compat interrupt if hw_ok=1 and it is a compat
1347 * channel. Sets hw_ok=0 on failure.
1348 */
1349 void
1350 pciide_map_compat_intr(pa, cp, compatchan, interface)
1351 struct pci_attach_args *pa;
1352 struct pciide_channel *cp;
1353 int compatchan, interface;
1354 {
1355 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1356 struct channel_softc *wdc_cp = &cp->wdc_channel;
1357
1358 if (cp->hw_ok == 0)
1359 return;
1360 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1361 return;
1362
1363 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1364 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1365 pa, compatchan, pciide_compat_intr, cp);
1366 if (cp->ih == NULL) {
1367 #endif
1368 printf("%s: no compatibility interrupt for use by %s "
1369 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1370 cp->hw_ok = 0;
1371 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1372 }
1373 #endif
1374 }
1375
1376 void
1377 pciide_print_modes(cp)
1378 struct pciide_channel *cp;
1379 {
1380 wdc_print_modes(&cp->wdc_channel);
1381 }
1382
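/*
 * Fallback chip_map used when the controller is not specifically known:
 * map each channel (native or compat, as advertised by the interface
 * byte), probe for drives, and enable plain bus-master DMA for unknown
 * chips only if the config file explicitly allows it.
 */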
1383 void
1384 default_chip_map(sc, pa)
1385 struct pciide_softc *sc;
1386 struct pci_attach_args *pa;
1387 {
1388 struct pciide_channel *cp;
1389 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1390 pcireg_t csr;
1391 int channel, drive;
1392 struct ata_drive_datas *drvp;
1393 u_int8_t idedma_ctl;
1394 bus_size_t cmdsize, ctlsize;
1395 char *failreason;
1396
1397 if (pciide_chipen(sc, pa) == 0)
1398 return;
1399
1400 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1401 printf("%s: bus-master DMA support present",
1402 sc->sc_wdcdev.sc_dev.dv_xname);
1403 if (sc->sc_pp == &default_product_desc &&
1404 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1405 PCIIDE_OPTIONS_DMA) == 0) {
1406 printf(", but unused (no driver support)");
1407 sc->sc_dma_ok = 0;
1408 } else {
1409 pciide_mapreg_dma(sc, pa);
1410 if (sc->sc_dma_ok != 0)
1411 printf(", used without full driver "
1412 "support");
1413 }
1414 } else {
1415 printf("%s: hardware does not support DMA",
1416 sc->sc_wdcdev.sc_dev.dv_xname);
1417 sc->sc_dma_ok = 0;
1418 }
1419 printf("\n");
1420 if (sc->sc_dma_ok) {
1421 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1422 sc->sc_wdcdev.irqack = pciide_irqack;
1423 }
1424 sc->sc_wdcdev.PIO_cap = 0;
1425 sc->sc_wdcdev.DMA_cap = 0;
1426
1427 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1428 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1429 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1430
1431 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1432 cp = &sc->pciide_channels[channel];
1433 if (pciide_chansetup(sc, channel, interface) == 0)
1434 continue;
1435 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1436 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1437 &ctlsize, pciide_pci_intr);
1438 } else {
1439 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1440 channel, &cmdsize, &ctlsize);
1441 }
1442 if (cp->hw_ok == 0)
1443 continue;
1444 /*
1445 * Check to see if something appears to be there.
1446 */
1447 failreason = NULL;
1448 if (!wdcprobe(&cp->wdc_channel)) {
1449 failreason = "not responding; disabled or no drives?";
1450 goto next;
1451 }
1452 /*
1453 * Now, make sure it's actually attributable to this PCI IDE
1454 * channel by trying to access the channel again while the
1455 * PCI IDE controller's I/O space is disabled. (If the
1456 * channel no longer appears to be there, it belongs to
1457 * this controller.) YUCK!
1458 */
1459 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1460 PCI_COMMAND_STATUS_REG);
1461 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1462 csr & ~PCI_COMMAND_IO_ENABLE);
1463 if (wdcprobe(&cp->wdc_channel))
1464 failreason = "other hardware responding at addresses";
1465 pci_conf_write(sc->sc_pc, sc->sc_tag,
1466 PCI_COMMAND_STATUS_REG, csr);
1467 next:
1468 if (failreason) {
1469 printf("%s: %s channel ignored (%s)\n",
1470 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1471 failreason);
1472 cp->hw_ok = 0;
1473 bus_space_unmap(cp->wdc_channel.cmd_iot,
1474 cp->wdc_channel.cmd_ioh, cmdsize);
1475 if (interface & PCIIDE_INTERFACE_PCI(channel))
1476 bus_space_unmap(cp->wdc_channel.ctl_iot,
1477 cp->ctl_baseioh, ctlsize);
1478 else
1479 bus_space_unmap(cp->wdc_channel.ctl_iot,
1480 cp->wdc_channel.ctl_ioh, ctlsize);
1481 } else {
1482 pciide_map_compat_intr(pa, cp, channel, interface);
1483 }
1484 if (cp->hw_ok) {
1485 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1486 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1487 wdcattach(&cp->wdc_channel);
1488 }
1489 }
1490
1491 if (sc->sc_dma_ok == 0)
1492 return;
1493
1494 /* Allocate DMA maps */
1495 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1496 idedma_ctl = 0;
1497 cp = &sc->pciide_channels[channel];
1498 for (drive = 0; drive < 2; drive++) {
1499 drvp = &cp->wdc_channel.ch_drive[drive];
1500 /* If no drive, skip */
1501 if ((drvp->drive_flags & DRIVE) == 0)
1502 continue;
1503 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1504 continue;
1505 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1506 /* Abort DMA setup */
1507 printf("%s:%d:%d: can't allocate DMA maps, "
1508 "using PIO transfers\n",
1509 sc->sc_wdcdev.sc_dev.dv_xname,
1510 channel, drive);
1511 drvp->drive_flags &= ~DRIVE_DMA;
     continue;
1512 }
1513 printf("%s:%d:%d: using DMA data transfers\n",
1514 sc->sc_wdcdev.sc_dev.dv_xname,
1515 channel, drive);
1516 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1517 }
1518 if (idedma_ctl != 0) {
1519 /* Add software bits in status register */
1520 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1521 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1522 idedma_ctl);
1523 }
1524 }
1525 }
1526
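/*
 * Intel PIIX/ICH chip_map: the channels are always in compatibility
 * mode, so the interface argument is forced to 0.  DMA/UDMA
 * capabilities are keyed off the product ID (UDMA2 by default, UDMA4
 * for the 82801AA/ICH, UDMA5 for ICH2 and later), and per-channel
 * timings are programmed through the IDETIM/SIDETIM/UDMAREG/CONFIG
 * registers by the set_modes hooks below.
 */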
1527 void
1528 piix_chip_map(sc, pa)
1529 struct pciide_softc *sc;
1530 struct pci_attach_args *pa;
1531 {
1532 struct pciide_channel *cp;
1533 int channel;
1534 u_int32_t idetim;
1535 bus_size_t cmdsize, ctlsize;
1536
1537 if (pciide_chipen(sc, pa) == 0)
1538 return;
1539
1540 printf("%s: bus-master DMA support present",
1541 sc->sc_wdcdev.sc_dev.dv_xname);
1542 pciide_mapreg_dma(sc, pa);
1543 printf("\n");
1544 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1545 WDC_CAPABILITY_MODE;
1546 if (sc->sc_dma_ok) {
1547 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1548 sc->sc_wdcdev.irqack = pciide_irqack;
1549 switch(sc->sc_pp->ide_product) {
1550 case PCI_PRODUCT_INTEL_82371AB_IDE:
1551 case PCI_PRODUCT_INTEL_82440MX_IDE:
1552 case PCI_PRODUCT_INTEL_82801AA_IDE:
1553 case PCI_PRODUCT_INTEL_82801AB_IDE:
1554 case PCI_PRODUCT_INTEL_82801BA_IDE:
1555 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1556 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1557 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1558 case PCI_PRODUCT_INTEL_82801DB_IDE:
1559 case PCI_PRODUCT_INTEL_82801EB_IDE:
1560 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1561 }
1562 }
1563 sc->sc_wdcdev.PIO_cap = 4;
1564 sc->sc_wdcdev.DMA_cap = 2;
1565 switch(sc->sc_pp->ide_product) {
1566 case PCI_PRODUCT_INTEL_82801AA_IDE:
1567 sc->sc_wdcdev.UDMA_cap = 4;
1568 break;
1569 case PCI_PRODUCT_INTEL_82801BA_IDE:
1570 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1571 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1572 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1573 case PCI_PRODUCT_INTEL_82801DB_IDE:
1574 case PCI_PRODUCT_INTEL_82801EB_IDE:
1575 sc->sc_wdcdev.UDMA_cap = 5;
1576 break;
1577 default:
1578 sc->sc_wdcdev.UDMA_cap = 2;
1579 }
1580 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1581 sc->sc_wdcdev.set_modes = piix_setup_channel;
1582 else
1583 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1584 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1585 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1586
1587 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1588 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1589 DEBUG_PROBE);
1590 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1591 WDCDEBUG_PRINT((", sidetim=0x%x",
1592 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1593 DEBUG_PROBE);
1594 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1595 WDCDEBUG_PRINT((", udmareg 0x%x",
1596 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1597 DEBUG_PROBE);
1598 }
1599 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1600 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1601 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1602 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1603 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1604 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1605 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1606 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1607 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1608 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1609 DEBUG_PROBE);
1610 }
1611
1612 }
1613 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1614
1615 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1616 cp = &sc->pciide_channels[channel];
1617 /* PIIX is compat-only */
1618 if (pciide_chansetup(sc, channel, 0) == 0)
1619 continue;
1620 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1621 if ((PIIX_IDETIM_READ(idetim, channel) &
1622 PIIX_IDETIM_IDE) == 0) {
1623 printf("%s: %s channel ignored (disabled)\n",
1624 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1625 continue;
1626 }
1627 /* PIIX are compat-only pciide devices */
1628 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1629 if (cp->hw_ok == 0)
1630 continue;
1631 if (pciide_chan_candisable(cp)) {
1632 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1633 channel);
1634 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1635 idetim);
1636 }
1637 pciide_map_compat_intr(pa, cp, channel, 0);
1638 if (cp->hw_ok == 0)
1639 continue;
1640 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1641 }
1642
1643 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1644 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1645 DEBUG_PROBE);
1646 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1647 WDCDEBUG_PRINT((", sidetim=0x%x",
1648 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1649 DEBUG_PROBE);
1650 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1651 WDCDEBUG_PRINT((", udmareg 0x%x",
1652 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1653 DEBUG_PROBE);
1654 }
1655 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1656 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1657 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1658 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1659 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1660 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1661 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1662 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1663 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1664 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1665 DEBUG_PROBE);
1666 }
1667 }
1668 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1669 }
1670
1671 void
1672 piix_setup_channel(chp)
1673 struct channel_softc *chp;
1674 {
1675 u_int8_t mode[2], drive;
1676 u_int32_t oidetim, idetim, idedma_ctl;
1677 struct pciide_channel *cp = (struct pciide_channel*)chp;
1678 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1679 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1680
1681 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1682 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1683 idedma_ctl = 0;
1684
1685 /* set up new idetim: Enable IDE registers decode */
1686 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1687 chp->channel);
1688
1689 /* setup DMA */
1690 pciide_channel_dma_setup(cp);
1691
1692 /*
1693 * Here we have to juggle the drive modes: the PIIX can't have
1694 * different timings for master and slave drives.
1695 * We need to find the best combination.
1696 */
1697
1698 /* If both drives support DMA, take the lower mode */
1699 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1700 (drvp[1].drive_flags & DRIVE_DMA)) {
1701 mode[0] = mode[1] =
1702 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1703 drvp[0].DMA_mode = mode[0];
1704 drvp[1].DMA_mode = mode[1];
1705 goto ok;
1706 }
1707 /*
1708 * If only one drive supports DMA, use its mode, and
1709 * put the other one in PIO mode 0 if its mode is not compatible
1710 */
1711 if (drvp[0].drive_flags & DRIVE_DMA) {
1712 mode[0] = drvp[0].DMA_mode;
1713 mode[1] = drvp[1].PIO_mode;
1714 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1715 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1716 mode[1] = drvp[1].PIO_mode = 0;
1717 goto ok;
1718 }
1719 if (drvp[1].drive_flags & DRIVE_DMA) {
1720 mode[1] = drvp[1].DMA_mode;
1721 mode[0] = drvp[0].PIO_mode;
1722 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1723 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1724 mode[0] = drvp[0].PIO_mode = 0;
1725 goto ok;
1726 }
1727 /*
1728 * If neither drive uses DMA, take the lower mode, unless
1729 * one of them is in PIO mode < 2
1730 */
1731 if (drvp[0].PIO_mode < 2) {
1732 mode[0] = drvp[0].PIO_mode = 0;
1733 mode[1] = drvp[1].PIO_mode;
1734 } else if (drvp[1].PIO_mode < 2) {
1735 mode[1] = drvp[1].PIO_mode = 0;
1736 mode[0] = drvp[0].PIO_mode;
1737 } else {
1738 mode[0] = mode[1] =
1739 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1740 drvp[0].PIO_mode = mode[0];
1741 drvp[1].PIO_mode = mode[1];
1742 }
1743 ok: /* The modes are setup */
1744 for (drive = 0; drive < 2; drive++) {
1745 if (drvp[drive].drive_flags & DRIVE_DMA) {
1746 idetim |= piix_setup_idetim_timings(
1747 mode[drive], 1, chp->channel);
1748 goto end;
1749 }
1750 }
1751 /* If we got here, neither drive uses DMA */
1752 if (mode[0] >= 2)
1753 idetim |= piix_setup_idetim_timings(
1754 mode[0], 0, chp->channel);
1755 else
1756 idetim |= piix_setup_idetim_timings(
1757 mode[1], 0, chp->channel);
1758 end: /*
1759 * timing mode is now set up in the controller. Enable
1760 * it per-drive
1761 */
1762 for (drive = 0; drive < 2; drive++) {
1763 /* If no drive, skip */
1764 if ((drvp[drive].drive_flags & DRIVE) == 0)
1765 continue;
1766 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1767 if (drvp[drive].drive_flags & DRIVE_DMA)
1768 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1769 }
1770 if (idedma_ctl != 0) {
1771 /* Add software bits in status register */
1772 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1773 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1774 idedma_ctl);
1775 }
1776 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1777 pciide_print_modes(cp);
1778 }
1779
1780 void
1781 piix3_4_setup_channel(chp)
1782 struct channel_softc *chp;
1783 {
1784 struct ata_drive_datas *drvp;
1785 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1786 struct pciide_channel *cp = (struct pciide_channel*)chp;
1787 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1788 int drive;
1789 int channel = chp->channel;
1790
1791 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1792 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1793 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1794 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1795 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1796 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1797 PIIX_SIDETIM_RTC_MASK(channel));
1798
1799 idedma_ctl = 0;
1800 /* If channel disabled, no need to go further */
1801 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1802 return;
1803 /* set up new idetim: Enable IDE registers decode */
1804 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1805
1806 /* setup DMA if needed */
1807 pciide_channel_dma_setup(cp);
1808
1809 for (drive = 0; drive < 2; drive++) {
1810 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1811 PIIX_UDMATIM_SET(0x3, channel, drive));
1812 drvp = &chp->ch_drive[drive];
1813 /* If no drive, skip */
1814 if ((drvp->drive_flags & DRIVE) == 0)
1815 continue;
1816 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1817 (drvp->drive_flags & DRIVE_UDMA) == 0))
1818 goto pio;
1819
1820 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1821 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1822 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1823 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1824 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1825 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1826 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1827 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1828 ideconf |= PIIX_CONFIG_PINGPONG;
1829 }
1830 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1831 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1832 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1833 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1834 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1835 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1836 /* setup Ultra/100 */
1837 if (drvp->UDMA_mode > 2 &&
1838 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1839 drvp->UDMA_mode = 2;
1840 if (drvp->UDMA_mode > 4) {
1841 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1842 } else {
1843 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1844 if (drvp->UDMA_mode > 2) {
1845 ideconf |= PIIX_CONFIG_UDMA66(channel,
1846 drive);
1847 } else {
1848 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1849 drive);
1850 }
1851 }
1852 }
1853 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1854 /* setup Ultra/66 */
1855 if (drvp->UDMA_mode > 2 &&
1856 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1857 drvp->UDMA_mode = 2;
1858 if (drvp->UDMA_mode > 2)
1859 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1860 else
1861 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1862 }
1863 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1864 (drvp->drive_flags & DRIVE_UDMA)) {
1865 /* use Ultra/DMA */
1866 drvp->drive_flags &= ~DRIVE_DMA;
1867 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1868 udmareg |= PIIX_UDMATIM_SET(
1869 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1870 } else {
1871 /* use Multiword DMA */
1872 drvp->drive_flags &= ~DRIVE_UDMA;
1873 if (drive == 0) {
1874 idetim |= piix_setup_idetim_timings(
1875 drvp->DMA_mode, 1, channel);
1876 } else {
1877 sidetim |= piix_setup_sidetim_timings(
1878 drvp->DMA_mode, 1, channel);
1879 idetim = PIIX_IDETIM_SET(idetim,
1880 PIIX_IDETIM_SITRE, channel);
1881 }
1882 }
1883 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1884
1885 pio: /* use PIO mode */
1886 idetim |= piix_setup_idetim_drvs(drvp);
1887 if (drive == 0) {
1888 idetim |= piix_setup_idetim_timings(
1889 drvp->PIO_mode, 0, channel);
1890 } else {
1891 sidetim |= piix_setup_sidetim_timings(
1892 drvp->PIO_mode, 0, channel);
1893 idetim = PIIX_IDETIM_SET(idetim,
1894 PIIX_IDETIM_SITRE, channel);
1895 }
1896 }
1897 if (idedma_ctl != 0) {
1898 /* Add software bits in status register */
1899 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1900 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1901 idedma_ctl);
1902 }
1903 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1904 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1905 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1906 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1907 pciide_print_modes(cp);
1908 }
1909
1910
1911 /* setup ISP and RTC fields, based on mode */
1912 static u_int32_t
1913 piix_setup_idetim_timings(mode, dma, channel)
1914 u_int8_t mode;
1915 u_int8_t dma;
1916 u_int8_t channel;
1917 {
1918
1919 if (dma)
1920 return PIIX_IDETIM_SET(0,
1921 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1922 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1923 channel);
1924 else
1925 return PIIX_IDETIM_SET(0,
1926 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1927 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1928 channel);
1929 }
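/*
 * Illustrative sketch (not compiled in): what the PIIX_IDETIM_CLEAR/SET
 * macros used above are assumed to expand to. The IDETIM configuration
 * register holds one 16-bit timing word per channel, so the helpers
 * just shift the given bits into the right half of the 32-bit value.
 * The EX_* names are hypothetical; the real macros live in the
 * chip-specific register header, which is not shown here.
 */
#if 0
#define EX_IDETIM_SHIFT(chan)	((chan) * 16)
#define EX_IDETIM_CLEAR(reg, bits, chan) \
	((reg) & ~((u_int32_t)(bits) << EX_IDETIM_SHIFT(chan)))
#define EX_IDETIM_SET(reg, bits, chan) \
	((reg) | ((u_int32_t)(bits) << EX_IDETIM_SHIFT(chan)))
#endif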
1930
1931 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1932 static u_int32_t
1933 piix_setup_idetim_drvs(drvp)
1934 struct ata_drive_datas *drvp;
1935 {
1936 u_int32_t ret = 0;
1937 struct channel_softc *chp = drvp->chnl_softc;
1938 u_int8_t channel = chp->channel;
1939 u_int8_t drive = drvp->drive;
1940
1941 /*
1942 * If the drive is using UDMA, the timing setup is independent,
1943 * so just check DMA and PIO here.
1944 */
1945 if (drvp->drive_flags & DRIVE_DMA) {
1946 /* if mode = DMA mode 0, use compatible timings */
1947 if ((drvp->drive_flags & DRIVE_DMA) &&
1948 drvp->DMA_mode == 0) {
1949 drvp->PIO_mode = 0;
1950 return ret;
1951 }
1952 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1953 /*
1954 * If the PIO and DMA timings are the same, keep the fast
1955 * timings for PIO too; otherwise fall back to compat timings.
1956 */
1957 if ((piix_isp_pio[drvp->PIO_mode] !=
1958 piix_isp_dma[drvp->DMA_mode]) ||
1959 (piix_rtc_pio[drvp->PIO_mode] !=
1960 piix_rtc_dma[drvp->DMA_mode]))
1961 drvp->PIO_mode = 0;
1962 /* if PIO mode <= 2, use compat timings for PIO */
1963 if (drvp->PIO_mode <= 2) {
1964 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1965 channel);
1966 return ret;
1967 }
1968 }
1969
1970 /*
1971 * Now set up the PIO mode. If the mode is < 2, use compat timings.
1972 * Otherwise enable fast timings, and also enable IORDY and
1973 * prefetch/post if the PIO mode is >= 3.
1974 */
1975
1976 if (drvp->PIO_mode < 2)
1977 return ret;
1978
1979 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1980 if (drvp->PIO_mode >= 3) {
1981 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1982 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1983 }
1984 return ret;
1985 }
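/*
 * Illustrative sketch (not compiled in): the per-drive control bits
 * that piix_setup_idetim_drvs() above ends up selecting for a drive
 * doing PIO only. PIO 0/1 keep the compatible timings, PIO 2 enables
 * the fast timing bank (TIME), and PIO 3/4 additionally enable IORDY
 * sampling (IE) and prefetch/posting (PPE). The EX_* names and the
 * helper are hypothetical.
 */
#if 0
enum { EX_NONE = 0, EX_TIME = 1, EX_IE = 2, EX_PPE = 4 };

static int
example_piix_pio_bits(int pio_mode)
{

	if (pio_mode < 2)
		return EX_NONE;			/* compatible timings */
	if (pio_mode == 2)
		return EX_TIME;			/* fast timings only */
	return EX_TIME | EX_IE | EX_PPE;	/* PIO 3/4 */
}
#endif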
1986
1987 /* setup values in SIDETIM registers, based on mode */
1988 static u_int32_t
1989 piix_setup_sidetim_timings(mode, dma, channel)
1990 u_int8_t mode;
1991 u_int8_t dma;
1992 u_int8_t channel;
1993 {
1994 if (dma)
1995 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1996 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1997 else
1998 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1999 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2000 }
2001
2002 void
2003 amd7x6_chip_map(sc, pa)
2004 struct pciide_softc *sc;
2005 struct pci_attach_args *pa;
2006 {
2007 struct pciide_channel *cp;
2008 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2009 int channel;
2010 pcireg_t chanenable;
2011 bus_size_t cmdsize, ctlsize;
2012
2013 if (pciide_chipen(sc, pa) == 0)
2014 return;
2015 printf("%s: bus-master DMA support present",
2016 sc->sc_wdcdev.sc_dev.dv_xname);
2017 pciide_mapreg_dma(sc, pa);
2018 printf("\n");
2019 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2020 WDC_CAPABILITY_MODE;
2021 if (sc->sc_dma_ok) {
2022 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2023 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2024 sc->sc_wdcdev.irqack = pciide_irqack;
2025 }
2026 sc->sc_wdcdev.PIO_cap = 4;
2027 sc->sc_wdcdev.DMA_cap = 2;
2028
2029 switch (sc->sc_pp->ide_product) {
2030 case PCI_PRODUCT_AMD_PBC766_IDE:
2031 case PCI_PRODUCT_AMD_PBC768_IDE:
2032 sc->sc_wdcdev.UDMA_cap = 5;
2033 break;
2034 default:
2035 sc->sc_wdcdev.UDMA_cap = 4;
2036 }
2037 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2038 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2039 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2040 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
2041
2042 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2043 DEBUG_PROBE);
2044 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2045 cp = &sc->pciide_channels[channel];
2046 if (pciide_chansetup(sc, channel, interface) == 0)
2047 continue;
2048
2049 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2050 printf("%s: %s channel ignored (disabled)\n",
2051 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2052 continue;
2053 }
2054 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2055 pciide_pci_intr);
2056
2057 if (pciide_chan_candisable(cp))
2058 chanenable &= ~AMD7X6_CHAN_EN(channel);
2059 pciide_map_compat_intr(pa, cp, channel, interface);
2060 if (cp->hw_ok == 0)
2061 continue;
2062
2063 amd7x6_setup_channel(&cp->wdc_channel);
2064 }
2065 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2066 chanenable);
2067 return;
2068 }
2069
2070 void
2071 amd7x6_setup_channel(chp)
2072 struct channel_softc *chp;
2073 {
2074 u_int32_t udmatim_reg, datatim_reg;
2075 u_int8_t idedma_ctl;
2076 int mode, drive;
2077 struct ata_drive_datas *drvp;
2078 struct pciide_channel *cp = (struct pciide_channel*)chp;
2079 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2080 #ifndef PCIIDE_AMD756_ENABLEDMA
2081 int rev = PCI_REVISION(
2082 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2083 #endif
2084
2085 idedma_ctl = 0;
2086 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2087 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2088 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2089 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2090
2091 /* setup DMA if needed */
2092 pciide_channel_dma_setup(cp);
2093
2094 for (drive = 0; drive < 2; drive++) {
2095 drvp = &chp->ch_drive[drive];
2096 /* If no drive, skip */
2097 if ((drvp->drive_flags & DRIVE) == 0)
2098 continue;
2099 /* add timing values, setup DMA if needed */
2100 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2101 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2102 mode = drvp->PIO_mode;
2103 goto pio;
2104 }
2105 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2106 (drvp->drive_flags & DRIVE_UDMA)) {
2107 /* use Ultra/DMA */
2108 drvp->drive_flags &= ~DRIVE_DMA;
2109 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2110 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2111 AMD7X6_UDMA_TIME(chp->channel, drive,
2112 amd7x6_udma_tim[drvp->UDMA_mode]);
2113 /* can use PIO timings, MW DMA unused */
2114 mode = drvp->PIO_mode;
2115 } else {
2116 /* use Multiword DMA, but only if revision is OK */
2117 drvp->drive_flags &= ~DRIVE_UDMA;
2118 #ifndef PCIIDE_AMD756_ENABLEDMA
2119 /*
2120 * The chip bug doesn't seem to trigger with all drives, so
2121 * the workaround can be disabled by defining
2122 * PCIIDE_AMD756_ENABLEDMA. If the bug does trigger, it
2123 * causes a hard hang.
2124 */
2125 if (sc->sc_pp->ide_product ==
2126 PCI_PRODUCT_AMD_PBC756_IDE &&
2127 AMD756_CHIPREV_DISABLEDMA(rev)) {
2128 printf("%s:%d:%d: multi-word DMA disabled due "
2129 "to chip revision\n",
2130 sc->sc_wdcdev.sc_dev.dv_xname,
2131 chp->channel, drive);
2132 mode = drvp->PIO_mode;
2133 drvp->drive_flags &= ~DRIVE_DMA;
2134 goto pio;
2135 }
2136 #endif
2137 /* mode = min(pio, dma+2) */
2138 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2139 mode = drvp->PIO_mode;
2140 else
2141 mode = drvp->DMA_mode + 2;
2142 }
2143 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2144
2145 pio: /* setup PIO mode */
2146 if (mode <= 2) {
2147 drvp->DMA_mode = 0;
2148 drvp->PIO_mode = 0;
2149 mode = 0;
2150 } else {
2151 drvp->PIO_mode = mode;
2152 drvp->DMA_mode = mode - 2;
2153 }
2154 datatim_reg |=
2155 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2156 amd7x6_pio_set[mode]) |
2157 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2158 amd7x6_pio_rec[mode]);
2159 }
2160 if (idedma_ctl != 0) {
2161 /* Add software bits in status register */
2162 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2163 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2164 idedma_ctl);
2165 }
2166 pciide_print_modes(cp);
2167 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2168 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2169 }
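/*
 * Illustrative sketch (not compiled in): the PIO/multiword-DMA pairing
 * rule used by amd7x6_setup_channel() above (and by
 * apollo_setup_channel() below). A single timing value serves both
 * transfer types, so the effective mode is min(PIO, DMA + 2), and
 * anything at or below mode 2 falls back to the compatible mode-0
 * timings. The function name is hypothetical.
 */
#if 0
static int
example_effective_mode(int pio_mode, int dma_mode)
{
	int mode;

	/* mode = min(pio, dma + 2): e.g. PIO 4 + MW DMA 1 gives mode 3 */
	mode = (pio_mode <= dma_mode + 2) ? pio_mode : dma_mode + 2;
	/* modes 0-2 share the compatible (mode 0) timings */
	return (mode <= 2) ? 0 : mode;
}
#endif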
2170
2171 void
2172 apollo_chip_map(sc, pa)
2173 struct pciide_softc *sc;
2174 struct pci_attach_args *pa;
2175 {
2176 struct pciide_channel *cp;
2177 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2178 int channel;
2179 u_int32_t ideconf;
2180 bus_size_t cmdsize, ctlsize;
2181 pcitag_t pcib_tag;
2182 pcireg_t pcib_id, pcib_class;
2183
2184 if (pciide_chipen(sc, pa) == 0)
2185 return;
2186 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2187 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2188 /* and read ID and rev of the ISA bridge */
2189 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2190 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2191 printf(": VIA Technologies ");
2192 switch (PCI_PRODUCT(pcib_id)) {
2193 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2194 printf("VT82C586 (Apollo VP) ");
2195 if(PCI_REVISION(pcib_class) >= 0x02) {
2196 printf("ATA33 controller\n");
2197 sc->sc_wdcdev.UDMA_cap = 2;
2198 } else {
2199 printf("controller\n");
2200 sc->sc_wdcdev.UDMA_cap = 0;
2201 }
2202 break;
2203 case PCI_PRODUCT_VIATECH_VT82C596A:
2204 printf("VT82C596A (Apollo Pro) ");
2205 if (PCI_REVISION(pcib_class) >= 0x12) {
2206 printf("ATA66 controller\n");
2207 sc->sc_wdcdev.UDMA_cap = 4;
2208 } else {
2209 printf("ATA33 controller\n");
2210 sc->sc_wdcdev.UDMA_cap = 2;
2211 }
2212 break;
2213 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2214 printf("VT82C686A (Apollo KX133) ");
2215 if (PCI_REVISION(pcib_class) >= 0x40) {
2216 printf("ATA100 controller\n");
2217 sc->sc_wdcdev.UDMA_cap = 5;
2218 } else {
2219 printf("ATA66 controller\n");
2220 sc->sc_wdcdev.UDMA_cap = 4;
2221 }
2222 break;
2223 case PCI_PRODUCT_VIATECH_VT8231:
2224 printf("VT8231 ATA100 controller\n");
2225 sc->sc_wdcdev.UDMA_cap = 5;
2226 break;
2227 case PCI_PRODUCT_VIATECH_VT8233:
2228 printf("VT8233 ATA100 controller\n");
2229 sc->sc_wdcdev.UDMA_cap = 5;
2230 break;
2231 case PCI_PRODUCT_VIATECH_VT8233A:
2232 printf("VT8233A ATA133 controller\n");
2233 sc->sc_wdcdev.UDMA_cap = 6;
2234 break;
2235 case PCI_PRODUCT_VIATECH_VT8235:
2236 printf("VT8235 ATA133 controller\n");
2237 sc->sc_wdcdev.UDMA_cap = 6;
2238 break;
2239 case PCI_PRODUCT_VIATECH_VT8237_RAID:
2240 printf("VT8237 ATA133 controller\n");
2241 sc->sc_wdcdev.UDMA_cap = 6;
2242 break;
2243 default:
2244 printf("unknown ATA controller\n");
2245 sc->sc_wdcdev.UDMA_cap = 0;
2246 }
2247
2248 printf("%s: bus-master DMA support present",
2249 sc->sc_wdcdev.sc_dev.dv_xname);
2250 pciide_mapreg_dma(sc, pa);
2251 printf("\n");
2252 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2253 WDC_CAPABILITY_MODE;
2254 if (sc->sc_dma_ok) {
2255 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2256 sc->sc_wdcdev.irqack = pciide_irqack;
2257 if (sc->sc_wdcdev.UDMA_cap > 0)
2258 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2259 }
2260 sc->sc_wdcdev.PIO_cap = 4;
2261 sc->sc_wdcdev.DMA_cap = 2;
2262 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2263 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2264 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2265
2266 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2267 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2268 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2269 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2270 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2271 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2272 DEBUG_PROBE);
2273
2274 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2275 cp = &sc->pciide_channels[channel];
2276 if (pciide_chansetup(sc, channel, interface) == 0)
2277 continue;
2278
2279 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2280 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2281 printf("%s: %s channel ignored (disabled)\n",
2282 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2283 continue;
2284 }
2285 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2286 pciide_pci_intr);
2287 if (cp->hw_ok == 0)
2288 continue;
2289 if (pciide_chan_candisable(cp)) {
2290 ideconf &= ~APO_IDECONF_EN(channel);
2291 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2292 ideconf);
2293 }
2294 pciide_map_compat_intr(pa, cp, channel, interface);
2295
2296 if (cp->hw_ok == 0)
2297 continue;
2298 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2299 }
2300 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2301 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2302 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2303 }
2304
2305 void
2306 apollo_setup_channel(chp)
2307 struct channel_softc *chp;
2308 {
2309 u_int32_t udmatim_reg, datatim_reg;
2310 u_int8_t idedma_ctl;
2311 int mode, drive;
2312 struct ata_drive_datas *drvp;
2313 struct pciide_channel *cp = (struct pciide_channel*)chp;
2314 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2315
2316 idedma_ctl = 0;
2317 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2318 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2319 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2320 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2321
2322 /* setup DMA if needed */
2323 pciide_channel_dma_setup(cp);
2324
2325 for (drive = 0; drive < 2; drive++) {
2326 drvp = &chp->ch_drive[drive];
2327 /* If no drive, skip */
2328 if ((drvp->drive_flags & DRIVE) == 0)
2329 continue;
2330 /* add timing values, setup DMA if needed */
2331 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2332 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2333 mode = drvp->PIO_mode;
2334 goto pio;
2335 }
2336 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2337 (drvp->drive_flags & DRIVE_UDMA)) {
2338 /* use Ultra/DMA */
2339 drvp->drive_flags &= ~DRIVE_DMA;
2340 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2341 APO_UDMA_EN_MTH(chp->channel, drive);
2342 if (sc->sc_wdcdev.UDMA_cap == 6) {
2343 /* 8233a */
2344 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2345 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2346 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2347 /* 686b */
2348 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2349 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2350 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2351 /* 596b or 686a */
2352 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2353 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2354 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2355 } else {
2356 /* 596a or 586b */
2357 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2358 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2359 }
2360 /* can use PIO timings, MW DMA unused */
2361 mode = drvp->PIO_mode;
2362 } else {
2363 /* use Multiword DMA */
2364 drvp->drive_flags &= ~DRIVE_UDMA;
2365 /* mode = min(pio, dma+2) */
2366 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2367 mode = drvp->PIO_mode;
2368 else
2369 mode = drvp->DMA_mode + 2;
2370 }
2371 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2372
2373 pio: /* setup PIO mode */
2374 if (mode <= 2) {
2375 drvp->DMA_mode = 0;
2376 drvp->PIO_mode = 0;
2377 mode = 0;
2378 } else {
2379 drvp->PIO_mode = mode;
2380 drvp->DMA_mode = mode - 2;
2381 }
2382 datatim_reg |=
2383 APO_DATATIM_PULSE(chp->channel, drive,
2384 apollo_pio_set[mode]) |
2385 APO_DATATIM_RECOV(chp->channel, drive,
2386 apollo_pio_rec[mode]);
2387 }
2388 if (idedma_ctl != 0) {
2389 /* Add software bits in status register */
2390 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2391 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2392 idedma_ctl);
2393 }
2394 pciide_print_modes(cp);
2395 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2396 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2397 }
2398
2399 void
2400 cmd_channel_map(pa, sc, channel)
2401 struct pci_attach_args *pa;
2402 struct pciide_softc *sc;
2403 int channel;
2404 {
2405 struct pciide_channel *cp = &sc->pciide_channels[channel];
2406 bus_size_t cmdsize, ctlsize;
2407 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2408 int interface, one_channel;
2409
2410 /*
2411 * The 0648/0649 can be told to identify as a RAID controller.
2412 * In this case, we have to fake the interface value.
2413 */
2414 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2415 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2416 PCIIDE_INTERFACE_SETTABLE(1);
2417 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2418 CMD_CONF_DSA1)
2419 interface |= PCIIDE_INTERFACE_PCI(0) |
2420 PCIIDE_INTERFACE_PCI(1);
2421 } else {
2422 interface = PCI_INTERFACE(pa->pa_class);
2423 }
2424
2425 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2426 cp->name = PCIIDE_CHANNEL_NAME(channel);
2427 cp->wdc_channel.channel = channel;
2428 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2429
2430 /*
2431 * Older CMD64x controllers don't have independent channels
2432 */
2433 switch (sc->sc_pp->ide_product) {
2434 case PCI_PRODUCT_CMDTECH_649:
2435 one_channel = 0;
2436 break;
2437 default:
2438 one_channel = 1;
2439 break;
2440 }
2441
2442 if (channel > 0 && one_channel) {
2443 cp->wdc_channel.ch_queue =
2444 sc->pciide_channels[0].wdc_channel.ch_queue;
2445 } else {
2446 cp->wdc_channel.ch_queue =
2447 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2448 }
2449 if (cp->wdc_channel.ch_queue == NULL) {
2450 printf("%s %s channel: "
2451 "can't allocate memory for command queue",
2452 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2453 return;
2454 }
2455
2456 printf("%s: %s channel %s to %s mode\n",
2457 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2458 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2459 "configured" : "wired",
2460 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2461 "native-PCI" : "compatibility");
2462
2463 /*
2464 * With a CMD PCI64x, if we get here the first channel is enabled:
2465 * there's no way to disable the first channel without disabling
2466 * the whole device.
2467 */
2468 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2469 printf("%s: %s channel ignored (disabled)\n",
2470 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2471 return;
2472 }
2473
2474 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2475 if (cp->hw_ok == 0)
2476 return;
2477 if (channel == 1) {
2478 if (pciide_chan_candisable(cp)) {
2479 ctrl &= ~CMD_CTRL_2PORT;
2480 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2481 CMD_CTRL, ctrl);
2482 }
2483 }
2484 pciide_map_compat_intr(pa, cp, channel, interface);
2485 }
2486
2487 int
2488 cmd_pci_intr(arg)
2489 void *arg;
2490 {
2491 struct pciide_softc *sc = arg;
2492 struct pciide_channel *cp;
2493 struct channel_softc *wdc_cp;
2494 int i, rv, crv;
2495 u_int32_t priirq, secirq;
2496
2497 rv = 0;
2498 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2499 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2500 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2501 cp = &sc->pciide_channels[i];
2502 wdc_cp = &cp->wdc_channel;
2503 /* If this is a compat channel, skip it. */
2504 if (cp->compat)
2505 continue;
2506 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2507 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2508 crv = wdcintr(wdc_cp);
2509 if (crv == 0)
2510 printf("%s:%d: bogus intr\n",
2511 sc->sc_wdcdev.sc_dev.dv_xname, i);
2512 else
2513 rv = 1;
2514 }
2515 }
2516 return rv;
2517 }
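/*
 * Illustrative sketch (not compiled in): the dispatch pattern used by
 * cmd_pci_intr() above. A shared PCI interrupt handler looks at a
 * per-channel "interrupt pending" indication and only calls wdcintr()
 * for channels that really asserted the interrupt, so spurious calls
 * can be reported as bogus interrupts. chan_intr_pending() is a
 * hypothetical stand-in for the chip-specific register test.
 */
#if 0
static int
example_shared_intr(struct pciide_softc *sc)
{
	int i, handled = 0;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		/* compat channels are served by their own interrupt */
		if (sc->pciide_channels[i].compat)
			continue;
		if (chan_intr_pending(sc, i))
			handled |= wdcintr(&sc->pciide_channels[i].wdc_channel);
	}
	return handled;
}
#endif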
2518
2519 void
2520 cmd_chip_map(sc, pa)
2521 struct pciide_softc *sc;
2522 struct pci_attach_args *pa;
2523 {
2524 int channel;
2525
2526 /*
2527 * On a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2528 * and of the base address registers can be disabled at
2529 * the hardware level. In this case, the device is wired
2530 * in compat mode and its first channel is always enabled,
2531 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2532 * In fact, it seems that the first channel of the CMD PCI0640
2533 * can't be disabled at all.
2534 */
2535
2536 #ifdef PCIIDE_CMD064x_DISABLE
2537 if (pciide_chipen(sc, pa) == 0)
2538 return;
2539 #endif
2540
2541 printf("%s: hardware does not support DMA\n",
2542 sc->sc_wdcdev.sc_dev.dv_xname);
2543 sc->sc_dma_ok = 0;
2544
2545 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2546 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2547 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2548
2549 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2550 cmd_channel_map(pa, sc, channel);
2551 }
2552 }
2553
2554 void
2555 cmd0643_9_chip_map(sc, pa)
2556 struct pciide_softc *sc;
2557 struct pci_attach_args *pa;
2558 {
2559 struct pciide_channel *cp;
2560 int channel;
2561 pcireg_t rev = PCI_REVISION(pa->pa_class);
2562
2563 /*
2564 * On a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2565 * and of the base address registers can be disabled at
2566 * the hardware level. In this case, the device is wired
2567 * in compat mode and its first channel is always enabled,
2568 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2569 * In fact, it seems that the first channel of the CMD PCI0640
2570 * can't be disabled at all.
2571 */
2572
2573 #ifdef PCIIDE_CMD064x_DISABLE
2574 if (pciide_chipen(sc, pa) == 0)
2575 return;
2576 #endif
2577 printf("%s: bus-master DMA support present",
2578 sc->sc_wdcdev.sc_dev.dv_xname);
2579 pciide_mapreg_dma(sc, pa);
2580 printf("\n");
2581 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2582 WDC_CAPABILITY_MODE;
2583 if (sc->sc_dma_ok) {
2584 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2585 switch (sc->sc_pp->ide_product) {
2586 case PCI_PRODUCT_CMDTECH_649:
2587 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2588 sc->sc_wdcdev.UDMA_cap = 5;
2589 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2590 break;
2591 case PCI_PRODUCT_CMDTECH_648:
2592 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2593 sc->sc_wdcdev.UDMA_cap = 4;
2594 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2595 break;
2596 case PCI_PRODUCT_CMDTECH_646:
2597 if (rev >= CMD0646U2_REV) {
2598 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2599 sc->sc_wdcdev.UDMA_cap = 2;
2600 } else if (rev >= CMD0646U_REV) {
2601 /*
2602 * Linux's driver claims that the 646U is broken
2603 * with UDMA. Only enable UDMA if we know what we're
2604 * doing.
2605 */
2606 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2607 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2608 sc->sc_wdcdev.UDMA_cap = 2;
2609 #endif
2610 /* explicitly disable UDMA */
2611 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2612 CMD_UDMATIM(0), 0);
2613 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2614 CMD_UDMATIM(1), 0);
2615 }
2616 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2617 break;
2618 default:
2619 sc->sc_wdcdev.irqack = pciide_irqack;
2620 }
2621 }
2622
2623 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2624 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2625 sc->sc_wdcdev.PIO_cap = 4;
2626 sc->sc_wdcdev.DMA_cap = 2;
2627 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2628
2629 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2630 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2631 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2632 DEBUG_PROBE);
2633
2634 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2635 cp = &sc->pciide_channels[channel];
2636 cmd_channel_map(pa, sc, channel);
2637 if (cp->hw_ok == 0)
2638 continue;
2639 cmd0643_9_setup_channel(&cp->wdc_channel);
2640 }
2641 /*
2642 * Note: this also makes sure we clear the IRQ disable and reset
2643 * bits.
2644 */
2645 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2646 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2647 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2648 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2649 DEBUG_PROBE);
2650 }
2651
2652 void
2653 cmd0643_9_setup_channel(chp)
2654 struct channel_softc *chp;
2655 {
2656 struct ata_drive_datas *drvp;
2657 u_int8_t tim;
2658 u_int32_t idedma_ctl, udma_reg;
2659 int drive;
2660 struct pciide_channel *cp = (struct pciide_channel*)chp;
2661 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2662
2663 idedma_ctl = 0;
2664 /* setup DMA if needed */
2665 pciide_channel_dma_setup(cp);
2666
2667 for (drive = 0; drive < 2; drive++) {
2668 drvp = &chp->ch_drive[drive];
2669 /* If no drive, skip */
2670 if ((drvp->drive_flags & DRIVE) == 0)
2671 continue;
2672 /* add timing values, setup DMA if needed */
2673 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2674 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2675 if (drvp->drive_flags & DRIVE_UDMA) {
2676 /* UltraDMA on a 646U2, 0648 or 0649 */
2677 drvp->drive_flags &= ~DRIVE_DMA;
2678 udma_reg = pciide_pci_read(sc->sc_pc,
2679 sc->sc_tag, CMD_UDMATIM(chp->channel));
2680 if (drvp->UDMA_mode > 2 &&
2681 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2682 CMD_BICSR) &
2683 CMD_BICSR_80(chp->channel)) == 0)
2684 drvp->UDMA_mode = 2;
2685 if (drvp->UDMA_mode > 2)
2686 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2687 else if (sc->sc_wdcdev.UDMA_cap > 2)
2688 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2689 udma_reg |= CMD_UDMATIM_UDMA(drive);
2690 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2691 CMD_UDMATIM_TIM_OFF(drive));
2692 udma_reg |=
2693 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2694 CMD_UDMATIM_TIM_OFF(drive));
2695 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2696 CMD_UDMATIM(chp->channel), udma_reg);
2697 } else {
2698 /*
2699 * Use multiword DMA.
2700 * Timings will be used for both PIO and DMA,
2701 * so adjust the DMA mode if needed.
2702 * If we have a 0646U2/8/9, turn off UDMA.
2703 */
2704 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2705 udma_reg = pciide_pci_read(sc->sc_pc,
2706 sc->sc_tag,
2707 CMD_UDMATIM(chp->channel));
2708 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2709 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2710 CMD_UDMATIM(chp->channel),
2711 udma_reg);
2712 }
2713 if (drvp->PIO_mode >= 3 &&
2714 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2715 drvp->DMA_mode = drvp->PIO_mode - 2;
2716 }
2717 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2718 }
2719 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2720 }
2721 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2722 CMD_DATA_TIM(chp->channel, drive), tim);
2723 }
2724 if (idedma_ctl != 0) {
2725 /* Add software bits in status register */
2726 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2727 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2728 idedma_ctl);
2729 }
2730 pciide_print_modes(cp);
2731 }
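/*
 * Illustrative sketch (not compiled in): the 80-conductor cable check
 * used just above (via CMD_BICSR_80()) and by several other chip
 * backends in this file. On a 40-wire cable, UDMA modes above 2
 * (Ultra/33) are not reliable, so the negotiated mode is clamped.
 * cable_is_80wire is a hypothetical flag standing in for the
 * chip-specific register test.
 */
#if 0
static void
example_clamp_udma(struct ata_drive_datas *drvp, int cable_is_80wire)
{

	if (drvp->UDMA_mode > 2 && !cable_is_80wire)
		drvp->UDMA_mode = 2;	/* fall back to Ultra/33 */
}
#endif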
2732
2733 void
2734 cmd646_9_irqack(chp)
2735 struct channel_softc *chp;
2736 {
2737 u_int32_t priirq, secirq;
2738 struct pciide_channel *cp = (struct pciide_channel*)chp;
2739 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2740
2741 if (chp->channel == 0) {
2742 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2743 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2744 } else {
2745 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2746 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2747 }
2748 pciide_irqack(chp);
2749 }
2750
2751 void
2752 cmd3112_chip_map(sc, pa)
2753 struct pciide_softc *sc;
2754 struct pci_attach_args *pa;
2755 {
2756 struct pciide_channel *cp;
2757 bus_size_t cmdsize, ctlsize;
2758 pcireg_t interface;
2759 int channel;
2760
2761 if (pciide_chipen(sc, pa) == 0)
2762 return;
2763
2764 printf("%s: bus-master DMA support present",
2765 sc->sc_wdcdev.sc_dev.dv_xname);
2766 pciide_mapreg_dma(sc, pa);
2767 printf("\n");
2768
2769 /*
2770 * Revisions <= 0x01 of the 3112 have a bug that can cause data
2771 * corruption if DMA transfers cross an 8K boundary. This is
2772 * apparently hard to tickle, but we'll go ahead and play it
2773 * safe.
2774 */
2775 if (PCI_REVISION(pa->pa_class) <= 0x01) {
2776 sc->sc_dma_maxsegsz = 8192;
2777 sc->sc_dma_boundary = 8192;
2778 }
2779
2780 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2781 WDC_CAPABILITY_MODE;
2782 sc->sc_wdcdev.PIO_cap = 4;
2783 if (sc->sc_dma_ok) {
2784 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2785 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2786 sc->sc_wdcdev.irqack = pciide_irqack;
2787 sc->sc_wdcdev.DMA_cap = 2;
2788 sc->sc_wdcdev.UDMA_cap = 6;
2789 }
2790 sc->sc_wdcdev.set_modes = cmd3112_setup_channel;
2791
2792 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2793 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2794
2795 /*
2796 * The 3112 can be told to identify as a RAID controller.
2797 * In this case, we have to fake the interface value.
2798 */
2799 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
2800 interface = PCI_INTERFACE(pa->pa_class);
2801 } else {
2802 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2803 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2804 }
2805
2806 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2807 cp = &sc->pciide_channels[channel];
2808 if (pciide_chansetup(sc, channel, interface) == 0)
2809 continue;
2810 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2811 pciide_pci_intr);
2812 if (cp->hw_ok == 0)
2813 continue;
2814 pciide_map_compat_intr(pa, cp, channel, interface);
2815 cmd3112_setup_channel(&cp->wdc_channel);
2816 }
2817 }
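/*
 * Illustrative sketch (not compiled in): how the sc_dma_maxsegsz and
 * sc_dma_boundary limits set in cmd3112_chip_map() above are expected
 * to be consumed. The driver's generic DMA-map setup presumably hands
 * these per-controller limits to bus_dmamap_create(), and the bus_dma
 * framework then guarantees that no segment crosses the given boundary.
 * The size and nsegments arguments below are placeholders, not the
 * driver's real values.
 */
#if 0
static int
example_create_xfer_map(struct pciide_softc *sc, bus_dmamap_t *mapp)
{

	return bus_dmamap_create(sc->sc_dmat, MAXPHYS, 16,
	    sc->sc_dma_maxsegsz, sc->sc_dma_boundary, BUS_DMA_NOWAIT, mapp);
}
#endif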
2818
2819 void
2820 cmd3112_setup_channel(chp)
2821 struct channel_softc *chp;
2822 {
2823 struct ata_drive_datas *drvp;
2824 int drive;
2825 u_int32_t idedma_ctl, dtm;
2826 struct pciide_channel *cp = (struct pciide_channel*)chp;
2827 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
2828
2829 /* setup DMA if needed */
2830 pciide_channel_dma_setup(cp);
2831
2832 idedma_ctl = 0;
2833 dtm = 0;
2834
2835 for (drive = 0; drive < 2; drive++) {
2836 drvp = &chp->ch_drive[drive];
2837 /* If no drive, skip */
2838 if ((drvp->drive_flags & DRIVE) == 0)
2839 continue;
2840 if (drvp->drive_flags & DRIVE_UDMA) {
2841 /* use Ultra/DMA */
2842 drvp->drive_flags &= ~DRIVE_DMA;
2843 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2844 dtm |= DTM_IDEx_DMA;
2845 } else if (drvp->drive_flags & DRIVE_DMA) {
2846 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2847 dtm |= DTM_IDEx_DMA;
2848 } else {
2849 dtm |= DTM_IDEx_PIO;
2850 }
2851 }
2852
2853 /*
2854 * Nothing to do to set up modes; mode selection is meaningless
2855 * for S-ATA (but many S-ATA drives still want to receive the
2856 * SET_FEATURE command).
2857 */
2858 if (idedma_ctl != 0) {
2859 /* Add software bits in status register */
2860 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2861 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2862 idedma_ctl);
2863 }
2864 pci_conf_write(sc->sc_pc, sc->sc_tag,
2865 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
2866 pciide_print_modes(cp);
2867 }
2868
2869 void
2870 cy693_chip_map(sc, pa)
2871 struct pciide_softc *sc;
2872 struct pci_attach_args *pa;
2873 {
2874 struct pciide_channel *cp;
2875 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2876 bus_size_t cmdsize, ctlsize;
2877
2878 if (pciide_chipen(sc, pa) == 0)
2879 return;
2880 /*
2881 * This chip has two PCI IDE functions, one for the primary and one
2882 * for the secondary channel, so we need to call
2883 * pciide_mapregs_compat() with the real channel number.
2884 */
2885 if (pa->pa_function == 1) {
2886 sc->sc_cy_compatchan = 0;
2887 } else if (pa->pa_function == 2) {
2888 sc->sc_cy_compatchan = 1;
2889 } else {
2890 printf("%s: unexpected PCI function %d\n",
2891 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2892 return;
2893 }
2894 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2895 printf("%s: bus-master DMA support present",
2896 sc->sc_wdcdev.sc_dev.dv_xname);
2897 pciide_mapreg_dma(sc, pa);
2898 } else {
2899 printf("%s: hardware does not support DMA",
2900 sc->sc_wdcdev.sc_dev.dv_xname);
2901 sc->sc_dma_ok = 0;
2902 }
2903 printf("\n");
2904
2905 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2906 if (sc->sc_cy_handle == NULL) {
2907 printf("%s: unable to map hyperCache control registers\n",
2908 sc->sc_wdcdev.sc_dev.dv_xname);
2909 sc->sc_dma_ok = 0;
2910 }
2911
2912 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2913 WDC_CAPABILITY_MODE;
2914 if (sc->sc_dma_ok) {
2915 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2916 sc->sc_wdcdev.irqack = pciide_irqack;
2917 }
2918 sc->sc_wdcdev.PIO_cap = 4;
2919 sc->sc_wdcdev.DMA_cap = 2;
2920 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2921
2922 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2923 sc->sc_wdcdev.nchannels = 1;
2924
2925 /* Only one channel for this chip; if we are here it's enabled */
2926 cp = &sc->pciide_channels[0];
2927 sc->wdc_chanarray[0] = &cp->wdc_channel;
2928 cp->name = PCIIDE_CHANNEL_NAME(0);
2929 cp->wdc_channel.channel = 0;
2930 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2931 cp->wdc_channel.ch_queue =
2932 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2933 if (cp->wdc_channel.ch_queue == NULL) {
2934 printf("%s primary channel: "
2935 "can't allocate memory for command queue",
2936 sc->sc_wdcdev.sc_dev.dv_xname);
2937 return;
2938 }
2939 printf("%s: primary channel %s to ",
2940 sc->sc_wdcdev.sc_dev.dv_xname,
2941 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2942 "configured" : "wired");
2943 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2944 printf("native-PCI");
2945 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2946 pciide_pci_intr);
2947 } else {
2948 printf("compatibility");
2949 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2950 &cmdsize, &ctlsize);
2951 }
2952 printf(" mode\n");
2953 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2954 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2955 wdcattach(&cp->wdc_channel);
2956 if (pciide_chan_candisable(cp)) {
2957 pci_conf_write(sc->sc_pc, sc->sc_tag,
2958 PCI_COMMAND_STATUS_REG, 0);
2959 }
2960 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2961 if (cp->hw_ok == 0)
2962 return;
2963 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2964 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2965 cy693_setup_channel(&cp->wdc_channel);
2966 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2967 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2968 }
2969
2970 void
2971 cy693_setup_channel(chp)
2972 struct channel_softc *chp;
2973 {
2974 struct ata_drive_datas *drvp;
2975 int drive;
2976 u_int32_t cy_cmd_ctrl;
2977 u_int32_t idedma_ctl;
2978 struct pciide_channel *cp = (struct pciide_channel*)chp;
2979 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2980 int dma_mode = -1;
2981
2982 cy_cmd_ctrl = idedma_ctl = 0;
2983
2984 /* setup DMA if needed */
2985 pciide_channel_dma_setup(cp);
2986
2987 for (drive = 0; drive < 2; drive++) {
2988 drvp = &chp->ch_drive[drive];
2989 /* If no drive, skip */
2990 if ((drvp->drive_flags & DRIVE) == 0)
2991 continue;
2992 /* add timing values, setup DMA if needed */
2993 if (drvp->drive_flags & DRIVE_DMA) {
2994 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2995 /* use Multiword DMA */
2996 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2997 dma_mode = drvp->DMA_mode;
2998 }
2999 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3000 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3001 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3002 CY_CMD_CTRL_IOW_REC_OFF(drive));
3003 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3004 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3005 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3006 CY_CMD_CTRL_IOR_REC_OFF(drive));
3007 }
3008 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
3009 chp->ch_drive[0].DMA_mode = dma_mode;
3010 chp->ch_drive[1].DMA_mode = dma_mode;
3011
3012 if (dma_mode == -1)
3013 dma_mode = 0;
3014
3015 if (sc->sc_cy_handle != NULL) {
3016 /* Note: `multiple' is implied. */
3017 cy82c693_write(sc->sc_cy_handle,
3018 (sc->sc_cy_compatchan == 0) ?
3019 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3020 }
3021
3022 pciide_print_modes(cp);
3023
3024 if (idedma_ctl != 0) {
3025 /* Add software bits in status register */
3026 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3027 IDEDMA_CTL, idedma_ctl);
3028 }
3029 }
3030
3031 static struct sis_hostbr_type {
3032 u_int16_t id;
3033 u_int8_t rev;
3034 u_int8_t udma_mode;
3035 char *name;
3036 u_int8_t type;
3037 #define SIS_TYPE_NOUDMA 0
3038 #define SIS_TYPE_66 1
3039 #define SIS_TYPE_100OLD 2
3040 #define SIS_TYPE_100NEW 3
3041 #define SIS_TYPE_133OLD 4
3042 #define SIS_TYPE_133NEW 5
3043 #define SIS_TYPE_SOUTH 6
3044 } sis_hostbr_type[] = {
3045 /* Most of the info here is from sos (at) freebsd.org */
3046 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
3047 #if 0
3048 /*
3049 * controllers associated with a rev 0x2 530 host-to-PCI bridge
3050 * have problems with UDMA (info provided by Christos)
3051 */
3052 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
3053 #endif
3054 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
3055 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
3056 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
3057 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
3058 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
3059 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
3060 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
3061 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
3062 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
3063 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
3064 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
3065 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
3066 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
3067 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
3068 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
3069 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
3070 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
3071 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
3072 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
3073 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
3074 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
3075 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
3076 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
3077 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
3078 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
3079 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
3080 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
3081 /*
3082 * From sos (at) freebsd.org: the 0x961 ID will never be found in the real world
3083 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
3084 */
3085 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
3086 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
3087 };
3088
3089 static struct sis_hostbr_type *sis_hostbr_type_match;
3090
3091 static int
3092 sis_hostbr_match(pa)
3093 struct pci_attach_args *pa;
3094 {
3095 int i;
3096 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3097 return 0;
3098 sis_hostbr_type_match = NULL;
3099 for (i = 0;
3100 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3101 i++) {
3102 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3103 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3104 sis_hostbr_type_match = &sis_hostbr_type[i];
3105 }
3106 return (sis_hostbr_type_match != NULL);
3107 }
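/*
 * Note on the table above: sis_hostbr_match() scans the whole array
 * without breaking out of the loop, so when several entries share a
 * product ID the last entry whose revision threshold is met wins.
 * Ordering therefore matters: a 630 at revision 0x30 or later matches
 * both the plain 630 entry and the rev 0x30 630S entry and ends up
 * reported as a 630S, while an older 630 only matches the first entry.
 */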
3108
3109 static int sis_south_match(pa)
3110 struct pci_attach_args *pa;
3111 {
3112 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3113 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3114 PCI_REVISION(pa->pa_class) >= 0x10);
3115 }
3116
3117 void
3118 sis_chip_map(sc, pa)
3119 struct pciide_softc *sc;
3120 struct pci_attach_args *pa;
3121 {
3122 struct pciide_channel *cp;
3123 int channel;
3124 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3125 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3126 pcireg_t rev = PCI_REVISION(pa->pa_class);
3127 bus_size_t cmdsize, ctlsize;
3128
3129 if (pciide_chipen(sc, pa) == 0)
3130 return;
3131 printf(": Silicon Integrated System ");
3132 pci_find_device(NULL, sis_hostbr_match);
3133 if (sis_hostbr_type_match) {
3134 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
3135 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3136 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3137 SIS_REG_57) & 0x7f);
3138 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3139 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3140 printf("96X UDMA%d",
3141 sis_hostbr_type_match->udma_mode);
3142 sc->sis_type = SIS_TYPE_133NEW;
3143 sc->sc_wdcdev.UDMA_cap =
3144 sis_hostbr_type_match->udma_mode;
3145 } else {
3146 if (pci_find_device(NULL, sis_south_match)) {
3147 sc->sis_type = SIS_TYPE_133OLD;
3148 sc->sc_wdcdev.UDMA_cap =
3149 sis_hostbr_type_match->udma_mode;
3150 } else {
3151 sc->sis_type = SIS_TYPE_100NEW;
3152 sc->sc_wdcdev.UDMA_cap =
3153 sis_hostbr_type_match->udma_mode;
3154 }
3155 }
3156 } else {
3157 sc->sis_type = sis_hostbr_type_match->type;
3158 sc->sc_wdcdev.UDMA_cap =
3159 sis_hostbr_type_match->udma_mode;
3160 }
3161 printf(sis_hostbr_type_match->name);
3162 } else {
3163 printf("5597/5598");
3164 if (rev >= 0xd0) {
3165 sc->sc_wdcdev.UDMA_cap = 2;
3166 sc->sis_type = SIS_TYPE_66;
3167 } else {
3168 sc->sc_wdcdev.UDMA_cap = 0;
3169 sc->sis_type = SIS_TYPE_NOUDMA;
3170 }
3171 }
3172 printf(" IDE controller (rev. 0x%02x)\n", PCI_REVISION(pa->pa_class));
3173 printf("%s: bus-master DMA support present",
3174 sc->sc_wdcdev.sc_dev.dv_xname);
3175 pciide_mapreg_dma(sc, pa);
3176 printf("\n");
3177
3178 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3179 WDC_CAPABILITY_MODE;
3180 if (sc->sc_dma_ok) {
3181 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3182 sc->sc_wdcdev.irqack = pciide_irqack;
3183 if (sc->sis_type >= SIS_TYPE_66)
3184 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3185 }
3186
3187 sc->sc_wdcdev.PIO_cap = 4;
3188 sc->sc_wdcdev.DMA_cap = 2;
3189
3190 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3191 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3192 switch(sc->sis_type) {
3193 case SIS_TYPE_NOUDMA:
3194 case SIS_TYPE_66:
3195 case SIS_TYPE_100OLD:
3196 sc->sc_wdcdev.set_modes = sis_setup_channel;
3197 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3198 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3199 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3200 break;
3201 case SIS_TYPE_100NEW:
3202 case SIS_TYPE_133OLD:
3203 sc->sc_wdcdev.set_modes = sis_setup_channel;
3204 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3205 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3206 break;
3207 case SIS_TYPE_133NEW:
3208 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3209 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3210 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3211 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3212 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3213 break;
3214 }
3215
3216
3217 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3218 cp = &sc->pciide_channels[channel];
3219 if (pciide_chansetup(sc, channel, interface) == 0)
3220 continue;
3221 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3222 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3223 printf("%s: %s channel ignored (disabled)\n",
3224 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3225 continue;
3226 }
3227 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3228 pciide_pci_intr);
3229 if (cp->hw_ok == 0)
3230 continue;
3231 if (pciide_chan_candisable(cp)) {
3232 if (channel == 0)
3233 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3234 else
3235 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3236 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3237 sis_ctr0);
3238 }
3239 pciide_map_compat_intr(pa, cp, channel, interface);
3240 if (cp->hw_ok == 0)
3241 continue;
3242 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3243 }
3244 }
3245
3246 void
3247 sis96x_setup_channel(chp)
3248 struct channel_softc *chp;
3249 {
3250 struct ata_drive_datas *drvp;
3251 int drive;
3252 u_int32_t sis_tim;
3253 u_int32_t idedma_ctl;
3254 int regtim;
3255 struct pciide_channel *cp = (struct pciide_channel*)chp;
3256 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3257
3258 sis_tim = 0;
3259 idedma_ctl = 0;
3260 /* setup DMA if needed */
3261 pciide_channel_dma_setup(cp);
3262
3263 for (drive = 0; drive < 2; drive++) {
3264 regtim = SIS_TIM133(
3265 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3266 chp->channel, drive);
3267 drvp = &chp->ch_drive[drive];
3268 /* If no drive, skip */
3269 if ((drvp->drive_flags & DRIVE) == 0)
3270 continue;
3271 /* add timing values, setup DMA if needed */
3272 if (drvp->drive_flags & DRIVE_UDMA) {
3273 /* use Ultra/DMA */
3274 drvp->drive_flags &= ~DRIVE_DMA;
3275 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3276 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3277 if (drvp->UDMA_mode > 2)
3278 drvp->UDMA_mode = 2;
3279 }
3280 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3281 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3282 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3283 } else if (drvp->drive_flags & DRIVE_DMA) {
3284 /*
3285 * use Multiword DMA
3286 * Timings will be used for both PIO and DMA,
3287 * so adjust DMA mode if needed
3288 */
3289 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3290 drvp->PIO_mode = drvp->DMA_mode + 2;
3291 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3292 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3293 drvp->PIO_mode - 2 : 0;
3294 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3295 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3296 } else {
3297 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3298 }
3299 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3300 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3301 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3302 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3303 }
3304 if (idedma_ctl != 0) {
3305 /* Add software bits in status register */
3306 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3307 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3308 idedma_ctl);
3309 }
3310 pciide_print_modes(cp);
3311 }
3312
3313 void
3314 sis_setup_channel(chp)
3315 struct channel_softc *chp;
3316 {
3317 struct ata_drive_datas *drvp;
3318 int drive;
3319 u_int32_t sis_tim;
3320 u_int32_t idedma_ctl;
3321 struct pciide_channel *cp = (struct pciide_channel*)chp;
3322 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3323
3324 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3325 "channel %d 0x%x\n", chp->channel,
3326 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3327 DEBUG_PROBE);
3328 sis_tim = 0;
3329 idedma_ctl = 0;
3330 /* setup DMA if needed */
3331 pciide_channel_dma_setup(cp);
3332
3333 for (drive = 0; drive < 2; drive++) {
3334 drvp = &chp->ch_drive[drive];
3335 /* If no drive, skip */
3336 if ((drvp->drive_flags & DRIVE) == 0)
3337 continue;
3338 /* add timing values, setup DMA if needed */
3339 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3340 (drvp->drive_flags & DRIVE_UDMA) == 0)
3341 goto pio;
3342
3343 if (drvp->drive_flags & DRIVE_UDMA) {
3344 /* use Ultra/DMA */
3345 drvp->drive_flags &= ~DRIVE_DMA;
3346 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3347 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3348 if (drvp->UDMA_mode > 2)
3349 drvp->UDMA_mode = 2;
3350 }
3351 switch (sc->sis_type) {
3352 case SIS_TYPE_66:
3353 case SIS_TYPE_100OLD:
3354 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3355 SIS_TIM66_UDMA_TIME_OFF(drive);
3356 break;
3357 case SIS_TYPE_100NEW:
3358 sis_tim |=
3359 sis_udma100new_tim[drvp->UDMA_mode] <<
3360 SIS_TIM100_UDMA_TIME_OFF(drive);
3361 case SIS_TYPE_133OLD:
3362 sis_tim |=
3363 sis_udma133old_tim[drvp->UDMA_mode] <<
3364 SIS_TIM100_UDMA_TIME_OFF(drive);
3365 break;
3366 default:
3367 printf("unknown SiS IDE type %d\n",
3368 sc->sis_type);
3369 }
3370 } else {
3371 /*
3372 * use Multiword DMA
3373 * Timings will be used for both PIO and DMA,
3374 * so adjust DMA mode if needed
3375 */
3376 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3377 drvp->PIO_mode = drvp->DMA_mode + 2;
3378 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3379 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3380 drvp->PIO_mode - 2 : 0;
3381 if (drvp->DMA_mode == 0)
3382 drvp->PIO_mode = 0;
3383 }
3384 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3385 pio: switch (sc->sis_type) {
3386 case SIS_TYPE_NOUDMA:
3387 case SIS_TYPE_66:
3388 case SIS_TYPE_100OLD:
3389 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3390 SIS_TIM66_ACT_OFF(drive);
3391 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3392 SIS_TIM66_REC_OFF(drive);
3393 break;
3394 case SIS_TYPE_100NEW:
3395 case SIS_TYPE_133OLD:
3396 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3397 SIS_TIM100_ACT_OFF(drive);
3398 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3399 SIS_TIM100_REC_OFF(drive);
3400 break;
3401 default:
3402 printf("unknown SiS IDE type %d\n",
3403 sc->sis_type);
3404 }
3405 }
3406 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3407 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3408 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3409 if (idedma_ctl != 0) {
3410 /* Add software bits in status register */
3411 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3412 IDEDMA_CTL, idedma_ctl);
3413 }
3414 pciide_print_modes(cp);
3415 }
3416
3417 void
3418 acer_chip_map(sc, pa)
3419 struct pciide_softc *sc;
3420 struct pci_attach_args *pa;
3421 {
3422 struct pciide_channel *cp;
3423 int channel;
3424 pcireg_t cr, interface;
3425 bus_size_t cmdsize, ctlsize;
3426 pcireg_t rev = PCI_REVISION(pa->pa_class);
3427
3428 if (pciide_chipen(sc, pa) == 0)
3429 return;
3430 printf("%s: bus-master DMA support present",
3431 sc->sc_wdcdev.sc_dev.dv_xname);
3432 pciide_mapreg_dma(sc, pa);
3433 printf("\n");
3434 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3435 WDC_CAPABILITY_MODE;
3436 if (sc->sc_dma_ok) {
3437 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3438 if (rev >= 0x20) {
3439 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3440 if (rev >= 0xC4)
3441 sc->sc_wdcdev.UDMA_cap = 5;
3442 else if (rev >= 0xC2)
3443 sc->sc_wdcdev.UDMA_cap = 4;
3444 else
3445 sc->sc_wdcdev.UDMA_cap = 2;
3446 }
3447 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3448 sc->sc_wdcdev.irqack = pciide_irqack;
3449 }
3450
3451 sc->sc_wdcdev.PIO_cap = 4;
3452 sc->sc_wdcdev.DMA_cap = 2;
3453 sc->sc_wdcdev.set_modes = acer_setup_channel;
3454 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3455 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3456
3457 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3458 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3459 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3460
3461 /* Enable "microsoft register bits" R/W. */
3462 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3463 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3464 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3465 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3466 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3467 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3468 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3469 ~ACER_CHANSTATUSREGS_RO);
3470 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3471 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3472 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3473 /* Don't use cr, re-read the real register content instead */
3474 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3475 PCI_CLASS_REG));
3476
3477 /* From linux: enable "Cable Detection" */
3478 if (rev >= 0xC2) {
3479 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3480 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3481 | ACER_0x4B_CDETECT);
3482 }
3483
3484 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3485 cp = &sc->pciide_channels[channel];
3486 if (pciide_chansetup(sc, channel, interface) == 0)
3487 continue;
3488 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3489 printf("%s: %s channel ignored (disabled)\n",
3490 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3491 continue;
3492 }
3493 /* Newer controllers seem to lack the ACER_CHIDS. Sigh. */
3494 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3495 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3496 if (cp->hw_ok == 0)
3497 continue;
3498 if (pciide_chan_candisable(cp)) {
3499 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3500 pci_conf_write(sc->sc_pc, sc->sc_tag,
3501 PCI_CLASS_REG, cr);
3502 }
3503 pciide_map_compat_intr(pa, cp, channel, interface);
3504 acer_setup_channel(&cp->wdc_channel);
3505 }
3506 }
3507
3508 void
3509 acer_setup_channel(chp)
3510 struct channel_softc *chp;
3511 {
3512 struct ata_drive_datas *drvp;
3513 int drive;
3514 u_int32_t acer_fifo_udma;
3515 u_int32_t idedma_ctl;
3516 struct pciide_channel *cp = (struct pciide_channel*)chp;
3517 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3518
3519 idedma_ctl = 0;
3520 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3521 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3522 acer_fifo_udma), DEBUG_PROBE);
3523 /* setup DMA if needed */
3524 pciide_channel_dma_setup(cp);
3525
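	/*
	 * If either drive wants UDMA, look at the cable-detect bit for
	 * this channel in register 0x4A and limit both drives to UDMA
	 * mode 2 when it is set.
	 */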
3526 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3527 	    DRIVE_UDMA) { /* check for 80-pin cable */
3528 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3529 ACER_0x4A_80PIN(chp->channel)) {
3530 if (chp->ch_drive[0].UDMA_mode > 2)
3531 chp->ch_drive[0].UDMA_mode = 2;
3532 if (chp->ch_drive[1].UDMA_mode > 2)
3533 chp->ch_drive[1].UDMA_mode = 2;
3534 }
3535 }
3536
3537 for (drive = 0; drive < 2; drive++) {
3538 drvp = &chp->ch_drive[drive];
3539 /* If no drive, skip */
3540 if ((drvp->drive_flags & DRIVE) == 0)
3541 continue;
3542 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3543 "channel %d drive %d 0x%x\n", chp->channel, drive,
3544 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3545 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3546 /* clear FIFO/DMA mode */
3547 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3548 ACER_UDMA_EN(chp->channel, drive) |
3549 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3550
3551 /* add timing values, setup DMA if needed */
3552 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3553 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3554 acer_fifo_udma |=
3555 ACER_FTH_OPL(chp->channel, drive, 0x1);
3556 goto pio;
3557 }
3558
3559 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3560 if (drvp->drive_flags & DRIVE_UDMA) {
3561 /* use Ultra/DMA */
3562 drvp->drive_flags &= ~DRIVE_DMA;
3563 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3564 acer_fifo_udma |=
3565 ACER_UDMA_TIM(chp->channel, drive,
3566 acer_udma[drvp->UDMA_mode]);
3567 /* XXX disable if one drive < UDMA3 ? */
3568 if (drvp->UDMA_mode >= 3) {
3569 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3570 ACER_0x4B,
3571 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3572 ACER_0x4B) | ACER_0x4B_UDMA66);
3573 }
3574 } else {
3575 /*
3576 * use Multiword DMA
3577 * Timings will be used for both PIO and DMA,
3578 * so adjust DMA mode if needed
3579 */
3580 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3581 drvp->PIO_mode = drvp->DMA_mode + 2;
3582 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3583 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3584 drvp->PIO_mode - 2 : 0;
3585 if (drvp->DMA_mode == 0)
3586 drvp->PIO_mode = 0;
3587 }
3588 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3589 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3590 ACER_IDETIM(chp->channel, drive),
3591 acer_pio[drvp->PIO_mode]);
3592 }
3593 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3594 acer_fifo_udma), DEBUG_PROBE);
3595 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3596 if (idedma_ctl != 0) {
3597 /* Add software bits in status register */
3598 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3599 IDEDMA_CTL, idedma_ctl);
3600 }
3601 pciide_print_modes(cp);
3602 }
3603
3604 int
3605 acer_pci_intr(arg)
3606 void *arg;
3607 {
3608 struct pciide_softc *sc = arg;
3609 struct pciide_channel *cp;
3610 struct channel_softc *wdc_cp;
3611 int i, rv, crv;
3612 u_int32_t chids;
3613
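	/*
	 * ACER_CHIDS has one interrupt-pending bit per channel; only
	 * dispatch to the channels that are flagged.
	 */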
3614 rv = 0;
3615 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3616 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3617 cp = &sc->pciide_channels[i];
3618 wdc_cp = &cp->wdc_channel;
3619 		/* If this is a compat channel, skip it. */
3620 if (cp->compat)
3621 continue;
3622 if (chids & ACER_CHIDS_INT(i)) {
3623 crv = wdcintr(wdc_cp);
3624 if (crv == 0)
3625 printf("%s:%d: bogus intr\n",
3626 sc->sc_wdcdev.sc_dev.dv_xname, i);
3627 else
3628 rv = 1;
3629 }
3630 }
3631 return rv;
3632 }
3633
3634 void
3635 hpt_chip_map(sc, pa)
3636 struct pciide_softc *sc;
3637 struct pci_attach_args *pa;
3638 {
3639 struct pciide_channel *cp;
3640 int i, compatchan, revision;
3641 pcireg_t interface;
3642 bus_size_t cmdsize, ctlsize;
3643
3644 if (pciide_chipen(sc, pa) == 0)
3645 return;
3646 revision = PCI_REVISION(pa->pa_class);
3647 printf(": Triones/Highpoint ");
3648 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3649 printf("HPT374 IDE Controller\n");
3650 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3651 printf("HPT372 IDE Controller\n");
3652 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3653 if (revision == HPT372_REV)
3654 printf("HPT372 IDE Controller\n");
3655 else if (revision == HPT370_REV)
3656 printf("HPT370 IDE Controller\n");
3657 else if (revision == HPT370A_REV)
3658 printf("HPT370A IDE Controller\n");
3659 else if (revision == HPT366_REV)
3660 printf("HPT366 IDE Controller\n");
3661 else
3662 printf("unknown HPT IDE controller rev %d\n", revision);
3663 } else
3664 printf("unknown HPT IDE controller 0x%x\n",
3665 sc->sc_pp->ide_product);
3666
3667 /*
3668 	 * When the chip is in native mode it identifies itself as a
3669 	 * 'misc mass storage' device. Fake the interface in this case.
3670 */
3671 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3672 interface = PCI_INTERFACE(pa->pa_class);
3673 } else {
3674 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3675 PCIIDE_INTERFACE_PCI(0);
3676 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3677 (revision == HPT370_REV || revision == HPT370A_REV ||
3678 revision == HPT372_REV)) ||
3679 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3680 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3681 interface |= PCIIDE_INTERFACE_PCI(1);
3682 }
3683
3684 printf("%s: bus-master DMA support present",
3685 sc->sc_wdcdev.sc_dev.dv_xname);
3686 pciide_mapreg_dma(sc, pa);
3687 printf("\n");
3688 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3689 WDC_CAPABILITY_MODE;
3690 if (sc->sc_dma_ok) {
3691 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3692 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3693 sc->sc_wdcdev.irqack = pciide_irqack;
3694 }
3695 sc->sc_wdcdev.PIO_cap = 4;
3696 sc->sc_wdcdev.DMA_cap = 2;
3697
3698 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3699 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3700 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3701 revision == HPT366_REV) {
3702 sc->sc_wdcdev.UDMA_cap = 4;
3703 /*
3704 * The 366 has 2 PCI IDE functions, one for primary and one
3705 * for secondary. So we need to call pciide_mapregs_compat()
3706 		 * with the real channel.
3707 */
3708 if (pa->pa_function == 0) {
3709 compatchan = 0;
3710 } else if (pa->pa_function == 1) {
3711 compatchan = 1;
3712 } else {
3713 printf("%s: unexpected PCI function %d\n",
3714 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3715 return;
3716 }
3717 sc->sc_wdcdev.nchannels = 1;
3718 } else {
3719 sc->sc_wdcdev.nchannels = 2;
3720 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3721 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3722 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3723 revision == HPT372_REV))
3724 sc->sc_wdcdev.UDMA_cap = 6;
3725 else
3726 sc->sc_wdcdev.UDMA_cap = 5;
3727 }
3728 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3729 cp = &sc->pciide_channels[i];
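		/*
		 * On the two-channel chips each channel has an enable bit
		 * in its control register; skip channels that are disabled.
		 */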
3730 if (sc->sc_wdcdev.nchannels > 1) {
3731 compatchan = i;
3732 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3733 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3734 printf("%s: %s channel ignored (disabled)\n",
3735 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3736 continue;
3737 }
3738 }
3739 if (pciide_chansetup(sc, i, interface) == 0)
3740 continue;
3741 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3742 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3743 &ctlsize, hpt_pci_intr);
3744 } else {
3745 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3746 &cmdsize, &ctlsize);
3747 }
3748 if (cp->hw_ok == 0)
3749 return;
3750 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3751 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3752 wdcattach(&cp->wdc_channel);
3753 hpt_setup_channel(&cp->wdc_channel);
3754 }
3755 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3756 (revision == HPT370_REV || revision == HPT370A_REV ||
3757 revision == HPT372_REV)) ||
3758 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3759 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3760 /*
3761 		 * HPT370_REV and higher have a bit to disable interrupts;
3762 		 * make sure to clear it.
3763 */
3764 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3765 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3766 ~HPT_CSEL_IRQDIS);
3767 }
3768 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
3769 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3770 revision == HPT372_REV ) ||
3771 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3772 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3773 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3774 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3775 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3776 return;
3777 }
3778
3779 void
3780 hpt_setup_channel(chp)
3781 struct channel_softc *chp;
3782 {
3783 struct ata_drive_datas *drvp;
3784 int drive;
3785 int cable;
3786 u_int32_t before, after;
3787 u_int32_t idedma_ctl;
3788 struct pciide_channel *cp = (struct pciide_channel*)chp;
3789 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3790 int revision =
3791 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3792
3793 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3794
3795 /* setup DMA if needed */
3796 pciide_channel_dma_setup(cp);
3797
3798 idedma_ctl = 0;
3799
3800 /* Per drive settings */
3801 for (drive = 0; drive < 2; drive++) {
3802 drvp = &chp->ch_drive[drive];
3803 /* If no drive, skip */
3804 if ((drvp->drive_flags & DRIVE) == 0)
3805 continue;
3806 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3807 HPT_IDETIM(chp->channel, drive));
3808
3809 /* add timing values, setup DMA if needed */
3810 if (drvp->drive_flags & DRIVE_UDMA) {
3811 /* use Ultra/DMA */
3812 drvp->drive_flags &= ~DRIVE_DMA;
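			/*
			 * Cap UDMA at mode 2 when the CBLID bit for this
			 * channel is set in HPT_CSEL.
			 */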
3813 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3814 drvp->UDMA_mode > 2)
3815 drvp->UDMA_mode = 2;
3816 switch (sc->sc_pp->ide_product) {
3817 case PCI_PRODUCT_TRIONES_HPT374:
3818 after = hpt374_udma[drvp->UDMA_mode];
3819 break;
3820 case PCI_PRODUCT_TRIONES_HPT372:
3821 after = hpt372_udma[drvp->UDMA_mode];
3822 break;
3823 case PCI_PRODUCT_TRIONES_HPT366:
3824 default:
3825 switch(revision) {
3826 case HPT372_REV:
3827 after = hpt372_udma[drvp->UDMA_mode];
3828 break;
3829 case HPT370_REV:
3830 case HPT370A_REV:
3831 after = hpt370_udma[drvp->UDMA_mode];
3832 break;
3833 case HPT366_REV:
3834 default:
3835 after = hpt366_udma[drvp->UDMA_mode];
3836 break;
3837 }
3838 }
3839 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3840 } else if (drvp->drive_flags & DRIVE_DMA) {
3841 /*
3842 * use Multiword DMA.
3843 * Timings will be used for both PIO and DMA, so adjust
3844 * DMA mode if needed
3845 */
3846 if (drvp->PIO_mode >= 3 &&
3847 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3848 drvp->DMA_mode = drvp->PIO_mode - 2;
3849 }
3850 switch (sc->sc_pp->ide_product) {
3851 case PCI_PRODUCT_TRIONES_HPT374:
3852 after = hpt374_dma[drvp->DMA_mode];
3853 break;
3854 case PCI_PRODUCT_TRIONES_HPT372:
3855 after = hpt372_dma[drvp->DMA_mode];
3856 break;
3857 case PCI_PRODUCT_TRIONES_HPT366:
3858 default:
3859 switch(revision) {
3860 case HPT372_REV:
3861 after = hpt372_dma[drvp->DMA_mode];
3862 break;
3863 case HPT370_REV:
3864 case HPT370A_REV:
3865 after = hpt370_dma[drvp->DMA_mode];
3866 break;
3867 case HPT366_REV:
3868 default:
3869 after = hpt366_dma[drvp->DMA_mode];
3870 break;
3871 }
3872 }
3873 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3874 } else {
3875 /* PIO only */
3876 switch (sc->sc_pp->ide_product) {
3877 case PCI_PRODUCT_TRIONES_HPT374:
3878 after = hpt374_pio[drvp->PIO_mode];
3879 break;
3880 case PCI_PRODUCT_TRIONES_HPT372:
3881 after = hpt372_pio[drvp->PIO_mode];
3882 break;
3883 case PCI_PRODUCT_TRIONES_HPT366:
3884 default:
3885 switch(revision) {
3886 case HPT372_REV:
3887 after = hpt372_pio[drvp->PIO_mode];
3888 break;
3889 case HPT370_REV:
3890 case HPT370A_REV:
3891 after = hpt370_pio[drvp->PIO_mode];
3892 break;
3893 case HPT366_REV:
3894 default:
3895 after = hpt366_pio[drvp->PIO_mode];
3896 break;
3897 }
3898 }
3899 }
3900 pci_conf_write(sc->sc_pc, sc->sc_tag,
3901 HPT_IDETIM(chp->channel, drive), after);
3902 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3903 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3904 after, before), DEBUG_PROBE);
3905 }
3906 if (idedma_ctl != 0) {
3907 /* Add software bits in status register */
3908 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3909 IDEDMA_CTL, idedma_ctl);
3910 }
3911 pciide_print_modes(cp);
3912 }
3913
3914 int
3915 hpt_pci_intr(arg)
3916 void *arg;
3917 {
3918 struct pciide_softc *sc = arg;
3919 struct pciide_channel *cp;
3920 struct channel_softc *wdc_cp;
3921 int rv = 0;
3922 int dmastat, i, crv;
3923
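	/*
	 * A channel owns the interrupt only when its DMA status shows an
	 * interrupt pending and no transfer still active.
	 */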
3924 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3925 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3926 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3927 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3928 IDEDMA_CTL_INTR)
3929 continue;
3930 cp = &sc->pciide_channels[i];
3931 wdc_cp = &cp->wdc_channel;
3932 crv = wdcintr(wdc_cp);
3933 if (crv == 0) {
3934 printf("%s:%d: bogus intr\n",
3935 sc->sc_wdcdev.sc_dev.dv_xname, i);
3936 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3937 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3938 } else
3939 rv = 1;
3940 }
3941 return rv;
3942 }
3943
3944
3945 /* Macros to test product */
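/*
 * Each macro below matches a subset of the products matched by the one
 * before it: PDC_IS_262 covers the Ultra/66 and all later parts, while
 * PDC_IS_276 only covers the Ultra/133 family.
 */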
3946 #define PDC_IS_262(sc) \
3947 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3948 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3949 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3950 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3951 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3952 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3953 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3954 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3955 #define PDC_IS_265(sc) \
3956 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3957 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3958 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3959 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3960 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3961 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3962 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3963 #define PDC_IS_268(sc) \
3964 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3965 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3966 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3967 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3968 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3969 #define PDC_IS_276(sc) \
3970 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3971 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3972 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3973
3974 void
3975 pdc202xx_chip_map(sc, pa)
3976 struct pciide_softc *sc;
3977 struct pci_attach_args *pa;
3978 {
3979 struct pciide_channel *cp;
3980 int channel;
3981 pcireg_t interface, st, mode;
3982 bus_size_t cmdsize, ctlsize;
3983
3984 if (!PDC_IS_268(sc)) {
3985 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3986 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3987 st), DEBUG_PROBE);
3988 }
3989 if (pciide_chipen(sc, pa) == 0)
3990 return;
3991
3992 /* turn off RAID mode */
3993 if (!PDC_IS_268(sc))
3994 st &= ~PDC2xx_STATE_IDERAID;
3995
3996 /*
3997 	 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
3998 	 * mode. We have to fake the interface.
3999 */
4000 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
4001 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
4002 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4003
4004 printf("%s: bus-master DMA support present",
4005 sc->sc_wdcdev.sc_dev.dv_xname);
4006 pciide_mapreg_dma(sc, pa);
4007 printf("\n");
4008 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4009 WDC_CAPABILITY_MODE;
4010 if (sc->sc_dma_ok) {
4011 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4012 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4013 sc->sc_wdcdev.irqack = pciide_irqack;
4014 }
4015 sc->sc_wdcdev.PIO_cap = 4;
4016 sc->sc_wdcdev.DMA_cap = 2;
4017 if (PDC_IS_276(sc))
4018 sc->sc_wdcdev.UDMA_cap = 6;
4019 else if (PDC_IS_265(sc))
4020 sc->sc_wdcdev.UDMA_cap = 5;
4021 else if (PDC_IS_262(sc))
4022 sc->sc_wdcdev.UDMA_cap = 4;
4023 else
4024 sc->sc_wdcdev.UDMA_cap = 2;
4025 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
4026 pdc20268_setup_channel : pdc202xx_setup_channel;
4027 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4028 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4029
4030 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
4031 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
4032 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
4033 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
4034 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
4035 }
4036
4037 if (!PDC_IS_268(sc)) {
4038 /* setup failsafe defaults */
4039 mode = 0;
4040 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
4041 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
4042 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
4043 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
4044 for (channel = 0;
4045 channel < sc->sc_wdcdev.nchannels;
4046 channel++) {
4047 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4048 "drive 0 initial timings 0x%x, now 0x%x\n",
4049 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4050 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
4051 DEBUG_PROBE);
4052 pci_conf_write(sc->sc_pc, sc->sc_tag,
4053 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
4054 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4055 "drive 1 initial timings 0x%x, now 0x%x\n",
4056 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4057 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
4058 pci_conf_write(sc->sc_pc, sc->sc_tag,
4059 PDC2xx_TIM(channel, 1), mode);
4060 }
4061
4062 mode = PDC2xx_SCR_DMA;
4063 if (PDC_IS_265(sc)) {
4064 mode = PDC2xx_SCR_SET_GEN(mode, PDC265_SCR_GEN_LAT);
4065 } else if (PDC_IS_262(sc)) {
4066 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
4067 } else {
4068 /* the BIOS set it up this way */
4069 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
4070 }
4071 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
4072 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
4073 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
4074 "now 0x%x\n",
4075 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4076 PDC2xx_SCR),
4077 mode), DEBUG_PROBE);
4078 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4079 PDC2xx_SCR, mode);
4080
4081 /* controller initial state register is OK even without BIOS */
4082 /* Set DMA mode to IDE DMA compatibility */
4083 mode =
4084 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
4085 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
4086 DEBUG_PROBE);
4087 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
4088 mode | 0x1);
4089 mode =
4090 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
4091 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
4092 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
4093 mode | 0x1);
4094 }
4095
4096 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4097 cp = &sc->pciide_channels[channel];
4098 if (pciide_chansetup(sc, channel, interface) == 0)
4099 continue;
4100 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
4101 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
4102 printf("%s: %s channel ignored (disabled)\n",
4103 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4104 continue;
4105 }
4106 if (PDC_IS_265(sc))
4107 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4108 pdc20265_pci_intr);
4109 else
4110 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4111 pdc202xx_pci_intr);
4112 if (cp->hw_ok == 0)
4113 continue;
4114 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
4115 st &= ~(PDC_IS_262(sc) ?
4116 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
4117 pciide_map_compat_intr(pa, cp, channel, interface);
4118 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
4119 }
4120 if (!PDC_IS_268(sc)) {
4121 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4122 "0x%x\n", st), DEBUG_PROBE);
4123 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4124 }
4125 return;
4126 }
4127
4128 void
4129 pdc202xx_setup_channel(chp)
4130 struct channel_softc *chp;
4131 {
4132 struct ata_drive_datas *drvp;
4133 int drive;
4134 pcireg_t mode, st;
4135 u_int32_t idedma_ctl, scr, atapi;
4136 struct pciide_channel *cp = (struct pciide_channel*)chp;
4137 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4138 int channel = chp->channel;
4139
4140 /* setup DMA if needed */
4141 pciide_channel_dma_setup(cp);
4142
4143 idedma_ctl = 0;
4144 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4145 sc->sc_wdcdev.sc_dev.dv_xname,
4146 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4147 DEBUG_PROBE);
4148
4149 /* Per channel settings */
4150 if (PDC_IS_262(sc)) {
4151 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4152 PDC262_U66);
4153 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4154 /* Trim UDMA mode */
4155 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4156 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4157 chp->ch_drive[0].UDMA_mode <= 2) ||
4158 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4159 chp->ch_drive[1].UDMA_mode <= 2)) {
4160 if (chp->ch_drive[0].UDMA_mode > 2)
4161 chp->ch_drive[0].UDMA_mode = 2;
4162 if (chp->ch_drive[1].UDMA_mode > 2)
4163 chp->ch_drive[1].UDMA_mode = 2;
4164 }
4165 /* Set U66 if needed */
4166 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4167 chp->ch_drive[0].UDMA_mode > 2) ||
4168 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4169 chp->ch_drive[1].UDMA_mode > 2))
4170 scr |= PDC262_U66_EN(channel);
4171 else
4172 scr &= ~PDC262_U66_EN(channel);
4173 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4174 PDC262_U66, scr);
4175 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4176 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4177 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4178 PDC262_ATAPI(channel))), DEBUG_PROBE);
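		/*
		 * With an ATAPI device on the channel: if one drive uses
		 * Ultra/DMA while the other is limited to multiword DMA,
		 * clear the channel's ATAPI UDMA bit, otherwise set it.
		 */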
4179 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4180 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4181 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4182 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4183 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4184 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4185 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4186 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4187 atapi = 0;
4188 else
4189 atapi = PDC262_ATAPI_UDMA;
4190 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4191 PDC262_ATAPI(channel), atapi);
4192 }
4193 }
4194 for (drive = 0; drive < 2; drive++) {
4195 drvp = &chp->ch_drive[drive];
4196 /* If no drive, skip */
4197 if ((drvp->drive_flags & DRIVE) == 0)
4198 continue;
4199 mode = 0;
4200 if (drvp->drive_flags & DRIVE_UDMA) {
4201 /* use Ultra/DMA */
4202 drvp->drive_flags &= ~DRIVE_DMA;
4203 mode = PDC2xx_TIM_SET_MB(mode,
4204 pdc2xx_udma_mb[drvp->UDMA_mode]);
4205 mode = PDC2xx_TIM_SET_MC(mode,
4206 pdc2xx_udma_mc[drvp->UDMA_mode]);
4207 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4208 } else if (drvp->drive_flags & DRIVE_DMA) {
4209 mode = PDC2xx_TIM_SET_MB(mode,
4210 pdc2xx_dma_mb[drvp->DMA_mode]);
4211 mode = PDC2xx_TIM_SET_MC(mode,
4212 pdc2xx_dma_mc[drvp->DMA_mode]);
4213 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4214 } else {
4215 mode = PDC2xx_TIM_SET_MB(mode,
4216 pdc2xx_dma_mb[0]);
4217 mode = PDC2xx_TIM_SET_MC(mode,
4218 pdc2xx_dma_mc[0]);
4219 }
4220 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4221 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4222 if (drvp->drive_flags & DRIVE_ATA)
4223 mode |= PDC2xx_TIM_PRE;
4224 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
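		/*
		 * PIO modes 3 and up use IORDY; the IORDYp variant is only
		 * set for drive 0.
		 */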
4225 if (drvp->PIO_mode >= 3) {
4226 mode |= PDC2xx_TIM_IORDY;
4227 if (drive == 0)
4228 mode |= PDC2xx_TIM_IORDYp;
4229 }
4230 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4231 "timings 0x%x\n",
4232 sc->sc_wdcdev.sc_dev.dv_xname,
4233 chp->channel, drive, mode), DEBUG_PROBE);
4234 pci_conf_write(sc->sc_pc, sc->sc_tag,
4235 PDC2xx_TIM(chp->channel, drive), mode);
4236 }
4237 if (idedma_ctl != 0) {
4238 /* Add software bits in status register */
4239 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4240 IDEDMA_CTL, idedma_ctl);
4241 }
4242 pciide_print_modes(cp);
4243 }
4244
4245 void
4246 pdc20268_setup_channel(chp)
4247 struct channel_softc *chp;
4248 {
4249 struct ata_drive_datas *drvp;
4250 int drive;
4251 u_int32_t idedma_ctl;
4252 struct pciide_channel *cp = (struct pciide_channel*)chp;
4253 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4254 int u100;
4255
4256 /* setup DMA if needed */
4257 pciide_channel_dma_setup(cp);
4258
4259 idedma_ctl = 0;
4260
4261 /* I don't know what this is for, FreeBSD does it ... */
4262 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4263 IDEDMA_CMD + 0x1 + IDEDMA_SCH_OFFSET * chp->channel, 0x0b);
4264
4265 /*
4266 * cable type detect, from FreeBSD
4267 */
4268 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4269 IDEDMA_CMD + 0x3 + IDEDMA_SCH_OFFSET * chp->channel) & 0x04) ?
4270 0 : 1;
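	/*
	 * u100 is 0 when the cable-detect bit is set; in that case UDMA
	 * modes above 2 are not used (see below).
	 */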
4271
4272 for (drive = 0; drive < 2; drive++) {
4273 drvp = &chp->ch_drive[drive];
4274 /* If no drive, skip */
4275 if ((drvp->drive_flags & DRIVE) == 0)
4276 continue;
4277 if (drvp->drive_flags & DRIVE_UDMA) {
4278 /* use Ultra/DMA */
4279 drvp->drive_flags &= ~DRIVE_DMA;
4280 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4281 if (drvp->UDMA_mode > 2 && u100 == 0)
4282 drvp->UDMA_mode = 2;
4283 } else if (drvp->drive_flags & DRIVE_DMA) {
4284 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4285 }
4286 }
4287 	/* nothing to do to set up modes; the controller snoops the SET_FEATURES command */
4288 if (idedma_ctl != 0) {
4289 /* Add software bits in status register */
4290 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4291 IDEDMA_CTL, idedma_ctl);
4292 }
4293 pciide_print_modes(cp);
4294 }
4295
4296 int
4297 pdc202xx_pci_intr(arg)
4298 void *arg;
4299 {
4300 struct pciide_softc *sc = arg;
4301 struct pciide_channel *cp;
4302 struct channel_softc *wdc_cp;
4303 int i, rv, crv;
4304 u_int32_t scr;
4305
4306 rv = 0;
4307 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4308 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4309 cp = &sc->pciide_channels[i];
4310 wdc_cp = &cp->wdc_channel;
4311 		/* If this is a compat channel, skip it. */
4312 if (cp->compat)
4313 continue;
4314 if (scr & PDC2xx_SCR_INT(i)) {
4315 crv = wdcintr(wdc_cp);
4316 if (crv == 0)
4317 printf("%s:%d: bogus intr (reg 0x%x)\n",
4318 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4319 else
4320 rv = 1;
4321 }
4322 }
4323 return rv;
4324 }
4325
4326 int
4327 pdc20265_pci_intr(arg)
4328 void *arg;
4329 {
4330 struct pciide_softc *sc = arg;
4331 struct pciide_channel *cp;
4332 struct channel_softc *wdc_cp;
4333 int i, rv, crv;
4334 u_int32_t dmastat;
4335
4336 rv = 0;
4337 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4338 cp = &sc->pciide_channels[i];
4339 wdc_cp = &cp->wdc_channel;
4340 		/* If this is a compat channel, skip it. */
4341 if (cp->compat)
4342 continue;
4343 /*
4344 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4345 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops.
4346 		 * So use that instead (requires 2 register reads instead of 1,
4347 		 * but we can't do it any other way).
4348 */
4349 dmastat = bus_space_read_1(sc->sc_dma_iot,
4350 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4351 if((dmastat & IDEDMA_CTL_INTR) == 0)
4352 continue;
4353 crv = wdcintr(wdc_cp);
4354 if (crv == 0)
4355 printf("%s:%d: bogus intr\n",
4356 sc->sc_wdcdev.sc_dev.dv_xname, i);
4357 else
4358 rv = 1;
4359 }
4360 return rv;
4361 }
4362
4363 static void
4364 pdc20262_dma_start(v, channel, drive)
4365 void *v;
4366 int channel, drive;
4367 {
4368 struct pciide_softc *sc = v;
4369 struct pciide_dma_maps *dma_maps =
4370 &sc->pciide_channels[channel].dma_maps[drive];
4371 int atapi;
4372
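	/*
	 * For LBA48 transfers, program the LBA48 read/write command code
	 * and the transfer length (in 16-bit words) into the channel's
	 * ATAPI register before starting the DMA engine.
	 */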
4373 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4374 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4375 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4376 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4377 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4378 PDC262_ATAPI(channel), atapi);
4379 }
4380
4381 pciide_dma_start(v, channel, drive);
4382 }
4383
4384 int
4385 pdc20262_dma_finish(v, channel, drive, force)
4386 void *v;
4387 int channel, drive;
4388 int force;
4389 {
4390 struct pciide_softc *sc = v;
4391 struct pciide_dma_maps *dma_maps =
4392 &sc->pciide_channels[channel].dma_maps[drive];
4393 struct channel_softc *chp;
4394 int atapi, error;
4395
4396 error = pciide_dma_finish(v, channel, drive, force);
4397
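	/*
	 * After an LBA48 transfer, restore the ATAPI register using the
	 * same UDMA/DMA rule as pdc202xx_setup_channel().
	 */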
4398 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4399 chp = sc->wdc_chanarray[channel];
4400 atapi = 0;
4401 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4402 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4403 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4404 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4405 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4406 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4407 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4408 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4409 atapi = PDC262_ATAPI_UDMA;
4410 }
4411 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4412 PDC262_ATAPI(channel), atapi);
4413 }
4414
4415 return error;
4416 }
4417
4418 void
4419 opti_chip_map(sc, pa)
4420 struct pciide_softc *sc;
4421 struct pci_attach_args *pa;
4422 {
4423 struct pciide_channel *cp;
4424 bus_size_t cmdsize, ctlsize;
4425 pcireg_t interface;
4426 u_int8_t init_ctrl;
4427 int channel;
4428
4429 if (pciide_chipen(sc, pa) == 0)
4430 return;
4431 printf("%s: bus-master DMA support present",
4432 sc->sc_wdcdev.sc_dev.dv_xname);
4433
4434 /*
4435 * XXXSCW:
4436 * There seem to be a couple of buggy revisions/implementations
4437 * of the OPTi pciide chipset. This kludge seems to fix one of
4438 * the reported problems (PR/11644) but still fails for the
4439 * other (PR/13151), although the latter may be due to other
4440 * issues too...
4441 */
4442 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4443 printf(" but disabled due to chip rev. <= 0x12");
4444 sc->sc_dma_ok = 0;
4445 } else
4446 pciide_mapreg_dma(sc, pa);
4447
4448 printf("\n");
4449
4450 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4451 WDC_CAPABILITY_MODE;
4452 sc->sc_wdcdev.PIO_cap = 4;
4453 if (sc->sc_dma_ok) {
4454 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4455 sc->sc_wdcdev.irqack = pciide_irqack;
4456 sc->sc_wdcdev.DMA_cap = 2;
4457 }
4458 sc->sc_wdcdev.set_modes = opti_setup_channel;
4459
4460 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4461 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4462
4463 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4464 OPTI_REG_INIT_CONTROL);
4465
4466 interface = PCI_INTERFACE(pa->pa_class);
4467
4468 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4469 cp = &sc->pciide_channels[channel];
4470 if (pciide_chansetup(sc, channel, interface) == 0)
4471 continue;
4472 if (channel == 1 &&
4473 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4474 printf("%s: %s channel ignored (disabled)\n",
4475 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4476 continue;
4477 }
4478 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4479 pciide_pci_intr);
4480 if (cp->hw_ok == 0)
4481 continue;
4482 pciide_map_compat_intr(pa, cp, channel, interface);
4483 if (cp->hw_ok == 0)
4484 continue;
4485 opti_setup_channel(&cp->wdc_channel);
4486 }
4487 }
4488
4489 void
4490 opti_setup_channel(chp)
4491 struct channel_softc *chp;
4492 {
4493 struct ata_drive_datas *drvp;
4494 struct pciide_channel *cp = (struct pciide_channel*)chp;
4495 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4496 int drive, spd;
4497 int mode[2];
4498 u_int8_t rv, mr;
4499
4500 /*
4501 * The `Delay' and `Address Setup Time' fields of the
4502 * Miscellaneous Register are always zero initially.
4503 */
4504 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4505 mr &= ~(OPTI_MISC_DELAY_MASK |
4506 OPTI_MISC_ADDR_SETUP_MASK |
4507 OPTI_MISC_INDEX_MASK);
4508
4509 /* Prime the control register before setting timing values */
4510 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4511
4512 /* Determine the clockrate of the PCIbus the chip is attached to */
4513 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4514 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4515
4516 /* setup DMA if needed */
4517 pciide_channel_dma_setup(cp);
4518
4519 for (drive = 0; drive < 2; drive++) {
4520 drvp = &chp->ch_drive[drive];
4521 /* If no drive, skip */
4522 if ((drvp->drive_flags & DRIVE) == 0) {
4523 mode[drive] = -1;
4524 continue;
4525 }
4526
4527 if ((drvp->drive_flags & DRIVE_DMA)) {
4528 /*
4529 * Timings will be used for both PIO and DMA,
4530 * so adjust DMA mode if needed
4531 */
4532 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4533 drvp->PIO_mode = drvp->DMA_mode + 2;
4534 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4535 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4536 drvp->PIO_mode - 2 : 0;
4537 if (drvp->DMA_mode == 0)
4538 drvp->PIO_mode = 0;
4539
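			/*
			 * The opti_tim_* tables list the five PIO modes
			 * first, with the multiword DMA modes at offsets
			 * 5 to 7, hence the offset of 5.
			 */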
4540 mode[drive] = drvp->DMA_mode + 5;
4541 } else
4542 mode[drive] = drvp->PIO_mode;
4543
4544 if (drive && mode[0] >= 0 &&
4545 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4546 /*
4547 * Can't have two drives using different values
4548 * for `Address Setup Time'.
4549 * Slow down the faster drive to compensate.
4550 */
4551 int d = (opti_tim_as[spd][mode[0]] >
4552 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4553
4554 mode[d] = mode[1-d];
4555 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4556 chp->ch_drive[d].DMA_mode = 0;
4557 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4558 }
4559 }
4560
4561 for (drive = 0; drive < 2; drive++) {
4562 int m;
4563 if ((m = mode[drive]) < 0)
4564 continue;
4565
4566 /* Set the Address Setup Time and select appropriate index */
4567 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4568 rv |= OPTI_MISC_INDEX(drive);
4569 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4570
4571 /* Set the pulse width and recovery timing parameters */
4572 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4573 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4574 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4575 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4576
4577 /* Set the Enhanced Mode register appropriately */
4578 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4579 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4580 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4581 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4582 }
4583
4584 /* Finally, enable the timings */
4585 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4586
4587 pciide_print_modes(cp);
4588 }
4589
4590 #define ACARD_IS_850(sc) \
4591 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4592
4593 void
4594 acard_chip_map(sc, pa)
4595 struct pciide_softc *sc;
4596 struct pci_attach_args *pa;
4597 {
4598 struct pciide_channel *cp;
4599 int i;
4600 pcireg_t interface;
4601 bus_size_t cmdsize, ctlsize;
4602
4603 if (pciide_chipen(sc, pa) == 0)
4604 return;
4605
4606 /*
4607 	 * When the chip is in native mode it identifies itself as a
4608 	 * 'misc mass storage' device. Fake the interface in this case.
4609 */
4610 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4611 interface = PCI_INTERFACE(pa->pa_class);
4612 } else {
4613 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4614 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4615 }
4616
4617 printf("%s: bus-master DMA support present",
4618 sc->sc_wdcdev.sc_dev.dv_xname);
4619 pciide_mapreg_dma(sc, pa);
4620 printf("\n");
4621 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4622 WDC_CAPABILITY_MODE;
4623
4624 if (sc->sc_dma_ok) {
4625 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4626 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4627 sc->sc_wdcdev.irqack = pciide_irqack;
4628 }
4629 sc->sc_wdcdev.PIO_cap = 4;
4630 sc->sc_wdcdev.DMA_cap = 2;
4631 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4632
4633 sc->sc_wdcdev.set_modes = acard_setup_channel;
4634 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4635 sc->sc_wdcdev.nchannels = 2;
4636
4637 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4638 cp = &sc->pciide_channels[i];
4639 if (pciide_chansetup(sc, i, interface) == 0)
4640 continue;
4641 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4642 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4643 &ctlsize, pciide_pci_intr);
4644 } else {
4645 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4646 &cmdsize, &ctlsize);
4647 }
4648 if (cp->hw_ok == 0)
4649 return;
4650 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4651 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4652 wdcattach(&cp->wdc_channel);
4653 acard_setup_channel(&cp->wdc_channel);
4654 }
4655 if (!ACARD_IS_850(sc)) {
4656 u_int32_t reg;
4657 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4658 reg &= ~ATP860_CTRL_INT;
4659 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4660 }
4661 }
4662
4663 void
4664 acard_setup_channel(chp)
4665 struct channel_softc *chp;
4666 {
4667 struct ata_drive_datas *drvp;
4668 struct pciide_channel *cp = (struct pciide_channel*)chp;
4669 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4670 int channel = chp->channel;
4671 int drive;
4672 u_int32_t idetime, udma_mode;
4673 u_int32_t idedma_ctl;
4674
4675 /* setup DMA if needed */
4676 pciide_channel_dma_setup(cp);
4677
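	/*
	 * The ATP850 has per-channel timing registers and a single UDMA
	 * register with per-channel fields; the later chips use shared
	 * IDETIME/UDMA registers with per-channel fields and also provide
	 * the cable check below.
	 */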
4678 if (ACARD_IS_850(sc)) {
4679 idetime = 0;
4680 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4681 udma_mode &= ~ATP850_UDMA_MASK(channel);
4682 } else {
4683 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4684 idetime &= ~ATP860_SETTIME_MASK(channel);
4685 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4686 udma_mode &= ~ATP860_UDMA_MASK(channel);
4687
4688 		/* check for 80-pin cable */
4689 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4690 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4691 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4692 & ATP860_CTRL_80P(chp->channel)) {
4693 if (chp->ch_drive[0].UDMA_mode > 2)
4694 chp->ch_drive[0].UDMA_mode = 2;
4695 if (chp->ch_drive[1].UDMA_mode > 2)
4696 chp->ch_drive[1].UDMA_mode = 2;
4697 }
4698 }
4699 }
4700
4701 idedma_ctl = 0;
4702
4703 /* Per drive settings */
4704 for (drive = 0; drive < 2; drive++) {
4705 drvp = &chp->ch_drive[drive];
4706 /* If no drive, skip */
4707 if ((drvp->drive_flags & DRIVE) == 0)
4708 continue;
4709 /* add timing values, setup DMA if needed */
4710 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4711 (drvp->drive_flags & DRIVE_UDMA)) {
4712 /* use Ultra/DMA */
4713 if (ACARD_IS_850(sc)) {
4714 idetime |= ATP850_SETTIME(drive,
4715 acard_act_udma[drvp->UDMA_mode],
4716 acard_rec_udma[drvp->UDMA_mode]);
4717 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4718 acard_udma_conf[drvp->UDMA_mode]);
4719 } else {
4720 idetime |= ATP860_SETTIME(channel, drive,
4721 acard_act_udma[drvp->UDMA_mode],
4722 acard_rec_udma[drvp->UDMA_mode]);
4723 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4724 acard_udma_conf[drvp->UDMA_mode]);
4725 }
4726 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4727 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4728 (drvp->drive_flags & DRIVE_DMA)) {
4729 /* use Multiword DMA */
4730 drvp->drive_flags &= ~DRIVE_UDMA;
4731 if (ACARD_IS_850(sc)) {
4732 idetime |= ATP850_SETTIME(drive,
4733 acard_act_dma[drvp->DMA_mode],
4734 acard_rec_dma[drvp->DMA_mode]);
4735 } else {
4736 idetime |= ATP860_SETTIME(channel, drive,
4737 acard_act_dma[drvp->DMA_mode],
4738 acard_rec_dma[drvp->DMA_mode]);
4739 }
4740 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4741 } else {
4742 /* PIO only */
4743 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4744 if (ACARD_IS_850(sc)) {
4745 idetime |= ATP850_SETTIME(drive,
4746 acard_act_pio[drvp->PIO_mode],
4747 acard_rec_pio[drvp->PIO_mode]);
4748 } else {
4749 idetime |= ATP860_SETTIME(channel, drive,
4750 acard_act_pio[drvp->PIO_mode],
4751 acard_rec_pio[drvp->PIO_mode]);
4752 }
4753 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4754 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4755 | ATP8x0_CTRL_EN(channel));
4756 }
4757 }
4758
4759 if (idedma_ctl != 0) {
4760 /* Add software bits in status register */
4761 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4762 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4763 }
4764 pciide_print_modes(cp);
4765
4766 if (ACARD_IS_850(sc)) {
4767 pci_conf_write(sc->sc_pc, sc->sc_tag,
4768 ATP850_IDETIME(channel), idetime);
4769 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4770 } else {
4771 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4772 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4773 }
4774 }
4775
4776 int
4777 acard_pci_intr(arg)
4778 void *arg;
4779 {
4780 struct pciide_softc *sc = arg;
4781 struct pciide_channel *cp;
4782 struct channel_softc *wdc_cp;
4783 int rv = 0;
4784 int dmastat, i, crv;
4785
4786 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4787 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4788 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4789 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4790 continue;
4791 cp = &sc->pciide_channels[i];
4792 wdc_cp = &cp->wdc_channel;
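		/*
		 * No command is waiting for an interrupt on this channel:
		 * call wdcintr() anyway and ack the DMA interrupt by
		 * writing the status back.
		 */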
4793 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4794 (void)wdcintr(wdc_cp);
4795 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4796 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4797 continue;
4798 }
4799 crv = wdcintr(wdc_cp);
4800 if (crv == 0)
4801 printf("%s:%d: bogus intr\n",
4802 sc->sc_wdcdev.sc_dev.dv_xname, i);
4803 else if (crv == 1)
4804 rv = 1;
4805 else if (rv == 0)
4806 rv = crv;
4807 }
4808 return rv;
4809 }
4810
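/*
 * Helper for pci_find_device(): returns 1 for a Winbond 83C553 at
 * revision 0x05 or below, the revisions with the DMA bug worked around
 * in sl82c105_chip_map().
 */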
4811 static int
4812 sl82c105_bugchk(struct pci_attach_args *pa)
4813 {
4814
4815 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4816 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4817 return (0);
4818
4819 if (PCI_REVISION(pa->pa_class) <= 0x05)
4820 return (1);
4821
4822 return (0);
4823 }
4824
4825 void
4826 sl82c105_chip_map(sc, pa)
4827 struct pciide_softc *sc;
4828 struct pci_attach_args *pa;
4829 {
4830 struct pciide_channel *cp;
4831 bus_size_t cmdsize, ctlsize;
4832 pcireg_t interface, idecr;
4833 int channel;
4834
4835 if (pciide_chipen(sc, pa) == 0)
4836 return;
4837
4838 printf("%s: bus-master DMA support present",
4839 sc->sc_wdcdev.sc_dev.dv_xname);
4840
4841 /*
4842 * Check to see if we're part of the Winbond 83c553 Southbridge.
4843 * If so, we need to disable DMA on rev. <= 5 of that chip.
4844 */
4845 if (pci_find_device(pa, sl82c105_bugchk)) {
4846 printf(" but disabled due to 83c553 rev. <= 0x05");
4847 sc->sc_dma_ok = 0;
4848 } else
4849 pciide_mapreg_dma(sc, pa);
4850 printf("\n");
4851
4852 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4853 WDC_CAPABILITY_MODE;
4854 sc->sc_wdcdev.PIO_cap = 4;
4855 if (sc->sc_dma_ok) {
4856 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4857 sc->sc_wdcdev.irqack = pciide_irqack;
4858 sc->sc_wdcdev.DMA_cap = 2;
4859 }
4860 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4861
4862 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4863 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4864
4865 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4866
4867 interface = PCI_INTERFACE(pa->pa_class);
4868
4869 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4870 cp = &sc->pciide_channels[channel];
4871 if (pciide_chansetup(sc, channel, interface) == 0)
4872 continue;
4873 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4874 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4875 printf("%s: %s channel ignored (disabled)\n",
4876 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4877 continue;
4878 }
4879 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4880 pciide_pci_intr);
4881 if (cp->hw_ok == 0)
4882 continue;
4883 pciide_map_compat_intr(pa, cp, channel, interface);
4884 if (cp->hw_ok == 0)
4885 continue;
4886 sl82c105_setup_channel(&cp->wdc_channel);
4887 }
4888 }
4889
4890 void
4891 sl82c105_setup_channel(chp)
4892 struct channel_softc *chp;
4893 {
4894 struct ata_drive_datas *drvp;
4895 struct pciide_channel *cp = (struct pciide_channel*)chp;
4896 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4897 int pxdx_reg, drive;
4898 pcireg_t pxdx;
4899
4900 /* Set up DMA if needed. */
4901 pciide_channel_dma_setup(cp);
4902
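	/*
	 * Each drive has its own control register, 4 bytes after the
	 * channel's drive-0 register; rebuild its timing field from
	 * scratch.
	 */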
4903 for (drive = 0; drive < 2; drive++) {
4904 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4905 : SYMPH_P1D0CR) + (drive * 4);
4906
4907 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4908
4909 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4910 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4911
4912 drvp = &chp->ch_drive[drive];
4913 /* If no drive, skip. */
4914 if ((drvp->drive_flags & DRIVE) == 0) {
4915 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4916 continue;
4917 }
4918
4919 if (drvp->drive_flags & DRIVE_DMA) {
4920 /*
4921 * Timings will be used for both PIO and DMA,
4922 * so adjust DMA mode if needed.
4923 */
4924 if (drvp->PIO_mode >= 3) {
4925 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4926 drvp->DMA_mode = drvp->PIO_mode - 2;
4927 if (drvp->DMA_mode < 1) {
4928 /*
4929 * Can't mix both PIO and DMA.
4930 * Disable DMA.
4931 */
4932 drvp->drive_flags &= ~DRIVE_DMA;
4933 }
4934 } else {
4935 /*
4936 * Can't mix both PIO and DMA. Disable
4937 * DMA.
4938 */
4939 drvp->drive_flags &= ~DRIVE_DMA;
4940 }
4941 }
4942
4943 if (drvp->drive_flags & DRIVE_DMA) {
4944 /* Use multi-word DMA. */
4945 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4946 PxDx_CMD_ON_SHIFT;
4947 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4948 } else {
4949 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4950 PxDx_CMD_ON_SHIFT;
4951 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4952 }
4953
4954 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4955
4956 /* ...and set the mode for this drive. */
4957 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4958 }
4959
4960 pciide_print_modes(cp);
4961 }
4962
4963 void
4964 serverworks_chip_map(sc, pa)
4965 struct pciide_softc *sc;
4966 struct pci_attach_args *pa;
4967 {
4968 struct pciide_channel *cp;
4969 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4970 pcitag_t pcib_tag;
4971 int channel;
4972 bus_size_t cmdsize, ctlsize;
4973
4974 if (pciide_chipen(sc, pa) == 0)
4975 return;
4976
4977 printf("%s: bus-master DMA support present",
4978 sc->sc_wdcdev.sc_dev.dv_xname);
4979 pciide_mapreg_dma(sc, pa);
4980 printf("\n");
4981 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4982 WDC_CAPABILITY_MODE;
4983
4984 if (sc->sc_dma_ok) {
4985 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4986 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4987 sc->sc_wdcdev.irqack = pciide_irqack;
4988 }
4989 sc->sc_wdcdev.PIO_cap = 4;
4990 sc->sc_wdcdev.DMA_cap = 2;
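	/*
	 * The OSB4 tops out at UDMA mode 2; the CSB5 supports mode 4 on
	 * revisions before 0x92 and mode 5 from 0x92 on.
	 */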
4991 switch (sc->sc_pp->ide_product) {
4992 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4993 sc->sc_wdcdev.UDMA_cap = 2;
4994 break;
4995 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4996 if (PCI_REVISION(pa->pa_class) < 0x92)
4997 sc->sc_wdcdev.UDMA_cap = 4;
4998 else
4999 sc->sc_wdcdev.UDMA_cap = 5;
5000 break;
5001 }
5002
5003 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
5004 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5005 sc->sc_wdcdev.nchannels = 2;
5006
5007 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5008 cp = &sc->pciide_channels[channel];
5009 if (pciide_chansetup(sc, channel, interface) == 0)
5010 continue;
5011 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5012 serverworks_pci_intr);
5013 if (cp->hw_ok == 0)
5014 return;
5015 pciide_map_compat_intr(pa, cp, channel, interface);
5016 if (cp->hw_ok == 0)
5017 return;
5018 serverworks_setup_channel(&cp->wdc_channel);
5019 }
5020
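	/*
	 * Finally poke config register 0x64 on function 0 of this device:
	 * clear bit 13 and set bit 14 (the meaning of these bits is not
	 * documented here).
	 */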
5021 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
5022 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
5023 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
5024 }
5025
5026 void
5027 serverworks_setup_channel(chp)
5028 struct channel_softc *chp;
5029 {
5030 struct ata_drive_datas *drvp;
5031 struct pciide_channel *cp = (struct pciide_channel*)chp;
5032 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5033 int channel = chp->channel;
5034 int drive, unit;
5035 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
5036 u_int32_t idedma_ctl;
5037 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
5038 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
5039
5040 /* setup DMA if needed */
5041 pciide_channel_dma_setup(cp);
5042
5043 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
5044 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
5045 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
5046 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
5047
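	/*
	 * Clear this channel's fields: 16 bits of PIO and DMA timing per
	 * channel, one mode byte per channel in the PIO and UDMA mode
	 * registers, and the two per-drive UDMA enable bits in the low
	 * nibble.
	 */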
5048 pio_time &= ~(0xffff << (16 * channel));
5049 dma_time &= ~(0xffff << (16 * channel));
5050 pio_mode &= ~(0xff << (8 * channel + 16));
5051 udma_mode &= ~(0xff << (8 * channel + 16));
5052 udma_mode &= ~(3 << (2 * channel));
5053
5054 idedma_ctl = 0;
5055
5056 /* Per drive settings */
5057 for (drive = 0; drive < 2; drive++) {
5058 drvp = &chp->ch_drive[drive];
5059 /* If no drive, skip */
5060 if ((drvp->drive_flags & DRIVE) == 0)
5061 continue;
5062 unit = drive + 2 * channel;
5063 /* add timing values, setup DMA if needed */
5064 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
5065 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
5066 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5067 (drvp->drive_flags & DRIVE_UDMA)) {
5068 /* use Ultra/DMA, check for 80-pin cable */
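			/*
			 * The cable type is reported through bit 14 or 15
			 * of the subsystem ID register; without it, cap at
			 * UDMA mode 2.
			 */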
5069 if (drvp->UDMA_mode > 2 &&
5070 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
5071 drvp->UDMA_mode = 2;
5072 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5073 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
5074 udma_mode |= 1 << unit;
5075 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5076 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5077 (drvp->drive_flags & DRIVE_DMA)) {
5078 /* use Multiword DMA */
5079 drvp->drive_flags &= ~DRIVE_UDMA;
5080 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5081 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5082 } else {
5083 /* PIO only */
5084 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5085 }
5086 }
5087
5088 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
5089 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
5090 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
5091 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
5092 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
5093
5094 if (idedma_ctl != 0) {
5095 /* Add software bits in status register */
5096 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5097 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5098 }
5099 pciide_print_modes(cp);
5100 }
5101
5102 int
5103 serverworks_pci_intr(arg)
5104 void *arg;
5105 {
5106 struct pciide_softc *sc = arg;
5107 struct pciide_channel *cp;
5108 struct channel_softc *wdc_cp;
5109 int rv = 0;
5110 int dmastat, i, crv;
5111
5112 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5113 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5114 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5115 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
5116 IDEDMA_CTL_INTR)
5117 continue;
5118 cp = &sc->pciide_channels[i];
5119 wdc_cp = &cp->wdc_channel;
5120 crv = wdcintr(wdc_cp);
5121 if (crv == 0) {
5122 printf("%s:%d: bogus intr\n",
5123 sc->sc_wdcdev.sc_dev.dv_xname, i);
5124 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5125 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5126 } else
5127 rv = 1;
5128 }
5129 return rv;
5130 }
5131