/*	$NetBSD: pciide.c,v 1.146 2002/04/03 17:02:21 thorpej Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.146 2002/04/03 17:02:21 thorpej Exp $");
81
/*
 * Debug support.  NOTE(review): this #ifndef block unconditionally forces
 * WDCDEBUG on, so the debug code below is always compiled in (output is
 * still suppressed because wdcdebug_pciide_mask defaults to 0) — confirm
 * this is intentional and not a leftover from a debugging session.
 */
#ifndef WDCDEBUG
#define WDCDEBUG
#endif

/* Bit values for wdcdebug_pciide_mask, tested by WDCDEBUG_PRINT(). */
#define DEBUG_DMA   0x01
#define DEBUG_XFERS  0x02
#define DEBUG_FUNCS  0x08
#define DEBUG_PROBE  0x10
#ifdef WDCDEBUG
int wdcdebug_pciide_mask = 0;
#define WDCDEBUG_PRINT(args, level) \
	if (wdcdebug_pciide_mask & (level)) printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif
97 #include <sys/param.h>
98 #include <sys/systm.h>
99 #include <sys/device.h>
100 #include <sys/malloc.h>
101
102 #include <uvm/uvm_extern.h>
103
104 #include <machine/endian.h>
105
106 #include <dev/pci/pcireg.h>
107 #include <dev/pci/pcivar.h>
108 #include <dev/pci/pcidevs.h>
109 #include <dev/pci/pciidereg.h>
110 #include <dev/pci/pciidevar.h>
111 #include <dev/pci/pciide_piix_reg.h>
112 #include <dev/pci/pciide_amd_reg.h>
113 #include <dev/pci/pciide_apollo_reg.h>
114 #include <dev/pci/pciide_cmd_reg.h>
115 #include <dev/pci/pciide_cy693_reg.h>
116 #include <dev/pci/pciide_sis_reg.h>
117 #include <dev/pci/pciide_acer_reg.h>
118 #include <dev/pci/pciide_pdc202xx_reg.h>
119 #include <dev/pci/pciide_opti_reg.h>
120 #include <dev/pci/pciide_hpt_reg.h>
121 #include <dev/pci/pciide_acard_reg.h>
122 #include <dev/pci/pciide_sl82c105_reg.h>
123 #include <dev/pci/cy82c693var.h>
124
125 #include "opt_pciide.h"
126
127 /* inlines for reading/writing 8-bit PCI registers */
128 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
129 int));
130 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
131 int, u_int8_t));
132
133 static __inline u_int8_t
134 pciide_pci_read(pc, pa, reg)
135 pci_chipset_tag_t pc;
136 pcitag_t pa;
137 int reg;
138 {
139
140 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
141 ((reg & 0x03) * 8) & 0xff);
142 }
143
144 static __inline void
145 pciide_pci_write(pc, pa, reg, val)
146 pci_chipset_tag_t pc;
147 pcitag_t pa;
148 int reg;
149 u_int8_t val;
150 {
151 pcireg_t pcival;
152
153 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
154 pcival &= ~(0xff << ((reg & 0x03) * 8));
155 pcival |= (val << ((reg & 0x03) * 8));
156 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
157 }
158
159 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
160
161 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
162 void piix_setup_channel __P((struct channel_softc*));
163 void piix3_4_setup_channel __P((struct channel_softc*));
164 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
165 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
166 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
167
168 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
169 void amd7x6_setup_channel __P((struct channel_softc*));
170
171 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void apollo_setup_channel __P((struct channel_softc*));
173
174 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
176 void cmd0643_9_setup_channel __P((struct channel_softc*));
177 void cmd_channel_map __P((struct pci_attach_args *,
178 struct pciide_softc *, int));
179 int cmd_pci_intr __P((void *));
180 void cmd646_9_irqack __P((struct channel_softc *));
181
182 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void cy693_setup_channel __P((struct channel_softc*));
184
185 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void sis_setup_channel __P((struct channel_softc*));
187 static int sis_hostbr_match __P(( struct pci_attach_args *));
188
189 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
190 void acer_setup_channel __P((struct channel_softc*));
191 int acer_pci_intr __P((void *));
192 static int acer_isabr_match __P(( struct pci_attach_args *));
193
194 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
195 void pdc202xx_setup_channel __P((struct channel_softc*));
196 void pdc20268_setup_channel __P((struct channel_softc*));
197 int pdc202xx_pci_intr __P((void *));
198 int pdc20265_pci_intr __P((void *));
199
200 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
201 void opti_setup_channel __P((struct channel_softc*));
202
203 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
204 void hpt_setup_channel __P((struct channel_softc*));
205 int hpt_pci_intr __P((void *));
206
207 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
208 void acard_setup_channel __P((struct channel_softc*));
209 int acard_pci_intr __P((void *));
210
211 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
212 void sl82c105_setup_channel __P((struct channel_softc*));
213
214 void pciide_channel_dma_setup __P((struct pciide_channel *));
215 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
216 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
217 void pciide_dma_start __P((void*, int, int));
218 int pciide_dma_finish __P((void*, int, int, int));
219 void pciide_irqack __P((struct channel_softc *));
220 void pciide_print_modes __P((struct pciide_channel *));
221
/* Per-product description: how to identify and drive one controller chip. */
struct pciide_product_desc {
	u_int32_t ide_product;	/* PCI product ID to match */
	int ide_flags;		/* IDE_* flags below */
	const char *ide_name;	/* name to print; NULL if chip_map prints it */
	/* map and setup chip, probe drives */
	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
#define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
233
/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,
	0,
	"Generic PCI IDE controller",
	default_chip_map,	/* standard PCI IDE programming interface only */
};
241
242 const struct pciide_product_desc pciide_intel_products[] = {
243 { PCI_PRODUCT_INTEL_82092AA,
244 0,
245 "Intel 82092AA IDE controller",
246 default_chip_map,
247 },
248 { PCI_PRODUCT_INTEL_82371FB_IDE,
249 0,
250 "Intel 82371FB IDE controller (PIIX)",
251 piix_chip_map,
252 },
253 { PCI_PRODUCT_INTEL_82371SB_IDE,
254 0,
255 "Intel 82371SB IDE Interface (PIIX3)",
256 piix_chip_map,
257 },
258 { PCI_PRODUCT_INTEL_82371AB_IDE,
259 0,
260 "Intel 82371AB IDE controller (PIIX4)",
261 piix_chip_map,
262 },
263 { PCI_PRODUCT_INTEL_82440MX_IDE,
264 0,
265 "Intel 82440MX IDE controller",
266 piix_chip_map
267 },
268 { PCI_PRODUCT_INTEL_82801AA_IDE,
269 0,
270 "Intel 82801AA IDE Controller (ICH)",
271 piix_chip_map,
272 },
273 { PCI_PRODUCT_INTEL_82801AB_IDE,
274 0,
275 "Intel 82801AB IDE Controller (ICH0)",
276 piix_chip_map,
277 },
278 { PCI_PRODUCT_INTEL_82801BA_IDE,
279 0,
280 "Intel 82801BA IDE Controller (ICH2)",
281 piix_chip_map,
282 },
283 { PCI_PRODUCT_INTEL_82801BAM_IDE,
284 0,
285 "Intel 82801BAM IDE Controller (ICH2)",
286 piix_chip_map,
287 },
288 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
289 0,
290 "Intel 82201CA IDE Controller",
291 piix_chip_map,
292 },
293 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
294 0,
295 "Intel 82201CA IDE Controller",
296 piix_chip_map,
297 },
298 { 0,
299 0,
300 NULL,
301 NULL
302 }
303 };
304
/* AMD 756/766/768 southbridge IDE functions, all via amd7x6_chip_map. */
const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,
	  0,
	  "Advanced Micro Devices AMD756 IDE Controller",
	  amd7x6_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC766_IDE,
	  0,
	  "Advanced Micro Devices AMD766 IDE Controller",
	  amd7x6_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC768_IDE,
	  0,
	  "Advanced Micro Devices AMD768 IDE Controller",
	  amd7x6_chip_map
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};
327
/*
 * CMD Technology controllers.  The 648/649 don't advertise the PCI IDE
 * class, hence IDE_PCI_CLASS_OVERRIDE.
 */
const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  0,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  0,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  0,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};
360
/*
 * VIA Apollo IDE functions.  ide_name is NULL: apollo_chip_map prints
 * the exact chip name itself (see pciide_attach).
 */
const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
	  0,
	  NULL,
	  apollo_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
	  0,
	  NULL,
	  apollo_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};
378
/* Cypress/Contaq 82C693: BARs decode only 16 bits of I/O address. */
const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693,
	  IDE_16BIT_IOSPACE,
	  "Cypress 82C693 IDE Controller",
	  cy693_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};
391
/* Silicon Integrated System southbridge IDE. */
const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5597_IDE,
	  0,
	  "Silicon Integrated System 5597/5598 IDE controller",
	  sis_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};
404
/* Acer Labs (ALi) IDE. */
const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,
	  0,
	  "Acer Labs M5229 UDMA IDE Controller",
	  acer_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};
417
/*
 * Promise controllers.  None of them claim the PCI IDE class, hence
 * IDE_PCI_CLASS_OVERRIDE on every entry.
 */
const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_ULTRA33,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA66,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	/*
	 * NOTE(review): the ULTRA100X entry reuses the plain "Ultra100"
	 * name string — presumably deliberate (same marketing name);
	 * confirm before "fixing".
	 */
	{ PCI_PRODUCT_PROMISE_ULTRA100X,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100TX2,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA133,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra133/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};
460
/* OPTi 82c621 and compatibles. */
const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621,
	  0,
	  "OPTi 82c621 PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82C568,
	  0,
	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82D568,
	  0,
	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};
483
/*
 * Triones/HighPoint HPT366 family.  ide_name is NULL: hpt_chip_map
 * prints the exact chip name itself (see pciide_attach).
 */
const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366,
	  IDE_PCI_CLASS_OVERRIDE,
	  NULL,
	  hpt_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};
496
/* Acard ATP85x/86x; none claim the PCI IDE class. */
const struct pciide_product_desc pciide_acard_products[] = {
	{ PCI_PRODUCT_ACARD_ATP850U,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Acard ATP850U Ultra33 IDE Controller",
	  acard_chip_map,
	},
	{ PCI_PRODUCT_ACARD_ATP860,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Acard ATP860 Ultra66 IDE Controller",
	  acard_chip_map,
	},
	{ PCI_PRODUCT_ACARD_ATP860A,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Acard ATP860-A Ultra66 IDE Controller",
	  acard_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};
519
520 #ifdef PCIIDE_SERVERWORKS_ENABLE
521 const struct pciide_product_desc pciide_serverworks_products[] = {
522 { PCI_PRODUCT_SERVERWORKS_IDE,
523 0,
524 "ServerWorks ROSB4 IDE Controller",
525 piix_chip_map,
526 },
527 { 0,
528 0,
529 NULL,
530 }
531 };
532 #endif
533
534 const struct pciide_product_desc pciide_symphony_products[] = {
535 { PCI_PRODUCT_SYMPHONY_82C105,
536 0,
537 "Symphony Labs 82C105 IDE controller",
538 sl82c105_chip_map,
539 },
540 { 0,
541 0,
542 NULL,
543 }
544 };
545
546 const struct pciide_product_desc pciide_winbond_products[] = {
547 { PCI_PRODUCT_WINBOND_W83C553F_1,
548 0,
549 "Winbond W83C553F IDE controller",
550 sl82c105_chip_map,
551 },
552 { 0,
553 0,
554 NULL,
555 }
556 };
557
/* Per-vendor entry: PCI vendor ID plus its product table. */
struct pciide_vendor_desc {
	u_int32_t ide_vendor;			/* PCI vendor ID */
	const struct pciide_product_desc *ide_products;	/* NULL-chip_map terminated */
};
562
/*
 * Master vendor table scanned by pciide_lookup_product; terminated by
 * the entry with ide_products == NULL.
 */
const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products },
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
	{ PCI_VENDOR_VIATECH, pciide_via_products },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
	{ PCI_VENDOR_SIS, pciide_sis_products },
	{ PCI_VENDOR_ALI, pciide_acer_products },
	{ PCI_VENDOR_PROMISE, pciide_promise_products },
	{ PCI_VENDOR_AMD, pciide_amd_products },
	{ PCI_VENDOR_OPTI, pciide_opti_products },
	{ PCI_VENDOR_TRIONES, pciide_triones_products },
	{ PCI_VENDOR_ACARD, pciide_acard_products },
#ifdef PCIIDE_SERVERWORKS_ENABLE
	{ PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
#endif
	{ PCI_VENDOR_SYMPHONY, pciide_symphony_products },
	{ PCI_VENDOR_WINBOND, pciide_winbond_products },
	{ 0, NULL }
};
582
/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA 0x01	/* force-enable DMA (consumer not in this chunk) */
#define PCIIDE_OPTIONS_NODMA 0x02	/* force-disable DMA; see pciide_mapreg_dma() */
586
587 int pciide_match __P((struct device *, struct cfdata *, void *));
588 void pciide_attach __P((struct device *, struct device *, void *));
589
/* Autoconfiguration glue: softc size plus match/attach entry points. */
struct cfattach pciide_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};
593 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
594 int pciide_mapregs_compat __P(( struct pci_attach_args *,
595 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
596 int pciide_mapregs_native __P((struct pci_attach_args *,
597 struct pciide_channel *, bus_size_t *, bus_size_t *,
598 int (*pci_intr) __P((void *))));
599 void pciide_mapreg_dma __P((struct pciide_softc *,
600 struct pci_attach_args *));
601 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
602 void pciide_mapchan __P((struct pci_attach_args *,
603 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
604 int (*pci_intr) __P((void *))));
605 int pciide_chan_candisable __P((struct pciide_channel *));
606 void pciide_map_compat_intr __P(( struct pci_attach_args *,
607 struct pciide_channel *, int, int));
608 int pciide_compat_intr __P((void *));
609 int pciide_pci_intr __P((void *));
610 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
611
612 const struct pciide_product_desc *
613 pciide_lookup_product(id)
614 u_int32_t id;
615 {
616 const struct pciide_product_desc *pp;
617 const struct pciide_vendor_desc *vp;
618
619 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
620 if (PCI_VENDOR(id) == vp->ide_vendor)
621 break;
622
623 if ((pp = vp->ide_products) == NULL)
624 return NULL;
625
626 for (; pp->chip_map != NULL; pp++)
627 if (PCI_PRODUCT(id) == pp->ide_product)
628 break;
629
630 if (pp->chip_map == NULL)
631 return NULL;
632 return pp;
633 }
634
635 int
636 pciide_match(parent, match, aux)
637 struct device *parent;
638 struct cfdata *match;
639 void *aux;
640 {
641 struct pci_attach_args *pa = aux;
642 const struct pciide_product_desc *pp;
643
644 /*
645 * Check the ID register to see that it's a PCI IDE controller.
646 * If it is, we assume that we can deal with it; it _should_
647 * work in a standardized way...
648 */
649 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
650 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
651 return (1);
652 }
653
654 /*
655 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE
656 * controllers. Let see if we can deal with it anyway.
657 */
658 pp = pciide_lookup_product(pa->pa_id);
659 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
660 return (1);
661 }
662
663 return (0);
664 }
665
666 void
667 pciide_attach(parent, self, aux)
668 struct device *parent, *self;
669 void *aux;
670 {
671 struct pci_attach_args *pa = aux;
672 pci_chipset_tag_t pc = pa->pa_pc;
673 pcitag_t tag = pa->pa_tag;
674 struct pciide_softc *sc = (struct pciide_softc *)self;
675 pcireg_t csr;
676 char devinfo[256];
677 const char *displaydev;
678
679 sc->sc_pp = pciide_lookup_product(pa->pa_id);
680 if (sc->sc_pp == NULL) {
681 sc->sc_pp = &default_product_desc;
682 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
683 displaydev = devinfo;
684 } else
685 displaydev = sc->sc_pp->ide_name;
686
687 /* if displaydev == NULL, printf is done in chip-specific map */
688 if (displaydev)
689 printf(": %s (rev. 0x%02x)\n", displaydev,
690 PCI_REVISION(pa->pa_class));
691
692 sc->sc_pc = pa->pa_pc;
693 sc->sc_tag = pa->pa_tag;
694 #ifdef WDCDEBUG
695 if (wdcdebug_pciide_mask & DEBUG_PROBE)
696 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
697 #endif
698 sc->sc_pp->chip_map(sc, pa);
699
700 if (sc->sc_dma_ok) {
701 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
702 csr |= PCI_COMMAND_MASTER_ENABLE;
703 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
704 }
705 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
706 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
707 }
708
/*
 * Tell whether the chip is enabled or not: verify that I/O space access
 * to the function was enabled before the driver touches its registers.
 */
int
pciide_chipen(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t csr;
	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		/*
		 * Distinguish a device whose own I/O-enable bit is clear
		 * from one disabled further upstream (e.g. by a bridge).
		 */
		printf("%s: device disabled (at %s)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
		    "device" : "bridge");
		return 0;
	}
	return 1;
}
727
728 int
729 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
730 struct pci_attach_args *pa;
731 struct pciide_channel *cp;
732 int compatchan;
733 bus_size_t *cmdsizep, *ctlsizep;
734 {
735 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
736 struct channel_softc *wdc_cp = &cp->wdc_channel;
737
738 cp->compat = 1;
739 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
740 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
741
742 wdc_cp->cmd_iot = pa->pa_iot;
743 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
744 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
745 printf("%s: couldn't map %s channel cmd regs\n",
746 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
747 return (0);
748 }
749
750 wdc_cp->ctl_iot = pa->pa_iot;
751 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
752 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
753 printf("%s: couldn't map %s channel ctl regs\n",
754 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
755 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
756 PCIIDE_COMPAT_CMD_SIZE);
757 return (0);
758 }
759
760 return (1);
761 }
762
763 int
764 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
765 struct pci_attach_args * pa;
766 struct pciide_channel *cp;
767 bus_size_t *cmdsizep, *ctlsizep;
768 int (*pci_intr) __P((void *));
769 {
770 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
771 struct channel_softc *wdc_cp = &cp->wdc_channel;
772 const char *intrstr;
773 pci_intr_handle_t intrhandle;
774
775 cp->compat = 0;
776
777 if (sc->sc_pci_ih == NULL) {
778 if (pci_intr_map(pa, &intrhandle) != 0) {
779 printf("%s: couldn't map native-PCI interrupt\n",
780 sc->sc_wdcdev.sc_dev.dv_xname);
781 return 0;
782 }
783 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
784 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
785 intrhandle, IPL_BIO, pci_intr, sc);
786 if (sc->sc_pci_ih != NULL) {
787 printf("%s: using %s for native-PCI interrupt\n",
788 sc->sc_wdcdev.sc_dev.dv_xname,
789 intrstr ? intrstr : "unknown interrupt");
790 } else {
791 printf("%s: couldn't establish native-PCI interrupt",
792 sc->sc_wdcdev.sc_dev.dv_xname);
793 if (intrstr != NULL)
794 printf(" at %s", intrstr);
795 printf("\n");
796 return 0;
797 }
798 }
799 cp->ih = sc->sc_pci_ih;
800 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
801 PCI_MAPREG_TYPE_IO, 0,
802 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
803 printf("%s: couldn't map %s channel cmd regs\n",
804 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
805 return 0;
806 }
807
808 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
809 PCI_MAPREG_TYPE_IO, 0,
810 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
811 printf("%s: couldn't map %s channel ctl regs\n",
812 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
813 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
814 return 0;
815 }
816 /*
817 * In native mode, 4 bytes of I/O space are mapped for the control
818 * register, the control register is at offset 2. Pass the generic
819 * code a handle for only one byte at the rigth offset.
820 */
821 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
822 &wdc_cp->ctl_ioh) != 0) {
823 printf("%s: unable to subregion %s channel ctl regs\n",
824 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
825 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
826 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
827 return 0;
828 }
829 return (1);
830 }
831
/*
 * Map the bus-master DMA register block and, on success, hook the
 * generic wdc DMA entry points into the softc.  Called while the attach
 * message line is still open: diagnostics print as ", but unused (...)"
 * suffixes to that line.  Sets sc->sc_dma_ok as the single source of
 * truth for whether DMA can be used.
 */
void
pciide_mapreg_dma(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done. If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
	 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		/*
		 * Pre-check the BAR: chips with IDE_16BIT_IOSPACE decode
		 * only 16 address bits, so an address above 64K is unsafe.
		 */
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", but unused (registers at unsafe address "
			    "%#lx)", (unsigned long)addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		/* maptype is still the real BAR type here (IO on fallthrough). */
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}

		/*
		 * Config-file override wins last.  NOTE(review): if the
		 * map above already failed AND this flag is set, two
		 * ", but unused" suffixes are printed — confirm intended.
		 */
		if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
		    PCIIDE_OPTIONS_NODMA) {
			printf(", but unused (forced off by config file)");
			sc->sc_dma_ok = 0;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", but unsupported register maptype (0x%x)", maptype);
	}
}
902
903 int
904 pciide_compat_intr(arg)
905 void *arg;
906 {
907 struct pciide_channel *cp = arg;
908
909 #ifdef DIAGNOSTIC
910 /* should only be called for a compat channel */
911 if (cp->compat == 0)
912 panic("pciide compat intr called for non-compat chan %p\n", cp);
913 #endif
914 return (wdcintr(&cp->wdc_channel));
915 }
916
917 int
918 pciide_pci_intr(arg)
919 void *arg;
920 {
921 struct pciide_softc *sc = arg;
922 struct pciide_channel *cp;
923 struct channel_softc *wdc_cp;
924 int i, rv, crv;
925
926 rv = 0;
927 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
928 cp = &sc->pciide_channels[i];
929 wdc_cp = &cp->wdc_channel;
930
931 /* If a compat channel skip. */
932 if (cp->compat)
933 continue;
934 /* if this channel not waiting for intr, skip */
935 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
936 continue;
937
938 crv = wdcintr(wdc_cp);
939 if (crv == 0)
940 ; /* leave rv alone */
941 else if (crv == 1)
942 rv = 1; /* claim the intr */
943 else if (rv == 0) /* crv should be -1 in this case */
944 rv = crv; /* if we've done no better, take it */
945 }
946 return (rv);
947 }
948
949 void
950 pciide_channel_dma_setup(cp)
951 struct pciide_channel *cp;
952 {
953 int drive;
954 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
955 struct ata_drive_datas *drvp;
956
957 for (drive = 0; drive < 2; drive++) {
958 drvp = &cp->wdc_channel.ch_drive[drive];
959 /* If no drive, skip */
960 if ((drvp->drive_flags & DRIVE) == 0)
961 continue;
962 /* setup DMA if needed */
963 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
964 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
965 sc->sc_dma_ok == 0) {
966 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
967 continue;
968 }
969 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
970 != 0) {
971 /* Abort DMA setup */
972 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
973 continue;
974 }
975 }
976 }
977
978 int
979 pciide_dma_table_setup(sc, channel, drive)
980 struct pciide_softc *sc;
981 int channel, drive;
982 {
983 bus_dma_segment_t seg;
984 int error, rseg;
985 const bus_size_t dma_table_size =
986 sizeof(struct idedma_table) * NIDEDMA_TABLES;
987 struct pciide_dma_maps *dma_maps =
988 &sc->pciide_channels[channel].dma_maps[drive];
989
990 /* If table was already allocated, just return */
991 if (dma_maps->dma_table)
992 return 0;
993
994 /* Allocate memory for the DMA tables and map it */
995 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
996 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
997 BUS_DMA_NOWAIT)) != 0) {
998 printf("%s:%d: unable to allocate table DMA for "
999 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1000 channel, drive, error);
1001 return error;
1002 }
1003 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1004 dma_table_size,
1005 (caddr_t *)&dma_maps->dma_table,
1006 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1007 printf("%s:%d: unable to map table DMA for"
1008 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1009 channel, drive, error);
1010 return error;
1011 }
1012 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1013 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1014 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1015
1016 /* Create and load table DMA map for this disk */
1017 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1018 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1019 &dma_maps->dmamap_table)) != 0) {
1020 printf("%s:%d: unable to create table DMA map for "
1021 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1022 channel, drive, error);
1023 return error;
1024 }
1025 if ((error = bus_dmamap_load(sc->sc_dmat,
1026 dma_maps->dmamap_table,
1027 dma_maps->dma_table,
1028 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1029 printf("%s:%d: unable to load table DMA map for "
1030 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1031 channel, drive, error);
1032 return error;
1033 }
1034 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1035 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1036 DEBUG_PROBE);
1037 /* Create a xfer DMA map for this drive */
1038 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1039 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1040 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1041 &dma_maps->dmamap_xfer)) != 0) {
1042 printf("%s:%d: unable to create xfer DMA map for "
1043 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1044 channel, drive, error);
1045 return error;
1046 }
1047 return 0;
1048 }
1049
1050 int
1051 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1052 void *v;
1053 int channel, drive;
1054 void *databuf;
1055 size_t datalen;
1056 int flags;
1057 {
1058 struct pciide_softc *sc = v;
1059 int error, seg;
1060 struct pciide_dma_maps *dma_maps =
1061 &sc->pciide_channels[channel].dma_maps[drive];
1062
1063 error = bus_dmamap_load(sc->sc_dmat,
1064 dma_maps->dmamap_xfer,
1065 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1066 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1067 if (error) {
1068 printf("%s:%d: unable to load xfer DMA map for"
1069 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1070 channel, drive, error);
1071 return error;
1072 }
1073
1074 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1075 dma_maps->dmamap_xfer->dm_mapsize,
1076 (flags & WDC_DMA_READ) ?
1077 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1078
1079 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1080 #ifdef DIAGNOSTIC
1081 /* A segment must not cross a 64k boundary */
1082 {
1083 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1084 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1085 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1086 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1087 printf("pciide_dma: segment %d physical addr 0x%lx"
1088 " len 0x%lx not properly aligned\n",
1089 seg, phys, len);
1090 panic("pciide_dma: buf align");
1091 }
1092 }
1093 #endif
1094 dma_maps->dma_table[seg].base_addr =
1095 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1096 dma_maps->dma_table[seg].byte_count =
1097 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1098 IDEDMA_BYTE_COUNT_MASK);
1099 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1100 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1101 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1102
1103 }
1104 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1105 htole32(IDEDMA_BYTE_COUNT_EOT);
1106
1107 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1108 dma_maps->dmamap_table->dm_mapsize,
1109 BUS_DMASYNC_PREWRITE);
1110
1111 /* Maps are ready. Start DMA function */
1112 #ifdef DIAGNOSTIC
1113 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1114 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1115 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1116 panic("pciide_dma_init: table align");
1117 }
1118 #endif
1119
1120 /* Clear status bits */
1121 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1122 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1123 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1124 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1125 /* Write table addr */
1126 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1127 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1128 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1129 /* set read/write */
1130 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1131 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1132 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1133 /* remember flags */
1134 dma_maps->dma_flags = flags;
1135 return 0;
1136 }
1137
1138 void
1139 pciide_dma_start(v, channel, drive)
1140 void *v;
1141 int channel, drive;
1142 {
1143 struct pciide_softc *sc = v;
1144
1145 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1146 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1147 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1148 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1149 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1150 }
1151
int
pciide_dma_finish(v, channel, drive, force)
	void *v;
	int channel, drive;
	int force;
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/*
	 * Tear down a bus-master DMA transfer and report its outcome.
	 * Returns 0 on clean completion, WDC_DMAST_NOIRQ if the
	 * controller did not raise an interrupt, or a bitwise OR of
	 * WDC_DMAST_* flags describing what went wrong.
	 */
	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	/*
	 * Unless the caller forces completion, bail out if the interrupt
	 * bit isn't set: this interrupt was not for us, leave the
	 * transfer running.
	 */
	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	/* Accumulate error conditions from the status register. */
	if ((status & IDEDMA_CTL_ERR) != 0) {
		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
		error |= WDC_DMAST_ERR;
	}

	/* Forced completion without the interrupt bit set: report it. */
	if ((status & IDEDMA_CTL_INTR) == 0) {
		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}
1204
1205 void
1206 pciide_irqack(chp)
1207 struct channel_softc *chp;
1208 {
1209 struct pciide_channel *cp = (struct pciide_channel*)chp;
1210 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1211
1212 /* clear status bits in IDE DMA registers */
1213 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1214 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1215 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1216 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1217 }
1218
1219 /* some common code used by several chip_map */
1220 int
1221 pciide_chansetup(sc, channel, interface)
1222 struct pciide_softc *sc;
1223 int channel;
1224 pcireg_t interface;
1225 {
1226 struct pciide_channel *cp = &sc->pciide_channels[channel];
1227 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1228 cp->name = PCIIDE_CHANNEL_NAME(channel);
1229 cp->wdc_channel.channel = channel;
1230 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1231 cp->wdc_channel.ch_queue =
1232 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1233 if (cp->wdc_channel.ch_queue == NULL) {
1234 printf("%s %s channel: "
1235 "can't allocate memory for command queue",
1236 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1237 return 0;
1238 }
1239 printf("%s: %s channel %s to %s mode\n",
1240 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1241 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1242 "configured" : "wired",
1243 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1244 "native-PCI" : "compatibility");
1245 return 1;
1246 }
1247
1248 /* some common code used by several chip channel_map */
1249 void
1250 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1251 struct pci_attach_args *pa;
1252 struct pciide_channel *cp;
1253 pcireg_t interface;
1254 bus_size_t *cmdsizep, *ctlsizep;
1255 int (*pci_intr) __P((void *));
1256 {
1257 struct channel_softc *wdc_cp = &cp->wdc_channel;
1258
1259 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1260 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1261 pci_intr);
1262 else
1263 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1264 wdc_cp->channel, cmdsizep, ctlsizep);
1265
1266 if (cp->hw_ok == 0)
1267 return;
1268 wdc_cp->data32iot = wdc_cp->cmd_iot;
1269 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1270 wdcattach(wdc_cp);
1271 }
1272
1273 /*
1274 * Generic code to call to know if a channel can be disabled. Return 1
1275 * if channel can be disabled, 0 if not
1276 */
1277 int
1278 pciide_chan_candisable(cp)
1279 struct pciide_channel *cp;
1280 {
1281 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1282 struct channel_softc *wdc_cp = &cp->wdc_channel;
1283
1284 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1285 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1286 printf("%s: disabling %s channel (no drives)\n",
1287 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1288 cp->hw_ok = 0;
1289 return 1;
1290 }
1291 return 0;
1292 }
1293
1294 /*
1295 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1296 * Set hw_ok=0 on failure
1297 */
void
pciide_map_compat_intr(pa, cp, compatchan, interface)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan, interface;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	/* Nothing to do if the channel is already dead. */
	if (cp->hw_ok == 0)
		return;
	/* Native-PCI channels use the PCI interrupt, not a compat one. */
	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
		return;

	/*
	 * NOTE: the #ifdef bracketing below is deliberate. Without
	 * machdep support the establish call is compiled out and the
	 * failure path runs unconditionally, disabling the channel.
	 */
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
	    pa, compatchan, pciide_compat_intr, cp);
	if (cp->ih == NULL) {
#endif
		printf("%s: no compatibility interrupt for use by %s "
		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	}
#endif
}
1324
void
pciide_print_modes(cp)
	struct pciide_channel *cp;
{
	/* Thin wrapper: report the channel's configured transfer modes. */
	wdc_print_modes(&cp->wdc_channel);
}
1331
1332 void
1333 default_chip_map(sc, pa)
1334 struct pciide_softc *sc;
1335 struct pci_attach_args *pa;
1336 {
1337 struct pciide_channel *cp;
1338 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1339 pcireg_t csr;
1340 int channel, drive;
1341 struct ata_drive_datas *drvp;
1342 u_int8_t idedma_ctl;
1343 bus_size_t cmdsize, ctlsize;
1344 char *failreason;
1345
1346 if (pciide_chipen(sc, pa) == 0)
1347 return;
1348
1349 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1350 printf("%s: bus-master DMA support present",
1351 sc->sc_wdcdev.sc_dev.dv_xname);
1352 if (sc->sc_pp == &default_product_desc &&
1353 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1354 PCIIDE_OPTIONS_DMA) == 0) {
1355 printf(", but unused (no driver support)");
1356 sc->sc_dma_ok = 0;
1357 } else {
1358 pciide_mapreg_dma(sc, pa);
1359 if (sc->sc_dma_ok != 0)
1360 printf(", used without full driver "
1361 "support");
1362 }
1363 } else {
1364 printf("%s: hardware does not support DMA",
1365 sc->sc_wdcdev.sc_dev.dv_xname);
1366 sc->sc_dma_ok = 0;
1367 }
1368 printf("\n");
1369 if (sc->sc_dma_ok) {
1370 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1371 sc->sc_wdcdev.irqack = pciide_irqack;
1372 }
1373 sc->sc_wdcdev.PIO_cap = 0;
1374 sc->sc_wdcdev.DMA_cap = 0;
1375
1376 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1377 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1378 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1379
1380 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1381 cp = &sc->pciide_channels[channel];
1382 if (pciide_chansetup(sc, channel, interface) == 0)
1383 continue;
1384 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1385 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1386 &ctlsize, pciide_pci_intr);
1387 } else {
1388 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1389 channel, &cmdsize, &ctlsize);
1390 }
1391 if (cp->hw_ok == 0)
1392 continue;
1393 /*
1394 * Check to see if something appears to be there.
1395 */
1396 failreason = NULL;
1397 if (!wdcprobe(&cp->wdc_channel)) {
1398 failreason = "not responding; disabled or no drives?";
1399 goto next;
1400 }
1401 /*
1402 * Now, make sure it's actually attributable to this PCI IDE
1403 * channel by trying to access the channel again while the
1404 * PCI IDE controller's I/O space is disabled. (If the
1405 * channel no longer appears to be there, it belongs to
1406 * this controller.) YUCK!
1407 */
1408 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1409 PCI_COMMAND_STATUS_REG);
1410 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1411 csr & ~PCI_COMMAND_IO_ENABLE);
1412 if (wdcprobe(&cp->wdc_channel))
1413 failreason = "other hardware responding at addresses";
1414 pci_conf_write(sc->sc_pc, sc->sc_tag,
1415 PCI_COMMAND_STATUS_REG, csr);
1416 next:
1417 if (failreason) {
1418 printf("%s: %s channel ignored (%s)\n",
1419 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1420 failreason);
1421 cp->hw_ok = 0;
1422 bus_space_unmap(cp->wdc_channel.cmd_iot,
1423 cp->wdc_channel.cmd_ioh, cmdsize);
1424 bus_space_unmap(cp->wdc_channel.ctl_iot,
1425 cp->wdc_channel.ctl_ioh, ctlsize);
1426 } else {
1427 pciide_map_compat_intr(pa, cp, channel, interface);
1428 }
1429 if (cp->hw_ok) {
1430 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1431 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1432 wdcattach(&cp->wdc_channel);
1433 }
1434 }
1435
1436 if (sc->sc_dma_ok == 0)
1437 return;
1438
1439 /* Allocate DMA maps */
1440 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1441 idedma_ctl = 0;
1442 cp = &sc->pciide_channels[channel];
1443 for (drive = 0; drive < 2; drive++) {
1444 drvp = &cp->wdc_channel.ch_drive[drive];
1445 /* If no drive, skip */
1446 if ((drvp->drive_flags & DRIVE) == 0)
1447 continue;
1448 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1449 continue;
1450 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1451 /* Abort DMA setup */
1452 printf("%s:%d:%d: can't allocate DMA maps, "
1453 "using PIO transfers\n",
1454 sc->sc_wdcdev.sc_dev.dv_xname,
1455 channel, drive);
1456 drvp->drive_flags &= ~DRIVE_DMA;
1457 }
1458 printf("%s:%d:%d: using DMA data transfers\n",
1459 sc->sc_wdcdev.sc_dev.dv_xname,
1460 channel, drive);
1461 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1462 }
1463 if (idedma_ctl != 0) {
1464 /* Add software bits in status register */
1465 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1466 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1467 idedma_ctl);
1468 }
1469 }
1470 }
1471
void
piix_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	bus_size_t cmdsize, ctlsize;

	/*
	 * Attach routine for Intel PIIX/ICH IDE controllers: set
	 * capabilities based on the exact product, then map and
	 * configure both (compat-only) channels.
	 */
	if (pciide_chipen(sc, pa) == 0)
		return;

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* Only these later parts support Ultra-DMA. */
		switch(sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82440MX_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
		case PCI_PRODUCT_INTEL_82801BA_IDE:
		case PCI_PRODUCT_INTEL_82801BAM_IDE:
		case PCI_PRODUCT_INTEL_82801CA_IDE_1:
		case PCI_PRODUCT_INTEL_82801CA_IDE_2:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		}
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	/* Maximum UDMA mode depends on the product. */
	switch(sc->sc_pp->ide_product) {
	case PCI_PRODUCT_INTEL_82801AA_IDE:
		sc->sc_wdcdev.UDMA_cap = 4;
		break;
	case PCI_PRODUCT_INTEL_82801BA_IDE:
	case PCI_PRODUCT_INTEL_82801BAM_IDE:
	case PCI_PRODUCT_INTEL_82801CA_IDE_1:
	case PCI_PRODUCT_INTEL_82801CA_IDE_2:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	default:
		sc->sc_wdcdev.UDMA_cap = 2;
	}
	/* The original 82371FB (PIIX) has a different timing layout. */
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* Dump the timing registers before we touch them (debug only). */
	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udamreg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		/* PIIX is compat-only */
		if (pciide_chansetup(sc, channel, 0) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		/* Skip channels whose IDE decode bit is off. */
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* PIIX are compat-only pciide devices */
		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		/* Disable the channel in hardware if it has no drives. */
		if (pciide_chan_candisable(cp)) {
			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
		}
		pciide_map_compat_intr(pa, cp, channel, 0);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}

	/* Dump the registers again, after configuration (debug only). */
	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udamreg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}
1607
void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	/*
	 * Program channel timings on the original PIIX (82371FB),
	 * which has a single timing register shared by both drives.
	 */
	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess up with drives mode: PIIX can't have
	 * different timings for master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives supports DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		    drvp[0].DMA_mode = mode[0];
		    drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if mode not compatible
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		/* demote slave to PIO 0 if its timings don't match */
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		/* demote master to PIO 0 if its timings don't match */
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If both drives are not DMA, takes the lower mode, unless
	 * one of them is PIO mode < 2
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are setup */
	/* DMA timings take precedence if any drive uses DMA. */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we are there, none of the drives are DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller. Enable
	 * it per-drive
	 */
	for (drive = 0; drive < 2; drive++) {
		/* If no drive, skip */
		if ((drvp[drive].drive_flags & DRIVE) == 0)
			continue;
		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
		if (drvp[drive].drive_flags & DRIVE_DMA)
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	/* Commit the computed timing word to PCI config space. */
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pciide_print_modes(cp);
}
1716
void
piix3_4_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive;
	int channel = chp->channel;

	/*
	 * Program channel timings on PIIX3/PIIX4/ICH parts, which add
	 * the slave timing (SIDETIM), UDMA and CONFIG registers on top
	 * of the original PIIX IDETIM register.
	 */
	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
	    PIIX_SIDETIM_RTC_MASK(channel));

	idedma_ctl = 0;
	/* If channel disabled, no need to go further */
	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
		return;
	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		/* Start from a clean UDMA state for this drive. */
		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
		    PIIX_UDMATIM_SET(0x3, channel, drive));
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0))
			goto pio;

		/* ICH parts get the ping-pong buffer enabled. */
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
			ideconf |= PIIX_CONFIG_PINGPONG;
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
			/* setup Ultra/100 */
			/* cap at UDMA 2 if the 80-wire cable isn't seen */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 4) {
				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
			} else {
				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
				if (drvp->UDMA_mode > 2) {
					ideconf |= PIIX_CONFIG_UDMA66(channel,
					    drive);
				} else {
					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
					    drive);
				}
			}
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
			/* setup Ultra/66 */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 2)
				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
			else
				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
			udmareg |= PIIX_UDMATIM_SET(
			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			/* master timings live in IDETIM, slave in SIDETIM */
			if (drive == 0) {
				idetim |= piix_setup_idetim_timings(
				    drvp->DMA_mode, 1, channel);
			} else {
				sidetim |= piix_setup_sidetim_timings(
				    drvp->DMA_mode, 1, channel);
				idetim =PIIX_IDETIM_SET(idetim,
				    PIIX_IDETIM_SITRE, channel);
			}
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

	pio:	/* use PIO mode */
		/* NB: DMA/UDMA drives fall through here too: PIO timings
		   are programmed for every present drive. */
		idetim |= piix_setup_idetim_drvs(drvp);
		if (drive == 0) {
			idetim |= piix_setup_idetim_timings(
			    drvp->PIO_mode, 0, channel);
		} else {
			sidetim |= piix_setup_sidetim_timings(
			    drvp->PIO_mode, 0, channel);
			idetim =PIIX_IDETIM_SET(idetim,
			    PIIX_IDETIM_SITRE, channel);
		}
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
		    idedma_ctl);
	}
	/* Commit all four timing registers to PCI config space. */
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
	pciide_print_modes(cp);
}
1842
1843
1844 /* setup ISP and RTC fields, based on mode */
1845 static u_int32_t
1846 piix_setup_idetim_timings(mode, dma, channel)
1847 u_int8_t mode;
1848 u_int8_t dma;
1849 u_int8_t channel;
1850 {
1851
1852 if (dma)
1853 return PIIX_IDETIM_SET(0,
1854 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1855 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1856 channel);
1857 else
1858 return PIIX_IDETIM_SET(0,
1859 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1860 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1861 channel);
1862 }
1863
1864 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1865 static u_int32_t
1866 piix_setup_idetim_drvs(drvp)
1867 struct ata_drive_datas *drvp;
1868 {
1869 u_int32_t ret = 0;
1870 struct channel_softc *chp = drvp->chnl_softc;
1871 u_int8_t channel = chp->channel;
1872 u_int8_t drive = drvp->drive;
1873
1874 /*
1875 * If drive is using UDMA, timings setups are independant
1876 * So just check DMA and PIO here.
1877 */
1878 if (drvp->drive_flags & DRIVE_DMA) {
1879 /* if mode = DMA mode 0, use compatible timings */
1880 if ((drvp->drive_flags & DRIVE_DMA) &&
1881 drvp->DMA_mode == 0) {
1882 drvp->PIO_mode = 0;
1883 return ret;
1884 }
1885 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1886 /*
1887 * PIO and DMA timings are the same, use fast timings for PIO
1888 * too, else use compat timings.
1889 */
1890 if ((piix_isp_pio[drvp->PIO_mode] !=
1891 piix_isp_dma[drvp->DMA_mode]) ||
1892 (piix_rtc_pio[drvp->PIO_mode] !=
1893 piix_rtc_dma[drvp->DMA_mode]))
1894 drvp->PIO_mode = 0;
1895 /* if PIO mode <= 2, use compat timings for PIO */
1896 if (drvp->PIO_mode <= 2) {
1897 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1898 channel);
1899 return ret;
1900 }
1901 }
1902
1903 /*
1904 * Now setup PIO modes. If mode < 2, use compat timings.
1905 * Else enable fast timings. Enable IORDY and prefetch/post
1906 * if PIO mode >= 3.
1907 */
1908
1909 if (drvp->PIO_mode < 2)
1910 return ret;
1911
1912 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1913 if (drvp->PIO_mode >= 3) {
1914 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1915 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1916 }
1917 return ret;
1918 }
1919
1920 /* setup values in SIDETIM registers, based on mode */
1921 static u_int32_t
1922 piix_setup_sidetim_timings(mode, dma, channel)
1923 u_int8_t mode;
1924 u_int8_t dma;
1925 u_int8_t channel;
1926 {
1927 if (dma)
1928 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1929 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1930 else
1931 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1932 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1933 }
1934
1935 void
1936 amd7x6_chip_map(sc, pa)
1937 struct pciide_softc *sc;
1938 struct pci_attach_args *pa;
1939 {
1940 struct pciide_channel *cp;
1941 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1942 int channel;
1943 pcireg_t chanenable;
1944 bus_size_t cmdsize, ctlsize;
1945
1946 if (pciide_chipen(sc, pa) == 0)
1947 return;
1948 printf("%s: bus-master DMA support present",
1949 sc->sc_wdcdev.sc_dev.dv_xname);
1950 pciide_mapreg_dma(sc, pa);
1951 printf("\n");
1952 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1953 WDC_CAPABILITY_MODE;
1954 if (sc->sc_dma_ok) {
1955 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1956 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1957 sc->sc_wdcdev.irqack = pciide_irqack;
1958 }
1959 sc->sc_wdcdev.PIO_cap = 4;
1960 sc->sc_wdcdev.DMA_cap = 2;
1961
1962 switch (sc->sc_pp->ide_product) {
1963 case PCI_PRODUCT_AMD_PBC766_IDE:
1964 case PCI_PRODUCT_AMD_PBC768_IDE:
1965 sc->sc_wdcdev.UDMA_cap = 5;
1966 break;
1967 default:
1968 sc->sc_wdcdev.UDMA_cap = 4;
1969 }
1970 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1971 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1972 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1973 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1974
1975 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1976 DEBUG_PROBE);
1977 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1978 cp = &sc->pciide_channels[channel];
1979 if (pciide_chansetup(sc, channel, interface) == 0)
1980 continue;
1981
1982 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1983 printf("%s: %s channel ignored (disabled)\n",
1984 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1985 continue;
1986 }
1987 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1988 pciide_pci_intr);
1989
1990 if (pciide_chan_candisable(cp))
1991 chanenable &= ~AMD7X6_CHAN_EN(channel);
1992 pciide_map_compat_intr(pa, cp, channel, interface);
1993 if (cp->hw_ok == 0)
1994 continue;
1995
1996 amd7x6_setup_channel(&cp->wdc_channel);
1997 }
1998 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1999 chanenable);
2000 return;
2001 }
2002
/*
 * Program the per-drive timing registers of one channel of an AMD
 * 756/766/768 IDE controller, based on the modes previously negotiated
 * into chp->ch_drive[].  Installed as sc_wdcdev.set_modes.
 */
void
amd7x6_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
#ifndef PCIIDE_AMD756_ENABLEDMA
	/* chip revision, needed for the AMD756 MW-DMA erratum check below */
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
#endif

	idedma_ctl = 0;
	/*
	 * Read-modify-write: clear only this channel's fields so the
	 * other channel's timings are preserved.
	 */
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
	datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
			/* PIO-only drive */
			mode = drvp->PIO_mode;
			goto pio;
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
			    AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
			    AMD7X6_UDMA_TIME(chp->channel, drive,
				amd7x6_udma_tim[drvp->UDMA_mode]);
			/* can use PIO timings, MW DMA unused */
			mode = drvp->PIO_mode;
		} else {
			/* use Multiword DMA, but only if revision is OK */
			drvp->drive_flags &= ~DRIVE_UDMA;
#ifndef PCIIDE_AMD756_ENABLEDMA
			/*
			 * The workaround doesn't seem to be necessary
			 * with all drives, so it can be disabled by
			 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
			 * triggered.
			 */
			if (sc->sc_pp->ide_product ==
			    PCI_PRODUCT_AMD_PBC756_IDE &&
			    AMD756_CHIPREV_DISABLEDMA(rev)) {
				printf("%s:%d:%d: multi-word DMA disabled due "
				    "to chip revision\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    chp->channel, drive);
				mode = drvp->PIO_mode;
				drvp->drive_flags &= ~DRIVE_DMA;
				goto pio;
			}
#endif
			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		}
		/* reaching here means DMA (or UDMA) is in use */
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* setup PIO mode */
		/*
		 * PIO and MW-DMA share one timing field; modes <= 2 all
		 * use the slowest (index 0) timing entry.
		 */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		datatim_reg |=
		    AMD7X6_DATATIM_PULSE(chp->channel, drive,
			amd7x6_pio_set[mode]) |
		    AMD7X6_DATATIM_RECOV(chp->channel, drive,
			amd7x6_pio_rec[mode]);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
	/* commit the new timings to the chip */
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
}
2103
/*
 * Map and attach a VIA Apollo (VT82C586/596/686, VT8233) IDE controller.
 * The UDMA capability cannot be read from the IDE function itself; it is
 * deduced from the product/revision of the ISA bridge at function 0 of
 * the same device.
 */
void
apollo_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;
	u_int32_t ideconf;
	bus_size_t cmdsize, ctlsize;
	pcitag_t pcib_tag;
	pcireg_t pcib_id, pcib_class;

	if (pciide_chipen(sc, pa) == 0)
		return;
	/* get a PCI tag for the ISA bridge (function 0 of the same device) */
	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	/* and read ID and rev of the ISA bridge */
	pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
	pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
	printf(": VIA Technologies ");
	/* bridge product/revision determines the highest usable UDMA mode */
	switch (PCI_PRODUCT(pcib_id)) {
	case PCI_PRODUCT_VIATECH_VT82C586_ISA:
		printf("VT82C586 (Apollo VP) ");
		if(PCI_REVISION(pcib_class) >= 0x02) {
			printf("ATA33 controller\n");
			sc->sc_wdcdev.UDMA_cap = 2;
		} else {
			printf("controller\n");
			sc->sc_wdcdev.UDMA_cap = 0;
		}
		break;
	case PCI_PRODUCT_VIATECH_VT82C596A:
		printf("VT82C596A (Apollo Pro) ");
		if (PCI_REVISION(pcib_class) >= 0x12) {
			printf("ATA66 controller\n");
			sc->sc_wdcdev.UDMA_cap = 4;
		} else {
			printf("ATA33 controller\n");
			sc->sc_wdcdev.UDMA_cap = 2;
		}
		break;
	case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
		printf("VT82C686A (Apollo KX133) ");
		if (PCI_REVISION(pcib_class) >= 0x40) {
			printf("ATA100 controller\n");
			sc->sc_wdcdev.UDMA_cap = 5;
		} else {
			printf("ATA66 controller\n");
			sc->sc_wdcdev.UDMA_cap = 4;
		}
		break;
	case PCI_PRODUCT_VIATECH_VT8233:
		printf("VT8233 ATA100 controller\n");
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	default:
		printf("unknown ATA controller\n");
		sc->sc_wdcdev.UDMA_cap = 0;
	}

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* only advertise UDMA if the bridge check above allowed it */
		if (sc->sc_wdcdev.UDMA_cap > 0)
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.set_modes = apollo_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
	    DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		/* skip channels disabled in the IDE configuration register */
		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		/* if the channel has no drives, power it down */
		if (pciide_chan_candisable(cp)) {
			ideconf &= ~APO_IDECONF_EN(channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
			    ideconf);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);

		if (cp->hw_ok == 0)
			continue;
		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
	}
	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
}
2221
/*
 * Program the per-drive timing registers of one channel of a VIA Apollo
 * IDE controller, based on the modes negotiated into chp->ch_drive[].
 * Installed as sc_wdcdev.set_modes.
 */
void
apollo_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	/*
	 * Read-modify-write: clear only this channel's fields so the
	 * other channel's timings are preserved.
	 */
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
			/* PIO-only drive */
			mode = drvp->PIO_mode;
			goto pio;
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
			    APO_UDMA_EN_MTH(chp->channel, drive);
			/*
			 * Timing table and 66MHz-clock bit depend on the
			 * chip generation determined at chip_map time.
			 */
			if (sc->sc_wdcdev.UDMA_cap == 5) {
				/* 686b */
				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
				udmatim_reg |= APO_UDMA_TIME(chp->channel,
				    drive, apollo_udma100_tim[drvp->UDMA_mode]);
			} else if (sc->sc_wdcdev.UDMA_cap == 4) {
				/* 596b or 686a */
				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
				udmatim_reg |= APO_UDMA_TIME(chp->channel,
				    drive, apollo_udma66_tim[drvp->UDMA_mode]);
			} else {
				/* 596a or 586b */
				udmatim_reg |= APO_UDMA_TIME(chp->channel,
				    drive, apollo_udma33_tim[drvp->UDMA_mode]);
			}
			/* can use PIO timings, MW DMA unused */
			mode = drvp->PIO_mode;
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		}
		/* reaching here means DMA (or UDMA) is in use */
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* setup PIO mode */
		/*
		 * PIO and MW-DMA share one timing field; modes <= 2 all
		 * use the slowest (index 0) timing entry.
		 */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		datatim_reg |=
		    APO_DATATIM_PULSE(chp->channel, drive,
			apollo_pio_set[mode]) |
		    APO_DATATIM_RECOV(chp->channel, drive,
			apollo_pio_rec[mode]);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
	/* commit the new timings to the chip */
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
}
2312
2313 void
2314 cmd_channel_map(pa, sc, channel)
2315 struct pci_attach_args *pa;
2316 struct pciide_softc *sc;
2317 int channel;
2318 {
2319 struct pciide_channel *cp = &sc->pciide_channels[channel];
2320 bus_size_t cmdsize, ctlsize;
2321 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2322 int interface, one_channel;
2323
2324 /*
2325 * The 0648/0649 can be told to identify as a RAID controller.
2326 * In this case, we have to fake interface
2327 */
2328 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2329 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2330 PCIIDE_INTERFACE_SETTABLE(1);
2331 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2332 CMD_CONF_DSA1)
2333 interface |= PCIIDE_INTERFACE_PCI(0) |
2334 PCIIDE_INTERFACE_PCI(1);
2335 } else {
2336 interface = PCI_INTERFACE(pa->pa_class);
2337 }
2338
2339 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2340 cp->name = PCIIDE_CHANNEL_NAME(channel);
2341 cp->wdc_channel.channel = channel;
2342 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2343
2344 /*
2345 * Older CMD64X doesn't have independant channels
2346 */
2347 switch (sc->sc_pp->ide_product) {
2348 case PCI_PRODUCT_CMDTECH_649:
2349 one_channel = 0;
2350 break;
2351 default:
2352 one_channel = 1;
2353 break;
2354 }
2355
2356 if (channel > 0 && one_channel) {
2357 cp->wdc_channel.ch_queue =
2358 sc->pciide_channels[0].wdc_channel.ch_queue;
2359 } else {
2360 cp->wdc_channel.ch_queue =
2361 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2362 }
2363 if (cp->wdc_channel.ch_queue == NULL) {
2364 printf("%s %s channel: "
2365 "can't allocate memory for command queue",
2366 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2367 return;
2368 }
2369
2370 printf("%s: %s channel %s to %s mode\n",
2371 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2372 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2373 "configured" : "wired",
2374 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2375 "native-PCI" : "compatibility");
2376
2377 /*
2378 * with a CMD PCI64x, if we get here, the first channel is enabled:
2379 * there's no way to disable the first channel without disabling
2380 * the whole device
2381 */
2382 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2383 printf("%s: %s channel ignored (disabled)\n",
2384 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2385 return;
2386 }
2387
2388 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2389 if (cp->hw_ok == 0)
2390 return;
2391 if (channel == 1) {
2392 if (pciide_chan_candisable(cp)) {
2393 ctrl &= ~CMD_CTRL_2PORT;
2394 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2395 CMD_CTRL, ctrl);
2396 }
2397 }
2398 pciide_map_compat_intr(pa, cp, channel, interface);
2399 }
2400
2401 int
2402 cmd_pci_intr(arg)
2403 void *arg;
2404 {
2405 struct pciide_softc *sc = arg;
2406 struct pciide_channel *cp;
2407 struct channel_softc *wdc_cp;
2408 int i, rv, crv;
2409 u_int32_t priirq, secirq;
2410
2411 rv = 0;
2412 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2413 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2414 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2415 cp = &sc->pciide_channels[i];
2416 wdc_cp = &cp->wdc_channel;
2417 /* If a compat channel skip. */
2418 if (cp->compat)
2419 continue;
2420 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2421 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2422 crv = wdcintr(wdc_cp);
2423 if (crv == 0)
2424 printf("%s:%d: bogus intr\n",
2425 sc->sc_wdcdev.sc_dev.dv_xname, i);
2426 else
2427 rv = 1;
2428 }
2429 }
2430 return rv;
2431 }
2432
2433 void
2434 cmd_chip_map(sc, pa)
2435 struct pciide_softc *sc;
2436 struct pci_attach_args *pa;
2437 {
2438 int channel;
2439
2440 /*
2441 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2442 * and base adresses registers can be disabled at
2443 * hardware level. In this case, the device is wired
2444 * in compat mode and its first channel is always enabled,
2445 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2446 * In fact, it seems that the first channel of the CMD PCI0640
2447 * can't be disabled.
2448 */
2449
2450 #ifdef PCIIDE_CMD064x_DISABLE
2451 if (pciide_chipen(sc, pa) == 0)
2452 return;
2453 #endif
2454
2455 printf("%s: hardware does not support DMA\n",
2456 sc->sc_wdcdev.sc_dev.dv_xname);
2457 sc->sc_dma_ok = 0;
2458
2459 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2460 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2461 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2462
2463 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2464 cmd_channel_map(pa, sc, channel);
2465 }
2466 }
2467
/*
 * Map and attach a CMD 0643/0646/0648/0649 controller.  The UDMA
 * capability depends on the exact product and, for the 0646, on its
 * revision.
 */
void
cmd0643_9_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and base adresses registers can be disabled at
	 * hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		/* per-product UDMA capability and irqack routine */
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_CMDTECH_649:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			sc->sc_wdcdev.UDMA_cap = 5;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_648:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			sc->sc_wdcdev.UDMA_cap = 4;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_646:
			/* 0646 UDMA support depends on the chip revision */
			if (rev >= CMD0646U2_REV) {
				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
				sc->sc_wdcdev.UDMA_cap = 2;
			} else if (rev >= CMD0646U_REV) {
			/*
			 * Linux's driver claims that the 646U is broken
			 * with UDMA. Only enable it if we know what we're
			 * doing
			 */
#ifdef PCIIDE_CMD0646U_ENABLEUDMA
				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
				sc->sc_wdcdev.UDMA_cap = 2;
#endif
				/* explicitly disable UDMA */
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(0), 0);
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(1), 0);
			}
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		default:
			sc->sc_wdcdev.irqack = pciide_irqack;
		}
	}

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;

	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
		DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		cmd_channel_map(pa, sc, channel);
		if (cp->hw_ok == 0)
			continue;
		cmd0643_9_setup_channel(&cp->wdc_channel);
	}
	/*
	 * note - this also makes sure we clear the irq disable and reset
	 * bits
	 */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
	    DEBUG_PROBE);
}
2566
/*
 * Program the per-drive timing registers of one channel of a CMD
 * 0643/0646/0648/0649 controller.  Installed as sc_wdcdev.set_modes.
 */
void
cmd0643_9_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int8_t tim;
	u_int32_t idedma_ctl, udma_reg;
	int drive;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		/* default to the PIO timing; overridden below for MW DMA */
		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
			if (drvp->drive_flags & DRIVE_UDMA) {
				/* UltraDMA on a 646U2, 0648 or 0649 */
				drvp->drive_flags &= ~DRIVE_DMA;
				udma_reg = pciide_pci_read(sc->sc_pc,
				    sc->sc_tag, CMD_UDMATIM(chp->channel));
				/*
				 * Cap at UDMA2 when no 80-wire cable is
				 * detected on this channel.
				 */
				if (drvp->UDMA_mode > 2 &&
				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
				    CMD_BICSR) &
				    CMD_BICSR_80(chp->channel)) == 0)
					drvp->UDMA_mode = 2;
				/* select the 33/66MHz UDMA clocking bit */
				if (drvp->UDMA_mode > 2)
					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
				else if (sc->sc_wdcdev.UDMA_cap > 2)
					udma_reg |= CMD_UDMATIM_UDMA33(drive);
				udma_reg |= CMD_UDMATIM_UDMA(drive);
				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
				    CMD_UDMATIM_TIM_OFF(drive));
				udma_reg |=
				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
				    CMD_UDMATIM_TIM_OFF(drive));
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(chp->channel), udma_reg);
			} else {
				/*
				 * use Multiword DMA.
				 * Timings will be used for both PIO and DMA,
				 * so adjust DMA mode if needed
				 * if we have a 0646U2/8/9, turn off UDMA
				 */
				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
					udma_reg = pciide_pci_read(sc->sc_pc,
					    sc->sc_tag,
					    CMD_UDMATIM(chp->channel));
					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
					pciide_pci_write(sc->sc_pc, sc->sc_tag,
					    CMD_UDMATIM(chp->channel),
					    udma_reg);
				}
				if (drvp->PIO_mode >= 3 &&
				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
					drvp->DMA_mode = drvp->PIO_mode - 2;
				}
				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    CMD_DATA_TIM(chp->channel, drive), tim);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
}
2647
2648 void
2649 cmd646_9_irqack(chp)
2650 struct channel_softc *chp;
2651 {
2652 u_int32_t priirq, secirq;
2653 struct pciide_channel *cp = (struct pciide_channel*)chp;
2654 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2655
2656 if (chp->channel == 0) {
2657 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2658 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2659 } else {
2660 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2661 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2662 }
2663 pciide_irqack(chp);
2664 }
2665
2666 void
2667 cy693_chip_map(sc, pa)
2668 struct pciide_softc *sc;
2669 struct pci_attach_args *pa;
2670 {
2671 struct pciide_channel *cp;
2672 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2673 bus_size_t cmdsize, ctlsize;
2674
2675 if (pciide_chipen(sc, pa) == 0)
2676 return;
2677 /*
2678 * this chip has 2 PCI IDE functions, one for primary and one for
2679 * secondary. So we need to call pciide_mapregs_compat() with
2680 * the real channel
2681 */
2682 if (pa->pa_function == 1) {
2683 sc->sc_cy_compatchan = 0;
2684 } else if (pa->pa_function == 2) {
2685 sc->sc_cy_compatchan = 1;
2686 } else {
2687 printf("%s: unexpected PCI function %d\n",
2688 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2689 return;
2690 }
2691 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2692 printf("%s: bus-master DMA support present",
2693 sc->sc_wdcdev.sc_dev.dv_xname);
2694 pciide_mapreg_dma(sc, pa);
2695 } else {
2696 printf("%s: hardware does not support DMA",
2697 sc->sc_wdcdev.sc_dev.dv_xname);
2698 sc->sc_dma_ok = 0;
2699 }
2700 printf("\n");
2701
2702 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2703 if (sc->sc_cy_handle == NULL) {
2704 printf("%s: unable to map hyperCache control registers\n",
2705 sc->sc_wdcdev.sc_dev.dv_xname);
2706 sc->sc_dma_ok = 0;
2707 }
2708
2709 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2710 WDC_CAPABILITY_MODE;
2711 if (sc->sc_dma_ok) {
2712 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2713 sc->sc_wdcdev.irqack = pciide_irqack;
2714 }
2715 sc->sc_wdcdev.PIO_cap = 4;
2716 sc->sc_wdcdev.DMA_cap = 2;
2717 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2718
2719 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2720 sc->sc_wdcdev.nchannels = 1;
2721
2722 /* Only one channel for this chip; if we are here it's enabled */
2723 cp = &sc->pciide_channels[0];
2724 sc->wdc_chanarray[0] = &cp->wdc_channel;
2725 cp->name = PCIIDE_CHANNEL_NAME(0);
2726 cp->wdc_channel.channel = 0;
2727 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2728 cp->wdc_channel.ch_queue =
2729 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2730 if (cp->wdc_channel.ch_queue == NULL) {
2731 printf("%s primary channel: "
2732 "can't allocate memory for command queue",
2733 sc->sc_wdcdev.sc_dev.dv_xname);
2734 return;
2735 }
2736 printf("%s: primary channel %s to ",
2737 sc->sc_wdcdev.sc_dev.dv_xname,
2738 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2739 "configured" : "wired");
2740 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2741 printf("native-PCI");
2742 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2743 pciide_pci_intr);
2744 } else {
2745 printf("compatibility");
2746 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2747 &cmdsize, &ctlsize);
2748 }
2749 printf(" mode\n");
2750 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2751 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2752 wdcattach(&cp->wdc_channel);
2753 if (pciide_chan_candisable(cp)) {
2754 pci_conf_write(sc->sc_pc, sc->sc_tag,
2755 PCI_COMMAND_STATUS_REG, 0);
2756 }
2757 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2758 if (cp->hw_ok == 0)
2759 return;
2760 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2761 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2762 cy693_setup_channel(&cp->wdc_channel);
2763 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2764 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2765 }
2766
/*
 * Program timings for the single channel of a CY82C693.  PIO timings
 * go in PCI config space; the DMA mode goes in the I/O-mapped
 * hyperCache registers and is shared by both drives, so the slowest
 * negotiated DMA mode wins.  Installed as sc_wdcdev.set_modes.
 */
void
cy693_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t cy_cmd_ctrl;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	/* -1 means "no drive is using DMA" */
	int dma_mode = -1;

	cy_cmd_ctrl = idedma_ctl = 0;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (drvp->drive_flags & DRIVE_DMA) {
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
			/* use Multiword DMA */
			/* keep the minimum mode over both drives */
			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
				dma_mode = drvp->DMA_mode;
		}
		/* per-drive PIO pulse and recovery timings, read and write */
		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOW_REC_OFF(drive));
		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOR_REC_OFF(drive));
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
	/* both drives are forced to the common (minimum) DMA mode */
	chp->ch_drive[0].DMA_mode = dma_mode;
	chp->ch_drive[1].DMA_mode = dma_mode;

	if (dma_mode == -1)
		dma_mode = 0;

	if (sc->sc_cy_handle != NULL) {
		/* Note: `multiple' is implied. */
		cy82c693_write(sc->sc_cy_handle,
		    (sc->sc_cy_compatchan == 0) ?
		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
	}

	pciide_print_modes(cp);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
}
2827
2828 static int
2829 sis_hostbr_match(pa)
2830 struct pci_attach_args *pa;
2831 {
2832 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2833 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2834 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2835 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2836 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2837 }
2838
/*
 * Map and attach a SiS IDE controller.  The usable UDMA level depends
 * both on this function's revision and on which SiS host bridge is
 * present in the system.
 */
void
sis_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	bus_size_t cmdsize, ctlsize;
	pcitag_t pchb_tag;
	pcireg_t pchb_id, pchb_class;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");

	/* get a PCI tag for the host bridge (function 0 of the same device) */
	pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	/* and read ID and rev of the ISA bridge */
	pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
	pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/*
		 * controllers associated to a rev 0x2 530 Host to PCI Bridge
		 * have problems with UDMA (info provided by Christos)
		 */
		if (rev >= 0xd0 &&
		    (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
		    PCI_REVISION(pchb_class) >= 0x03))
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
		/*
		 * Use UDMA/100 on SiS 735 chipset and UDMA/33 on other
		 * chipsets.
		 */
		sc->sc_wdcdev.UDMA_cap =
		    pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
	sc->sc_wdcdev.set_modes = sis_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* select the second timing register set and the maximum FIFO size */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		/* skip channels disabled in SIS_CTRL0 */
		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		/* if the channel has no drives, turn it off in SIS_CTRL0 */
		if (pciide_chan_candisable(cp)) {
			if (channel == 0)
				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
			else
				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
			    sis_ctr0);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		sis_setup_channel(&cp->wdc_channel);
	}
}
2927
/*
 * Program the per-drive timing register of one channel of a SiS IDE
 * controller according to the negotiated modes.  Installed as
 * sc_wdcdev.set_modes.
 */
void
sis_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t sis_tim;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
	    "channel %d 0x%x\n", chp->channel,
	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
	    DEBUG_PROBE);
	/* the whole channel timing register is rebuilt from scratch */
	sis_tim = 0;
	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)
			goto pio;

		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
			    SIS_TIM_UDMA_TIME_OFF(drive);
			sis_tim |= SIS_TIM_UDMA_EN(drive);
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		/* reaching here means DMA (or UDMA) is in use */
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
pio:		sis_tim |= sis_pio_act[drvp->PIO_mode] <<
		    SIS_TIM_ACT_OFF(drive);
		sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
		    SIS_TIM_REC_OFF(drive);
	}
	WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
	    "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
	/* commit the new timings to the chip */
	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}
2994
2995 static int
2996 acer_isabr_match(pa)
2997 struct pci_attach_args *pa;
2998 {
2999 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI) &&
3000 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALI_M1543));
3001 }
3002
/*
 * Attach-time setup for ALi/Acer Aladdin IDE controllers.  Determines
 * DMA/UDMA capability from the chip revision, unlocks the timing
 * registers, optionally enables cable detection (rev >= 0xC2, which
 * also needs a bit set in the M1543 PCI/ISA bridge), then maps and
 * initializes each channel.
 */
void
acer_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pci_attach_args isa_pa;	/* filled in by pci_find_device() */
	struct pciide_channel *cp;
	int channel;
	pcireg_t cr, interface;
	bus_size_t cmdsize, ctlsize;
	pcireg_t rev = PCI_REVISION(pa->pa_class);

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
		/* UDMA capability depends on the chip revision */
		if (rev >= 0x20) {
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			if (rev >= 0xC4)
				sc->sc_wdcdev.UDMA_cap = 5;
			else if (rev >= 0xC2)
				sc->sc_wdcdev.UDMA_cap = 4;
			else
				sc->sc_wdcdev.UDMA_cap = 2;
		}
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.set_modes = acer_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* enable CD-ROM DMA, keep the FIFO enabled */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

	/* Enable "microsoft register bits" R/W. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
	    ~ACER_CHANSTATUSREGS_RO);
	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
	/* Don't use cr, re-read the real register content instead */
	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_CLASS_REG));

	/* From linux: enable "Cable Detection" */
	if (rev >= 0xC2) {
		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
		    | ACER_0x4B_CDETECT);
		/* set south-bridge's enable bit, m1533, 0x79 */
		if (pci_find_device(&isa_pa, acer_isabr_match) == 0) {
			/* no bridge: cable detect unusable, cap UDMA/33 */
			printf("%s: can't find PCI/ISA bridge, downgrading "
			    "to Ultra/33\n", sc->sc_wdcdev.sc_dev.dv_xname);
			sc->sc_wdcdev.UDMA_cap = 2;
		} else {
			if (rev == 0xC2)
				/* 1543C-B0 (m1533, 0x79, bit 2) */
				pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
				    ACER_0x79,
				    pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
					ACER_0x79)
				    | ACER_0x79_REVC2_EN);
			else
				/* 1553/1535 (m1533, 0x79, bit 1) */
				pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
				    ACER_0x79,
				    pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
					ACER_0x79)
				    | ACER_0x79_EN);
		}
	}

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* newer controllers seems to lack the ACER_CHIDS. Sigh */
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		     (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			/* clear the channel-enable bit in the class reg */
			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PCI_CLASS_REG, cr);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		acer_setup_channel(&cp->wdc_channel);
	}
}
3115
/*
 * Program FIFO threshold, UDMA timing (ACER_FTH_UDMA) and per-drive
 * PIO timing (ACER_IDETIM) for one channel of an ALi/Acer controller.
 * UDMA modes above 2 are first clamped when the 80-pin cable bit for
 * the channel is set in register 0x4A.  Called via sc_wdcdev.set_modes.
 */
void
acer_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t acer_fifo_udma;	/* working copy of ACER_FTH_UDMA */
	u_int32_t idedma_ctl;		/* accumulated IDEDMA_CTL_DRV_DMA bits */
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
	    DRIVE_UDMA) { /* check 80 pins cable */
		/*
		 * NOTE(review): the clamp triggers when the 0x4A bit is SET;
		 * presumably set means a 40-pin cable here — confirm against
		 * the ALi datasheet.
		 */
		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
		    ACER_0x4A_80PIN(chp->channel)) {
			if (chp->ch_drive[0].UDMA_mode > 2)
				chp->ch_drive[0].UDMA_mode = 2;
			if (chp->ch_drive[1].UDMA_mode > 2)
				chp->ch_drive[1].UDMA_mode = 2;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
		    "channel %d drive %d 0x%x\n", chp->channel, drive,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
		/* clear FIFO/DMA mode */
		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
		    ACER_UDMA_EN(chp->channel, drive) |
		    ACER_UDMA_TIM(chp->channel, drive, 0x7));

		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
			/* PIO-only drive: smallest FIFO threshold */
			acer_fifo_udma |=
			    ACER_FTH_OPL(chp->channel, drive, 0x1);
			goto pio;
		}

		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
			acer_fifo_udma |=
			    ACER_UDMA_TIM(chp->channel, drive,
				acer_udma[drvp->UDMA_mode]);
			/* XXX disable if one drive < UDMA3 ? */
			if (drvp->UDMA_mode >= 3) {
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    ACER_0x4B,
				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
					ACER_0x4B) | ACER_0x4B_UDMA66);
			}
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		/* PIO timings are written for every present drive */
pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->channel, drive),
		    acer_pio[drvp->PIO_mode]);
	}
	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}
3211
3212 int
3213 acer_pci_intr(arg)
3214 void *arg;
3215 {
3216 struct pciide_softc *sc = arg;
3217 struct pciide_channel *cp;
3218 struct channel_softc *wdc_cp;
3219 int i, rv, crv;
3220 u_int32_t chids;
3221
3222 rv = 0;
3223 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3224 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3225 cp = &sc->pciide_channels[i];
3226 wdc_cp = &cp->wdc_channel;
3227 /* If a compat channel skip. */
3228 if (cp->compat)
3229 continue;
3230 if (chids & ACER_CHIDS_INT(i)) {
3231 crv = wdcintr(wdc_cp);
3232 if (crv == 0)
3233 printf("%s:%d: bogus intr\n",
3234 sc->sc_wdcdev.sc_dev.dv_xname, i);
3235 else
3236 rv = 1;
3237 }
3238 }
3239 return rv;
3240 }
3241
/*
 * Attach-time setup for Triones/Highpoint HPT366/370/370A controllers.
 * The HPT366 exposes one channel per PCI function (two functions); the
 * HPT370/370A exposes both channels on a single function.  UDMA
 * capability is 4 for the 366 and 5 for the 370/370A.
 */
void
hpt_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int i, compatchan, revision;
	pcireg_t interface;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	revision = PCI_REVISION(pa->pa_class);
	printf(": Triones/Highpoint ");
	if (revision == HPT370_REV)
		printf("HPT370 IDE Controller\n");
	else if (revision == HPT370A_REV)
		printf("HPT370A IDE Controller\n");
	else if (revision == HPT366_REV)
		printf("HPT366 IDE Controller\n");
	else
		printf("unknown HPT IDE controller rev %d\n", revision);

	/*
	 * when the chip is in native mode it identifies itself as a
	 * 'misc mass storage'. Fake interface in this case.
	 */
	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCI_INTERFACE(pa->pa_class);
	} else {
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0);
		if (revision == HPT370_REV || revision == HPT370A_REV)
			interface |= PCIIDE_INTERFACE_PCI(1);
	}

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;

	sc->sc_wdcdev.set_modes = hpt_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	if (revision == HPT366_REV) {
		sc->sc_wdcdev.UDMA_cap = 4;
		/*
		 * The 366 has 2 PCI IDE functions, one for primary and one
		 * for secondary. So we need to call pciide_mapregs_compat()
		 * with the real channel
		 */
		if (pa->pa_function == 0) {
			compatchan = 0;
		} else if (pa->pa_function == 1) {
			compatchan = 1;
		} else {
			printf("%s: unexpected PCI function %d\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
			return;
		}
		sc->sc_wdcdev.nchannels = 1;
	} else {
		sc->sc_wdcdev.nchannels = 2;
		sc->sc_wdcdev.UDMA_cap = 5;
	}
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		if (sc->sc_wdcdev.nchannels > 1) {
			/* HPT370/370A: per-channel enable bit */
			compatchan = i;
			if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
			   HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
				printf("%s: %s channel ignored (disabled)\n",
				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
				continue;
			}
		}
		if (pciide_chansetup(sc, i, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(i)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, hpt_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
			    &cmdsize, &ctlsize);
		}
		/*
		 * NOTE(review): unlike the other chip_map routines, a
		 * mapping failure here aborts the remaining channels
		 * (return, not continue) — verify this is intentional.
		 */
		if (cp->hw_ok == 0)
			return;
		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
		wdcattach(&cp->wdc_channel);
		hpt_setup_channel(&cp->wdc_channel);
	}
	if (revision == HPT370_REV || revision == HPT370A_REV) {
		/*
		 * HPT370_REV has a bit to disable interrupts, make sure
		 * to clear it
		 */
		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
		    ~HPT_CSEL_IRQDIS);
	}
	return;
}
3353
/*
 * Program the per-drive timing register (HPT_IDETIM) for one channel
 * of an HPT36x/37x controller.  The timing value is picked from the
 * hpt370_* tables when two channels are configured (HPT370/370A) and
 * from the hpt366_* tables otherwise.  Called via sc_wdcdev.set_modes.
 */
void
hpt_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	int cable;		/* HPT_CSEL register (cable detect bits) */
	u_int32_t before, after;	/* old/new timing register values */
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    HPT_IDETIM(chp->channel, drive));

		/* add timing values, setup DMA if needed */
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			/*
			 * Cable-detect bit set means no 80-pin cable:
			 * limit to UDMA2 (presumably 40-pin — confirm
			 * against the HPT datasheet).
			 */
			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
			    drvp->UDMA_mode > 2)
				drvp->UDMA_mode = 2;
			after = (sc->sc_wdcdev.nchannels == 2) ?
			    hpt370_udma[drvp->UDMA_mode] :
			    hpt366_udma[drvp->UDMA_mode];
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & DRIVE_DMA) {
			/*
			 * use Multiword DMA.
			 * Timings will be used for both PIO and DMA, so adjust
			 * DMA mode if needed
			 */
			if (drvp->PIO_mode >= 3 &&
			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
				drvp->DMA_mode = drvp->PIO_mode - 2;
			}
			after = (sc->sc_wdcdev.nchannels == 2) ?
			    hpt370_dma[drvp->DMA_mode] :
			    hpt366_dma[drvp->DMA_mode];
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			after = (sc->sc_wdcdev.nchannels == 2) ?
			    hpt370_pio[drvp->PIO_mode] :
			    hpt366_pio[drvp->PIO_mode];
		}
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    HPT_IDETIM(chp->channel, drive), after);
		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
		    "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
		    after, before), DEBUG_PROBE);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}
3426
3427 int
3428 hpt_pci_intr(arg)
3429 void *arg;
3430 {
3431 struct pciide_softc *sc = arg;
3432 struct pciide_channel *cp;
3433 struct channel_softc *wdc_cp;
3434 int rv = 0;
3435 int dmastat, i, crv;
3436
3437 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3438 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3439 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3440 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3441 IDEDMA_CTL_INTR)
3442 continue;
3443 cp = &sc->pciide_channels[i];
3444 wdc_cp = &cp->wdc_channel;
3445 crv = wdcintr(wdc_cp);
3446 if (crv == 0) {
3447 printf("%s:%d: bogus intr\n",
3448 sc->sc_wdcdev.sc_dev.dv_xname, i);
3449 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3450 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3451 } else
3452 rv = 1;
3453 }
3454 return rv;
3455 }
3456
3457
/* Macros to test product */
/* True for PDC20262 (Ultra/66) and every later Promise product. */
#define PDC_IS_262(sc)							\
	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
/* True for PDC20265 (Ultra/100) and every later Promise product. */
#define PDC_IS_265(sc)							\
	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
/* True for PDC20268-class chips (Ultra/100TX2 and later). */
#define PDC_IS_268(sc)							\
	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3476
/*
 * Attach-time setup for Promise PDC202xx controllers.  The PDC20268
 * family (see PDC_IS_268) has no PDC2xx_STATE register: `st' is only
 * read, modified and written back under !PDC_IS_268 guards.  For the
 * older chips this also programs failsafe timings, the SCR register
 * and the primary/secondary mode registers.
 */
void
pdc202xx_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface, st, mode;
	bus_size_t cmdsize, ctlsize;

	if (!PDC_IS_268(sc)) {
		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
		    st), DEBUG_PROBE);
	}
	if (pciide_chipen(sc, pa) == 0)
		return;

	/* turn off RAID mode */
	if (!PDC_IS_268(sc))
		st &= ~PDC2xx_STATE_IDERAID;

	/*
	 * can't rely on the PCI_CLASS_REG content if the chip was in raid
	 * mode. We have to fake interface
	 */
	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
	if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	/* UDMA capability follows the product generation */
	if (PDC_IS_265(sc))
		sc->sc_wdcdev.UDMA_cap = 5;
	else if (PDC_IS_262(sc))
		sc->sc_wdcdev.UDMA_cap = 4;
	else
		sc->sc_wdcdev.UDMA_cap = 2;
	sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
	    pdc20268_setup_channel : pdc202xx_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	if (!PDC_IS_268(sc)) {
		/* setup failsafe defaults */
		mode = 0;
		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
		mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
		mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
		for (channel = 0;
		     channel < sc->sc_wdcdev.nchannels;
		     channel++) {
			WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
			    "drive 0 initial timings 0x%x, now 0x%x\n",
			    channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
			    DEBUG_PROBE);
			/* drive 0 additionally gets the IORDYp bit */
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
			WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
			    "drive 1 initial timings 0x%x, now 0x%x\n",
			    channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PDC2xx_TIM(channel, 1), mode);
		}

		mode = PDC2xx_SCR_DMA;
		if (PDC_IS_262(sc)) {
			mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
		} else {
			/* the BIOS set it up this way */
			mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
		}
		mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
		mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
		    "now 0x%x\n",
		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
			PDC2xx_SCR),
		    mode), DEBUG_PROBE);
		bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
		    PDC2xx_SCR, mode);

		/* controller initial state register is OK even without BIOS */
		/* Set DMA mode to IDE DMA compatibility */
		mode =
		    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
		    DEBUG_PROBE);
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
		    mode | 0x1);
		mode =
		    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
		WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
		    mode | 0x1);
	}

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		/* per-channel enable bit lives in the STATE register */
		if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		if (PDC_IS_265(sc))
			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
			    pdc20265_pci_intr);
		else
			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
			    pdc202xx_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
			st &= ~(PDC_IS_262(sc) ?
			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
		pciide_map_compat_intr(pa, cp, channel, interface);
		pdc202xx_setup_channel(&cp->wdc_channel);
	}
	if (!PDC_IS_268(sc)) {
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
		    "0x%x\n", st), DEBUG_PROBE);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
	}
	return;
}
3619
/*
 * Program per-drive timings (PDC2xx_TIM) for one channel of a
 * pre-20268 Promise controller.  On 262-class chips this also manages
 * the U66 clock-select register, clamps UDMA above mode 2 when no
 * 80-pin cable is detected, and tunes the ATAPI register when an
 * ATAPI device shares the channel.  Called via sc_wdcdev.set_modes.
 */
void
pdc202xx_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	pcireg_t mode, st;
	u_int32_t idedma_ctl, scr, atapi;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;
	WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
	    sc->sc_wdcdev.sc_dev.dv_xname,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
	    DEBUG_PROBE);

	/* Per channel settings */
	if (PDC_IS_262(sc)) {
		scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    PDC262_U66);
		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
		/* Trim UDMA mode */
		if ((st & PDC262_STATE_80P(channel)) != 0 ||
		    (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
		    chp->ch_drive[0].UDMA_mode <= 2) ||
		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
		    chp->ch_drive[1].UDMA_mode <= 2)) {
			/*
			 * No 80-pin cable (or one drive already limited
			 * to <= UDMA2): cap both drives at UDMA2.
			 */
			if (chp->ch_drive[0].UDMA_mode > 2)
				chp->ch_drive[0].UDMA_mode = 2;
			if (chp->ch_drive[1].UDMA_mode > 2)
				chp->ch_drive[1].UDMA_mode = 2;
		}
		/* Set U66 if needed */
		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
		    chp->ch_drive[0].UDMA_mode > 2) ||
		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
		    chp->ch_drive[1].UDMA_mode > 2))
			scr |= PDC262_U66_EN(channel);
		else
			scr &= ~PDC262_U66_EN(channel);
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    PDC262_U66, scr);
		WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
		    PDC262_ATAPI(channel))), DEBUG_PROBE);
		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
		    chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
			/*
			 * Disable the ATAPI UDMA bit when one drive is
			 * UDMA and its companion is plain (MW) DMA.
			 */
			if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
			    !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
			    (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
			    ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
			    !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
			    (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
				atapi = 0;
			else
				atapi = PDC262_ATAPI_UDMA;
			bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
			    PDC262_ATAPI(channel), atapi);
		}
	}
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		mode = 0;
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			mode = PDC2xx_TIM_SET_MB(mode,
			   pdc2xx_udma_mb[drvp->UDMA_mode]);
			mode = PDC2xx_TIM_SET_MC(mode,
			   pdc2xx_udma_mc[drvp->UDMA_mode]);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & DRIVE_DMA) {
			mode = PDC2xx_TIM_SET_MB(mode,
			    pdc2xx_dma_mb[drvp->DMA_mode]);
			mode = PDC2xx_TIM_SET_MC(mode,
			    pdc2xx_dma_mc[drvp->DMA_mode]);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only: failsafe MB/MC values */
			mode = PDC2xx_TIM_SET_MB(mode,
			    pdc2xx_dma_mb[0]);
			mode = PDC2xx_TIM_SET_MC(mode,
			    pdc2xx_dma_mc[0]);
		}
		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
		if (drvp->drive_flags & DRIVE_ATA)
			mode |= PDC2xx_TIM_PRE;
		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
		if (drvp->PIO_mode >= 3) {
			mode |= PDC2xx_TIM_IORDY;
			if (drive == 0)
				mode |= PDC2xx_TIM_IORDYp;
		}
		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
		    "timings 0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    chp->channel, drive, mode), DEBUG_PROBE);
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PDC2xx_TIM(chp->channel, drive), mode);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}
3736
3737 void
3738 pdc20268_setup_channel(chp)
3739 struct channel_softc *chp;
3740 {
3741 struct ata_drive_datas *drvp;
3742 int drive;
3743 u_int32_t idedma_ctl;
3744 struct pciide_channel *cp = (struct pciide_channel*)chp;
3745 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3746 int u100;
3747
3748 /* setup DMA if needed */
3749 pciide_channel_dma_setup(cp);
3750
3751 idedma_ctl = 0;
3752
3753 /* I don't know what this is for, FreeBSD does it ... */
3754 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3755 IDEDMA_CMD + 0x1, 0x0b);
3756
3757 /*
3758 * I don't know what this is for; FreeBSD checks this ... this is not
3759 * cable type detect.
3760 */
3761 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3762 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
3763
3764 for (drive = 0; drive < 2; drive++) {
3765 drvp = &chp->ch_drive[drive];
3766 /* If no drive, skip */
3767 if ((drvp->drive_flags & DRIVE) == 0)
3768 continue;
3769 if (drvp->drive_flags & DRIVE_UDMA) {
3770 /* use Ultra/DMA */
3771 drvp->drive_flags &= ~DRIVE_DMA;
3772 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3773 if (drvp->UDMA_mode > 2 && u100 == 0)
3774 drvp->UDMA_mode = 2;
3775 } else if (drvp->drive_flags & DRIVE_DMA) {
3776 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3777 }
3778 }
3779 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */
3780 if (idedma_ctl != 0) {
3781 /* Add software bits in status register */
3782 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3783 IDEDMA_CTL, idedma_ctl);
3784 }
3785 pciide_print_modes(cp);
3786 }
3787
3788 int
3789 pdc202xx_pci_intr(arg)
3790 void *arg;
3791 {
3792 struct pciide_softc *sc = arg;
3793 struct pciide_channel *cp;
3794 struct channel_softc *wdc_cp;
3795 int i, rv, crv;
3796 u_int32_t scr;
3797
3798 rv = 0;
3799 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3800 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3801 cp = &sc->pciide_channels[i];
3802 wdc_cp = &cp->wdc_channel;
3803 /* If a compat channel skip. */
3804 if (cp->compat)
3805 continue;
3806 if (scr & PDC2xx_SCR_INT(i)) {
3807 crv = wdcintr(wdc_cp);
3808 if (crv == 0)
3809 printf("%s:%d: bogus intr (reg 0x%x)\n",
3810 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3811 else
3812 rv = 1;
3813 }
3814 }
3815 return rv;
3816 }
3817
3818 int
3819 pdc20265_pci_intr(arg)
3820 void *arg;
3821 {
3822 struct pciide_softc *sc = arg;
3823 struct pciide_channel *cp;
3824 struct channel_softc *wdc_cp;
3825 int i, rv, crv;
3826 u_int32_t dmastat;
3827
3828 rv = 0;
3829 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3830 cp = &sc->pciide_channels[i];
3831 wdc_cp = &cp->wdc_channel;
3832 /* If a compat channel skip. */
3833 if (cp->compat)
3834 continue;
3835 /*
3836 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously,
3837 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
3838 * So use it instead (requires 2 reg reads instead of 1,
3839 * but we can't do it another way).
3840 */
3841 dmastat = bus_space_read_1(sc->sc_dma_iot,
3842 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3843 if((dmastat & IDEDMA_CTL_INTR) == 0)
3844 continue;
3845 crv = wdcintr(wdc_cp);
3846 if (crv == 0)
3847 printf("%s:%d: bogus intr\n",
3848 sc->sc_wdcdev.sc_dev.dv_xname, i);
3849 else
3850 rv = 1;
3851 }
3852 return rv;
3853 }
3854
/*
 * Attach-time setup for OPTi IDE controllers.  DMA is deliberately
 * disabled for chip revisions <= 0x12 (see PR/11644 below); channel 2
 * can be administratively disabled via the init-control register.
 */
void
opti_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	u_int8_t init_ctrl;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);

	/*
	 * XXXSCW:
	 * There seem to be a couple of buggy revisions/implementations
	 * of the OPTi pciide chipset. This kludge seems to fix one of
	 * the reported problems (PR/11644) but still fails for the
	 * other (PR/13151), although the latter may be due to other
	 * issues too...
	 */
	if (PCI_REVISION(pa->pa_class) <= 0x12) {
		printf(" but disabled due to chip rev. <= 0x12");
		sc->sc_dma_ok = 0;
		sc->sc_wdcdev.cap = 0;
	} else {
		sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
		pciide_mapreg_dma(sc, pa);
	}
	printf("\n");

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = opti_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
	    OPTI_REG_INIT_CONTROL);

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		/* the second channel can be disabled by strap/firmware */
		if (channel == 1 &&
		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		/* hw_ok may have been cleared by compat-intr mapping */
		if (cp->hw_ok == 0)
			continue;
		opti_setup_channel(&cp->wdc_channel);
	}
}
3926
3927 void
3928 opti_setup_channel(chp)
3929 struct channel_softc *chp;
3930 {
3931 struct ata_drive_datas *drvp;
3932 struct pciide_channel *cp = (struct pciide_channel*)chp;
3933 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3934 int drive, spd;
3935 int mode[2];
3936 u_int8_t rv, mr;
3937
3938 /*
3939 * The `Delay' and `Address Setup Time' fields of the
3940 * Miscellaneous Register are always zero initially.
3941 */
3942 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3943 mr &= ~(OPTI_MISC_DELAY_MASK |
3944 OPTI_MISC_ADDR_SETUP_MASK |
3945 OPTI_MISC_INDEX_MASK);
3946
3947 /* Prime the control register before setting timing values */
3948 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3949
3950 /* Determine the clockrate of the PCIbus the chip is attached to */
3951 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3952 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3953
3954 /* setup DMA if needed */
3955 pciide_channel_dma_setup(cp);
3956
3957 for (drive = 0; drive < 2; drive++) {
3958 drvp = &chp->ch_drive[drive];
3959 /* If no drive, skip */
3960 if ((drvp->drive_flags & DRIVE) == 0) {
3961 mode[drive] = -1;
3962 continue;
3963 }
3964
3965 if ((drvp->drive_flags & DRIVE_DMA)) {
3966 /*
3967 * Timings will be used for both PIO and DMA,
3968 * so adjust DMA mode if needed
3969 */
3970 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3971 drvp->PIO_mode = drvp->DMA_mode + 2;
3972 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3973 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3974 drvp->PIO_mode - 2 : 0;
3975 if (drvp->DMA_mode == 0)
3976 drvp->PIO_mode = 0;
3977
3978 mode[drive] = drvp->DMA_mode + 5;
3979 } else
3980 mode[drive] = drvp->PIO_mode;
3981
3982 if (drive && mode[0] >= 0 &&
3983 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3984 /*
3985 * Can't have two drives using different values
3986 * for `Address Setup Time'.
3987 * Slow down the faster drive to compensate.
3988 */
3989 int d = (opti_tim_as[spd][mode[0]] >
3990 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3991
3992 mode[d] = mode[1-d];
3993 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3994 chp->ch_drive[d].DMA_mode = 0;
3995 chp->ch_drive[d].drive_flags &= DRIVE_DMA;
3996 }
3997 }
3998
3999 for (drive = 0; drive < 2; drive++) {
4000 int m;
4001 if ((m = mode[drive]) < 0)
4002 continue;
4003
4004 /* Set the Address Setup Time and select appropriate index */
4005 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4006 rv |= OPTI_MISC_INDEX(drive);
4007 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4008
4009 /* Set the pulse width and recovery timing parameters */
4010 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4011 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4012 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4013 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4014
4015 /* Set the Enhanced Mode register appropriately */
4016 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4017 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4018 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4019 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4020 }
4021
4022 /* Finally, enable the timings */
4023 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4024
4025 pciide_print_modes(cp);
4026 }
4027
/*
 * True when the attached chip is the ACARD ATP850U variant, which
 * uses a different register layout than the ATP860 and is capped at
 * UDMA mode 2 (see acard_chip_map()/acard_setup_channel()).
 */
#define ACARD_IS_850(sc) \
	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4030
4031 void
4032 acard_chip_map(sc, pa)
4033 struct pciide_softc *sc;
4034 struct pci_attach_args *pa;
4035 {
4036 struct pciide_channel *cp;
4037 int i;
4038 pcireg_t interface;
4039 bus_size_t cmdsize, ctlsize;
4040
4041 if (pciide_chipen(sc, pa) == 0)
4042 return;
4043
4044 /*
4045 * when the chip is in native mode it identifies itself as a
4046 * 'misc mass storage'. Fake interface in this case.
4047 */
4048 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4049 interface = PCI_INTERFACE(pa->pa_class);
4050 } else {
4051 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4052 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4053 }
4054
4055 printf("%s: bus-master DMA support present",
4056 sc->sc_wdcdev.sc_dev.dv_xname);
4057 pciide_mapreg_dma(sc, pa);
4058 printf("\n");
4059 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4060 WDC_CAPABILITY_MODE;
4061
4062 if (sc->sc_dma_ok) {
4063 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4064 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4065 sc->sc_wdcdev.irqack = pciide_irqack;
4066 }
4067 sc->sc_wdcdev.PIO_cap = 4;
4068 sc->sc_wdcdev.DMA_cap = 2;
4069 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4070
4071 sc->sc_wdcdev.set_modes = acard_setup_channel;
4072 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4073 sc->sc_wdcdev.nchannels = 2;
4074
4075 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4076 cp = &sc->pciide_channels[i];
4077 if (pciide_chansetup(sc, i, interface) == 0)
4078 continue;
4079 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4080 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4081 &ctlsize, pciide_pci_intr);
4082 } else {
4083 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4084 &cmdsize, &ctlsize);
4085 }
4086 if (cp->hw_ok == 0)
4087 return;
4088 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4089 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4090 wdcattach(&cp->wdc_channel);
4091 acard_setup_channel(&cp->wdc_channel);
4092 }
4093 if (!ACARD_IS_850(sc)) {
4094 u_int32_t reg;
4095 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4096 reg &= ~ATP860_CTRL_INT;
4097 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4098 }
4099 }
4100
/*
 * Program per-channel/per-drive timing and Ultra-DMA registers of an
 * ACARD ATP850/860 controller from the modes negotiated in ch_drive[].
 * The ATP850 and ATP860 use different register layouts, selected via
 * ACARD_IS_850().
 */
void
acard_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	int drive;
	u_int32_t idetime, udma_mode;
	u_int32_t idedma_ctl;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	/* Read current register contents and clear this channel's fields. */
	if (ACARD_IS_850(sc)) {
		idetime = 0;
		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
		udma_mode &= ~ATP850_UDMA_MASK(channel);
	} else {
		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
		idetime &= ~ATP860_SETTIME_MASK(channel);
		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
		udma_mode &= ~ATP860_UDMA_MASK(channel);

		/* check 80 pins cable */
		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
			if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
			    & ATP860_CTRL_80P(chp->channel)) {
				/*
				 * Cap both drives at UDMA2.
				 * NOTE(review): the CTRL bit being SET is
				 * treated as "no 80-conductor cable" here;
				 * confirm polarity against the ATP860
				 * datasheet.
				 */
				if (chp->ch_drive[0].UDMA_mode > 2)
					chp->ch_drive[0].UDMA_mode = 2;
				if (chp->ch_drive[1].UDMA_mode > 2)
					chp->ch_drive[1].UDMA_mode = 2;
			}
		}
	}

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_udma[drvp->UDMA_mode],
				    acard_rec_udma[drvp->UDMA_mode]);
				udma_mode |= ATP850_UDMA_MODE(channel, drive,
				    acard_udma_conf[drvp->UDMA_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_udma[drvp->UDMA_mode],
				    acard_rec_udma[drvp->UDMA_mode]);
				udma_mode |= ATP860_UDMA_MODE(channel, drive,
				    acard_udma_conf[drvp->UDMA_mode]);
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
		    (drvp->drive_flags & DRIVE_DMA)) {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_dma[drvp->DMA_mode],
				    acard_rec_dma[drvp->DMA_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_dma[drvp->DMA_mode],
				    acard_rec_dma[drvp->DMA_mode]);
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_pio[drvp->PIO_mode],
				    acard_rec_pio[drvp->PIO_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_pio[drvp->PIO_mode],
				    acard_rec_pio[drvp->PIO_mode]);
			}
			/* Enable the channel in the control register. */
			pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
			    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
			    | ATP8x0_CTRL_EN(channel));
		}
	}

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
	}
	pciide_print_modes(cp);

	/* Write the composed timing/UDMA values back to config space. */
	if (ACARD_IS_850(sc)) {
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    ATP850_IDETIME(channel), idetime);
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
	} else {
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
	}
}
4213
4214 int
4215 acard_pci_intr(arg)
4216 void *arg;
4217 {
4218 struct pciide_softc *sc = arg;
4219 struct pciide_channel *cp;
4220 struct channel_softc *wdc_cp;
4221 int rv = 0;
4222 int dmastat, i, crv;
4223
4224 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4225 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4226 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4227 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4228 continue;
4229 cp = &sc->pciide_channels[i];
4230 wdc_cp = &cp->wdc_channel;
4231 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4232 (void)wdcintr(wdc_cp);
4233 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4234 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4235 continue;
4236 }
4237 crv = wdcintr(wdc_cp);
4238 if (crv == 0)
4239 printf("%s:%d: bogus intr\n",
4240 sc->sc_wdcdev.sc_dev.dv_xname, i);
4241 else if (crv == 1)
4242 rv = 1;
4243 else if (rv == 0)
4244 rv = crv;
4245 }
4246 return rv;
4247 }
4248
4249 static int
4250 sl82c105_bugchk(struct pci_attach_args *pa)
4251 {
4252
4253 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4254 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4255 return (0);
4256
4257 if (PCI_REVISION(pa->pa_class) <= 0x05)
4258 return (1);
4259
4260 return (0);
4261 }
4262
/*
 * Map and attach a Symphony Labs SL82C105 IDE controller: set
 * capabilities, honor the per-channel enable bits in IDECSR, and
 * program initial timings on every usable channel.
 */
void
sl82c105_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface, idecr;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);

	/*
	 * Check to see if we're part of the Winbond 83c553 Southbridge.
	 * If so, we need to disable DMA on rev. <= 5 of that chip.
	 */
	if (pci_find_device(pa, sl82c105_bugchk)) {
		printf(" but disabled due to 83c553 rev. <= 0x05");
		sc->sc_dma_ok = 0;
	} else
		pciide_mapreg_dma(sc, pa);
	printf("\n");

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = sl82c105_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* IDECSR carries the per-channel enable bits checked below. */
	idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		/* Skip channels that firmware left disabled in IDECSR. */
		if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
		    (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		sl82c105_setup_channel(&cp->wdc_channel);
	}
}
4327
4328 void
4329 sl82c105_setup_channel(chp)
4330 struct channel_softc *chp;
4331 {
4332 struct ata_drive_datas *drvp;
4333 struct pciide_channel *cp = (struct pciide_channel*)chp;
4334 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4335 int pxdx_reg, drive;
4336 pcireg_t pxdx;
4337
4338 /* Set up DMA if needed. */
4339 pciide_channel_dma_setup(cp);
4340
4341 for (drive = 0; drive < 2; drive++) {
4342 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4343 : SYMPH_P1D0CR) + (drive * 4);
4344
4345 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4346
4347 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4348 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4349
4350 drvp = &chp->ch_drive[drive];
4351 /* If no drive, skip. */
4352 if ((drvp->drive_flags & DRIVE) == 0) {
4353 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4354 continue;
4355 }
4356
4357 if (drvp->drive_flags & DRIVE_DMA) {
4358 /*
4359 * Timings will be used for both PIO and DMA,
4360 * so adjust DMA mode if needed.
4361 */
4362 if (drvp->PIO_mode >= 3) {
4363 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4364 drvp->DMA_mode = drvp->PIO_mode - 2;
4365 if (drvp->DMA_mode < 1) {
4366 /*
4367 * Can't mix both PIO and DMA.
4368 * Disable DMA.
4369 */
4370 drvp->drive_flags &= ~DRIVE_DMA;
4371 }
4372 } else {
4373 /*
4374 * Can't mix both PIO and DMA. Disable
4375 * DMA.
4376 */
4377 drvp->drive_flags &= ~DRIVE_DMA;
4378 }
4379 }
4380
4381 if (drvp->drive_flags & DRIVE_DMA) {
4382 /* Use multi-word DMA. */
4383 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4384 PxDx_CMD_ON_SHIFT;
4385 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4386 } else {
4387 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4388 PxDx_CMD_ON_SHIFT;
4389 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4390 }
4391
4392 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4393
4394 /* ...and set the mode for this drive. */
4395 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4396 }
4397
4398 pciide_print_modes(cp);
4399 }
4400