1 /* $NetBSD: pciide.c,v 1.153 2002/05/19 17:40:46 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.153 2002/05/19 17:40:46 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
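/*
 * pci_conf_read()/pci_conf_write() operate on aligned 32-bit words, so an
 * 8-bit register is read by shifting and masking within the containing
 * word, and written with a read-modify-write of that word.
 */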
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 static int sis_hostbr_match __P(( struct pci_attach_args *));
187
188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void acer_setup_channel __P((struct channel_softc*));
190 int acer_pci_intr __P((void *));
191 static int acer_isabr_match __P(( struct pci_attach_args *));
192
193 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void pdc202xx_setup_channel __P((struct channel_softc*));
195 void pdc20268_setup_channel __P((struct channel_softc*));
196 int pdc202xx_pci_intr __P((void *));
197 int pdc20265_pci_intr __P((void *));
198
199 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
200 void opti_setup_channel __P((struct channel_softc*));
201
202 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void hpt_setup_channel __P((struct channel_softc*));
204 int hpt_pci_intr __P((void *));
205
206 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
207 void acard_setup_channel __P((struct channel_softc*));
208 int acard_pci_intr __P((void *));
209
210 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
211 void serverworks_setup_channel __P((struct channel_softc*));
212 int serverworks_pci_intr __P((void *));
213
214 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
215 void sl82c105_setup_channel __P((struct channel_softc*));
216
217 void pciide_channel_dma_setup __P((struct pciide_channel *));
218 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
219 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
220 void pciide_dma_start __P((void*, int, int));
221 int pciide_dma_finish __P((void*, int, int, int));
222 void pciide_irqack __P((struct channel_softc *));
223 void pciide_print_modes __P((struct pciide_channel *));
224
225 struct pciide_product_desc {
226 u_int32_t ide_product;
227 int ide_flags;
228 const char *ide_name;
229 /* map and setup chip, probe drives */
230 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
231 };
232
233 /* Flags for ide_flags */
234 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
235 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
236
237 /* Default product description for devices not known to this driver */
238 const struct pciide_product_desc default_product_desc = {
239 0,
240 0,
241 "Generic PCI IDE controller",
242 default_chip_map,
243 };
244
245 const struct pciide_product_desc pciide_intel_products[] = {
246 { PCI_PRODUCT_INTEL_82092AA,
247 0,
248 "Intel 82092AA IDE controller",
249 default_chip_map,
250 },
251 { PCI_PRODUCT_INTEL_82371FB_IDE,
252 0,
253 "Intel 82371FB IDE controller (PIIX)",
254 piix_chip_map,
255 },
256 { PCI_PRODUCT_INTEL_82371SB_IDE,
257 0,
258 "Intel 82371SB IDE Interface (PIIX3)",
259 piix_chip_map,
260 },
261 { PCI_PRODUCT_INTEL_82371AB_IDE,
262 0,
263 "Intel 82371AB IDE controller (PIIX4)",
264 piix_chip_map,
265 },
266 { PCI_PRODUCT_INTEL_82440MX_IDE,
267 0,
268 "Intel 82440MX IDE controller",
269 piix_chip_map
270 },
271 { PCI_PRODUCT_INTEL_82801AA_IDE,
272 0,
273 "Intel 82801AA IDE Controller (ICH)",
274 piix_chip_map,
275 },
276 { PCI_PRODUCT_INTEL_82801AB_IDE,
277 0,
278 "Intel 82801AB IDE Controller (ICH0)",
279 piix_chip_map,
280 },
281 { PCI_PRODUCT_INTEL_82801BA_IDE,
282 0,
283 "Intel 82801BA IDE Controller (ICH2)",
284 piix_chip_map,
285 },
286 { PCI_PRODUCT_INTEL_82801BAM_IDE,
287 0,
288 "Intel 82801BAM IDE Controller (ICH2)",
289 piix_chip_map,
290 },
291 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
292 0,
293 "Intel 82801CA IDE Controller",
294 piix_chip_map,
295 },
296 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
297 0,
298 "Intel 82801CA IDE Controller",
299 piix_chip_map,
300 },
301 { 0,
302 0,
303 NULL,
304 NULL
305 }
306 };
307
308 const struct pciide_product_desc pciide_amd_products[] = {
309 { PCI_PRODUCT_AMD_PBC756_IDE,
310 0,
311 "Advanced Micro Devices AMD756 IDE Controller",
312 amd7x6_chip_map
313 },
314 { PCI_PRODUCT_AMD_PBC766_IDE,
315 0,
316 "Advanced Micro Devices AMD766 IDE Controller",
317 amd7x6_chip_map
318 },
319 { PCI_PRODUCT_AMD_PBC768_IDE,
320 0,
321 "Advanced Micro Devices AMD768 IDE Controller",
322 amd7x6_chip_map
323 },
324 { 0,
325 0,
326 NULL,
327 NULL
328 }
329 };
330
331 const struct pciide_product_desc pciide_cmd_products[] = {
332 { PCI_PRODUCT_CMDTECH_640,
333 0,
334 "CMD Technology PCI0640",
335 cmd_chip_map
336 },
337 { PCI_PRODUCT_CMDTECH_643,
338 0,
339 "CMD Technology PCI0643",
340 cmd0643_9_chip_map,
341 },
342 { PCI_PRODUCT_CMDTECH_646,
343 0,
344 "CMD Technology PCI0646",
345 cmd0643_9_chip_map,
346 },
347 { PCI_PRODUCT_CMDTECH_648,
348 IDE_PCI_CLASS_OVERRIDE,
349 "CMD Technology PCI0648",
350 cmd0643_9_chip_map,
351 },
352 { PCI_PRODUCT_CMDTECH_649,
353 IDE_PCI_CLASS_OVERRIDE,
354 "CMD Technology PCI0649",
355 cmd0643_9_chip_map,
356 },
357 { 0,
358 0,
359 NULL,
360 NULL
361 }
362 };
363
364 const struct pciide_product_desc pciide_via_products[] = {
365 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
366 0,
367 NULL,
368 apollo_chip_map,
369 },
370 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
371 0,
372 NULL,
373 apollo_chip_map,
374 },
375 { 0,
376 0,
377 NULL,
378 NULL
379 }
380 };
381
382 const struct pciide_product_desc pciide_cypress_products[] = {
383 { PCI_PRODUCT_CONTAQ_82C693,
384 IDE_16BIT_IOSPACE,
385 "Cypress 82C693 IDE Controller",
386 cy693_chip_map,
387 },
388 { 0,
389 0,
390 NULL,
391 NULL
392 }
393 };
394
395 const struct pciide_product_desc pciide_sis_products[] = {
396 { PCI_PRODUCT_SIS_5597_IDE,
397 0,
398 "Silicon Integrated System 5597/5598 IDE controller",
399 sis_chip_map,
400 },
401 { 0,
402 0,
403 NULL,
404 NULL
405 }
406 };
407
408 const struct pciide_product_desc pciide_acer_products[] = {
409 { PCI_PRODUCT_ALI_M5229,
410 0,
411 "Acer Labs M5229 UDMA IDE Controller",
412 acer_chip_map,
413 },
414 { 0,
415 0,
416 NULL,
417 NULL
418 }
419 };
420
421 const struct pciide_product_desc pciide_promise_products[] = {
422 { PCI_PRODUCT_PROMISE_ULTRA33,
423 IDE_PCI_CLASS_OVERRIDE,
424 "Promise Ultra33/ATA Bus Master IDE Accelerator",
425 pdc202xx_chip_map,
426 },
427 { PCI_PRODUCT_PROMISE_ULTRA66,
428 IDE_PCI_CLASS_OVERRIDE,
429 "Promise Ultra66/ATA Bus Master IDE Accelerator",
430 pdc202xx_chip_map,
431 },
432 { PCI_PRODUCT_PROMISE_ULTRA100,
433 IDE_PCI_CLASS_OVERRIDE,
434 "Promise Ultra100/ATA Bus Master IDE Accelerator",
435 pdc202xx_chip_map,
436 },
437 { PCI_PRODUCT_PROMISE_ULTRA100X,
438 IDE_PCI_CLASS_OVERRIDE,
439 "Promise Ultra100/ATA Bus Master IDE Accelerator",
440 pdc202xx_chip_map,
441 },
442 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
443 IDE_PCI_CLASS_OVERRIDE,
444 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
445 pdc202xx_chip_map,
446 },
447 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
448 IDE_PCI_CLASS_OVERRIDE,
449 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
450 pdc202xx_chip_map,
451 },
452 { PCI_PRODUCT_PROMISE_ULTRA133,
453 IDE_PCI_CLASS_OVERRIDE,
454 "Promise Ultra133/ATA Bus Master IDE Accelerator",
455 pdc202xx_chip_map,
456 },
457 { 0,
458 0,
459 NULL,
460 NULL
461 }
462 };
463
464 const struct pciide_product_desc pciide_opti_products[] = {
465 { PCI_PRODUCT_OPTI_82C621,
466 0,
467 "OPTi 82c621 PCI IDE controller",
468 opti_chip_map,
469 },
470 { PCI_PRODUCT_OPTI_82C568,
471 0,
472 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
473 opti_chip_map,
474 },
475 { PCI_PRODUCT_OPTI_82D568,
476 0,
477 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
478 opti_chip_map,
479 },
480 { 0,
481 0,
482 NULL,
483 NULL
484 }
485 };
486
487 const struct pciide_product_desc pciide_triones_products[] = {
488 { PCI_PRODUCT_TRIONES_HPT366,
489 IDE_PCI_CLASS_OVERRIDE,
490 NULL,
491 hpt_chip_map,
492 },
493 { PCI_PRODUCT_TRIONES_HPT374,
494 IDE_PCI_CLASS_OVERRIDE,
495 NULL,
496 hpt_chip_map
497 },
498 { 0,
499 0,
500 NULL,
501 NULL
502 }
503 };
504
505 const struct pciide_product_desc pciide_acard_products[] = {
506 { PCI_PRODUCT_ACARD_ATP850U,
507 IDE_PCI_CLASS_OVERRIDE,
508 "Acard ATP850U Ultra33 IDE Controller",
509 acard_chip_map,
510 },
511 { PCI_PRODUCT_ACARD_ATP860,
512 IDE_PCI_CLASS_OVERRIDE,
513 "Acard ATP860 Ultra66 IDE Controller",
514 acard_chip_map,
515 },
516 { PCI_PRODUCT_ACARD_ATP860A,
517 IDE_PCI_CLASS_OVERRIDE,
518 "Acard ATP860-A Ultra66 IDE Controller",
519 acard_chip_map,
520 },
521 { 0,
522 0,
523 NULL,
524 NULL
525 }
526 };
527
528 const struct pciide_product_desc pciide_serverworks_products[] = {
529 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
530 0,
531 "ServerWorks OSB4 IDE Controller",
532 serverworks_chip_map,
533 },
534 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
535 0,
536 "ServerWorks CSB5 IDE Controller",
537 serverworks_chip_map,
538 },
539 { 0,
540 0,
541 NULL,
542 }
543 };
544
545 const struct pciide_product_desc pciide_symphony_products[] = {
546 { PCI_PRODUCT_SYMPHONY_82C105,
547 0,
548 "Symphony Labs 82C105 IDE controller",
549 sl82c105_chip_map,
550 },
551 { 0,
552 0,
553 NULL,
554 }
555 };
556
557 const struct pciide_product_desc pciide_winbond_products[] = {
558 { PCI_PRODUCT_WINBOND_W83C553F_1,
559 0,
560 "Winbond W83C553F IDE controller",
561 sl82c105_chip_map,
562 },
563 { 0,
564 0,
565 NULL,
566 }
567 };
568
569 struct pciide_vendor_desc {
570 u_int32_t ide_vendor;
571 const struct pciide_product_desc *ide_products;
572 };
573
574 const struct pciide_vendor_desc pciide_vendors[] = {
575 { PCI_VENDOR_INTEL, pciide_intel_products },
576 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
577 { PCI_VENDOR_VIATECH, pciide_via_products },
578 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
579 { PCI_VENDOR_SIS, pciide_sis_products },
580 { PCI_VENDOR_ALI, pciide_acer_products },
581 { PCI_VENDOR_PROMISE, pciide_promise_products },
582 { PCI_VENDOR_AMD, pciide_amd_products },
583 { PCI_VENDOR_OPTI, pciide_opti_products },
584 { PCI_VENDOR_TRIONES, pciide_triones_products },
585 { PCI_VENDOR_ACARD, pciide_acard_products },
586 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
587 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
588 { PCI_VENDOR_WINBOND, pciide_winbond_products },
589 { 0, NULL }
590 };
591
592 /* options passed via the 'flags' config keyword */
593 #define PCIIDE_OPTIONS_DMA 0x01
594 #define PCIIDE_OPTIONS_NODMA 0x02
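/*
 * Example (illustrative only; the exact line depends on your kernel
 * config file):
 *	pciide* at pci? dev ? function ? flags 0x0001	# PCIIDE_OPTIONS_DMA
 */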
595
596 int pciide_match __P((struct device *, struct cfdata *, void *));
597 void pciide_attach __P((struct device *, struct device *, void *));
598
599 struct cfattach pciide_ca = {
600 sizeof(struct pciide_softc), pciide_match, pciide_attach
601 };
602 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
603 int pciide_mapregs_compat __P(( struct pci_attach_args *,
604 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
605 int pciide_mapregs_native __P((struct pci_attach_args *,
606 struct pciide_channel *, bus_size_t *, bus_size_t *,
607 int (*pci_intr) __P((void *))));
608 void pciide_mapreg_dma __P((struct pciide_softc *,
609 struct pci_attach_args *));
610 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
611 void pciide_mapchan __P((struct pci_attach_args *,
612 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
613 int (*pci_intr) __P((void *))));
614 int pciide_chan_candisable __P((struct pciide_channel *));
615 void pciide_map_compat_intr __P(( struct pci_attach_args *,
616 struct pciide_channel *, int, int));
617 int pciide_compat_intr __P((void *));
618 int pciide_pci_intr __P((void *));
619 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
620
621 const struct pciide_product_desc *
622 pciide_lookup_product(id)
623 u_int32_t id;
624 {
625 const struct pciide_product_desc *pp;
626 const struct pciide_vendor_desc *vp;
627
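/*
 * Both tables end in an all-zero sentinel: the vendor table with
 * ide_products == NULL, each product table with chip_map == NULL.
 */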
628 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
629 if (PCI_VENDOR(id) == vp->ide_vendor)
630 break;
631
632 if ((pp = vp->ide_products) == NULL)
633 return NULL;
634
635 for (; pp->chip_map != NULL; pp++)
636 if (PCI_PRODUCT(id) == pp->ide_product)
637 break;
638
639 if (pp->chip_map == NULL)
640 return NULL;
641 return pp;
642 }
643
644 int
645 pciide_match(parent, match, aux)
646 struct device *parent;
647 struct cfdata *match;
648 void *aux;
649 {
650 struct pci_attach_args *pa = aux;
651 const struct pciide_product_desc *pp;
652
653 /*
654 * Check the class code to see that it's a PCI IDE controller.
655 * If it is, we assume that we can deal with it; it _should_
656 * work in a standardized way...
657 */
658 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
659 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
660 return (1);
661 }
662
663 /*
664 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
665 * controllers. Let's see if we can deal with them anyway.
666 */
667 pp = pciide_lookup_product(pa->pa_id);
668 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
669 return (1);
670 }
671
672 return (0);
673 }
674
675 void
676 pciide_attach(parent, self, aux)
677 struct device *parent, *self;
678 void *aux;
679 {
680 struct pci_attach_args *pa = aux;
681 pci_chipset_tag_t pc = pa->pa_pc;
682 pcitag_t tag = pa->pa_tag;
683 struct pciide_softc *sc = (struct pciide_softc *)self;
684 pcireg_t csr;
685 char devinfo[256];
686 const char *displaydev;
687
688 sc->sc_pp = pciide_lookup_product(pa->pa_id);
689 if (sc->sc_pp == NULL) {
690 sc->sc_pp = &default_product_desc;
691 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
692 displaydev = devinfo;
693 } else
694 displaydev = sc->sc_pp->ide_name;
695
696 /* if displaydev == NULL, printf is done in chip-specific map */
697 if (displaydev)
698 printf(": %s (rev. 0x%02x)\n", displaydev,
699 PCI_REVISION(pa->pa_class));
700
701 sc->sc_pc = pa->pa_pc;
702 sc->sc_tag = pa->pa_tag;
703 #ifdef WDCDEBUG
704 if (wdcdebug_pciide_mask & DEBUG_PROBE)
705 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
706 #endif
707 sc->sc_pp->chip_map(sc, pa);
708
709 if (sc->sc_dma_ok) {
710 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
711 csr |= PCI_COMMAND_MASTER_ENABLE;
712 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
713 }
714 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
715 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
716 }
717
718 /* tell whether the chip is enabled or not */
719 int
720 pciide_chipen(sc, pa)
721 struct pciide_softc *sc;
722 struct pci_attach_args *pa;
723 {
724 pcireg_t csr;
725 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
726 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
727 PCI_COMMAND_STATUS_REG);
728 printf("%s: device disabled (at %s)\n",
729 sc->sc_wdcdev.sc_dev.dv_xname,
730 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
731 "device" : "bridge");
732 return 0;
733 }
734 return 1;
735 }
736
737 int
738 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
739 struct pci_attach_args *pa;
740 struct pciide_channel *cp;
741 int compatchan;
742 bus_size_t *cmdsizep, *ctlsizep;
743 {
744 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
745 struct channel_softc *wdc_cp = &cp->wdc_channel;
746
747 cp->compat = 1;
748 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
749 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
750
751 wdc_cp->cmd_iot = pa->pa_iot;
752 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
753 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
754 printf("%s: couldn't map %s channel cmd regs\n",
755 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
756 return (0);
757 }
758
759 wdc_cp->ctl_iot = pa->pa_iot;
760 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
761 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
762 printf("%s: couldn't map %s channel ctl regs\n",
763 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
764 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
765 PCIIDE_COMPAT_CMD_SIZE);
766 return (0);
767 }
768
769 return (1);
770 }
771
772 int
773 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
774 struct pci_attach_args * pa;
775 struct pciide_channel *cp;
776 bus_size_t *cmdsizep, *ctlsizep;
777 int (*pci_intr) __P((void *));
778 {
779 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
780 struct channel_softc *wdc_cp = &cp->wdc_channel;
781 const char *intrstr;
782 pci_intr_handle_t intrhandle;
783
784 cp->compat = 0;
785
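/*
 * Both channels of a native-mode controller share one PCI interrupt;
 * establish it only once and reuse the handle for the second channel.
 */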
786 if (sc->sc_pci_ih == NULL) {
787 if (pci_intr_map(pa, &intrhandle) != 0) {
788 printf("%s: couldn't map native-PCI interrupt\n",
789 sc->sc_wdcdev.sc_dev.dv_xname);
790 return 0;
791 }
792 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
793 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
794 intrhandle, IPL_BIO, pci_intr, sc);
795 if (sc->sc_pci_ih != NULL) {
796 printf("%s: using %s for native-PCI interrupt\n",
797 sc->sc_wdcdev.sc_dev.dv_xname,
798 intrstr ? intrstr : "unknown interrupt");
799 } else {
800 printf("%s: couldn't establish native-PCI interrupt",
801 sc->sc_wdcdev.sc_dev.dv_xname);
802 if (intrstr != NULL)
803 printf(" at %s", intrstr);
804 printf("\n");
805 return 0;
806 }
807 }
808 cp->ih = sc->sc_pci_ih;
809 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
810 PCI_MAPREG_TYPE_IO, 0,
811 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
812 printf("%s: couldn't map %s channel cmd regs\n",
813 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
814 return 0;
815 }
816
817 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
818 PCI_MAPREG_TYPE_IO, 0,
819 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
820 printf("%s: couldn't map %s channel ctl regs\n",
821 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
822 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
823 return 0;
824 }
825 /*
826 * In native mode, 4 bytes of I/O space are mapped for the control
827 * register block; the control register itself is at offset 2. Pass the
828 * generic code a handle for only one byte at the right offset.
829 */
830 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
831 &wdc_cp->ctl_ioh) != 0) {
832 printf("%s: unable to subregion %s channel ctl regs\n",
833 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
834 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
835 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
836 return 0;
837 }
838 return (1);
839 }
840
841 void
842 pciide_mapreg_dma(sc, pa)
843 struct pciide_softc *sc;
844 struct pci_attach_args *pa;
845 {
846 pcireg_t maptype;
847 bus_addr_t addr;
848
849 /*
850 * Map DMA registers
851 *
852 * Note that sc_dma_ok is the right variable to test to see if
853 * DMA can be done. If the interface doesn't support DMA,
854 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
855 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
856 * non-zero if the interface supports DMA and the registers
857 * could be mapped.
858 *
859 * XXX Note that despite the fact that the Bus Master IDE specs
860 * XXX say that "The bus master IDE function uses 16 bytes of IO
861 * XXX space," some controllers (at least the United
862 * XXX Microelectronics UM8886BF) place it in memory space.
863 */
864 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
865 PCIIDE_REG_BUS_MASTER_DMA);
866
867 switch (maptype) {
868 case PCI_MAPREG_TYPE_IO:
869 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
870 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
871 &addr, NULL, NULL) == 0);
872 if (sc->sc_dma_ok == 0) {
873 printf(", but unused (couldn't query registers)");
874 break;
875 }
876 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
877 && addr >= 0x10000) {
878 sc->sc_dma_ok = 0;
879 printf(", but unused (registers at unsafe address "
880 "%#lx)", (unsigned long)addr);
881 break;
882 }
883 /* FALLTHROUGH */
884
885 case PCI_MAPREG_MEM_TYPE_32BIT:
886 sc->sc_dma_ok = (pci_mapreg_map(pa,
887 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
888 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
889 sc->sc_dmat = pa->pa_dmat;
890 if (sc->sc_dma_ok == 0) {
891 printf(", but unused (couldn't map registers)");
892 } else {
893 sc->sc_wdcdev.dma_arg = sc;
894 sc->sc_wdcdev.dma_init = pciide_dma_init;
895 sc->sc_wdcdev.dma_start = pciide_dma_start;
896 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
897 }
898
899 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
900 PCIIDE_OPTIONS_NODMA) {
901 printf(", but unused (forced off by config file)");
902 sc->sc_dma_ok = 0;
903 }
904 break;
905
906 default:
907 sc->sc_dma_ok = 0;
908 printf(", but unsupported register maptype (0x%x)", maptype);
909 }
910 }
911
912 int
913 pciide_compat_intr(arg)
914 void *arg;
915 {
916 struct pciide_channel *cp = arg;
917
918 #ifdef DIAGNOSTIC
919 /* should only be called for a compat channel */
920 if (cp->compat == 0)
921 panic("pciide compat intr called for non-compat chan %p\n", cp);
922 #endif
923 return (wdcintr(&cp->wdc_channel));
924 }
925
926 int
927 pciide_pci_intr(arg)
928 void *arg;
929 {
930 struct pciide_softc *sc = arg;
931 struct pciide_channel *cp;
932 struct channel_softc *wdc_cp;
933 int i, rv, crv;
934
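/*
 * Poll each native-PCI channel that is waiting for an interrupt. rv
 * becomes 1 once any channel claims the interrupt; otherwise a "maybe"
 * result (-1) from wdcintr() is passed back to the interrupt framework.
 */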
935 rv = 0;
936 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
937 cp = &sc->pciide_channels[i];
938 wdc_cp = &cp->wdc_channel;
939
940 /* If this is a compat channel, skip it. */
941 if (cp->compat)
942 continue;
943 /* if this channel is not waiting for an interrupt, skip */
944 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
945 continue;
946
947 crv = wdcintr(wdc_cp);
948 if (crv == 0)
949 ; /* leave rv alone */
950 else if (crv == 1)
951 rv = 1; /* claim the intr */
952 else if (rv == 0) /* crv should be -1 in this case */
953 rv = crv; /* if we've done no better, take it */
954 }
955 return (rv);
956 }
957
958 void
959 pciide_channel_dma_setup(cp)
960 struct pciide_channel *cp;
961 {
962 int drive;
963 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
964 struct ata_drive_datas *drvp;
965
966 for (drive = 0; drive < 2; drive++) {
967 drvp = &cp->wdc_channel.ch_drive[drive];
968 /* If no drive, skip */
969 if ((drvp->drive_flags & DRIVE) == 0)
970 continue;
971 /* setup DMA if needed */
972 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
973 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
974 sc->sc_dma_ok == 0) {
975 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
976 continue;
977 }
978 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
979 != 0) {
980 /* Abort DMA setup */
981 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
982 continue;
983 }
984 }
985 }
986
987 int
988 pciide_dma_table_setup(sc, channel, drive)
989 struct pciide_softc *sc;
990 int channel, drive;
991 {
992 bus_dma_segment_t seg;
993 int error, rseg;
994 const bus_size_t dma_table_size =
995 sizeof(struct idedma_table) * NIDEDMA_TABLES;
996 struct pciide_dma_maps *dma_maps =
997 &sc->pciide_channels[channel].dma_maps[drive];
998
999 /* If table was already allocated, just return */
1000 if (dma_maps->dma_table)
1001 return 0;
1002
1003 /* Allocate memory for the DMA tables and map it */
1004 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1005 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1006 BUS_DMA_NOWAIT)) != 0) {
1007 printf("%s:%d: unable to allocate table DMA for "
1008 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1009 channel, drive, error);
1010 return error;
1011 }
1012 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1013 dma_table_size,
1014 (caddr_t *)&dma_maps->dma_table,
1015 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1016 printf("%s:%d: unable to map table DMA for "
1017 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1018 channel, drive, error);
1019 return error;
1020 }
1021 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1022 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1023 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1024
1025 /* Create and load table DMA map for this disk */
1026 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1027 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1028 &dma_maps->dmamap_table)) != 0) {
1029 printf("%s:%d: unable to create table DMA map for "
1030 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1031 channel, drive, error);
1032 return error;
1033 }
1034 if ((error = bus_dmamap_load(sc->sc_dmat,
1035 dma_maps->dmamap_table,
1036 dma_maps->dma_table,
1037 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1038 printf("%s:%d: unable to load table DMA map for "
1039 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1040 channel, drive, error);
1041 return error;
1042 }
1043 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1044 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1045 DEBUG_PROBE);
1046 /* Create a xfer DMA map for this drive */
1047 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1048 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1049 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1050 &dma_maps->dmamap_xfer)) != 0) {
1051 printf("%s:%d: unable to create xfer DMA map for "
1052 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1053 channel, drive, error);
1054 return error;
1055 }
1056 return 0;
1057 }
1058
1059 int
1060 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1061 void *v;
1062 int channel, drive;
1063 void *databuf;
1064 size_t datalen;
1065 int flags;
1066 {
1067 struct pciide_softc *sc = v;
1068 int error, seg;
1069 struct pciide_dma_maps *dma_maps =
1070 &sc->pciide_channels[channel].dma_maps[drive];
1071
1072 error = bus_dmamap_load(sc->sc_dmat,
1073 dma_maps->dmamap_xfer,
1074 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1075 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1076 if (error) {
1077 printf("%s:%d: unable to load xfer DMA map for "
1078 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1079 channel, drive, error);
1080 return error;
1081 }
1082
1083 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1084 dma_maps->dmamap_xfer->dm_mapsize,
1085 (flags & WDC_DMA_READ) ?
1086 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1087
1088 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1089 #ifdef DIAGNOSTIC
1090 /* A segment must not cross a 64k boundary */
1091 {
1092 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1093 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1094 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1095 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1096 printf("pciide_dma: segment %d physical addr 0x%lx"
1097 " len 0x%lx not properly aligned\n",
1098 seg, phys, len);
1099 panic("pciide_dma: buf align");
1100 }
1101 }
1102 #endif
1103 dma_maps->dma_table[seg].base_addr =
1104 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1105 dma_maps->dma_table[seg].byte_count =
1106 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1107 IDEDMA_BYTE_COUNT_MASK);
1108 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1109 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1110 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1111
1112 }
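/* Mark the last entry of the table as end-of-table (EOT). */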
1113 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1114 htole32(IDEDMA_BYTE_COUNT_EOT);
1115
1116 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1117 dma_maps->dmamap_table->dm_mapsize,
1118 BUS_DMASYNC_PREWRITE);
1119
1120 /* Maps are ready. Start DMA function */
1121 #ifdef DIAGNOSTIC
1122 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1123 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1124 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1125 panic("pciide_dma_init: table align");
1126 }
1127 #endif
1128
1129 /* Clear status bits */
1130 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1131 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1132 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1133 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1134 /* Write table addr */
1135 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1136 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1137 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1138 /* set read/write */
1139 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1140 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1141 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1142 /* remember flags */
1143 dma_maps->dma_flags = flags;
1144 return 0;
1145 }
1146
1147 void
1148 pciide_dma_start(v, channel, drive)
1149 void *v;
1150 int channel, drive;
1151 {
1152 struct pciide_softc *sc = v;
1153
1154 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1155 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1156 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1157 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1158 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1159 }
1160
1161 int
1162 pciide_dma_finish(v, channel, drive, force)
1163 void *v;
1164 int channel, drive;
1165 int force;
1166 {
1167 struct pciide_softc *sc = v;
1168 u_int8_t status;
1169 int error = 0;
1170 struct pciide_dma_maps *dma_maps =
1171 &sc->pciide_channels[channel].dma_maps[drive];
1172
1173 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1174 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1175 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1176 DEBUG_XFERS);
1177
1178 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1179 return WDC_DMAST_NOIRQ;
1180
1181 /* stop DMA channel */
1182 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1183 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1184 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1185 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1186
1187 /* Unload the map of the data buffer */
1188 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1189 dma_maps->dmamap_xfer->dm_mapsize,
1190 (dma_maps->dma_flags & WDC_DMA_READ) ?
1191 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1192 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1193
1194 if ((status & IDEDMA_CTL_ERR) != 0) {
1195 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1196 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1197 error |= WDC_DMAST_ERR;
1198 }
1199
1200 if ((status & IDEDMA_CTL_INTR) == 0) {
1201 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1202 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1203 drive, status);
1204 error |= WDC_DMAST_NOIRQ;
1205 }
1206
1207 if ((status & IDEDMA_CTL_ACT) != 0) {
1208 /* data underrun, may be a valid condition for ATAPI */
1209 error |= WDC_DMAST_UNDER;
1210 }
1211 return error;
1212 }
1213
1214 void
1215 pciide_irqack(chp)
1216 struct channel_softc *chp;
1217 {
1218 struct pciide_channel *cp = (struct pciide_channel*)chp;
1219 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1220
1221 /* clear status bits in IDE DMA registers */
1222 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1223 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1224 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1225 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1226 }
1227
1228 /* some common code used by several chip_map */
1229 int
1230 pciide_chansetup(sc, channel, interface)
1231 struct pciide_softc *sc;
1232 int channel;
1233 pcireg_t interface;
1234 {
1235 struct pciide_channel *cp = &sc->pciide_channels[channel];
1236 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1237 cp->name = PCIIDE_CHANNEL_NAME(channel);
1238 cp->wdc_channel.channel = channel;
1239 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1240 cp->wdc_channel.ch_queue =
1241 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1242 if (cp->wdc_channel.ch_queue == NULL) {
1243 printf("%s: %s channel: "
1244 "can't allocate memory for command queue\n",
1245 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1246 return 0;
1247 }
1248 printf("%s: %s channel %s to %s mode\n",
1249 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1250 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1251 "configured" : "wired",
1252 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1253 "native-PCI" : "compatibility");
1254 return 1;
1255 }
1256
1257 /* some common code used by several chip channel_map */
1258 void
1259 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1260 struct pci_attach_args *pa;
1261 struct pciide_channel *cp;
1262 pcireg_t interface;
1263 bus_size_t *cmdsizep, *ctlsizep;
1264 int (*pci_intr) __P((void *));
1265 {
1266 struct channel_softc *wdc_cp = &cp->wdc_channel;
1267
1268 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1269 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1270 pci_intr);
1271 else
1272 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1273 wdc_cp->channel, cmdsizep, ctlsizep);
1274
1275 if (cp->hw_ok == 0)
1276 return;
1277 wdc_cp->data32iot = wdc_cp->cmd_iot;
1278 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1279 wdcattach(wdc_cp);
1280 }
1281
1282 /*
1283 * Generic code to determine whether a channel can be disabled. Return 1
1284 * if the channel can be disabled, 0 if not.
1285 */
1286 int
1287 pciide_chan_candisable(cp)
1288 struct pciide_channel *cp;
1289 {
1290 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1291 struct channel_softc *wdc_cp = &cp->wdc_channel;
1292
1293 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1294 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1295 printf("%s: disabling %s channel (no drives)\n",
1296 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1297 cp->hw_ok = 0;
1298 return 1;
1299 }
1300 return 0;
1301 }
1302
1303 /*
1304 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1305 * Set hw_ok=0 on failure
1306 */
1307 void
1308 pciide_map_compat_intr(pa, cp, compatchan, interface)
1309 struct pci_attach_args *pa;
1310 struct pciide_channel *cp;
1311 int compatchan, interface;
1312 {
1313 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1314 struct channel_softc *wdc_cp = &cp->wdc_channel;
1315
1316 if (cp->hw_ok == 0)
1317 return;
1318 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1319 return;
1320
1321 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1322 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1323 pa, compatchan, pciide_compat_intr, cp);
1324 if (cp->ih == NULL) {
1325 #endif
1326 printf("%s: no compatibility interrupt for use by %s "
1327 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1328 cp->hw_ok = 0;
1329 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1330 }
1331 #endif
1332 }
1333
1334 void
1335 pciide_print_modes(cp)
1336 struct pciide_channel *cp;
1337 {
1338 wdc_print_modes(&cp->wdc_channel);
1339 }
1340
1341 void
1342 default_chip_map(sc, pa)
1343 struct pciide_softc *sc;
1344 struct pci_attach_args *pa;
1345 {
1346 struct pciide_channel *cp;
1347 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1348 pcireg_t csr;
1349 int channel, drive;
1350 struct ata_drive_datas *drvp;
1351 u_int8_t idedma_ctl;
1352 bus_size_t cmdsize, ctlsize;
1353 char *failreason;
1354
1355 if (pciide_chipen(sc, pa) == 0)
1356 return;
1357
1358 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1359 printf("%s: bus-master DMA support present",
1360 sc->sc_wdcdev.sc_dev.dv_xname);
1361 if (sc->sc_pp == &default_product_desc &&
1362 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1363 PCIIDE_OPTIONS_DMA) == 0) {
1364 printf(", but unused (no driver support)");
1365 sc->sc_dma_ok = 0;
1366 } else {
1367 pciide_mapreg_dma(sc, pa);
1368 if (sc->sc_dma_ok != 0)
1369 printf(", used without full driver "
1370 "support");
1371 }
1372 } else {
1373 printf("%s: hardware does not support DMA",
1374 sc->sc_wdcdev.sc_dev.dv_xname);
1375 sc->sc_dma_ok = 0;
1376 }
1377 printf("\n");
1378 if (sc->sc_dma_ok) {
1379 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1380 sc->sc_wdcdev.irqack = pciide_irqack;
1381 }
1382 sc->sc_wdcdev.PIO_cap = 0;
1383 sc->sc_wdcdev.DMA_cap = 0;
1384
1385 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1386 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1387 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1388
1389 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1390 cp = &sc->pciide_channels[channel];
1391 if (pciide_chansetup(sc, channel, interface) == 0)
1392 continue;
1393 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1394 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1395 &ctlsize, pciide_pci_intr);
1396 } else {
1397 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1398 channel, &cmdsize, &ctlsize);
1399 }
1400 if (cp->hw_ok == 0)
1401 continue;
1402 /*
1403 * Check to see if something appears to be there.
1404 */
1405 failreason = NULL;
1406 if (!wdcprobe(&cp->wdc_channel)) {
1407 failreason = "not responding; disabled or no drives?";
1408 goto next;
1409 }
1410 /*
1411 * Now, make sure it's actually attributable to this PCI IDE
1412 * channel by trying to access the channel again while the
1413 * PCI IDE controller's I/O space is disabled. (If the
1414 * channel no longer appears to be there, it belongs to
1415 * this controller.) YUCK!
1416 */
1417 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1418 PCI_COMMAND_STATUS_REG);
1419 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1420 csr & ~PCI_COMMAND_IO_ENABLE);
1421 if (wdcprobe(&cp->wdc_channel))
1422 failreason = "other hardware responding at addresses";
1423 pci_conf_write(sc->sc_pc, sc->sc_tag,
1424 PCI_COMMAND_STATUS_REG, csr);
1425 next:
1426 if (failreason) {
1427 printf("%s: %s channel ignored (%s)\n",
1428 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1429 failreason);
1430 cp->hw_ok = 0;
1431 bus_space_unmap(cp->wdc_channel.cmd_iot,
1432 cp->wdc_channel.cmd_ioh, cmdsize);
1433 if (interface & PCIIDE_INTERFACE_PCI(channel))
1434 bus_space_unmap(cp->wdc_channel.ctl_iot,
1435 cp->ctl_baseioh, ctlsize);
1436 else
1437 bus_space_unmap(cp->wdc_channel.ctl_iot,
1438 cp->wdc_channel.ctl_ioh, ctlsize);
1439 } else {
1440 pciide_map_compat_intr(pa, cp, channel, interface);
1441 }
1442 if (cp->hw_ok) {
1443 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1444 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1445 wdcattach(&cp->wdc_channel);
1446 }
1447 }
1448
1449 if (sc->sc_dma_ok == 0)
1450 return;
1451
1452 /* Allocate DMA maps */
1453 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1454 idedma_ctl = 0;
1455 cp = &sc->pciide_channels[channel];
1456 for (drive = 0; drive < 2; drive++) {
1457 drvp = &cp->wdc_channel.ch_drive[drive];
1458 /* If no drive, skip */
1459 if ((drvp->drive_flags & DRIVE) == 0)
1460 continue;
1461 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1462 continue;
1463 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1464 /* Abort DMA setup */
1465 printf("%s:%d:%d: can't allocate DMA maps, "
1466 "using PIO transfers\n",
1467 sc->sc_wdcdev.sc_dev.dv_xname,
1468 channel, drive);
1469 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1470 }
1471 printf("%s:%d:%d: using DMA data transfers\n",
1472 sc->sc_wdcdev.sc_dev.dv_xname,
1473 channel, drive);
1474 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1475 }
1476 if (idedma_ctl != 0) {
1477 /* Add software bits in status register */
1478 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1479 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1480 idedma_ctl);
1481 }
1482 }
1483 }
1484
1485 void
1486 piix_chip_map(sc, pa)
1487 struct pciide_softc *sc;
1488 struct pci_attach_args *pa;
1489 {
1490 struct pciide_channel *cp;
1491 int channel;
1492 u_int32_t idetim;
1493 bus_size_t cmdsize, ctlsize;
1494
1495 if (pciide_chipen(sc, pa) == 0)
1496 return;
1497
1498 printf("%s: bus-master DMA support present",
1499 sc->sc_wdcdev.sc_dev.dv_xname);
1500 pciide_mapreg_dma(sc, pa);
1501 printf("\n");
1502 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1503 WDC_CAPABILITY_MODE;
1504 if (sc->sc_dma_ok) {
1505 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1506 sc->sc_wdcdev.irqack = pciide_irqack;
1507 switch(sc->sc_pp->ide_product) {
1508 case PCI_PRODUCT_INTEL_82371AB_IDE:
1509 case PCI_PRODUCT_INTEL_82440MX_IDE:
1510 case PCI_PRODUCT_INTEL_82801AA_IDE:
1511 case PCI_PRODUCT_INTEL_82801AB_IDE:
1512 case PCI_PRODUCT_INTEL_82801BA_IDE:
1513 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1514 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1515 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1516 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1517 }
1518 }
1519 sc->sc_wdcdev.PIO_cap = 4;
1520 sc->sc_wdcdev.DMA_cap = 2;
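/* UDMA mode 2 is Ultra/33, mode 4 is Ultra/66, mode 5 is Ultra/100 */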
1521 switch(sc->sc_pp->ide_product) {
1522 case PCI_PRODUCT_INTEL_82801AA_IDE:
1523 sc->sc_wdcdev.UDMA_cap = 4;
1524 break;
1525 case PCI_PRODUCT_INTEL_82801BA_IDE:
1526 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1527 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1528 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1529 sc->sc_wdcdev.UDMA_cap = 5;
1530 break;
1531 default:
1532 sc->sc_wdcdev.UDMA_cap = 2;
1533 }
1534 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1535 sc->sc_wdcdev.set_modes = piix_setup_channel;
1536 else
1537 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1538 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1539 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1540
1541 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1542 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1543 DEBUG_PROBE);
1544 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1545 WDCDEBUG_PRINT((", sidetim=0x%x",
1546 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1547 DEBUG_PROBE);
1548 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1549 WDCDEBUG_PRINT((", udmareg 0x%x",
1550 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1551 DEBUG_PROBE);
1552 }
1553 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1554 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1555 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1556 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1557 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1558 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1559 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1560 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1561 DEBUG_PROBE);
1562 }
1563
1564 }
1565 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1566
1567 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1568 cp = &sc->pciide_channels[channel];
1569 /* PIIX is compat-only */
1570 if (pciide_chansetup(sc, channel, 0) == 0)
1571 continue;
1572 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1573 if ((PIIX_IDETIM_READ(idetim, channel) &
1574 PIIX_IDETIM_IDE) == 0) {
1575 printf("%s: %s channel ignored (disabled)\n",
1576 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1577 continue;
1578 }
1579 /* The PIIX is a compat-only pciide device */
1580 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1581 if (cp->hw_ok == 0)
1582 continue;
1583 if (pciide_chan_candisable(cp)) {
1584 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1585 channel);
1586 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1587 idetim);
1588 }
1589 pciide_map_compat_intr(pa, cp, channel, 0);
1590 if (cp->hw_ok == 0)
1591 continue;
1592 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1593 }
1594
1595 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1596 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1597 DEBUG_PROBE);
1598 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1599 WDCDEBUG_PRINT((", sidetim=0x%x",
1600 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1601 DEBUG_PROBE);
1602 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1603 WDCDEBUG_PRINT((", udmareg 0x%x",
1604 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1605 DEBUG_PROBE);
1606 }
1607 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1608 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1609 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1610 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1611 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1612 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1613 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1614 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1615 DEBUG_PROBE);
1616 }
1617 }
1618 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1619 }
1620
1621 void
1622 piix_setup_channel(chp)
1623 struct channel_softc *chp;
1624 {
1625 u_int8_t mode[2], drive;
1626 u_int32_t oidetim, idetim, idedma_ctl;
1627 struct pciide_channel *cp = (struct pciide_channel*)chp;
1628 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1629 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1630
1631 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1632 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1633 idedma_ctl = 0;
1634
1635 /* set up new idetim: Enable IDE registers decode */
1636 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1637 chp->channel);
1638
1639 /* setup DMA */
1640 pciide_channel_dma_setup(cp);
1641
1642 /*
1643 * Here we have to mess with the drives' modes: the PIIX can't have
1644 * different timings for the master and slave drives.
1645 * We need to find the best combination.
1646 */
1647
1648 /* If both drives support DMA, take the lower mode */
1649 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1650 (drvp[1].drive_flags & DRIVE_DMA)) {
1651 mode[0] = mode[1] =
1652 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1653 drvp[0].DMA_mode = mode[0];
1654 drvp[1].DMA_mode = mode[1];
1655 goto ok;
1656 }
1657 /*
1658 * If only one drive supports DMA, use its mode, and
1659 * put the other one in PIO mode 0 if its mode is not compatible
1660 */
1661 if (drvp[0].drive_flags & DRIVE_DMA) {
1662 mode[0] = drvp[0].DMA_mode;
1663 mode[1] = drvp[1].PIO_mode;
1664 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1665 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1666 mode[1] = drvp[1].PIO_mode = 0;
1667 goto ok;
1668 }
1669 if (drvp[1].drive_flags & DRIVE_DMA) {
1670 mode[1] = drvp[1].DMA_mode;
1671 mode[0] = drvp[0].PIO_mode;
1672 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1673 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1674 mode[0] = drvp[0].PIO_mode = 0;
1675 goto ok;
1676 }
1677 /*
1678 * If neither drive is using DMA, take the lower mode, unless
1679 * one of them is below PIO mode 2
1680 */
1681 if (drvp[0].PIO_mode < 2) {
1682 mode[0] = drvp[0].PIO_mode = 0;
1683 mode[1] = drvp[1].PIO_mode;
1684 } else if (drvp[1].PIO_mode < 2) {
1685 mode[1] = drvp[1].PIO_mode = 0;
1686 mode[0] = drvp[0].PIO_mode;
1687 } else {
1688 mode[0] = mode[1] =
1689 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1690 drvp[0].PIO_mode = mode[0];
1691 drvp[1].PIO_mode = mode[1];
1692 }
1693 ok: /* The modes are set up */
1694 for (drive = 0; drive < 2; drive++) {
1695 if (drvp[drive].drive_flags & DRIVE_DMA) {
1696 idetim |= piix_setup_idetim_timings(
1697 mode[drive], 1, chp->channel);
1698 goto end;
1699 }
1700 }
1701 /* If we get here, neither drive is using DMA */
1702 if (mode[0] >= 2)
1703 idetim |= piix_setup_idetim_timings(
1704 mode[0], 0, chp->channel);
1705 else
1706 idetim |= piix_setup_idetim_timings(
1707 mode[1], 0, chp->channel);
1708 end: /*
1709 * timing mode is now set up in the controller. Enable
1710 * it per-drive
1711 */
1712 for (drive = 0; drive < 2; drive++) {
1713 /* If no drive, skip */
1714 if ((drvp[drive].drive_flags & DRIVE) == 0)
1715 continue;
1716 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1717 if (drvp[drive].drive_flags & DRIVE_DMA)
1718 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1719 }
1720 if (idedma_ctl != 0) {
1721 /* Add software bits in status register */
1722 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1723 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1724 idedma_ctl);
1725 }
1726 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1727 pciide_print_modes(cp);
1728 }
1729
1730 void
1731 piix3_4_setup_channel(chp)
1732 struct channel_softc *chp;
1733 {
1734 struct ata_drive_datas *drvp;
1735 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1736 struct pciide_channel *cp = (struct pciide_channel*)chp;
1737 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1738 int drive;
1739 int channel = chp->channel;
1740
1741 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1742 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1743 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1744 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1745 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1746 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1747 PIIX_SIDETIM_RTC_MASK(channel));
1748
1749 idedma_ctl = 0;
1750 /* If channel disabled, no need to go further */
1751 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1752 return;
1753 /* set up new idetim: Enable IDE registers decode */
1754 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1755
1756 /* setup DMA if needed */
1757 pciide_channel_dma_setup(cp);
1758
1759 for (drive = 0; drive < 2; drive++) {
1760 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1761 PIIX_UDMATIM_SET(0x3, channel, drive));
1762 drvp = &chp->ch_drive[drive];
1763 /* If no drive, skip */
1764 if ((drvp->drive_flags & DRIVE) == 0)
1765 continue;
1766 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1767 (drvp->drive_flags & DRIVE_UDMA) == 0))
1768 goto pio;
1769
1770 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1771 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1772 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1773 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1774 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1775 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1776 ideconf |= PIIX_CONFIG_PINGPONG;
1777 }
1778 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1779 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1780 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1781 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1782 /* setup Ultra/100 */
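			/*
			 * (PIIX_CONFIG_CR is taken here to be the 80-conductor
			 * cable-report bit; if it is clear for this drive, stay
			 * at UDMA2/Ultra-33.)
			 */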
1783 if (drvp->UDMA_mode > 2 &&
1784 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1785 drvp->UDMA_mode = 2;
1786 if (drvp->UDMA_mode > 4) {
1787 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1788 } else {
1789 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1790 if (drvp->UDMA_mode > 2) {
1791 ideconf |= PIIX_CONFIG_UDMA66(channel,
1792 drive);
1793 } else {
1794 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1795 drive);
1796 }
1797 }
1798 }
1799 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1800 /* setup Ultra/66 */
1801 if (drvp->UDMA_mode > 2 &&
1802 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1803 drvp->UDMA_mode = 2;
1804 if (drvp->UDMA_mode > 2)
1805 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1806 else
1807 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1808 }
1809 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1810 (drvp->drive_flags & DRIVE_UDMA)) {
1811 /* use Ultra/DMA */
1812 drvp->drive_flags &= ~DRIVE_DMA;
1813 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1814 udmareg |= PIIX_UDMATIM_SET(
1815 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1816 } else {
1817 /* use Multiword DMA */
1818 drvp->drive_flags &= ~DRIVE_UDMA;
1819 if (drive == 0) {
1820 idetim |= piix_setup_idetim_timings(
1821 drvp->DMA_mode, 1, channel);
1822 } else {
1823 sidetim |= piix_setup_sidetim_timings(
1824 drvp->DMA_mode, 1, channel);
1825 				idetim = PIIX_IDETIM_SET(idetim,
1826 PIIX_IDETIM_SITRE, channel);
1827 }
1828 }
1829 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1830
1831 pio: /* use PIO mode */
1832 idetim |= piix_setup_idetim_drvs(drvp);
1833 if (drive == 0) {
1834 idetim |= piix_setup_idetim_timings(
1835 drvp->PIO_mode, 0, channel);
1836 } else {
1837 sidetim |= piix_setup_sidetim_timings(
1838 drvp->PIO_mode, 0, channel);
1839 			idetim = PIIX_IDETIM_SET(idetim,
1840 PIIX_IDETIM_SITRE, channel);
1841 }
1842 }
1843 if (idedma_ctl != 0) {
1844 /* Add software bits in status register */
1845 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1846 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1847 idedma_ctl);
1848 }
1849 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1850 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1851 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1852 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1853 pciide_print_modes(cp);
1854 }
1855
1856
1857 /* setup ISP and RTC fields, based on mode */
1858 static u_int32_t
1859 piix_setup_idetim_timings(mode, dma, channel)
1860 u_int8_t mode;
1861 u_int8_t dma;
1862 u_int8_t channel;
1863 {
1864
1865 if (dma)
1866 return PIIX_IDETIM_SET(0,
1867 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1868 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1869 channel);
1870 else
1871 return PIIX_IDETIM_SET(0,
1872 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1873 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1874 channel);
1875 }
1876
1877 /* setup DTE, PPE, IE and TIME field based on PIO mode */
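/*
 * (For reference, per Intel's PIIX/PIIX4 documentation: TIME selects the
 * fast timing bank for the drive, IE enables IORDY sampling, PPE enables
 * prefetch and posting, and DTE restricts the fast timings to DMA cycles
 * only.)
 */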
1878 static u_int32_t
1879 piix_setup_idetim_drvs(drvp)
1880 struct ata_drive_datas *drvp;
1881 {
1882 u_int32_t ret = 0;
1883 struct channel_softc *chp = drvp->chnl_softc;
1884 u_int8_t channel = chp->channel;
1885 u_int8_t drive = drvp->drive;
1886
1887 /*
1888 	 * If the drive is using UDMA, the timing setup is independent,
1889 	 * so just check DMA and PIO here.
1890 */
1891 if (drvp->drive_flags & DRIVE_DMA) {
1892 		/* if the drive is in DMA mode 0, use compatible timings */
1893 if ((drvp->drive_flags & DRIVE_DMA) &&
1894 drvp->DMA_mode == 0) {
1895 drvp->PIO_mode = 0;
1896 return ret;
1897 }
1898 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1899 /*
1900 		 * If PIO and DMA timings are the same, use fast timings for PIO
1901 		 * too; otherwise fall back to compatible timings.
1902 */
1903 if ((piix_isp_pio[drvp->PIO_mode] !=
1904 piix_isp_dma[drvp->DMA_mode]) ||
1905 (piix_rtc_pio[drvp->PIO_mode] !=
1906 piix_rtc_dma[drvp->DMA_mode]))
1907 drvp->PIO_mode = 0;
1908 /* if PIO mode <= 2, use compat timings for PIO */
1909 if (drvp->PIO_mode <= 2) {
1910 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1911 channel);
1912 return ret;
1913 }
1914 }
1915
1916 /*
1917 * Now setup PIO modes. If mode < 2, use compat timings.
1918 * Else enable fast timings. Enable IORDY and prefetch/post
1919 * if PIO mode >= 3.
1920 */
1921
1922 if (drvp->PIO_mode < 2)
1923 return ret;
1924
1925 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1926 if (drvp->PIO_mode >= 3) {
1927 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1928 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1929 }
1930 return ret;
1931 }
1932
1933 /* setup values in SIDETIM registers, based on mode */
1934 static u_int32_t
1935 piix_setup_sidetim_timings(mode, dma, channel)
1936 u_int8_t mode;
1937 u_int8_t dma;
1938 u_int8_t channel;
1939 {
1940 if (dma)
1941 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1942 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1943 else
1944 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1945 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1946 }
1947
1948 void
1949 amd7x6_chip_map(sc, pa)
1950 struct pciide_softc *sc;
1951 struct pci_attach_args *pa;
1952 {
1953 struct pciide_channel *cp;
1954 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1955 int channel;
1956 pcireg_t chanenable;
1957 bus_size_t cmdsize, ctlsize;
1958
1959 if (pciide_chipen(sc, pa) == 0)
1960 return;
1961 printf("%s: bus-master DMA support present",
1962 sc->sc_wdcdev.sc_dev.dv_xname);
1963 pciide_mapreg_dma(sc, pa);
1964 printf("\n");
1965 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1966 WDC_CAPABILITY_MODE;
1967 if (sc->sc_dma_ok) {
1968 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1969 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1970 sc->sc_wdcdev.irqack = pciide_irqack;
1971 }
1972 sc->sc_wdcdev.PIO_cap = 4;
1973 sc->sc_wdcdev.DMA_cap = 2;
1974
1975 switch (sc->sc_pp->ide_product) {
1976 case PCI_PRODUCT_AMD_PBC766_IDE:
1977 case PCI_PRODUCT_AMD_PBC768_IDE:
1978 sc->sc_wdcdev.UDMA_cap = 5;
1979 break;
1980 default:
1981 sc->sc_wdcdev.UDMA_cap = 4;
1982 }
1983 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1984 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1985 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1986 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1987
1988 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1989 DEBUG_PROBE);
1990 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1991 cp = &sc->pciide_channels[channel];
1992 if (pciide_chansetup(sc, channel, interface) == 0)
1993 continue;
1994
1995 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1996 printf("%s: %s channel ignored (disabled)\n",
1997 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1998 continue;
1999 }
2000 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2001 pciide_pci_intr);
2002
2003 if (pciide_chan_candisable(cp))
2004 chanenable &= ~AMD7X6_CHAN_EN(channel);
2005 pciide_map_compat_intr(pa, cp, channel, interface);
2006 if (cp->hw_ok == 0)
2007 continue;
2008
2009 amd7x6_setup_channel(&cp->wdc_channel);
2010 }
2011 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2012 chanenable);
2013 return;
2014 }
2015
2016 void
2017 amd7x6_setup_channel(chp)
2018 struct channel_softc *chp;
2019 {
2020 u_int32_t udmatim_reg, datatim_reg;
2021 u_int8_t idedma_ctl;
2022 int mode, drive;
2023 struct ata_drive_datas *drvp;
2024 struct pciide_channel *cp = (struct pciide_channel*)chp;
2025 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2026 #ifndef PCIIDE_AMD756_ENABLEDMA
2027 int rev = PCI_REVISION(
2028 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2029 #endif
2030
2031 idedma_ctl = 0;
2032 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2033 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2034 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2035 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2036
2037 /* setup DMA if needed */
2038 pciide_channel_dma_setup(cp);
2039
2040 for (drive = 0; drive < 2; drive++) {
2041 drvp = &chp->ch_drive[drive];
2042 /* If no drive, skip */
2043 if ((drvp->drive_flags & DRIVE) == 0)
2044 continue;
2045 /* add timing values, setup DMA if needed */
2046 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2047 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2048 mode = drvp->PIO_mode;
2049 goto pio;
2050 }
2051 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2052 (drvp->drive_flags & DRIVE_UDMA)) {
2053 /* use Ultra/DMA */
2054 drvp->drive_flags &= ~DRIVE_DMA;
2055 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2056 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2057 AMD7X6_UDMA_TIME(chp->channel, drive,
2058 amd7x6_udma_tim[drvp->UDMA_mode]);
2059 /* can use PIO timings, MW DMA unused */
2060 mode = drvp->PIO_mode;
2061 } else {
2062 /* use Multiword DMA, but only if revision is OK */
2063 drvp->drive_flags &= ~DRIVE_UDMA;
2064 #ifndef PCIIDE_AMD756_ENABLEDMA
2065 /*
2066 			 * The chip bug causes a hard hang if triggered, but the
2067 			 * workaround (disabling multi-word DMA) doesn't seem to
2068 			 * be necessary with all drives, so it can be turned off
2069 			 * by defining PCIIDE_AMD756_ENABLEDMA.
2070 */
2071 if (sc->sc_pp->ide_product ==
2072 PCI_PRODUCT_AMD_PBC756_IDE &&
2073 AMD756_CHIPREV_DISABLEDMA(rev)) {
2074 printf("%s:%d:%d: multi-word DMA disabled due "
2075 "to chip revision\n",
2076 sc->sc_wdcdev.sc_dev.dv_xname,
2077 chp->channel, drive);
2078 mode = drvp->PIO_mode;
2079 drvp->drive_flags &= ~DRIVE_DMA;
2080 goto pio;
2081 }
2082 #endif
2083 /* mode = min(pio, dma+2) */
2084 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2085 mode = drvp->PIO_mode;
2086 else
2087 mode = drvp->DMA_mode + 2;
2088 }
2089 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2090
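		/*
		 * (The DATATIM pulse/recovery values programmed at the "pio:"
		 * label below appear to be used for both PIO and multi-word DMA
		 * data transfers, which is why the PIO and DMA modes are kept
		 * coupled as mode and mode - 2 above.)
		 */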
2091 pio: /* setup PIO mode */
2092 if (mode <= 2) {
2093 drvp->DMA_mode = 0;
2094 drvp->PIO_mode = 0;
2095 mode = 0;
2096 } else {
2097 drvp->PIO_mode = mode;
2098 drvp->DMA_mode = mode - 2;
2099 }
2100 datatim_reg |=
2101 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2102 amd7x6_pio_set[mode]) |
2103 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2104 amd7x6_pio_rec[mode]);
2105 }
2106 if (idedma_ctl != 0) {
2107 /* Add software bits in status register */
2108 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2109 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2110 idedma_ctl);
2111 }
2112 pciide_print_modes(cp);
2113 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2114 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2115 }
2116
2117 void
2118 apollo_chip_map(sc, pa)
2119 struct pciide_softc *sc;
2120 struct pci_attach_args *pa;
2121 {
2122 struct pciide_channel *cp;
2123 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2124 int channel;
2125 u_int32_t ideconf;
2126 bus_size_t cmdsize, ctlsize;
2127 pcitag_t pcib_tag;
2128 pcireg_t pcib_id, pcib_class;
2129
2130 if (pciide_chipen(sc, pa) == 0)
2131 return;
2132 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2133 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2134 /* and read ID and rev of the ISA bridge */
2135 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2136 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2137 printf(": VIA Technologies ");
2138 switch (PCI_PRODUCT(pcib_id)) {
2139 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2140 printf("VT82C586 (Apollo VP) ");
2141 		if (PCI_REVISION(pcib_class) >= 0x02) {
2142 printf("ATA33 controller\n");
2143 sc->sc_wdcdev.UDMA_cap = 2;
2144 } else {
2145 printf("controller\n");
2146 sc->sc_wdcdev.UDMA_cap = 0;
2147 }
2148 break;
2149 case PCI_PRODUCT_VIATECH_VT82C596A:
2150 printf("VT82C596A (Apollo Pro) ");
2151 if (PCI_REVISION(pcib_class) >= 0x12) {
2152 printf("ATA66 controller\n");
2153 sc->sc_wdcdev.UDMA_cap = 4;
2154 } else {
2155 printf("ATA33 controller\n");
2156 sc->sc_wdcdev.UDMA_cap = 2;
2157 }
2158 break;
2159 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2160 printf("VT82C686A (Apollo KX133) ");
2161 if (PCI_REVISION(pcib_class) >= 0x40) {
2162 printf("ATA100 controller\n");
2163 sc->sc_wdcdev.UDMA_cap = 5;
2164 } else {
2165 printf("ATA66 controller\n");
2166 sc->sc_wdcdev.UDMA_cap = 4;
2167 }
2168 break;
2169 case PCI_PRODUCT_VIATECH_VT8233:
2170 printf("VT8233 ATA100 controller\n");
2171 sc->sc_wdcdev.UDMA_cap = 5;
2172 break;
2173 default:
2174 printf("unknown ATA controller\n");
2175 sc->sc_wdcdev.UDMA_cap = 0;
2176 }
2177
2178 printf("%s: bus-master DMA support present",
2179 sc->sc_wdcdev.sc_dev.dv_xname);
2180 pciide_mapreg_dma(sc, pa);
2181 printf("\n");
2182 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2183 WDC_CAPABILITY_MODE;
2184 if (sc->sc_dma_ok) {
2185 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2186 sc->sc_wdcdev.irqack = pciide_irqack;
2187 if (sc->sc_wdcdev.UDMA_cap > 0)
2188 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2189 }
2190 sc->sc_wdcdev.PIO_cap = 4;
2191 sc->sc_wdcdev.DMA_cap = 2;
2192 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2193 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2194 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2195
2196 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2197 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2198 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2199 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2200 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2201 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2202 DEBUG_PROBE);
2203
2204 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2205 cp = &sc->pciide_channels[channel];
2206 if (pciide_chansetup(sc, channel, interface) == 0)
2207 continue;
2208
2209 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2210 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2211 printf("%s: %s channel ignored (disabled)\n",
2212 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2213 continue;
2214 }
2215 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2216 pciide_pci_intr);
2217 if (cp->hw_ok == 0)
2218 continue;
2219 if (pciide_chan_candisable(cp)) {
2220 ideconf &= ~APO_IDECONF_EN(channel);
2221 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2222 ideconf);
2223 }
2224 pciide_map_compat_intr(pa, cp, channel, interface);
2225
2226 if (cp->hw_ok == 0)
2227 continue;
2228 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2229 }
2230 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2231 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2232 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2233 }
2234
2235 void
2236 apollo_setup_channel(chp)
2237 struct channel_softc *chp;
2238 {
2239 u_int32_t udmatim_reg, datatim_reg;
2240 u_int8_t idedma_ctl;
2241 int mode, drive;
2242 struct ata_drive_datas *drvp;
2243 struct pciide_channel *cp = (struct pciide_channel*)chp;
2244 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2245
2246 idedma_ctl = 0;
2247 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2248 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2249 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2250 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2251
2252 /* setup DMA if needed */
2253 pciide_channel_dma_setup(cp);
2254
2255 for (drive = 0; drive < 2; drive++) {
2256 drvp = &chp->ch_drive[drive];
2257 /* If no drive, skip */
2258 if ((drvp->drive_flags & DRIVE) == 0)
2259 continue;
2260 /* add timing values, setup DMA if needed */
2261 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2262 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2263 mode = drvp->PIO_mode;
2264 goto pio;
2265 }
2266 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2267 (drvp->drive_flags & DRIVE_UDMA)) {
2268 /* use Ultra/DMA */
2269 drvp->drive_flags &= ~DRIVE_DMA;
2270 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2271 APO_UDMA_EN_MTH(chp->channel, drive);
2272 if (sc->sc_wdcdev.UDMA_cap == 5) {
2273 /* 686b */
2274 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2275 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2276 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2277 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2278 /* 596b or 686a */
2279 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2280 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2281 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2282 } else {
2283 /* 596a or 586b */
2284 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2285 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2286 }
2287 /* can use PIO timings, MW DMA unused */
2288 mode = drvp->PIO_mode;
2289 } else {
2290 /* use Multiword DMA */
2291 drvp->drive_flags &= ~DRIVE_UDMA;
2292 /* mode = min(pio, dma+2) */
2293 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2294 mode = drvp->PIO_mode;
2295 else
2296 mode = drvp->DMA_mode + 2;
2297 }
2298 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2299
2300 pio: /* setup PIO mode */
2301 if (mode <= 2) {
2302 drvp->DMA_mode = 0;
2303 drvp->PIO_mode = 0;
2304 mode = 0;
2305 } else {
2306 drvp->PIO_mode = mode;
2307 drvp->DMA_mode = mode - 2;
2308 }
2309 datatim_reg |=
2310 APO_DATATIM_PULSE(chp->channel, drive,
2311 apollo_pio_set[mode]) |
2312 APO_DATATIM_RECOV(chp->channel, drive,
2313 apollo_pio_rec[mode]);
2314 }
2315 if (idedma_ctl != 0) {
2316 /* Add software bits in status register */
2317 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2318 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2319 idedma_ctl);
2320 }
2321 pciide_print_modes(cp);
2322 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2323 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2324 }
2325
2326 void
2327 cmd_channel_map(pa, sc, channel)
2328 struct pci_attach_args *pa;
2329 struct pciide_softc *sc;
2330 int channel;
2331 {
2332 struct pciide_channel *cp = &sc->pciide_channels[channel];
2333 bus_size_t cmdsize, ctlsize;
2334 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2335 int interface, one_channel;
2336
2337 /*
2338 * The 0648/0649 can be told to identify as a RAID controller.
2339 	 * In this case, we have to fake the interface
2340 */
2341 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2342 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2343 PCIIDE_INTERFACE_SETTABLE(1);
2344 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2345 CMD_CONF_DSA1)
2346 interface |= PCIIDE_INTERFACE_PCI(0) |
2347 PCIIDE_INTERFACE_PCI(1);
2348 } else {
2349 interface = PCI_INTERFACE(pa->pa_class);
2350 }
2351
2352 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2353 cp->name = PCIIDE_CHANNEL_NAME(channel);
2354 cp->wdc_channel.channel = channel;
2355 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2356
2357 /*
2358 	 * Older CMD64x chips don't have independent channels
2359 */
2360 switch (sc->sc_pp->ide_product) {
2361 case PCI_PRODUCT_CMDTECH_649:
2362 one_channel = 0;
2363 break;
2364 default:
2365 one_channel = 1;
2366 break;
2367 }
2368
2369 if (channel > 0 && one_channel) {
2370 cp->wdc_channel.ch_queue =
2371 sc->pciide_channels[0].wdc_channel.ch_queue;
2372 } else {
2373 cp->wdc_channel.ch_queue =
2374 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2375 }
2376 if (cp->wdc_channel.ch_queue == NULL) {
2377 printf("%s %s channel: "
2378 "can't allocate memory for command queue",
2379 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2380 return;
2381 }
2382
2383 printf("%s: %s channel %s to %s mode\n",
2384 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2385 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2386 "configured" : "wired",
2387 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2388 "native-PCI" : "compatibility");
2389
2390 /*
2391 * with a CMD PCI64x, if we get here, the first channel is enabled:
2392 * there's no way to disable the first channel without disabling
2393 * the whole device
2394 */
2395 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2396 printf("%s: %s channel ignored (disabled)\n",
2397 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2398 return;
2399 }
2400
2401 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2402 if (cp->hw_ok == 0)
2403 return;
2404 if (channel == 1) {
2405 if (pciide_chan_candisable(cp)) {
2406 ctrl &= ~CMD_CTRL_2PORT;
2407 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2408 CMD_CTRL, ctrl);
2409 }
2410 }
2411 pciide_map_compat_intr(pa, cp, channel, interface);
2412 }
2413
2414 int
2415 cmd_pci_intr(arg)
2416 void *arg;
2417 {
2418 struct pciide_softc *sc = arg;
2419 struct pciide_channel *cp;
2420 struct channel_softc *wdc_cp;
2421 int i, rv, crv;
2422 u_int32_t priirq, secirq;
2423
2424 rv = 0;
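	/*
	 * The per-channel interrupt-pending bits live in two different
	 * config registers: CMD_CONF for the primary channel and
	 * CMD_ARTTIM23 for the secondary one.
	 */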
2425 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2426 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2427 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2428 cp = &sc->pciide_channels[i];
2429 wdc_cp = &cp->wdc_channel;
2430 /* If a compat channel skip. */
2431 if (cp->compat)
2432 continue;
2433 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2434 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2435 crv = wdcintr(wdc_cp);
2436 if (crv == 0)
2437 printf("%s:%d: bogus intr\n",
2438 sc->sc_wdcdev.sc_dev.dv_xname, i);
2439 else
2440 rv = 1;
2441 }
2442 }
2443 return rv;
2444 }
2445
2446 void
2447 cmd_chip_map(sc, pa)
2448 struct pciide_softc *sc;
2449 struct pci_attach_args *pa;
2450 {
2451 int channel;
2452
2453 /*
2454 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2455 	 * and of the base address registers can be disabled at
2456 	 * the hardware level. In this case, the device is wired
2457 * in compat mode and its first channel is always enabled,
2458 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2459 * In fact, it seems that the first channel of the CMD PCI0640
2460 * can't be disabled.
2461 */
2462
2463 #ifdef PCIIDE_CMD064x_DISABLE
2464 if (pciide_chipen(sc, pa) == 0)
2465 return;
2466 #endif
2467
2468 printf("%s: hardware does not support DMA\n",
2469 sc->sc_wdcdev.sc_dev.dv_xname);
2470 sc->sc_dma_ok = 0;
2471
2472 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2473 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2474 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2475
2476 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2477 cmd_channel_map(pa, sc, channel);
2478 }
2479 }
2480
2481 void
2482 cmd0643_9_chip_map(sc, pa)
2483 struct pciide_softc *sc;
2484 struct pci_attach_args *pa;
2485 {
2486 struct pciide_channel *cp;
2487 int channel;
2488 pcireg_t rev = PCI_REVISION(pa->pa_class);
2489
2490 /*
2491 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2492 	 * and of the base address registers can be disabled at
2493 	 * the hardware level. In this case, the device is wired
2494 * in compat mode and its first channel is always enabled,
2495 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2496 * In fact, it seems that the first channel of the CMD PCI0640
2497 * can't be disabled.
2498 */
2499
2500 #ifdef PCIIDE_CMD064x_DISABLE
2501 if (pciide_chipen(sc, pa) == 0)
2502 return;
2503 #endif
2504 printf("%s: bus-master DMA support present",
2505 sc->sc_wdcdev.sc_dev.dv_xname);
2506 pciide_mapreg_dma(sc, pa);
2507 printf("\n");
2508 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2509 WDC_CAPABILITY_MODE;
2510 if (sc->sc_dma_ok) {
2511 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2512 switch (sc->sc_pp->ide_product) {
2513 case PCI_PRODUCT_CMDTECH_649:
2514 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2515 sc->sc_wdcdev.UDMA_cap = 5;
2516 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2517 break;
2518 case PCI_PRODUCT_CMDTECH_648:
2519 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2520 sc->sc_wdcdev.UDMA_cap = 4;
2521 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2522 break;
2523 case PCI_PRODUCT_CMDTECH_646:
2524 if (rev >= CMD0646U2_REV) {
2525 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2526 sc->sc_wdcdev.UDMA_cap = 2;
2527 } else if (rev >= CMD0646U_REV) {
2528 /*
2529 * Linux's driver claims that the 646U is broken
2530 * with UDMA. Only enable it if we know what we're
2531 * doing
2532 */
2533 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2534 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2535 sc->sc_wdcdev.UDMA_cap = 2;
2536 #endif
2537 /* explicitly disable UDMA */
2538 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2539 CMD_UDMATIM(0), 0);
2540 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2541 CMD_UDMATIM(1), 0);
2542 }
2543 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2544 break;
2545 default:
2546 sc->sc_wdcdev.irqack = pciide_irqack;
2547 }
2548 }
2549
2550 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2551 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2552 sc->sc_wdcdev.PIO_cap = 4;
2553 sc->sc_wdcdev.DMA_cap = 2;
2554 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2555
2556 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2557 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2558 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2559 DEBUG_PROBE);
2560
2561 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2562 cp = &sc->pciide_channels[channel];
2563 cmd_channel_map(pa, sc, channel);
2564 if (cp->hw_ok == 0)
2565 continue;
2566 cmd0643_9_setup_channel(&cp->wdc_channel);
2567 }
2568 /*
2569 * note - this also makes sure we clear the irq disable and reset
2570 * bits
2571 */
2572 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2573 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2574 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2575 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2576 DEBUG_PROBE);
2577 }
2578
2579 void
2580 cmd0643_9_setup_channel(chp)
2581 struct channel_softc *chp;
2582 {
2583 struct ata_drive_datas *drvp;
2584 u_int8_t tim;
2585 u_int32_t idedma_ctl, udma_reg;
2586 int drive;
2587 struct pciide_channel *cp = (struct pciide_channel*)chp;
2588 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2589
2590 idedma_ctl = 0;
2591 /* setup DMA if needed */
2592 pciide_channel_dma_setup(cp);
2593
2594 for (drive = 0; drive < 2; drive++) {
2595 drvp = &chp->ch_drive[drive];
2596 /* If no drive, skip */
2597 if ((drvp->drive_flags & DRIVE) == 0)
2598 continue;
2599 /* add timing values, setup DMA if needed */
2600 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2601 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2602 if (drvp->drive_flags & DRIVE_UDMA) {
2603 /* UltraDMA on a 646U2, 0648 or 0649 */
2604 drvp->drive_flags &= ~DRIVE_DMA;
2605 udma_reg = pciide_pci_read(sc->sc_pc,
2606 sc->sc_tag, CMD_UDMATIM(chp->channel));
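				/*
				 * Limit to UDMA2 (Ultra/33) if the BICSR bit
				 * for this channel does not report an 80-wire
				 * cable.
				 */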
2607 if (drvp->UDMA_mode > 2 &&
2608 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2609 CMD_BICSR) &
2610 CMD_BICSR_80(chp->channel)) == 0)
2611 drvp->UDMA_mode = 2;
2612 if (drvp->UDMA_mode > 2)
2613 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2614 else if (sc->sc_wdcdev.UDMA_cap > 2)
2615 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2616 udma_reg |= CMD_UDMATIM_UDMA(drive);
2617 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2618 CMD_UDMATIM_TIM_OFF(drive));
2619 udma_reg |=
2620 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2621 CMD_UDMATIM_TIM_OFF(drive));
2622 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2623 CMD_UDMATIM(chp->channel), udma_reg);
2624 } else {
2625 /*
2626 * use Multiword DMA.
2627 * Timings will be used for both PIO and DMA,
2628 				 * so adjust DMA mode if needed.
2629 				 * If we have a 0646U2/8/9, turn off UDMA.
2630 */
2631 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2632 udma_reg = pciide_pci_read(sc->sc_pc,
2633 sc->sc_tag,
2634 CMD_UDMATIM(chp->channel));
2635 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2636 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2637 CMD_UDMATIM(chp->channel),
2638 udma_reg);
2639 }
2640 if (drvp->PIO_mode >= 3 &&
2641 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2642 drvp->DMA_mode = drvp->PIO_mode - 2;
2643 }
2644 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2645 }
2646 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2647 }
2648 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2649 CMD_DATA_TIM(chp->channel, drive), tim);
2650 }
2651 if (idedma_ctl != 0) {
2652 /* Add software bits in status register */
2653 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2654 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2655 idedma_ctl);
2656 }
2657 pciide_print_modes(cp);
2658 }
2659
2660 void
2661 cmd646_9_irqack(chp)
2662 struct channel_softc *chp;
2663 {
2664 u_int32_t priirq, secirq;
2665 struct pciide_channel *cp = (struct pciide_channel*)chp;
2666 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2667
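	/*
	 * (Reading the register and writing the same value back is what
	 * acknowledges the channel's interrupt here; the pending bits are
	 * presumably write-1-to-clear.)
	 */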
2668 if (chp->channel == 0) {
2669 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2670 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2671 } else {
2672 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2673 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2674 }
2675 pciide_irqack(chp);
2676 }
2677
2678 void
2679 cy693_chip_map(sc, pa)
2680 struct pciide_softc *sc;
2681 struct pci_attach_args *pa;
2682 {
2683 struct pciide_channel *cp;
2684 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2685 bus_size_t cmdsize, ctlsize;
2686
2687 if (pciide_chipen(sc, pa) == 0)
2688 return;
2689 /*
2690 * this chip has 2 PCI IDE functions, one for primary and one for
2691 * secondary. So we need to call pciide_mapregs_compat() with
2692 * the real channel
2693 */
2694 if (pa->pa_function == 1) {
2695 sc->sc_cy_compatchan = 0;
2696 } else if (pa->pa_function == 2) {
2697 sc->sc_cy_compatchan = 1;
2698 } else {
2699 printf("%s: unexpected PCI function %d\n",
2700 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2701 return;
2702 }
2703 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2704 printf("%s: bus-master DMA support present",
2705 sc->sc_wdcdev.sc_dev.dv_xname);
2706 pciide_mapreg_dma(sc, pa);
2707 } else {
2708 printf("%s: hardware does not support DMA",
2709 sc->sc_wdcdev.sc_dev.dv_xname);
2710 sc->sc_dma_ok = 0;
2711 }
2712 printf("\n");
2713
2714 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2715 if (sc->sc_cy_handle == NULL) {
2716 printf("%s: unable to map hyperCache control registers\n",
2717 sc->sc_wdcdev.sc_dev.dv_xname);
2718 sc->sc_dma_ok = 0;
2719 }
2720
2721 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2722 WDC_CAPABILITY_MODE;
2723 if (sc->sc_dma_ok) {
2724 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2725 sc->sc_wdcdev.irqack = pciide_irqack;
2726 }
2727 sc->sc_wdcdev.PIO_cap = 4;
2728 sc->sc_wdcdev.DMA_cap = 2;
2729 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2730
2731 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2732 sc->sc_wdcdev.nchannels = 1;
2733
2734 /* Only one channel for this chip; if we are here it's enabled */
2735 cp = &sc->pciide_channels[0];
2736 sc->wdc_chanarray[0] = &cp->wdc_channel;
2737 cp->name = PCIIDE_CHANNEL_NAME(0);
2738 cp->wdc_channel.channel = 0;
2739 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2740 cp->wdc_channel.ch_queue =
2741 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2742 if (cp->wdc_channel.ch_queue == NULL) {
2743 printf("%s primary channel: "
2744 "can't allocate memory for command queue",
2745 sc->sc_wdcdev.sc_dev.dv_xname);
2746 return;
2747 }
2748 printf("%s: primary channel %s to ",
2749 sc->sc_wdcdev.sc_dev.dv_xname,
2750 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2751 "configured" : "wired");
2752 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2753 printf("native-PCI");
2754 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2755 pciide_pci_intr);
2756 } else {
2757 printf("compatibility");
2758 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2759 &cmdsize, &ctlsize);
2760 }
2761 printf(" mode\n");
2762 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2763 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2764 wdcattach(&cp->wdc_channel);
2765 if (pciide_chan_candisable(cp)) {
2766 pci_conf_write(sc->sc_pc, sc->sc_tag,
2767 PCI_COMMAND_STATUS_REG, 0);
2768 }
2769 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2770 if (cp->hw_ok == 0)
2771 return;
2772 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2773 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2774 cy693_setup_channel(&cp->wdc_channel);
2775 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2776 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2777 }
2778
2779 void
2780 cy693_setup_channel(chp)
2781 struct channel_softc *chp;
2782 {
2783 struct ata_drive_datas *drvp;
2784 int drive;
2785 u_int32_t cy_cmd_ctrl;
2786 u_int32_t idedma_ctl;
2787 struct pciide_channel *cp = (struct pciide_channel*)chp;
2788 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2789 int dma_mode = -1;
2790
2791 cy_cmd_ctrl = idedma_ctl = 0;
2792
2793 /* setup DMA if needed */
2794 pciide_channel_dma_setup(cp);
2795
2796 for (drive = 0; drive < 2; drive++) {
2797 drvp = &chp->ch_drive[drive];
2798 /* If no drive, skip */
2799 if ((drvp->drive_flags & DRIVE) == 0)
2800 continue;
2801 /* add timing values, setup DMA if needed */
2802 if (drvp->drive_flags & DRIVE_DMA) {
2803 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2804 /* use Multiword DMA */
2805 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2806 dma_mode = drvp->DMA_mode;
2807 }
2808 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2809 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2810 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2811 CY_CMD_CTRL_IOW_REC_OFF(drive));
2812 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2813 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2814 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2815 CY_CMD_CTRL_IOR_REC_OFF(drive));
2816 }
2817 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
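	/*
	 * (There appears to be only one multi-word DMA timing per channel on
	 * this chip, written through the hyperCache handle below, so the
	 * lowest DMA mode found above is recorded for both drives.)
	 */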
2818 chp->ch_drive[0].DMA_mode = dma_mode;
2819 chp->ch_drive[1].DMA_mode = dma_mode;
2820
2821 if (dma_mode == -1)
2822 dma_mode = 0;
2823
2824 if (sc->sc_cy_handle != NULL) {
2825 /* Note: `multiple' is implied. */
2826 cy82c693_write(sc->sc_cy_handle,
2827 (sc->sc_cy_compatchan == 0) ?
2828 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2829 }
2830
2831 pciide_print_modes(cp);
2832
2833 if (idedma_ctl != 0) {
2834 /* Add software bits in status register */
2835 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2836 IDEDMA_CTL, idedma_ctl);
2837 }
2838 }
2839
2840 static int
2841 sis_hostbr_match(pa)
2842 struct pci_attach_args *pa;
2843 {
2844 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2845 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2846 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2847 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2848 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2849 }
2850
2851 void
2852 sis_chip_map(sc, pa)
2853 struct pciide_softc *sc;
2854 struct pci_attach_args *pa;
2855 {
2856 struct pciide_channel *cp;
2857 int channel;
2858 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2859 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2860 pcireg_t rev = PCI_REVISION(pa->pa_class);
2861 bus_size_t cmdsize, ctlsize;
2862 pcitag_t pchb_tag;
2863 pcireg_t pchb_id, pchb_class;
2864
2865 if (pciide_chipen(sc, pa) == 0)
2866 return;
2867 printf("%s: bus-master DMA support present",
2868 sc->sc_wdcdev.sc_dev.dv_xname);
2869 pciide_mapreg_dma(sc, pa);
2870 printf("\n");
2871
2872 /* get a PCI tag for the host bridge (function 0 of the same device) */
2873 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2874 	/* and read ID and rev of the host bridge */
2875 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2876 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2877
2878 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2879 WDC_CAPABILITY_MODE;
2880 if (sc->sc_dma_ok) {
2881 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2882 sc->sc_wdcdev.irqack = pciide_irqack;
2883 /*
2884 		 * controllers associated with a rev 0x2 530 Host to PCI Bridge
2885 * have problems with UDMA (info provided by Christos)
2886 */
2887 if (rev >= 0xd0 &&
2888 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2889 PCI_REVISION(pchb_class) >= 0x03))
2890 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2891 }
2892
2893 sc->sc_wdcdev.PIO_cap = 4;
2894 sc->sc_wdcdev.DMA_cap = 2;
2895 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2896 /*
2897 		 * Use UDMA/100 if a newer SiS host bridge (645/650/730/735)
2898 		 * is found, and UDMA/33 on other chipsets.
2899 */
2900 sc->sc_wdcdev.UDMA_cap =
2901 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2902 sc->sc_wdcdev.set_modes = sis_setup_channel;
2903
2904 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2905 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2906
2907 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2908 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2909 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2910
2911 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2912 cp = &sc->pciide_channels[channel];
2913 if (pciide_chansetup(sc, channel, interface) == 0)
2914 continue;
2915 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2916 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2917 printf("%s: %s channel ignored (disabled)\n",
2918 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2919 continue;
2920 }
2921 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2922 pciide_pci_intr);
2923 if (cp->hw_ok == 0)
2924 continue;
2925 if (pciide_chan_candisable(cp)) {
2926 if (channel == 0)
2927 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2928 else
2929 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2930 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2931 sis_ctr0);
2932 }
2933 pciide_map_compat_intr(pa, cp, channel, interface);
2934 if (cp->hw_ok == 0)
2935 continue;
2936 sis_setup_channel(&cp->wdc_channel);
2937 }
2938 }
2939
2940 void
2941 sis_setup_channel(chp)
2942 struct channel_softc *chp;
2943 {
2944 struct ata_drive_datas *drvp;
2945 int drive;
2946 u_int32_t sis_tim;
2947 u_int32_t idedma_ctl;
2948 struct pciide_channel *cp = (struct pciide_channel*)chp;
2949 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2950
2951 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2952 "channel %d 0x%x\n", chp->channel,
2953 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2954 DEBUG_PROBE);
2955 sis_tim = 0;
2956 idedma_ctl = 0;
2957 /* setup DMA if needed */
2958 pciide_channel_dma_setup(cp);
2959
2960 for (drive = 0; drive < 2; drive++) {
2961 drvp = &chp->ch_drive[drive];
2962 /* If no drive, skip */
2963 if ((drvp->drive_flags & DRIVE) == 0)
2964 continue;
2965 /* add timing values, setup DMA if needed */
2966 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2967 (drvp->drive_flags & DRIVE_UDMA) == 0)
2968 goto pio;
2969
2970 if (drvp->drive_flags & DRIVE_UDMA) {
2971 /* use Ultra/DMA */
2972 drvp->drive_flags &= ~DRIVE_DMA;
2973 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2974 SIS_TIM_UDMA_TIME_OFF(drive);
2975 sis_tim |= SIS_TIM_UDMA_EN(drive);
2976 } else {
2977 /*
2978 * use Multiword DMA
2979 * Timings will be used for both PIO and DMA,
2980 * so adjust DMA mode if needed
2981 */
2982 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2983 drvp->PIO_mode = drvp->DMA_mode + 2;
2984 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2985 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2986 drvp->PIO_mode - 2 : 0;
2987 if (drvp->DMA_mode == 0)
2988 drvp->PIO_mode = 0;
2989 }
2990 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2991 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2992 SIS_TIM_ACT_OFF(drive);
2993 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2994 SIS_TIM_REC_OFF(drive);
2995 }
2996 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2997 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2998 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2999 if (idedma_ctl != 0) {
3000 /* Add software bits in status register */
3001 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3002 IDEDMA_CTL, idedma_ctl);
3003 }
3004 pciide_print_modes(cp);
3005 }
3006
3007 static int
3008 acer_isabr_match(pa)
3009 struct pci_attach_args *pa;
3010 {
3011 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI) &&
3012 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALI_M1543));
3013 }
3014
3015 void
3016 acer_chip_map(sc, pa)
3017 struct pciide_softc *sc;
3018 struct pci_attach_args *pa;
3019 {
3020 struct pci_attach_args isa_pa;
3021 struct pciide_channel *cp;
3022 int channel;
3023 pcireg_t cr, interface;
3024 bus_size_t cmdsize, ctlsize;
3025 pcireg_t rev = PCI_REVISION(pa->pa_class);
3026
3027 if (pciide_chipen(sc, pa) == 0)
3028 return;
3029 printf("%s: bus-master DMA support present",
3030 sc->sc_wdcdev.sc_dev.dv_xname);
3031 pciide_mapreg_dma(sc, pa);
3032 printf("\n");
3033 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3034 WDC_CAPABILITY_MODE;
3035 if (sc->sc_dma_ok) {
3036 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3037 if (rev >= 0x20) {
3038 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3039 if (rev >= 0xC4)
3040 sc->sc_wdcdev.UDMA_cap = 5;
3041 else if (rev >= 0xC2)
3042 sc->sc_wdcdev.UDMA_cap = 4;
3043 else
3044 sc->sc_wdcdev.UDMA_cap = 2;
3045 }
3046 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3047 sc->sc_wdcdev.irqack = pciide_irqack;
3048 }
3049
3050 sc->sc_wdcdev.PIO_cap = 4;
3051 sc->sc_wdcdev.DMA_cap = 2;
3052 sc->sc_wdcdev.set_modes = acer_setup_channel;
3053 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3054 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3055
3056 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3057 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3058 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3059
3060 /* Enable "microsoft register bits" R/W. */
3061 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3062 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3063 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3064 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3065 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3066 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3067 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3068 ~ACER_CHANSTATUSREGS_RO);
3069 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3070 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3071 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3072 /* Don't use cr, re-read the real register content instead */
3073 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3074 PCI_CLASS_REG));
3075
3076 /* From linux: enable "Cable Detection" */
3077 if (rev >= 0xC2) {
3078 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3079 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3080 | ACER_0x4B_CDETECT);
3081 /* set south-bridge's enable bit, m1533, 0x79 */
3082 if (pci_find_device(&isa_pa, acer_isabr_match) == 0) {
3083 printf("%s: can't find PCI/ISA bridge, downgrading "
3084 "to Ultra/33\n", sc->sc_wdcdev.sc_dev.dv_xname);
3085 sc->sc_wdcdev.UDMA_cap = 2;
3086 } else {
3087 if (rev == 0xC2)
3088 /* 1543C-B0 (m1533, 0x79, bit 2) */
3089 pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
3090 ACER_0x79,
3091 pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3092 ACER_0x79)
3093 | ACER_0x79_REVC2_EN);
3094 else
3095 /* 1553/1535 (m1533, 0x79, bit 1) */
3096 pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
3097 ACER_0x79,
3098 pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3099 ACER_0x79)
3100 | ACER_0x79_EN);
3101 }
3102 }
3103
3104 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3105 cp = &sc->pciide_channels[channel];
3106 if (pciide_chansetup(sc, channel, interface) == 0)
3107 continue;
3108 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3109 printf("%s: %s channel ignored (disabled)\n",
3110 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3111 continue;
3112 }
3113 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3114 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3115 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3116 if (cp->hw_ok == 0)
3117 continue;
3118 if (pciide_chan_candisable(cp)) {
3119 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3120 pci_conf_write(sc->sc_pc, sc->sc_tag,
3121 PCI_CLASS_REG, cr);
3122 }
3123 pciide_map_compat_intr(pa, cp, channel, interface);
3124 acer_setup_channel(&cp->wdc_channel);
3125 }
3126 }
3127
3128 void
3129 acer_setup_channel(chp)
3130 struct channel_softc *chp;
3131 {
3132 struct ata_drive_datas *drvp;
3133 int drive;
3134 u_int32_t acer_fifo_udma;
3135 u_int32_t idedma_ctl;
3136 struct pciide_channel *cp = (struct pciide_channel*)chp;
3137 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3138
3139 idedma_ctl = 0;
3140 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3141 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3142 acer_fifo_udma), DEBUG_PROBE);
3143 /* setup DMA if needed */
3144 pciide_channel_dma_setup(cp);
3145
3146 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3147 DRIVE_UDMA) { /* check 80 pins cable */
3148 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3149 ACER_0x4A_80PIN(chp->channel)) {
3150 if (chp->ch_drive[0].UDMA_mode > 2)
3151 chp->ch_drive[0].UDMA_mode = 2;
3152 if (chp->ch_drive[1].UDMA_mode > 2)
3153 chp->ch_drive[1].UDMA_mode = 2;
3154 }
3155 }
3156
3157 for (drive = 0; drive < 2; drive++) {
3158 drvp = &chp->ch_drive[drive];
3159 /* If no drive, skip */
3160 if ((drvp->drive_flags & DRIVE) == 0)
3161 continue;
3162 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3163 "channel %d drive %d 0x%x\n", chp->channel, drive,
3164 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3165 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3166 /* clear FIFO/DMA mode */
3167 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3168 ACER_UDMA_EN(chp->channel, drive) |
3169 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3170
3171 /* add timing values, setup DMA if needed */
3172 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3173 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3174 acer_fifo_udma |=
3175 ACER_FTH_OPL(chp->channel, drive, 0x1);
3176 goto pio;
3177 }
3178
3179 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3180 if (drvp->drive_flags & DRIVE_UDMA) {
3181 /* use Ultra/DMA */
3182 drvp->drive_flags &= ~DRIVE_DMA;
3183 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3184 acer_fifo_udma |=
3185 ACER_UDMA_TIM(chp->channel, drive,
3186 acer_udma[drvp->UDMA_mode]);
3187 /* XXX disable if one drive < UDMA3 ? */
3188 if (drvp->UDMA_mode >= 3) {
3189 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3190 ACER_0x4B,
3191 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3192 ACER_0x4B) | ACER_0x4B_UDMA66);
3193 }
3194 } else {
3195 /*
3196 * use Multiword DMA
3197 * Timings will be used for both PIO and DMA,
3198 * so adjust DMA mode if needed
3199 */
3200 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3201 drvp->PIO_mode = drvp->DMA_mode + 2;
3202 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3203 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3204 drvp->PIO_mode - 2 : 0;
3205 if (drvp->DMA_mode == 0)
3206 drvp->PIO_mode = 0;
3207 }
3208 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3209 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3210 ACER_IDETIM(chp->channel, drive),
3211 acer_pio[drvp->PIO_mode]);
3212 }
3213 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3214 acer_fifo_udma), DEBUG_PROBE);
3215 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3216 if (idedma_ctl != 0) {
3217 /* Add software bits in status register */
3218 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3219 IDEDMA_CTL, idedma_ctl);
3220 }
3221 pciide_print_modes(cp);
3222 }
3223
3224 int
3225 acer_pci_intr(arg)
3226 void *arg;
3227 {
3228 struct pciide_softc *sc = arg;
3229 struct pciide_channel *cp;
3230 struct channel_softc *wdc_cp;
3231 int i, rv, crv;
3232 u_int32_t chids;
3233
3234 rv = 0;
3235 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3236 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3237 cp = &sc->pciide_channels[i];
3238 wdc_cp = &cp->wdc_channel;
3239 /* If a compat channel skip. */
3240 if (cp->compat)
3241 continue;
3242 if (chids & ACER_CHIDS_INT(i)) {
3243 crv = wdcintr(wdc_cp);
3244 if (crv == 0)
3245 printf("%s:%d: bogus intr\n",
3246 sc->sc_wdcdev.sc_dev.dv_xname, i);
3247 else
3248 rv = 1;
3249 }
3250 }
3251 return rv;
3252 }
3253
3254 void
3255 hpt_chip_map(sc, pa)
3256 struct pciide_softc *sc;
3257 struct pci_attach_args *pa;
3258 {
3259 struct pciide_channel *cp;
3260 int i, compatchan, revision;
3261 pcireg_t interface;
3262 bus_size_t cmdsize, ctlsize;
3263
3264 if (pciide_chipen(sc, pa) == 0)
3265 return;
3266 revision = PCI_REVISION(pa->pa_class);
3267 printf(": Triones/Highpoint ");
3268 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3269 printf("HPT374 IDE Controller\n");
3270 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3271 if (revision == HPT370_REV)
3272 printf("HPT370 IDE Controller\n");
3273 else if (revision == HPT370A_REV)
3274 printf("HPT370A IDE Controller\n");
3275 else if (revision == HPT366_REV)
3276 printf("HPT366 IDE Controller\n");
3277 else
3278 printf("unknown HPT IDE controller rev %d\n", revision);
3279 } else
3280 printf("unknown HPT IDE controller 0x%x\n",
3281 sc->sc_pp->ide_product);
3282
3283 /*
3284 	 * When the chip is in native mode it identifies itself as a
3285 	 * 'misc mass storage' device. Fake the interface in this case.
3286 */
3287 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3288 interface = PCI_INTERFACE(pa->pa_class);
3289 } else {
3290 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3291 PCIIDE_INTERFACE_PCI(0);
3292 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3293 (revision == HPT370_REV || revision == HPT370A_REV)) ||
3294 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3295 interface |= PCIIDE_INTERFACE_PCI(1);
3296 }
3297
3298 printf("%s: bus-master DMA support present",
3299 sc->sc_wdcdev.sc_dev.dv_xname);
3300 pciide_mapreg_dma(sc, pa);
3301 printf("\n");
3302 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3303 WDC_CAPABILITY_MODE;
3304 if (sc->sc_dma_ok) {
3305 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3306 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3307 sc->sc_wdcdev.irqack = pciide_irqack;
3308 }
3309 sc->sc_wdcdev.PIO_cap = 4;
3310 sc->sc_wdcdev.DMA_cap = 2;
3311
3312 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3313 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3314 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3315 revision == HPT366_REV) {
3316 sc->sc_wdcdev.UDMA_cap = 4;
3317 /*
3318 * The 366 has 2 PCI IDE functions, one for primary and one
3319 * for secondary. So we need to call pciide_mapregs_compat()
3320 * with the real channel
3321 */
3322 if (pa->pa_function == 0) {
3323 compatchan = 0;
3324 } else if (pa->pa_function == 1) {
3325 compatchan = 1;
3326 } else {
3327 printf("%s: unexpected PCI function %d\n",
3328 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3329 return;
3330 }
3331 sc->sc_wdcdev.nchannels = 1;
3332 } else {
3333 sc->sc_wdcdev.nchannels = 2;
3334 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3335 sc->sc_wdcdev.UDMA_cap = 6;
3336 else
3337 sc->sc_wdcdev.UDMA_cap = 5;
3338 }
3339 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3340 cp = &sc->pciide_channels[i];
3341 if (sc->sc_wdcdev.nchannels > 1) {
3342 compatchan = i;
3343 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3344 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3345 printf("%s: %s channel ignored (disabled)\n",
3346 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3347 continue;
3348 }
3349 }
3350 if (pciide_chansetup(sc, i, interface) == 0)
3351 continue;
3352 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3353 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3354 &ctlsize, hpt_pci_intr);
3355 } else {
3356 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3357 &cmdsize, &ctlsize);
3358 }
3359 if (cp->hw_ok == 0)
3360 return;
3361 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3362 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3363 wdcattach(&cp->wdc_channel);
3364 hpt_setup_channel(&cp->wdc_channel);
3365 }
3366 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3367 (revision == HPT370_REV || revision == HPT370A_REV)) ||
3368 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3369 /*
3370 		 * HPT370_REV and higher have a bit to disable interrupts;
3371 		 * make sure to clear it
3372 */
3373 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3374 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3375 ~HPT_CSEL_IRQDIS);
3376 }
3377 /* set clocks, etc (mandatory on 374, optional otherwise) */
3378 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3379 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3380 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3381 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3382 return;
3383 }
3384
3385 void
3386 hpt_setup_channel(chp)
3387 struct channel_softc *chp;
3388 {
3389 struct ata_drive_datas *drvp;
3390 int drive;
3391 int cable;
3392 u_int32_t before, after;
3393 u_int32_t idedma_ctl;
3394 struct pciide_channel *cp = (struct pciide_channel*)chp;
3395 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3396
3397 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3398
3399 /* setup DMA if needed */
3400 pciide_channel_dma_setup(cp);
3401
3402 idedma_ctl = 0;
3403
3404 /* Per drive settings */
3405 for (drive = 0; drive < 2; drive++) {
3406 drvp = &chp->ch_drive[drive];
3407 /* If no drive, skip */
3408 if ((drvp->drive_flags & DRIVE) == 0)
3409 continue;
3410 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3411 HPT_IDETIM(chp->channel, drive));
3412
3413 /* add timing values, setup DMA if needed */
3414 if (drvp->drive_flags & DRIVE_UDMA) {
3415 /* use Ultra/DMA */
3416 drvp->drive_flags &= ~DRIVE_DMA;
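			/*
			 * (The CBLID bit set is taken to mean that no 80-wire
			 * cable was sensed on this channel, so cap at UDMA2.)
			 */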
3417 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3418 drvp->UDMA_mode > 2)
3419 drvp->UDMA_mode = 2;
3420 after = (sc->sc_wdcdev.nchannels == 2) ?
3421 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3422 hpt374_udma[drvp->UDMA_mode] :
3423 hpt370_udma[drvp->UDMA_mode]) :
3424 hpt366_udma[drvp->UDMA_mode];
3425 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3426 } else if (drvp->drive_flags & DRIVE_DMA) {
3427 /*
3428 * use Multiword DMA.
3429 * Timings will be used for both PIO and DMA, so adjust
3430 * DMA mode if needed
3431 */
3432 if (drvp->PIO_mode >= 3 &&
3433 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3434 drvp->DMA_mode = drvp->PIO_mode - 2;
3435 }
3436 after = (sc->sc_wdcdev.nchannels == 2) ?
3437 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3438 hpt374_dma[drvp->DMA_mode] :
3439 hpt370_dma[drvp->DMA_mode]) :
3440 hpt366_dma[drvp->DMA_mode];
3441 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3442 } else {
3443 /* PIO only */
3444 after = (sc->sc_wdcdev.nchannels == 2) ?
3445 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3446 hpt374_pio[drvp->PIO_mode] :
3447 hpt370_pio[drvp->PIO_mode]) :
3448 hpt366_pio[drvp->PIO_mode];
3449 }
3450 pci_conf_write(sc->sc_pc, sc->sc_tag,
3451 HPT_IDETIM(chp->channel, drive), after);
3452 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3453 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3454 after, before), DEBUG_PROBE);
3455 }
3456 if (idedma_ctl != 0) {
3457 /* Add software bits in status register */
3458 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3459 IDEDMA_CTL, idedma_ctl);
3460 }
3461 pciide_print_modes(cp);
3462 }
3463
3464 int
3465 hpt_pci_intr(arg)
3466 void *arg;
3467 {
3468 struct pciide_softc *sc = arg;
3469 struct pciide_channel *cp;
3470 struct channel_softc *wdc_cp;
3471 int rv = 0;
3472 int dmastat, i, crv;
3473
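	/*
	 * Only claim the interrupt for channels whose bus-master status
	 * shows an interrupt pending and no transfer still active.
	 */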
3474 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3475 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3476 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3477 		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3478 IDEDMA_CTL_INTR)
3479 continue;
3480 cp = &sc->pciide_channels[i];
3481 wdc_cp = &cp->wdc_channel;
3482 crv = wdcintr(wdc_cp);
3483 if (crv == 0) {
3484 printf("%s:%d: bogus intr\n",
3485 sc->sc_wdcdev.sc_dev.dv_xname, i);
3486 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3487 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3488 } else
3489 rv = 1;
3490 }
3491 return rv;
3492 }
3493
3494
3495 /* Macros to test product */
3496 #define PDC_IS_262(sc) \
3497 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3498 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3499 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3500 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3501 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3502 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3503 #define PDC_IS_265(sc) \
3504 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3505 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3506 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3507 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3508 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3509 #define PDC_IS_268(sc) \
3510 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3511 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3512 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3513
3514 void
3515 pdc202xx_chip_map(sc, pa)
3516 struct pciide_softc *sc;
3517 struct pci_attach_args *pa;
3518 {
3519 struct pciide_channel *cp;
3520 int channel;
3521 pcireg_t interface, st, mode;
3522 bus_size_t cmdsize, ctlsize;
3523
3524 if (!PDC_IS_268(sc)) {
3525 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3526 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3527 st), DEBUG_PROBE);
3528 }
3529 if (pciide_chipen(sc, pa) == 0)
3530 return;
3531
3532 /* turn off RAID mode */
3533 if (!PDC_IS_268(sc))
3534 st &= ~PDC2xx_STATE_IDERAID;
3535
3536 	/*
3537 	 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
3538 	 * mode; we have to fake the interface.
3539 	 */
3540 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3541 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3542 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3543
3544 printf("%s: bus-master DMA support present",
3545 sc->sc_wdcdev.sc_dev.dv_xname);
3546 pciide_mapreg_dma(sc, pa);
3547 printf("\n");
3548 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3549 WDC_CAPABILITY_MODE;
3550 if (sc->sc_dma_ok) {
3551 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3552 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3553 sc->sc_wdcdev.irqack = pciide_irqack;
3554 }
3555 sc->sc_wdcdev.PIO_cap = 4;
3556 sc->sc_wdcdev.DMA_cap = 2;
3557 if (PDC_IS_265(sc))
3558 sc->sc_wdcdev.UDMA_cap = 5;
3559 else if (PDC_IS_262(sc))
3560 sc->sc_wdcdev.UDMA_cap = 4;
3561 else
3562 sc->sc_wdcdev.UDMA_cap = 2;
3563 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3564 pdc20268_setup_channel : pdc202xx_setup_channel;
3565 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3566 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3567
3568 if (!PDC_IS_268(sc)) {
3569 /* setup failsafe defaults */
3570 mode = 0;
3571 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3572 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3573 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3574 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3575 for (channel = 0;
3576 channel < sc->sc_wdcdev.nchannels;
3577 channel++) {
3578 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3579 "drive 0 initial timings 0x%x, now 0x%x\n",
3580 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3581 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3582 DEBUG_PROBE);
3583 pci_conf_write(sc->sc_pc, sc->sc_tag,
3584 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3585 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3586 "drive 1 initial timings 0x%x, now 0x%x\n",
3587 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3588 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3589 pci_conf_write(sc->sc_pc, sc->sc_tag,
3590 PDC2xx_TIM(channel, 1), mode);
3591 }
3592
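		/*
		 * Build the System Control Register value, starting from
		 * PDC2xx_SCR_DMA, with the GEN/I2C/POLL fields set to what
		 * the BIOS would normally program (see the comments below).
		 */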
3593 mode = PDC2xx_SCR_DMA;
3594 if (PDC_IS_262(sc)) {
3595 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3596 } else {
3597 /* the BIOS set it up this way */
3598 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3599 }
3600 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3601 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3602 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3603 "now 0x%x\n",
3604 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3605 PDC2xx_SCR),
3606 mode), DEBUG_PROBE);
3607 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3608 PDC2xx_SCR, mode);
3609
3610 /* controller initial state register is OK even without BIOS */
3611 /* Set DMA mode to IDE DMA compatibility */
3612 mode =
3613 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3614 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3615 DEBUG_PROBE);
3616 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3617 mode | 0x1);
3618 mode =
3619 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3620 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3621 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3622 mode | 0x1);
3623 }
3624
3625 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3626 cp = &sc->pciide_channels[channel];
3627 if (pciide_chansetup(sc, channel, interface) == 0)
3628 continue;
3629 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3630 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3631 printf("%s: %s channel ignored (disabled)\n",
3632 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3633 continue;
3634 }
3635 if (PDC_IS_265(sc))
3636 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3637 pdc20265_pci_intr);
3638 else
3639 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3640 pdc202xx_pci_intr);
3641 if (cp->hw_ok == 0)
3642 continue;
3643 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3644 st &= ~(PDC_IS_262(sc) ?
3645 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3646 pciide_map_compat_intr(pa, cp, channel, interface);
3647 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3648 }
3649 if (!PDC_IS_268(sc)) {
3650 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3651 "0x%x\n", st), DEBUG_PROBE);
3652 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3653 }
3654 return;
3655 }
3656
3657 void
3658 pdc202xx_setup_channel(chp)
3659 struct channel_softc *chp;
3660 {
3661 struct ata_drive_datas *drvp;
3662 int drive;
3663 pcireg_t mode, st;
3664 u_int32_t idedma_ctl, scr, atapi;
3665 struct pciide_channel *cp = (struct pciide_channel*)chp;
3666 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3667 int channel = chp->channel;
3668
3669 /* setup DMA if needed */
3670 pciide_channel_dma_setup(cp);
3671
3672 idedma_ctl = 0;
3673 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3674 sc->sc_wdcdev.sc_dev.dv_xname,
3675 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3676 DEBUG_PROBE);
3677
3678 /* Per channel settings */
3679 if (PDC_IS_262(sc)) {
3680 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3681 PDC262_U66);
3682 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3683 /* Trim UDMA mode */
3684 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3685 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3686 chp->ch_drive[0].UDMA_mode <= 2) ||
3687 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3688 chp->ch_drive[1].UDMA_mode <= 2)) {
3689 if (chp->ch_drive[0].UDMA_mode > 2)
3690 chp->ch_drive[0].UDMA_mode = 2;
3691 if (chp->ch_drive[1].UDMA_mode > 2)
3692 chp->ch_drive[1].UDMA_mode = 2;
3693 }
3694 /* Set U66 if needed */
3695 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3696 chp->ch_drive[0].UDMA_mode > 2) ||
3697 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3698 chp->ch_drive[1].UDMA_mode > 2))
3699 scr |= PDC262_U66_EN(channel);
3700 else
3701 scr &= ~PDC262_U66_EN(channel);
3702 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3703 PDC262_U66, scr);
3704 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3705 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3706 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3707 PDC262_ATAPI(channel))), DEBUG_PROBE);
3708 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3709 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3710 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3711 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3712 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3713 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3714 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3715 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3716 atapi = 0;
3717 else
3718 atapi = PDC262_ATAPI_UDMA;
3719 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3720 PDC262_ATAPI(channel), atapi);
3721 }
3722 }
3723 for (drive = 0; drive < 2; drive++) {
3724 drvp = &chp->ch_drive[drive];
3725 /* If no drive, skip */
3726 if ((drvp->drive_flags & DRIVE) == 0)
3727 continue;
3728 mode = 0;
3729 if (drvp->drive_flags & DRIVE_UDMA) {
3730 /* use Ultra/DMA */
3731 drvp->drive_flags &= ~DRIVE_DMA;
3732 mode = PDC2xx_TIM_SET_MB(mode,
3733 pdc2xx_udma_mb[drvp->UDMA_mode]);
3734 mode = PDC2xx_TIM_SET_MC(mode,
3735 pdc2xx_udma_mc[drvp->UDMA_mode]);
3736 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3737 } else if (drvp->drive_flags & DRIVE_DMA) {
3738 mode = PDC2xx_TIM_SET_MB(mode,
3739 pdc2xx_dma_mb[drvp->DMA_mode]);
3740 mode = PDC2xx_TIM_SET_MC(mode,
3741 pdc2xx_dma_mc[drvp->DMA_mode]);
3742 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3743 } else {
3744 mode = PDC2xx_TIM_SET_MB(mode,
3745 pdc2xx_dma_mb[0]);
3746 mode = PDC2xx_TIM_SET_MC(mode,
3747 pdc2xx_dma_mc[0]);
3748 }
3749 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3750 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3751 if (drvp->drive_flags & DRIVE_ATA)
3752 mode |= PDC2xx_TIM_PRE;
3753 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3754 if (drvp->PIO_mode >= 3) {
3755 mode |= PDC2xx_TIM_IORDY;
3756 if (drive == 0)
3757 mode |= PDC2xx_TIM_IORDYp;
3758 }
3759 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3760 "timings 0x%x\n",
3761 sc->sc_wdcdev.sc_dev.dv_xname,
3762 chp->channel, drive, mode), DEBUG_PROBE);
3763 pci_conf_write(sc->sc_pc, sc->sc_tag,
3764 PDC2xx_TIM(chp->channel, drive), mode);
3765 }
3766 if (idedma_ctl != 0) {
3767 /* Add software bits in status register */
3768 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3769 IDEDMA_CTL, idedma_ctl);
3770 }
3771 pciide_print_modes(cp);
3772 }
3773
3774 void
3775 pdc20268_setup_channel(chp)
3776 struct channel_softc *chp;
3777 {
3778 struct ata_drive_datas *drvp;
3779 int drive;
3780 u_int32_t idedma_ctl;
3781 struct pciide_channel *cp = (struct pciide_channel*)chp;
3782 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3783 int u100;
3784
3785 /* setup DMA if needed */
3786 pciide_channel_dma_setup(cp);
3787
3788 idedma_ctl = 0;
3789
3790 	/* I don't know what this is for; FreeBSD does it ... */
3791 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3792 IDEDMA_CMD + 0x1, 0x0b);
3793
3794 	/*
3795 	 * I don't know what this is for; FreeBSD checks it ... it is not
3796 	 * cable-type detection.
3797 	 */
3798 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3799 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
3800
3801 for (drive = 0; drive < 2; drive++) {
3802 drvp = &chp->ch_drive[drive];
3803 /* If no drive, skip */
3804 if ((drvp->drive_flags & DRIVE) == 0)
3805 continue;
3806 if (drvp->drive_flags & DRIVE_UDMA) {
3807 /* use Ultra/DMA */
3808 drvp->drive_flags &= ~DRIVE_DMA;
3809 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3810 if (drvp->UDMA_mode > 2 && u100 == 0)
3811 drvp->UDMA_mode = 2;
3812 } else if (drvp->drive_flags & DRIVE_DMA) {
3813 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3814 }
3815 }
3816 	/* Nothing to do for modes; the controller snoops the SET_FEATURES cmd. */
3817 if (idedma_ctl != 0) {
3818 /* Add software bits in status register */
3819 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3820 IDEDMA_CTL, idedma_ctl);
3821 }
3822 pciide_print_modes(cp);
3823 }
3824
3825 int
3826 pdc202xx_pci_intr(arg)
3827 void *arg;
3828 {
3829 struct pciide_softc *sc = arg;
3830 struct pciide_channel *cp;
3831 struct channel_softc *wdc_cp;
3832 int i, rv, crv;
3833 u_int32_t scr;
3834
3835 rv = 0;
3836 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3837 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3838 cp = &sc->pciide_channels[i];
3839 wdc_cp = &cp->wdc_channel;
3840 		/* If a compat channel, skip. */
3841 if (cp->compat)
3842 continue;
3843 if (scr & PDC2xx_SCR_INT(i)) {
3844 crv = wdcintr(wdc_cp);
3845 if (crv == 0)
3846 printf("%s:%d: bogus intr (reg 0x%x)\n",
3847 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3848 else
3849 rv = 1;
3850 }
3851 }
3852 return rv;
3853 }
3854
3855 int
3856 pdc20265_pci_intr(arg)
3857 void *arg;
3858 {
3859 struct pciide_softc *sc = arg;
3860 struct pciide_channel *cp;
3861 struct channel_softc *wdc_cp;
3862 int i, rv, crv;
3863 u_int32_t dmastat;
3864
3865 rv = 0;
3866 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3867 cp = &sc->pciide_channels[i];
3868 wdc_cp = &cp->wdc_channel;
3869 		/* If a compat channel, skip. */
3870 if (cp->compat)
3871 continue;
3872 		/*
3873 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
3874 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
3875 		 * so use that instead (requires 2 register reads instead of 1,
3876 		 * but there is no other way to do it).
3877 		 */
3878 dmastat = bus_space_read_1(sc->sc_dma_iot,
3879 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3880 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3881 continue;
3882 crv = wdcintr(wdc_cp);
3883 if (crv == 0)
3884 printf("%s:%d: bogus intr\n",
3885 sc->sc_wdcdev.sc_dev.dv_xname, i);
3886 else
3887 rv = 1;
3888 }
3889 return rv;
3890 }
3891
3892 void
3893 opti_chip_map(sc, pa)
3894 struct pciide_softc *sc;
3895 struct pci_attach_args *pa;
3896 {
3897 struct pciide_channel *cp;
3898 bus_size_t cmdsize, ctlsize;
3899 pcireg_t interface;
3900 u_int8_t init_ctrl;
3901 int channel;
3902
3903 if (pciide_chipen(sc, pa) == 0)
3904 return;
3905 printf("%s: bus-master DMA support present",
3906 sc->sc_wdcdev.sc_dev.dv_xname);
3907
3908 /*
3909 * XXXSCW:
3910 * There seem to be a couple of buggy revisions/implementations
3911 * of the OPTi pciide chipset. This kludge seems to fix one of
3912 * the reported problems (PR/11644) but still fails for the
3913 * other (PR/13151), although the latter may be due to other
3914 * issues too...
3915 */
3916 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3917 printf(" but disabled due to chip rev. <= 0x12");
3918 sc->sc_dma_ok = 0;
3919 } else
3920 pciide_mapreg_dma(sc, pa);
3921
3922 printf("\n");
3923
3924 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
3925 WDC_CAPABILITY_MODE;
3926 sc->sc_wdcdev.PIO_cap = 4;
3927 if (sc->sc_dma_ok) {
3928 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3929 sc->sc_wdcdev.irqack = pciide_irqack;
3930 sc->sc_wdcdev.DMA_cap = 2;
3931 }
3932 sc->sc_wdcdev.set_modes = opti_setup_channel;
3933
3934 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3935 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3936
3937 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3938 OPTI_REG_INIT_CONTROL);
3939
3940 interface = PCI_INTERFACE(pa->pa_class);
3941
3942 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3943 cp = &sc->pciide_channels[channel];
3944 if (pciide_chansetup(sc, channel, interface) == 0)
3945 continue;
3946 if (channel == 1 &&
3947 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3948 printf("%s: %s channel ignored (disabled)\n",
3949 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3950 continue;
3951 }
3952 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3953 pciide_pci_intr);
3954 if (cp->hw_ok == 0)
3955 continue;
3956 pciide_map_compat_intr(pa, cp, channel, interface);
3957 if (cp->hw_ok == 0)
3958 continue;
3959 opti_setup_channel(&cp->wdc_channel);
3960 }
3961 }
3962
3963 void
3964 opti_setup_channel(chp)
3965 struct channel_softc *chp;
3966 {
3967 struct ata_drive_datas *drvp;
3968 struct pciide_channel *cp = (struct pciide_channel*)chp;
3969 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3970 int drive, spd;
3971 int mode[2];
3972 u_int8_t rv, mr;
3973
3974 /*
3975 * The `Delay' and `Address Setup Time' fields of the
3976 * Miscellaneous Register are always zero initially.
3977 */
3978 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3979 mr &= ~(OPTI_MISC_DELAY_MASK |
3980 OPTI_MISC_ADDR_SETUP_MASK |
3981 OPTI_MISC_INDEX_MASK);
3982
3983 /* Prime the control register before setting timing values */
3984 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3985
3986 	/* Determine the clock rate of the PCI bus the chip is attached to */
3987 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3988 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3989
3990 /* setup DMA if needed */
3991 pciide_channel_dma_setup(cp);
3992
3993 for (drive = 0; drive < 2; drive++) {
3994 drvp = &chp->ch_drive[drive];
3995 /* If no drive, skip */
3996 if ((drvp->drive_flags & DRIVE) == 0) {
3997 mode[drive] = -1;
3998 continue;
3999 }
4000
4001 if ((drvp->drive_flags & DRIVE_DMA)) {
4002 /*
4003 * Timings will be used for both PIO and DMA,
4004 * so adjust DMA mode if needed
4005 */
4006 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4007 drvp->PIO_mode = drvp->DMA_mode + 2;
4008 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4009 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4010 drvp->PIO_mode - 2 : 0;
4011 if (drvp->DMA_mode == 0)
4012 drvp->PIO_mode = 0;
4013
4014 mode[drive] = drvp->DMA_mode + 5;
4015 } else
4016 mode[drive] = drvp->PIO_mode;
4017
4018 if (drive && mode[0] >= 0 &&
4019 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4020 /*
4021 * Can't have two drives using different values
4022 * for `Address Setup Time'.
4023 * Slow down the faster drive to compensate.
4024 */
4025 int d = (opti_tim_as[spd][mode[0]] >
4026 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4027
4028 mode[d] = mode[1-d];
4029 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4030 chp->ch_drive[d].DMA_mode = 0;
4031 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4032 }
4033 }
4034
4035 for (drive = 0; drive < 2; drive++) {
4036 int m;
4037 if ((m = mode[drive]) < 0)
4038 continue;
4039
4040 /* Set the Address Setup Time and select appropriate index */
4041 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4042 rv |= OPTI_MISC_INDEX(drive);
4043 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4044
4045 /* Set the pulse width and recovery timing parameters */
4046 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4047 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4048 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4049 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4050
4051 /* Set the Enhanced Mode register appropriately */
4052 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4053 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4054 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4055 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4056 }
4057
4058 /* Finally, enable the timings */
4059 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4060
4061 pciide_print_modes(cp);
4062 }
4063
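/*
 * The ATP850U uses the ATP850_* register layout; the later ATP86x chips use
 * ATP860_*, and the driver caps the ATP850U at Ultra/DMA mode 2.
 */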
4064 #define ACARD_IS_850(sc) \
4065 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4066
4067 void
4068 acard_chip_map(sc, pa)
4069 struct pciide_softc *sc;
4070 struct pci_attach_args *pa;
4071 {
4072 struct pciide_channel *cp;
4073 int i;
4074 pcireg_t interface;
4075 bus_size_t cmdsize, ctlsize;
4076
4077 if (pciide_chipen(sc, pa) == 0)
4078 return;
4079
4080 	/*
4081 	 * When the chip is in native mode it identifies itself as
4082 	 * 'misc mass storage'. Fake the interface in this case.
4083 	 */
4084 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4085 interface = PCI_INTERFACE(pa->pa_class);
4086 } else {
4087 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4088 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4089 }
4090
4091 printf("%s: bus-master DMA support present",
4092 sc->sc_wdcdev.sc_dev.dv_xname);
4093 pciide_mapreg_dma(sc, pa);
4094 printf("\n");
4095 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4096 WDC_CAPABILITY_MODE;
4097
4098 if (sc->sc_dma_ok) {
4099 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4100 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4101 sc->sc_wdcdev.irqack = pciide_irqack;
4102 }
4103 sc->sc_wdcdev.PIO_cap = 4;
4104 sc->sc_wdcdev.DMA_cap = 2;
4105 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4106
4107 sc->sc_wdcdev.set_modes = acard_setup_channel;
4108 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4109 sc->sc_wdcdev.nchannels = 2;
4110
4111 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4112 cp = &sc->pciide_channels[i];
4113 if (pciide_chansetup(sc, i, interface) == 0)
4114 continue;
4115 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4116 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4117 &ctlsize, pciide_pci_intr);
4118 } else {
4119 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4120 &cmdsize, &ctlsize);
4121 }
4122 if (cp->hw_ok == 0)
4123 return;
4124 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4125 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4126 wdcattach(&cp->wdc_channel);
4127 acard_setup_channel(&cp->wdc_channel);
4128 }
4129 if (!ACARD_IS_850(sc)) {
4130 u_int32_t reg;
4131 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4132 reg &= ~ATP860_CTRL_INT;
4133 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4134 }
4135 }
4136
4137 void
4138 acard_setup_channel(chp)
4139 struct channel_softc *chp;
4140 {
4141 struct ata_drive_datas *drvp;
4142 struct pciide_channel *cp = (struct pciide_channel*)chp;
4143 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4144 int channel = chp->channel;
4145 int drive;
4146 u_int32_t idetime, udma_mode;
4147 u_int32_t idedma_ctl;
4148
4149 /* setup DMA if needed */
4150 pciide_channel_dma_setup(cp);
4151
4152 if (ACARD_IS_850(sc)) {
4153 idetime = 0;
4154 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4155 udma_mode &= ~ATP850_UDMA_MASK(channel);
4156 } else {
4157 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4158 idetime &= ~ATP860_SETTIME_MASK(channel);
4159 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4160 udma_mode &= ~ATP860_UDMA_MASK(channel);
4161
4162 		/* Check for an 80-pin cable */
4163 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4164 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4165 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4166 & ATP860_CTRL_80P(chp->channel)) {
4167 if (chp->ch_drive[0].UDMA_mode > 2)
4168 chp->ch_drive[0].UDMA_mode = 2;
4169 if (chp->ch_drive[1].UDMA_mode > 2)
4170 chp->ch_drive[1].UDMA_mode = 2;
4171 }
4172 }
4173 }
4174
4175 idedma_ctl = 0;
4176
4177 /* Per drive settings */
4178 for (drive = 0; drive < 2; drive++) {
4179 drvp = &chp->ch_drive[drive];
4180 /* If no drive, skip */
4181 if ((drvp->drive_flags & DRIVE) == 0)
4182 continue;
4183 /* add timing values, setup DMA if needed */
4184 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4185 (drvp->drive_flags & DRIVE_UDMA)) {
4186 /* use Ultra/DMA */
4187 if (ACARD_IS_850(sc)) {
4188 idetime |= ATP850_SETTIME(drive,
4189 acard_act_udma[drvp->UDMA_mode],
4190 acard_rec_udma[drvp->UDMA_mode]);
4191 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4192 acard_udma_conf[drvp->UDMA_mode]);
4193 } else {
4194 idetime |= ATP860_SETTIME(channel, drive,
4195 acard_act_udma[drvp->UDMA_mode],
4196 acard_rec_udma[drvp->UDMA_mode]);
4197 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4198 acard_udma_conf[drvp->UDMA_mode]);
4199 }
4200 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4201 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4202 (drvp->drive_flags & DRIVE_DMA)) {
4203 /* use Multiword DMA */
4204 drvp->drive_flags &= ~DRIVE_UDMA;
4205 if (ACARD_IS_850(sc)) {
4206 idetime |= ATP850_SETTIME(drive,
4207 acard_act_dma[drvp->DMA_mode],
4208 acard_rec_dma[drvp->DMA_mode]);
4209 } else {
4210 idetime |= ATP860_SETTIME(channel, drive,
4211 acard_act_dma[drvp->DMA_mode],
4212 acard_rec_dma[drvp->DMA_mode]);
4213 }
4214 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4215 } else {
4216 /* PIO only */
4217 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4218 if (ACARD_IS_850(sc)) {
4219 idetime |= ATP850_SETTIME(drive,
4220 acard_act_pio[drvp->PIO_mode],
4221 acard_rec_pio[drvp->PIO_mode]);
4222 } else {
4223 idetime |= ATP860_SETTIME(channel, drive,
4224 acard_act_pio[drvp->PIO_mode],
4225 acard_rec_pio[drvp->PIO_mode]);
4226 }
4227 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4228 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4229 | ATP8x0_CTRL_EN(channel));
4230 }
4231 }
4232
4233 if (idedma_ctl != 0) {
4234 /* Add software bits in status register */
4235 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4236 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4237 }
4238 pciide_print_modes(cp);
4239
4240 if (ACARD_IS_850(sc)) {
4241 pci_conf_write(sc->sc_pc, sc->sc_tag,
4242 ATP850_IDETIME(channel), idetime);
4243 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4244 } else {
4245 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4246 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4247 }
4248 }
4249
4250 int
4251 acard_pci_intr(arg)
4252 void *arg;
4253 {
4254 struct pciide_softc *sc = arg;
4255 struct pciide_channel *cp;
4256 struct channel_softc *wdc_cp;
4257 int rv = 0;
4258 int dmastat, i, crv;
4259
4260 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4261 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4262 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4263 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4264 continue;
4265 cp = &sc->pciide_channels[i];
4266 wdc_cp = &cp->wdc_channel;
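		/*
		 * Channel not waiting for an interrupt: let wdcintr() have
		 * a look anyway, then clear the DMA interrupt bit.
		 */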
4267 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4268 (void)wdcintr(wdc_cp);
4269 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4270 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4271 continue;
4272 }
4273 crv = wdcintr(wdc_cp);
4274 if (crv == 0)
4275 printf("%s:%d: bogus intr\n",
4276 sc->sc_wdcdev.sc_dev.dv_xname, i);
4277 else if (crv == 1)
4278 rv = 1;
4279 else if (rv == 0)
4280 rv = crv;
4281 }
4282 return rv;
4283 }
4284
4285 static int
4286 sl82c105_bugchk(struct pci_attach_args *pa)
4287 {
4288
4289 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4290 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4291 return (0);
4292
4293 if (PCI_REVISION(pa->pa_class) <= 0x05)
4294 return (1);
4295
4296 return (0);
4297 }
4298
4299 void
4300 sl82c105_chip_map(sc, pa)
4301 struct pciide_softc *sc;
4302 struct pci_attach_args *pa;
4303 {
4304 struct pciide_channel *cp;
4305 bus_size_t cmdsize, ctlsize;
4306 pcireg_t interface, idecr;
4307 int channel;
4308
4309 if (pciide_chipen(sc, pa) == 0)
4310 return;
4311
4312 printf("%s: bus-master DMA support present",
4313 sc->sc_wdcdev.sc_dev.dv_xname);
4314
4315 /*
4316 * Check to see if we're part of the Winbond 83c553 Southbridge.
4317 * If so, we need to disable DMA on rev. <= 5 of that chip.
4318 */
4319 if (pci_find_device(pa, sl82c105_bugchk)) {
4320 printf(" but disabled due to 83c553 rev. <= 0x05");
4321 sc->sc_dma_ok = 0;
4322 } else
4323 pciide_mapreg_dma(sc, pa);
4324 printf("\n");
4325
4326 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4327 WDC_CAPABILITY_MODE;
4328 sc->sc_wdcdev.PIO_cap = 4;
4329 if (sc->sc_dma_ok) {
4330 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4331 sc->sc_wdcdev.irqack = pciide_irqack;
4332 sc->sc_wdcdev.DMA_cap = 2;
4333 }
4334 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4335
4336 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4337 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4338
4339 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4340
4341 interface = PCI_INTERFACE(pa->pa_class);
4342
4343 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4344 cp = &sc->pciide_channels[channel];
4345 if (pciide_chansetup(sc, channel, interface) == 0)
4346 continue;
4347 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4348 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4349 printf("%s: %s channel ignored (disabled)\n",
4350 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4351 continue;
4352 }
4353 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4354 pciide_pci_intr);
4355 if (cp->hw_ok == 0)
4356 continue;
4357 pciide_map_compat_intr(pa, cp, channel, interface);
4358 if (cp->hw_ok == 0)
4359 continue;
4360 sl82c105_setup_channel(&cp->wdc_channel);
4361 }
4362 }
4363
4364 void
4365 sl82c105_setup_channel(chp)
4366 struct channel_softc *chp;
4367 {
4368 struct ata_drive_datas *drvp;
4369 struct pciide_channel *cp = (struct pciide_channel*)chp;
4370 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4371 int pxdx_reg, drive;
4372 pcireg_t pxdx;
4373
4374 /* Set up DMA if needed. */
4375 pciide_channel_dma_setup(cp);
4376
4377 for (drive = 0; drive < 2; drive++) {
4378 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4379 : SYMPH_P1D0CR) + (drive * 4);
4380
4381 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4382
4383 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4384 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4385
4386 drvp = &chp->ch_drive[drive];
4387 /* If no drive, skip. */
4388 if ((drvp->drive_flags & DRIVE) == 0) {
4389 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4390 continue;
4391 }
4392
4393 if (drvp->drive_flags & DRIVE_DMA) {
4394 /*
4395 * Timings will be used for both PIO and DMA,
4396 * so adjust DMA mode if needed.
4397 */
4398 if (drvp->PIO_mode >= 3) {
4399 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4400 drvp->DMA_mode = drvp->PIO_mode - 2;
4401 if (drvp->DMA_mode < 1) {
4402 /*
4403 * Can't mix both PIO and DMA.
4404 * Disable DMA.
4405 */
4406 drvp->drive_flags &= ~DRIVE_DMA;
4407 }
4408 } else {
4409 /*
4410 * Can't mix both PIO and DMA. Disable
4411 * DMA.
4412 */
4413 drvp->drive_flags &= ~DRIVE_DMA;
4414 }
4415 }
4416
4417 if (drvp->drive_flags & DRIVE_DMA) {
4418 /* Use multi-word DMA. */
4419 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4420 PxDx_CMD_ON_SHIFT;
4421 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4422 } else {
4423 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4424 PxDx_CMD_ON_SHIFT;
4425 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4426 }
4427
4428 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4429
4430 /* ...and set the mode for this drive. */
4431 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4432 }
4433
4434 pciide_print_modes(cp);
4435 }
4436
4437 void
4438 serverworks_chip_map(sc, pa)
4439 struct pciide_softc *sc;
4440 struct pci_attach_args *pa;
4441 {
4442 struct pciide_channel *cp;
4443 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4444 pcitag_t pcib_tag;
4445 int channel;
4446 bus_size_t cmdsize, ctlsize;
4447
4448 if (pciide_chipen(sc, pa) == 0)
4449 return;
4450
4451 printf("%s: bus-master DMA support present",
4452 sc->sc_wdcdev.sc_dev.dv_xname);
4453 pciide_mapreg_dma(sc, pa);
4454 printf("\n");
4455 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4456 WDC_CAPABILITY_MODE;
4457
4458 if (sc->sc_dma_ok) {
4459 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4460 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4461 sc->sc_wdcdev.irqack = pciide_irqack;
4462 }
4463 sc->sc_wdcdev.PIO_cap = 4;
4464 sc->sc_wdcdev.DMA_cap = 2;
4465 switch (sc->sc_pp->ide_product) {
4466 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4467 sc->sc_wdcdev.UDMA_cap = 2;
4468 break;
4469 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4470 if (PCI_REVISION(pa->pa_class) < 0x92)
4471 sc->sc_wdcdev.UDMA_cap = 4;
4472 else
4473 sc->sc_wdcdev.UDMA_cap = 5;
4474 break;
4475 }
4476
4477 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4478 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4479 sc->sc_wdcdev.nchannels = 2;
4480
4481 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4482 cp = &sc->pciide_channels[channel];
4483 if (pciide_chansetup(sc, channel, interface) == 0)
4484 continue;
4485 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4486 serverworks_pci_intr);
4487 if (cp->hw_ok == 0)
4488 return;
4489 pciide_map_compat_intr(pa, cp, channel, interface);
4490 if (cp->hw_ok == 0)
4491 return;
4492 serverworks_setup_channel(&cp->wdc_channel);
4493 }
4494
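	/*
	 * Adjust config register 0x64 of function 0 of this device: clear
	 * bit 13 and set bit 14. The purpose of these bits is not
	 * documented here.
	 */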
4495 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4496 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4497 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4498 }
4499
4500 void
4501 serverworks_setup_channel(chp)
4502 struct channel_softc *chp;
4503 {
4504 struct ata_drive_datas *drvp;
4505 struct pciide_channel *cp = (struct pciide_channel*)chp;
4506 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4507 int channel = chp->channel;
4508 int drive, unit;
4509 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4510 u_int32_t idedma_ctl;
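	/* Raw timing-register values, indexed by PIO and multiword DMA mode. */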
4511 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4512 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4513
4514 /* setup DMA if needed */
4515 pciide_channel_dma_setup(cp);
4516
4517 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4518 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4519 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4520 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4521
4522 pio_time &= ~(0xffff << (16 * channel));
4523 dma_time &= ~(0xffff << (16 * channel));
4524 pio_mode &= ~(0xff << (8 * channel + 16));
4525 udma_mode &= ~(0xff << (8 * channel + 16));
4526 udma_mode &= ~(3 << (2 * channel));
4527
4528 idedma_ctl = 0;
4529
4530 /* Per drive settings */
4531 for (drive = 0; drive < 2; drive++) {
4532 drvp = &chp->ch_drive[drive];
4533 /* If no drive, skip */
4534 if ((drvp->drive_flags & DRIVE) == 0)
4535 continue;
4536 unit = drive + 2 * channel;
4537 /* add timing values, setup DMA if needed */
4538 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4539 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4540 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4541 (drvp->drive_flags & DRIVE_UDMA)) {
4542 /* use Ultra/DMA, check for 80-pin cable */
4543 if (drvp->UDMA_mode > 2 &&
4544 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4545 drvp->UDMA_mode = 2;
4546 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4547 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4548 udma_mode |= 1 << unit;
4549 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4550 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4551 (drvp->drive_flags & DRIVE_DMA)) {
4552 /* use Multiword DMA */
4553 drvp->drive_flags &= ~DRIVE_UDMA;
4554 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4555 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4556 } else {
4557 /* PIO only */
4558 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4559 }
4560 }
4561
4562 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4563 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4564 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4565 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4566 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4567
4568 if (idedma_ctl != 0) {
4569 /* Add software bits in status register */
4570 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4571 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4572 }
4573 pciide_print_modes(cp);
4574 }
4575
4576 int
4577 serverworks_pci_intr(arg)
4578 void *arg;
4579 {
4580 struct pciide_softc *sc = arg;
4581 struct pciide_channel *cp;
4582 struct channel_softc *wdc_cp;
4583 int rv = 0;
4584 int dmastat, i, crv;
4585
4586 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4587 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4588 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4589 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4590 IDEDMA_CTL_INTR)
4591 continue;
4592 cp = &sc->pciide_channels[i];
4593 wdc_cp = &cp->wdc_channel;
4594 crv = wdcintr(wdc_cp);
4595 if (crv == 0) {
4596 printf("%s:%d: bogus intr\n",
4597 sc->sc_wdcdev.sc_dev.dv_xname, i);
4598 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4599 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4600 } else
4601 rv = 1;
4602 }
4603 return rv;
4604 }
4605