1 /* $NetBSD: pciide.c,v 1.156 2002/06/08 17:54:59 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.156 2002/06/08 17:54:59 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
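/*
 * To get debug output, set wdcdebug_pciide_mask to an OR of the DEBUG_*
 * bits above (e.g. DEBUG_PROBE|DEBUG_DMA), either here or at run time
 * from the kernel debugger.
 */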
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
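/*
 * PCI configuration space can only be read and written a full 32-bit
 * word at a time, so an 8-bit register is accessed by operating on the
 * aligned dword that contains it: reads shift the wanted byte down,
 * writes do a read-modify-write of the surrounding dword (e.g. writing
 * register 0x41 rewrites the dword at 0x40 with bits 15:8 replaced).
 */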
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 static int sis_hostbr_match __P(( struct pci_attach_args *));
187
188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void acer_setup_channel __P((struct channel_softc*));
190 int acer_pci_intr __P((void *));
191
192 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void pdc202xx_setup_channel __P((struct channel_softc*));
194 void pdc20268_setup_channel __P((struct channel_softc*));
195 int pdc202xx_pci_intr __P((void *));
196 int pdc20265_pci_intr __P((void *));
197
198 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void opti_setup_channel __P((struct channel_softc*));
200
201 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
202 void hpt_setup_channel __P((struct channel_softc*));
203 int hpt_pci_intr __P((void *));
204
205 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void acard_setup_channel __P((struct channel_softc*));
207 int acard_pci_intr __P((void *));
208
209 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void serverworks_setup_channel __P((struct channel_softc*));
211 int serverworks_pci_intr __P((void *));
212
213 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void sl82c105_setup_channel __P((struct channel_softc*));
215
216 void pciide_channel_dma_setup __P((struct pciide_channel *));
217 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
218 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
219 void pciide_dma_start __P((void*, int, int));
220 int pciide_dma_finish __P((void*, int, int, int));
221 void pciide_irqack __P((struct channel_softc *));
222 void pciide_print_modes __P((struct pciide_channel *));
223
224 struct pciide_product_desc {
225 u_int32_t ide_product;
226 int ide_flags;
227 const char *ide_name;
228 /* map and setup chip, probe drives */
229 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
230 };
231
232 /* Flags for ide_flags */
233 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
234 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
235
236 /* Default product description for devices not known to this driver */
237 const struct pciide_product_desc default_product_desc = {
238 0,
239 0,
240 "Generic PCI IDE controller",
241 default_chip_map,
242 };
243
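/*
 * Per-vendor product tables. Each table ends with a sentinel entry
 * whose chip_map pointer is NULL; pciide_lookup_product() relies on
 * that to detect the end of a table.
 */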
244 const struct pciide_product_desc pciide_intel_products[] = {
245 { PCI_PRODUCT_INTEL_82092AA,
246 0,
247 "Intel 82092AA IDE controller",
248 default_chip_map,
249 },
250 { PCI_PRODUCT_INTEL_82371FB_IDE,
251 0,
252 "Intel 82371FB IDE controller (PIIX)",
253 piix_chip_map,
254 },
255 { PCI_PRODUCT_INTEL_82371SB_IDE,
256 0,
257 "Intel 82371SB IDE Interface (PIIX3)",
258 piix_chip_map,
259 },
260 { PCI_PRODUCT_INTEL_82371AB_IDE,
261 0,
262 "Intel 82371AB IDE controller (PIIX4)",
263 piix_chip_map,
264 },
265 { PCI_PRODUCT_INTEL_82440MX_IDE,
266 0,
267 "Intel 82440MX IDE controller",
268 piix_chip_map
269 },
270 { PCI_PRODUCT_INTEL_82801AA_IDE,
271 0,
272 "Intel 82801AA IDE Controller (ICH)",
273 piix_chip_map,
274 },
275 { PCI_PRODUCT_INTEL_82801AB_IDE,
276 0,
277 "Intel 82801AB IDE Controller (ICH0)",
278 piix_chip_map,
279 },
280 { PCI_PRODUCT_INTEL_82801BA_IDE,
281 0,
282 "Intel 82801BA IDE Controller (ICH2)",
283 piix_chip_map,
284 },
285 { PCI_PRODUCT_INTEL_82801BAM_IDE,
286 0,
287 "Intel 82801BAM IDE Controller (ICH2)",
288 piix_chip_map,
289 },
290 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
291 0,
292 "Intel 82201CA IDE Controller",
293 piix_chip_map,
294 },
295 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
296 0,
297 "Intel 82201CA IDE Controller",
298 piix_chip_map,
299 },
300 { 0,
301 0,
302 NULL,
303 NULL
304 }
305 };
306
307 const struct pciide_product_desc pciide_amd_products[] = {
308 { PCI_PRODUCT_AMD_PBC756_IDE,
309 0,
310 "Advanced Micro Devices AMD756 IDE Controller",
311 amd7x6_chip_map
312 },
313 { PCI_PRODUCT_AMD_PBC766_IDE,
314 0,
315 "Advanced Micro Devices AMD766 IDE Controller",
316 amd7x6_chip_map
317 },
318 { PCI_PRODUCT_AMD_PBC768_IDE,
319 0,
320 "Advanced Micro Devices AMD768 IDE Controller",
321 amd7x6_chip_map
322 },
323 { PCI_PRODUCT_AMD_PBC8111_IDE,
324 0,
325 "Advanced Micro Devices AMD8111 IDE Controller",
326 amd7x6_chip_map
327 },
328 { 0,
329 0,
330 NULL,
331 NULL
332 }
333 };
334
335 const struct pciide_product_desc pciide_cmd_products[] = {
336 { PCI_PRODUCT_CMDTECH_640,
337 0,
338 "CMD Technology PCI0640",
339 cmd_chip_map
340 },
341 { PCI_PRODUCT_CMDTECH_643,
342 0,
343 "CMD Technology PCI0643",
344 cmd0643_9_chip_map,
345 },
346 { PCI_PRODUCT_CMDTECH_646,
347 0,
348 "CMD Technology PCI0646",
349 cmd0643_9_chip_map,
350 },
351 { PCI_PRODUCT_CMDTECH_648,
352 IDE_PCI_CLASS_OVERRIDE,
353 "CMD Technology PCI0648",
354 cmd0643_9_chip_map,
355 },
356 { PCI_PRODUCT_CMDTECH_649,
357 IDE_PCI_CLASS_OVERRIDE,
358 "CMD Technology PCI0649",
359 cmd0643_9_chip_map,
360 },
361 { 0,
362 0,
363 NULL,
364 NULL
365 }
366 };
367
368 const struct pciide_product_desc pciide_via_products[] = {
369 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
370 0,
371 NULL,
372 apollo_chip_map,
373 },
374 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
375 0,
376 NULL,
377 apollo_chip_map,
378 },
379 { 0,
380 0,
381 NULL,
382 NULL
383 }
384 };
385
386 const struct pciide_product_desc pciide_cypress_products[] = {
387 { PCI_PRODUCT_CONTAQ_82C693,
388 IDE_16BIT_IOSPACE,
389 "Cypress 82C693 IDE Controller",
390 cy693_chip_map,
391 },
392 { 0,
393 0,
394 NULL,
395 NULL
396 }
397 };
398
399 const struct pciide_product_desc pciide_sis_products[] = {
400 { PCI_PRODUCT_SIS_5597_IDE,
401 0,
402 "Silicon Integrated System 5597/5598 IDE controller",
403 sis_chip_map,
404 },
405 { 0,
406 0,
407 NULL,
408 NULL
409 }
410 };
411
412 const struct pciide_product_desc pciide_acer_products[] = {
413 { PCI_PRODUCT_ALI_M5229,
414 0,
415 "Acer Labs M5229 UDMA IDE Controller",
416 acer_chip_map,
417 },
418 { 0,
419 0,
420 NULL,
421 NULL
422 }
423 };
424
425 const struct pciide_product_desc pciide_promise_products[] = {
426 { PCI_PRODUCT_PROMISE_ULTRA33,
427 IDE_PCI_CLASS_OVERRIDE,
428 "Promise Ultra33/ATA Bus Master IDE Accelerator",
429 pdc202xx_chip_map,
430 },
431 { PCI_PRODUCT_PROMISE_ULTRA66,
432 IDE_PCI_CLASS_OVERRIDE,
433 "Promise Ultra66/ATA Bus Master IDE Accelerator",
434 pdc202xx_chip_map,
435 },
436 { PCI_PRODUCT_PROMISE_ULTRA100,
437 IDE_PCI_CLASS_OVERRIDE,
438 "Promise Ultra100/ATA Bus Master IDE Accelerator",
439 pdc202xx_chip_map,
440 },
441 { PCI_PRODUCT_PROMISE_ULTRA100X,
442 IDE_PCI_CLASS_OVERRIDE,
443 "Promise Ultra100/ATA Bus Master IDE Accelerator",
444 pdc202xx_chip_map,
445 },
446 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
447 IDE_PCI_CLASS_OVERRIDE,
448 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
449 pdc202xx_chip_map,
450 },
451 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
452 IDE_PCI_CLASS_OVERRIDE,
453 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
454 pdc202xx_chip_map,
455 },
456 { PCI_PRODUCT_PROMISE_ULTRA133,
457 IDE_PCI_CLASS_OVERRIDE,
458 "Promise Ultra133/ATA Bus Master IDE Accelerator",
459 pdc202xx_chip_map,
460 },
461 { 0,
462 0,
463 NULL,
464 NULL
465 }
466 };
467
468 const struct pciide_product_desc pciide_opti_products[] = {
469 { PCI_PRODUCT_OPTI_82C621,
470 0,
471 "OPTi 82c621 PCI IDE controller",
472 opti_chip_map,
473 },
474 { PCI_PRODUCT_OPTI_82C568,
475 0,
476 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
477 opti_chip_map,
478 },
479 { PCI_PRODUCT_OPTI_82D568,
480 0,
481 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
482 opti_chip_map,
483 },
484 { 0,
485 0,
486 NULL,
487 NULL
488 }
489 };
490
491 const struct pciide_product_desc pciide_triones_products[] = {
492 { PCI_PRODUCT_TRIONES_HPT366,
493 IDE_PCI_CLASS_OVERRIDE,
494 NULL,
495 hpt_chip_map,
496 },
497 { PCI_PRODUCT_TRIONES_HPT374,
498 IDE_PCI_CLASS_OVERRIDE,
499 NULL,
500 hpt_chip_map
501 },
502 { 0,
503 0,
504 NULL,
505 NULL
506 }
507 };
508
509 const struct pciide_product_desc pciide_acard_products[] = {
510 { PCI_PRODUCT_ACARD_ATP850U,
511 IDE_PCI_CLASS_OVERRIDE,
512 "Acard ATP850U Ultra33 IDE Controller",
513 acard_chip_map,
514 },
515 { PCI_PRODUCT_ACARD_ATP860,
516 IDE_PCI_CLASS_OVERRIDE,
517 "Acard ATP860 Ultra66 IDE Controller",
518 acard_chip_map,
519 },
520 { PCI_PRODUCT_ACARD_ATP860A,
521 IDE_PCI_CLASS_OVERRIDE,
522 "Acard ATP860-A Ultra66 IDE Controller",
523 acard_chip_map,
524 },
525 { 0,
526 0,
527 NULL,
528 NULL
529 }
530 };
531
532 const struct pciide_product_desc pciide_serverworks_products[] = {
533 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
534 0,
535 "ServerWorks OSB4 IDE Controller",
536 serverworks_chip_map,
537 },
538 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
539 0,
540 "ServerWorks CSB5 IDE Controller",
541 serverworks_chip_map,
542 },
543 { 0,
544 0,
545 NULL,
546 }
547 };
548
549 const struct pciide_product_desc pciide_symphony_products[] = {
550 { PCI_PRODUCT_SYMPHONY_82C105,
551 0,
552 "Symphony Labs 82C105 IDE controller",
553 sl82c105_chip_map,
554 },
555 { 0,
556 0,
557 NULL,
558 }
559 };
560
561 const struct pciide_product_desc pciide_winbond_products[] = {
562 { PCI_PRODUCT_WINBOND_W83C553F_1,
563 0,
564 "Winbond W83C553F IDE controller",
565 sl82c105_chip_map,
566 },
567 { 0,
568 0,
569 NULL,
570 }
571 };
572
573 struct pciide_vendor_desc {
574 u_int32_t ide_vendor;
575 const struct pciide_product_desc *ide_products;
576 };
577
578 const struct pciide_vendor_desc pciide_vendors[] = {
579 { PCI_VENDOR_INTEL, pciide_intel_products },
580 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
581 { PCI_VENDOR_VIATECH, pciide_via_products },
582 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
583 { PCI_VENDOR_SIS, pciide_sis_products },
584 { PCI_VENDOR_ALI, pciide_acer_products },
585 { PCI_VENDOR_PROMISE, pciide_promise_products },
586 { PCI_VENDOR_AMD, pciide_amd_products },
587 { PCI_VENDOR_OPTI, pciide_opti_products },
588 { PCI_VENDOR_TRIONES, pciide_triones_products },
589 { PCI_VENDOR_ACARD, pciide_acard_products },
590 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
591 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
592 { PCI_VENDOR_WINBOND, pciide_winbond_products },
593 { 0, NULL }
594 };
595
596 /* options passed via the 'flags' config keyword */
597 #define PCIIDE_OPTIONS_DMA 0x01
598 #define PCIIDE_OPTIONS_NODMA 0x02
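/*
 * These bits come from the 'flags' directive on the pciide line in the
 * kernel config file (e.g. "pciide* at pci? dev ? function ? flags 0x0002").
 * PCIIDE_OPTIONS_DMA lets default_chip_map() try DMA on an otherwise
 * unknown controller; PCIIDE_OPTIONS_NODMA forces DMA off even on a
 * supported chip.
 */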
599
600 int pciide_match __P((struct device *, struct cfdata *, void *));
601 void pciide_attach __P((struct device *, struct device *, void *));
602
603 struct cfattach pciide_ca = {
604 sizeof(struct pciide_softc), pciide_match, pciide_attach
605 };
606 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
607 int pciide_mapregs_compat __P(( struct pci_attach_args *,
608 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
609 int pciide_mapregs_native __P((struct pci_attach_args *,
610 struct pciide_channel *, bus_size_t *, bus_size_t *,
611 int (*pci_intr) __P((void *))));
612 void pciide_mapreg_dma __P((struct pciide_softc *,
613 struct pci_attach_args *));
614 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
615 void pciide_mapchan __P((struct pci_attach_args *,
616 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
617 int (*pci_intr) __P((void *))));
618 int pciide_chan_candisable __P((struct pciide_channel *));
619 void pciide_map_compat_intr __P(( struct pci_attach_args *,
620 struct pciide_channel *, int, int));
621 int pciide_compat_intr __P((void *));
622 int pciide_pci_intr __P((void *));
623 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
624
625 const struct pciide_product_desc *
626 pciide_lookup_product(id)
627 u_int32_t id;
628 {
629 const struct pciide_product_desc *pp;
630 const struct pciide_vendor_desc *vp;
631
632 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
633 if (PCI_VENDOR(id) == vp->ide_vendor)
634 break;
635
636 if ((pp = vp->ide_products) == NULL)
637 return NULL;
638
639 for (; pp->chip_map != NULL; pp++)
640 if (PCI_PRODUCT(id) == pp->ide_product)
641 break;
642
643 if (pp->chip_map == NULL)
644 return NULL;
645 return pp;
646 }
647
648 int
649 pciide_match(parent, match, aux)
650 struct device *parent;
651 struct cfdata *match;
652 void *aux;
653 {
654 struct pci_attach_args *pa = aux;
655 const struct pciide_product_desc *pp;
656
657 /*
658 * Check the class code to see that it's a PCI IDE controller.
659 * If it is, we assume that we can deal with it; it _should_
660 * work in a standardized way...
661 */
662 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
663 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
664 return (1);
665 }
666
667 /*
668 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
669 * controllers. Let's see if we can deal with them anyway.
670 */
671 pp = pciide_lookup_product(pa->pa_id);
672 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
673 return (1);
674 }
675
676 return (0);
677 }
678
679 void
680 pciide_attach(parent, self, aux)
681 struct device *parent, *self;
682 void *aux;
683 {
684 struct pci_attach_args *pa = aux;
685 pci_chipset_tag_t pc = pa->pa_pc;
686 pcitag_t tag = pa->pa_tag;
687 struct pciide_softc *sc = (struct pciide_softc *)self;
688 pcireg_t csr;
689 char devinfo[256];
690 const char *displaydev;
691
692 sc->sc_pp = pciide_lookup_product(pa->pa_id);
693 if (sc->sc_pp == NULL) {
694 sc->sc_pp = &default_product_desc;
695 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
696 displaydev = devinfo;
697 } else
698 displaydev = sc->sc_pp->ide_name;
699
700 /* if displaydev == NULL, printf is done in chip-specific map */
701 if (displaydev)
702 printf(": %s (rev. 0x%02x)\n", displaydev,
703 PCI_REVISION(pa->pa_class));
704
705 sc->sc_pc = pa->pa_pc;
706 sc->sc_tag = pa->pa_tag;
707 #ifdef WDCDEBUG
708 if (wdcdebug_pciide_mask & DEBUG_PROBE)
709 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
710 #endif
711 sc->sc_pp->chip_map(sc, pa);
712
713 if (sc->sc_dma_ok) {
714 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
715 csr |= PCI_COMMAND_MASTER_ENABLE;
716 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
717 }
718 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
719 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
720 }
721
722 /* Tell whether the chip is enabled or not */
723 int
724 pciide_chipen(sc, pa)
725 struct pciide_softc *sc;
726 struct pci_attach_args *pa;
727 {
728 pcireg_t csr;
729 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
730 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
731 PCI_COMMAND_STATUS_REG);
732 printf("%s: device disabled (at %s)\n",
733 sc->sc_wdcdev.sc_dev.dv_xname,
734 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
735 "device" : "bridge");
736 return 0;
737 }
738 return 1;
739 }
740
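/*
 * Map the command and control registers of a channel wired to
 * compatibility mode. PCIIDE_COMPAT_CMD_BASE() and
 * PCIIDE_COMPAT_CTL_BASE() give the legacy ISA addresses
 * (0x1f0/0x3f6 for channel 0, 0x170/0x376 for channel 1).
 */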
741 int
742 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
743 struct pci_attach_args *pa;
744 struct pciide_channel *cp;
745 int compatchan;
746 bus_size_t *cmdsizep, *ctlsizep;
747 {
748 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
749 struct channel_softc *wdc_cp = &cp->wdc_channel;
750
751 cp->compat = 1;
752 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
753 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
754
755 wdc_cp->cmd_iot = pa->pa_iot;
756 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
757 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
758 printf("%s: couldn't map %s channel cmd regs\n",
759 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
760 return (0);
761 }
762
763 wdc_cp->ctl_iot = pa->pa_iot;
764 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
765 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
766 printf("%s: couldn't map %s channel ctl regs\n",
767 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
768 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
769 PCIIDE_COMPAT_CMD_SIZE);
770 return (0);
771 }
772
773 return (1);
774 }
775
776 int
777 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
778 struct pci_attach_args * pa;
779 struct pciide_channel *cp;
780 bus_size_t *cmdsizep, *ctlsizep;
781 int (*pci_intr) __P((void *));
782 {
783 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
784 struct channel_softc *wdc_cp = &cp->wdc_channel;
785 const char *intrstr;
786 pci_intr_handle_t intrhandle;
787
788 cp->compat = 0;
789
790 if (sc->sc_pci_ih == NULL) {
791 if (pci_intr_map(pa, &intrhandle) != 0) {
792 printf("%s: couldn't map native-PCI interrupt\n",
793 sc->sc_wdcdev.sc_dev.dv_xname);
794 return 0;
795 }
796 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
797 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
798 intrhandle, IPL_BIO, pci_intr, sc);
799 if (sc->sc_pci_ih != NULL) {
800 printf("%s: using %s for native-PCI interrupt\n",
801 sc->sc_wdcdev.sc_dev.dv_xname,
802 intrstr ? intrstr : "unknown interrupt");
803 } else {
804 printf("%s: couldn't establish native-PCI interrupt",
805 sc->sc_wdcdev.sc_dev.dv_xname);
806 if (intrstr != NULL)
807 printf(" at %s", intrstr);
808 printf("\n");
809 return 0;
810 }
811 }
812 cp->ih = sc->sc_pci_ih;
813 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
814 PCI_MAPREG_TYPE_IO, 0,
815 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
816 printf("%s: couldn't map %s channel cmd regs\n",
817 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
818 return 0;
819 }
820
821 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
822 PCI_MAPREG_TYPE_IO, 0,
823 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
824 printf("%s: couldn't map %s channel ctl regs\n",
825 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
826 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
827 return 0;
828 }
829 /*
830 * In native mode, 4 bytes of I/O space are mapped for the control
831 * register; the control register itself is at offset 2. Pass the
832 * generic code a handle for only one byte at the right offset.
833 */
834 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
835 &wdc_cp->ctl_ioh) != 0) {
836 printf("%s: unable to subregion %s channel ctl regs\n",
837 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
838 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
839 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
840 return 0;
841 }
842 return (1);
843 }
844
845 void
846 pciide_mapreg_dma(sc, pa)
847 struct pciide_softc *sc;
848 struct pci_attach_args *pa;
849 {
850 pcireg_t maptype;
851 bus_addr_t addr;
852
853 /*
854 * Map DMA registers
855 *
856 * Note that sc_dma_ok is the right variable to test to see if
857 * DMA can be done. If the interface doesn't support DMA,
858 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
859 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
860 * non-zero if the interface supports DMA and the registers
861 * could be mapped.
862 *
863 * XXX Note that despite the fact that the Bus Master IDE specs
864 * XXX say that "The bus master IDE function uses 16 bytes of IO
865 * XXX space," some controllers (at least the United
866 * XXX Microelectronics UM8886BF) place it in memory space.
867 */
868 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
869 PCIIDE_REG_BUS_MASTER_DMA);
870
871 switch (maptype) {
872 case PCI_MAPREG_TYPE_IO:
873 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
874 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
875 &addr, NULL, NULL) == 0);
876 if (sc->sc_dma_ok == 0) {
877 printf(", but unused (couldn't query registers)");
878 break;
879 }
880 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
881 && addr >= 0x10000) {
882 sc->sc_dma_ok = 0;
883 printf(", but unused (registers at unsafe address "
884 "%#lx)", (unsigned long)addr);
885 break;
886 }
887 /* FALLTHROUGH */
888
889 case PCI_MAPREG_MEM_TYPE_32BIT:
890 sc->sc_dma_ok = (pci_mapreg_map(pa,
891 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
892 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
893 sc->sc_dmat = pa->pa_dmat;
894 if (sc->sc_dma_ok == 0) {
895 printf(", but unused (couldn't map registers)");
896 } else {
897 sc->sc_wdcdev.dma_arg = sc;
898 sc->sc_wdcdev.dma_init = pciide_dma_init;
899 sc->sc_wdcdev.dma_start = pciide_dma_start;
900 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
901 }
902
903 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
904 PCIIDE_OPTIONS_NODMA) {
905 printf(", but unused (forced off by config file)");
906 sc->sc_dma_ok = 0;
907 }
908 break;
909
910 default:
911 sc->sc_dma_ok = 0;
912 printf(", but unsupported register maptype (0x%x)", maptype);
913 }
914 }
915
916 int
917 pciide_compat_intr(arg)
918 void *arg;
919 {
920 struct pciide_channel *cp = arg;
921
922 #ifdef DIAGNOSTIC
923 /* should only be called for a compat channel */
924 if (cp->compat == 0)
925 panic("pciide compat intr called for non-compat chan %p\n", cp);
926 #endif
927 return (wdcintr(&cp->wdc_channel));
928 }
929
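/*
 * Interrupt handler for channels running in native-PCI mode. All
 * channels of a controller share a single PCI interrupt, so check every
 * native channel that is actually waiting for an interrupt and let
 * wdcintr() sort out which one it was.
 */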
930 int
931 pciide_pci_intr(arg)
932 void *arg;
933 {
934 struct pciide_softc *sc = arg;
935 struct pciide_channel *cp;
936 struct channel_softc *wdc_cp;
937 int i, rv, crv;
938
939 rv = 0;
940 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
941 cp = &sc->pciide_channels[i];
942 wdc_cp = &cp->wdc_channel;
943
944 /* If a compat channel, skip. */
945 if (cp->compat)
946 continue;
947 /* if this channel is not waiting for an intr, skip */
948 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
949 continue;
950
951 crv = wdcintr(wdc_cp);
952 if (crv == 0)
953 ; /* leave rv alone */
954 else if (crv == 1)
955 rv = 1; /* claim the intr */
956 else if (rv == 0) /* crv should be -1 in this case */
957 rv = crv; /* if we've done no better, take it */
958 }
959 return (rv);
960 }
961
962 void
963 pciide_channel_dma_setup(cp)
964 struct pciide_channel *cp;
965 {
966 int drive;
967 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
968 struct ata_drive_datas *drvp;
969
970 for (drive = 0; drive < 2; drive++) {
971 drvp = &cp->wdc_channel.ch_drive[drive];
972 /* If no drive, skip */
973 if ((drvp->drive_flags & DRIVE) == 0)
974 continue;
975 /* setup DMA if needed */
976 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
977 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
978 sc->sc_dma_ok == 0) {
979 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
980 continue;
981 }
982 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
983 != 0) {
984 /* Abort DMA setup */
985 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
986 continue;
987 }
988 }
989 }
990
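/*
 * Allocate the physical region descriptor (PRD) table used by the
 * bus-master DMA engine for one drive: a physically contiguous array of
 * NIDEDMA_TABLES {base_addr, byte_count} entries, plus the DMA maps for
 * the table itself and for the data buffer of each transfer.
 */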
991 int
992 pciide_dma_table_setup(sc, channel, drive)
993 struct pciide_softc *sc;
994 int channel, drive;
995 {
996 bus_dma_segment_t seg;
997 int error, rseg;
998 const bus_size_t dma_table_size =
999 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1000 struct pciide_dma_maps *dma_maps =
1001 &sc->pciide_channels[channel].dma_maps[drive];
1002
1003 /* If table was already allocated, just return */
1004 if (dma_maps->dma_table)
1005 return 0;
1006
1007 /* Allocate memory for the DMA tables and map it */
1008 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1009 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1010 BUS_DMA_NOWAIT)) != 0) {
1011 printf("%s:%d: unable to allocate table DMA for "
1012 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1013 channel, drive, error);
1014 return error;
1015 }
1016 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1017 dma_table_size,
1018 (caddr_t *)&dma_maps->dma_table,
1019 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1020 printf("%s:%d: unable to map table DMA for"
1021 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1022 channel, drive, error);
1023 return error;
1024 }
1025 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1026 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1027 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1028
1029 /* Create and load table DMA map for this disk */
1030 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1031 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1032 &dma_maps->dmamap_table)) != 0) {
1033 printf("%s:%d: unable to create table DMA map for "
1034 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1035 channel, drive, error);
1036 return error;
1037 }
1038 if ((error = bus_dmamap_load(sc->sc_dmat,
1039 dma_maps->dmamap_table,
1040 dma_maps->dma_table,
1041 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1042 printf("%s:%d: unable to load table DMA map for "
1043 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1044 channel, drive, error);
1045 return error;
1046 }
1047 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1048 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1049 DEBUG_PROBE);
1050 /* Create an xfer DMA map for this drive */
1051 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1052 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1053 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1054 &dma_maps->dmamap_xfer)) != 0) {
1055 printf("%s:%d: unable to create xfer DMA map for "
1056 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1057 channel, drive, error);
1058 return error;
1059 }
1060 return 0;
1061 }
1062
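/*
 * Load the data buffer into the xfer DMA map and turn the resulting
 * segments into PRD entries: each entry holds a segment's physical
 * address and length, and the last one is marked with
 * IDEDMA_BYTE_COUNT_EOT so the controller knows where the table ends.
 * The bus-master registers are then primed with the table address and
 * transfer direction; the transfer itself is kicked off later by
 * pciide_dma_start().
 */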
1063 int
1064 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1065 void *v;
1066 int channel, drive;
1067 void *databuf;
1068 size_t datalen;
1069 int flags;
1070 {
1071 struct pciide_softc *sc = v;
1072 int error, seg;
1073 struct pciide_dma_maps *dma_maps =
1074 &sc->pciide_channels[channel].dma_maps[drive];
1075
1076 error = bus_dmamap_load(sc->sc_dmat,
1077 dma_maps->dmamap_xfer,
1078 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1079 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1080 if (error) {
1081 printf("%s:%d: unable to load xfer DMA map for"
1082 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1083 channel, drive, error);
1084 return error;
1085 }
1086
1087 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1088 dma_maps->dmamap_xfer->dm_mapsize,
1089 (flags & WDC_DMA_READ) ?
1090 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1091
1092 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1093 #ifdef DIAGNOSTIC
1094 /* A segment must not cross a 64k boundary */
1095 {
1096 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1097 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1098 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1099 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1100 printf("pciide_dma: segment %d physical addr 0x%lx"
1101 " len 0x%lx not properly aligned\n",
1102 seg, phys, len);
1103 panic("pciide_dma: buf align");
1104 }
1105 }
1106 #endif
1107 dma_maps->dma_table[seg].base_addr =
1108 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1109 dma_maps->dma_table[seg].byte_count =
1110 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1111 IDEDMA_BYTE_COUNT_MASK);
1112 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1113 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1114 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1115
1116 }
1117 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1118 htole32(IDEDMA_BYTE_COUNT_EOT);
1119
1120 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1121 dma_maps->dmamap_table->dm_mapsize,
1122 BUS_DMASYNC_PREWRITE);
1123
1124 /* Maps are ready. Start DMA function */
1125 #ifdef DIAGNOSTIC
1126 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1127 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1128 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1129 panic("pciide_dma_init: table align");
1130 }
1131 #endif
1132
1133 /* Clear status bits */
1134 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1135 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1136 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1137 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1138 /* Write table addr */
1139 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1140 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1141 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1142 /* set read/write */
1143 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1144 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1145 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1146 /* remember flags */
1147 dma_maps->dma_flags = flags;
1148 return 0;
1149 }
1150
1151 void
1152 pciide_dma_start(v, channel, drive)
1153 void *v;
1154 int channel, drive;
1155 {
1156 struct pciide_softc *sc = v;
1157
1158 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1159 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1160 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1161 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1162 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1163 }
1164
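/*
 * Check the bus-master status for a channel. If no interrupt is pending
 * and 'force' is not set, return WDC_DMAST_NOIRQ without touching the
 * transfer; otherwise stop the engine, unload the data buffer map and
 * return zero on success or an OR of WDC_DMAST_ERR, WDC_DMAST_NOIRQ and
 * WDC_DMAST_UNDER describing what the controller reported.
 */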
1165 int
1166 pciide_dma_finish(v, channel, drive, force)
1167 void *v;
1168 int channel, drive;
1169 int force;
1170 {
1171 struct pciide_softc *sc = v;
1172 u_int8_t status;
1173 int error = 0;
1174 struct pciide_dma_maps *dma_maps =
1175 &sc->pciide_channels[channel].dma_maps[drive];
1176
1177 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1178 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1179 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1180 DEBUG_XFERS);
1181
1182 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1183 return WDC_DMAST_NOIRQ;
1184
1185 /* stop DMA channel */
1186 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1187 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1188 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1189 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1190
1191 /* Unload the map of the data buffer */
1192 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1193 dma_maps->dmamap_xfer->dm_mapsize,
1194 (dma_maps->dma_flags & WDC_DMA_READ) ?
1195 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1196 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1197
1198 if ((status & IDEDMA_CTL_ERR) != 0) {
1199 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1200 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1201 error |= WDC_DMAST_ERR;
1202 }
1203
1204 if ((status & IDEDMA_CTL_INTR) == 0) {
1205 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1206 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1207 drive, status);
1208 error |= WDC_DMAST_NOIRQ;
1209 }
1210
1211 if ((status & IDEDMA_CTL_ACT) != 0) {
1212 /* data underrun, may be a valid condition for ATAPI */
1213 error |= WDC_DMAST_UNDER;
1214 }
1215 return error;
1216 }
1217
1218 void
1219 pciide_irqack(chp)
1220 struct channel_softc *chp;
1221 {
1222 struct pciide_channel *cp = (struct pciide_channel*)chp;
1223 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1224
1225 /* clear status bits in IDE DMA registers */
1226 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1227 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1228 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1229 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1230 }
1231
1232 /* Some common code used by several chip_map functions */
1233 int
1234 pciide_chansetup(sc, channel, interface)
1235 struct pciide_softc *sc;
1236 int channel;
1237 pcireg_t interface;
1238 {
1239 struct pciide_channel *cp = &sc->pciide_channels[channel];
1240 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1241 cp->name = PCIIDE_CHANNEL_NAME(channel);
1242 cp->wdc_channel.channel = channel;
1243 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1244 cp->wdc_channel.ch_queue =
1245 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1246 if (cp->wdc_channel.ch_queue == NULL) {
1247 printf("%s %s channel: "
1248 "can't allocate memory for command queue",
1249 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1250 return 0;
1251 }
1252 printf("%s: %s channel %s to %s mode\n",
1253 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1254 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1255 "configured" : "wired",
1256 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1257 "native-PCI" : "compatibility");
1258 return 1;
1259 }
1260
1261 /* Some common code used by several chips to map a channel */
1262 void
1263 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1264 struct pci_attach_args *pa;
1265 struct pciide_channel *cp;
1266 pcireg_t interface;
1267 bus_size_t *cmdsizep, *ctlsizep;
1268 int (*pci_intr) __P((void *));
1269 {
1270 struct channel_softc *wdc_cp = &cp->wdc_channel;
1271
1272 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1273 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1274 pci_intr);
1275 else
1276 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1277 wdc_cp->channel, cmdsizep, ctlsizep);
1278
1279 if (cp->hw_ok == 0)
1280 return;
1281 wdc_cp->data32iot = wdc_cp->cmd_iot;
1282 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1283 wdcattach(wdc_cp);
1284 }
1285
1286 /*
1287 * Generic code to check whether a channel can be disabled. Returns 1
1288 * if the channel can be disabled, 0 if not.
1289 */
1290 int
1291 pciide_chan_candisable(cp)
1292 struct pciide_channel *cp;
1293 {
1294 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1295 struct channel_softc *wdc_cp = &cp->wdc_channel;
1296
1297 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1298 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1299 printf("%s: disabling %s channel (no drives)\n",
1300 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1301 cp->hw_ok = 0;
1302 return 1;
1303 }
1304 return 0;
1305 }
1306
1307 /*
1308 * Generic code to map the compat interrupt if hw_ok == 1 and this is a
1309 * compat channel. Sets hw_ok = 0 on failure.
1310 */
1311 void
1312 pciide_map_compat_intr(pa, cp, compatchan, interface)
1313 struct pci_attach_args *pa;
1314 struct pciide_channel *cp;
1315 int compatchan, interface;
1316 {
1317 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1318 struct channel_softc *wdc_cp = &cp->wdc_channel;
1319
1320 if (cp->hw_ok == 0)
1321 return;
1322 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1323 return;
1324
1325 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1326 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1327 pa, compatchan, pciide_compat_intr, cp);
1328 if (cp->ih == NULL) {
1329 #endif
1330 printf("%s: no compatibility interrupt for use by %s "
1331 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1332 cp->hw_ok = 0;
1333 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1334 }
1335 #endif
1336 }
1337
1338 void
1339 pciide_print_modes(cp)
1340 struct pciide_channel *cp;
1341 {
1342 wdc_print_modes(&cp->wdc_channel);
1343 }
1344
1345 void
1346 default_chip_map(sc, pa)
1347 struct pciide_softc *sc;
1348 struct pci_attach_args *pa;
1349 {
1350 struct pciide_channel *cp;
1351 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1352 pcireg_t csr;
1353 int channel, drive;
1354 struct ata_drive_datas *drvp;
1355 u_int8_t idedma_ctl;
1356 bus_size_t cmdsize, ctlsize;
1357 char *failreason;
1358
1359 if (pciide_chipen(sc, pa) == 0)
1360 return;
1361
1362 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1363 printf("%s: bus-master DMA support present",
1364 sc->sc_wdcdev.sc_dev.dv_xname);
1365 if (sc->sc_pp == &default_product_desc &&
1366 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1367 PCIIDE_OPTIONS_DMA) == 0) {
1368 printf(", but unused (no driver support)");
1369 sc->sc_dma_ok = 0;
1370 } else {
1371 pciide_mapreg_dma(sc, pa);
1372 if (sc->sc_dma_ok != 0)
1373 printf(", used without full driver "
1374 "support");
1375 }
1376 } else {
1377 printf("%s: hardware does not support DMA",
1378 sc->sc_wdcdev.sc_dev.dv_xname);
1379 sc->sc_dma_ok = 0;
1380 }
1381 printf("\n");
1382 if (sc->sc_dma_ok) {
1383 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1384 sc->sc_wdcdev.irqack = pciide_irqack;
1385 }
1386 sc->sc_wdcdev.PIO_cap = 0;
1387 sc->sc_wdcdev.DMA_cap = 0;
1388
1389 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1390 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1391 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1392
1393 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1394 cp = &sc->pciide_channels[channel];
1395 if (pciide_chansetup(sc, channel, interface) == 0)
1396 continue;
1397 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1398 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1399 &ctlsize, pciide_pci_intr);
1400 } else {
1401 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1402 channel, &cmdsize, &ctlsize);
1403 }
1404 if (cp->hw_ok == 0)
1405 continue;
1406 /*
1407 * Check to see if something appears to be there.
1408 */
1409 failreason = NULL;
1410 if (!wdcprobe(&cp->wdc_channel)) {
1411 failreason = "not responding; disabled or no drives?";
1412 goto next;
1413 }
1414 /*
1415 * Now, make sure it's actually attributable to this PCI IDE
1416 * channel by trying to access the channel again while the
1417 * PCI IDE controller's I/O space is disabled. (If the
1418 * channel no longer appears to be there, it belongs to
1419 * this controller.) YUCK!
1420 */
1421 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1422 PCI_COMMAND_STATUS_REG);
1423 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1424 csr & ~PCI_COMMAND_IO_ENABLE);
1425 if (wdcprobe(&cp->wdc_channel))
1426 failreason = "other hardware responding at addresses";
1427 pci_conf_write(sc->sc_pc, sc->sc_tag,
1428 PCI_COMMAND_STATUS_REG, csr);
1429 next:
1430 if (failreason) {
1431 printf("%s: %s channel ignored (%s)\n",
1432 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1433 failreason);
1434 cp->hw_ok = 0;
1435 bus_space_unmap(cp->wdc_channel.cmd_iot,
1436 cp->wdc_channel.cmd_ioh, cmdsize);
1437 if (interface & PCIIDE_INTERFACE_PCI(channel))
1438 bus_space_unmap(cp->wdc_channel.ctl_iot,
1439 cp->ctl_baseioh, ctlsize);
1440 else
1441 bus_space_unmap(cp->wdc_channel.ctl_iot,
1442 cp->wdc_channel.ctl_ioh, ctlsize);
1443 } else {
1444 pciide_map_compat_intr(pa, cp, channel, interface);
1445 }
1446 if (cp->hw_ok) {
1447 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1448 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1449 wdcattach(&cp->wdc_channel);
1450 }
1451 }
1452
1453 if (sc->sc_dma_ok == 0)
1454 return;
1455
1456 /* Allocate DMA maps */
1457 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1458 idedma_ctl = 0;
1459 cp = &sc->pciide_channels[channel];
1460 for (drive = 0; drive < 2; drive++) {
1461 drvp = &cp->wdc_channel.ch_drive[drive];
1462 /* If no drive, skip */
1463 if ((drvp->drive_flags & DRIVE) == 0)
1464 continue;
1465 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1466 continue;
1467 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1468 /* Abort DMA setup */
1469 printf("%s:%d:%d: can't allocate DMA maps, "
1470 "using PIO transfers\n",
1471 sc->sc_wdcdev.sc_dev.dv_xname,
1472 channel, drive);
1473 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1474 }
1475 printf("%s:%d:%d: using DMA data transfers\n",
1476 sc->sc_wdcdev.sc_dev.dv_xname,
1477 channel, drive);
1478 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1479 }
1480 if (idedma_ctl != 0) {
1481 /* Add software bits in status register */
1482 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1483 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1484 idedma_ctl);
1485 }
1486 }
1487 }
1488
1489 void
1490 piix_chip_map(sc, pa)
1491 struct pciide_softc *sc;
1492 struct pci_attach_args *pa;
1493 {
1494 struct pciide_channel *cp;
1495 int channel;
1496 u_int32_t idetim;
1497 bus_size_t cmdsize, ctlsize;
1498
1499 if (pciide_chipen(sc, pa) == 0)
1500 return;
1501
1502 printf("%s: bus-master DMA support present",
1503 sc->sc_wdcdev.sc_dev.dv_xname);
1504 pciide_mapreg_dma(sc, pa);
1505 printf("\n");
1506 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1507 WDC_CAPABILITY_MODE;
1508 if (sc->sc_dma_ok) {
1509 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1510 sc->sc_wdcdev.irqack = pciide_irqack;
1511 switch(sc->sc_pp->ide_product) {
1512 case PCI_PRODUCT_INTEL_82371AB_IDE:
1513 case PCI_PRODUCT_INTEL_82440MX_IDE:
1514 case PCI_PRODUCT_INTEL_82801AA_IDE:
1515 case PCI_PRODUCT_INTEL_82801AB_IDE:
1516 case PCI_PRODUCT_INTEL_82801BA_IDE:
1517 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1518 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1519 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1520 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1521 }
1522 }
1523 sc->sc_wdcdev.PIO_cap = 4;
1524 sc->sc_wdcdev.DMA_cap = 2;
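/*
 * The UDMA capability set below is the highest Ultra-DMA mode the chip
 * handles: mode 2 is Ultra/33, mode 4 Ultra/66 and mode 5 Ultra/100.
 */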
1525 switch(sc->sc_pp->ide_product) {
1526 case PCI_PRODUCT_INTEL_82801AA_IDE:
1527 sc->sc_wdcdev.UDMA_cap = 4;
1528 break;
1529 case PCI_PRODUCT_INTEL_82801BA_IDE:
1530 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1531 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1532 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1533 sc->sc_wdcdev.UDMA_cap = 5;
1534 break;
1535 default:
1536 sc->sc_wdcdev.UDMA_cap = 2;
1537 }
1538 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1539 sc->sc_wdcdev.set_modes = piix_setup_channel;
1540 else
1541 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1542 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1543 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1544
1545 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1546 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1547 DEBUG_PROBE);
1548 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1549 WDCDEBUG_PRINT((", sidetim=0x%x",
1550 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1551 DEBUG_PROBE);
1552 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1553 WDCDEBUG_PRINT((", udamreg 0x%x",
1554 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1555 DEBUG_PROBE);
1556 }
1557 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1558 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1559 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1560 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1561 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1562 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1563 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1564 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1565 DEBUG_PROBE);
1566 }
1567
1568 }
1569 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1570
1571 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1572 cp = &sc->pciide_channels[channel];
1573 /* PIIX is compat-only */
1574 if (pciide_chansetup(sc, channel, 0) == 0)
1575 continue;
1576 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1577 if ((PIIX_IDETIM_READ(idetim, channel) &
1578 PIIX_IDETIM_IDE) == 0) {
1579 printf("%s: %s channel ignored (disabled)\n",
1580 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1581 continue;
1582 }
1583 /* PIIX controllers are compat-only pciide devices */
1584 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1585 if (cp->hw_ok == 0)
1586 continue;
1587 if (pciide_chan_candisable(cp)) {
1588 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1589 channel);
1590 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1591 idetim);
1592 }
1593 pciide_map_compat_intr(pa, cp, channel, 0);
1594 if (cp->hw_ok == 0)
1595 continue;
1596 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1597 }
1598
1599 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1600 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1601 DEBUG_PROBE);
1602 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1603 WDCDEBUG_PRINT((", sidetim=0x%x",
1604 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1605 DEBUG_PROBE);
1606 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1607 WDCDEBUG_PRINT((", udamreg 0x%x",
1608 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1609 DEBUG_PROBE);
1610 }
1611 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1612 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1613 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1614 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1615 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1616 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1617 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1618 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1619 DEBUG_PROBE);
1620 }
1621 }
1622 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1623 }
1624
1625 void
1626 piix_setup_channel(chp)
1627 struct channel_softc *chp;
1628 {
1629 u_int8_t mode[2], drive;
1630 u_int32_t oidetim, idetim, idedma_ctl;
1631 struct pciide_channel *cp = (struct pciide_channel*)chp;
1632 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1633 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1634
1635 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1636 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1637 idedma_ctl = 0;
1638
1639 /* set up new idetim: Enable IDE registers decode */
1640 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1641 chp->channel);
1642
1643 /* setup DMA */
1644 pciide_channel_dma_setup(cp);
1645
1646 /*
1647 * Here we have to juggle the drive modes: the PIIX can't use
1648 * different timings for the master and slave drives.
1649 * We need to find the best combination.
1650 */
1651
1652 /* If both drives support DMA, take the lower mode */
1653 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1654 (drvp[1].drive_flags & DRIVE_DMA)) {
1655 mode[0] = mode[1] =
1656 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1657 drvp[0].DMA_mode = mode[0];
1658 drvp[1].DMA_mode = mode[1];
1659 goto ok;
1660 }
1661 /*
1662 * If only one drive supports DMA, use its mode, and
1663 * put the other one in PIO mode 0 if its mode is not compatible
1664 */
1665 if (drvp[0].drive_flags & DRIVE_DMA) {
1666 mode[0] = drvp[0].DMA_mode;
1667 mode[1] = drvp[1].PIO_mode;
1668 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1669 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1670 mode[1] = drvp[1].PIO_mode = 0;
1671 goto ok;
1672 }
1673 if (drvp[1].drive_flags & DRIVE_DMA) {
1674 mode[1] = drvp[1].DMA_mode;
1675 mode[0] = drvp[0].PIO_mode;
1676 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1677 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1678 mode[0] = drvp[0].PIO_mode = 0;
1679 goto ok;
1680 }
1681 /*
1682 * If neither drive is using DMA, take the lower mode, unless
1683 * one of them is below PIO mode 2.
1684 */
1685 if (drvp[0].PIO_mode < 2) {
1686 mode[0] = drvp[0].PIO_mode = 0;
1687 mode[1] = drvp[1].PIO_mode;
1688 } else if (drvp[1].PIO_mode < 2) {
1689 mode[1] = drvp[1].PIO_mode = 0;
1690 mode[0] = drvp[0].PIO_mode;
1691 } else {
1692 mode[0] = mode[1] =
1693 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1694 drvp[0].PIO_mode = mode[0];
1695 drvp[1].PIO_mode = mode[1];
1696 }
1697 ok: /* The modes are setup */
1698 for (drive = 0; drive < 2; drive++) {
1699 if (drvp[drive].drive_flags & DRIVE_DMA) {
1700 idetim |= piix_setup_idetim_timings(
1701 mode[drive], 1, chp->channel);
1702 goto end;
1703 }
1704 }
1705 /* If we get here, neither drive is using DMA */
1706 if (mode[0] >= 2)
1707 idetim |= piix_setup_idetim_timings(
1708 mode[0], 0, chp->channel);
1709 else
1710 idetim |= piix_setup_idetim_timings(
1711 mode[1], 0, chp->channel);
1712 end: /*
1713 * timing mode is now set up in the controller. Enable
1714 * it per-drive
1715 */
1716 for (drive = 0; drive < 2; drive++) {
1717 /* If no drive, skip */
1718 if ((drvp[drive].drive_flags & DRIVE) == 0)
1719 continue;
1720 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1721 if (drvp[drive].drive_flags & DRIVE_DMA)
1722 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1723 }
1724 if (idedma_ctl != 0) {
1725 /* Add software bits in status register */
1726 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1727 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1728 idedma_ctl);
1729 }
1730 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1731 pciide_print_modes(cp);
1732 }
1733
1734 void
1735 piix3_4_setup_channel(chp)
1736 struct channel_softc *chp;
1737 {
1738 struct ata_drive_datas *drvp;
1739 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1740 struct pciide_channel *cp = (struct pciide_channel*)chp;
1741 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1742 int drive;
1743 int channel = chp->channel;
1744
1745 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1746 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1747 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1748 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1749 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1750 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1751 PIIX_SIDETIM_RTC_MASK(channel));
1752
1753 idedma_ctl = 0;
1754 /* If channel disabled, no need to go further */
1755 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1756 return;
1757 /* set up new idetim: Enable IDE registers decode */
1758 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1759
1760 /* setup DMA if needed */
1761 pciide_channel_dma_setup(cp);
1762
1763 for (drive = 0; drive < 2; drive++) {
1764 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1765 PIIX_UDMATIM_SET(0x3, channel, drive));
1766 drvp = &chp->ch_drive[drive];
1767 /* If no drive, skip */
1768 if ((drvp->drive_flags & DRIVE) == 0)
1769 continue;
1770 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1771 (drvp->drive_flags & DRIVE_UDMA) == 0))
1772 goto pio;
1773
1774 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1775 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1776 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1777 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1778 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1779 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1780 ideconf |= PIIX_CONFIG_PINGPONG;
1781 }
1782 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1783 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1784 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1785 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1786 /* setup Ultra/100 */
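			/*
			 * Note: PIIX_CONFIG_CR is presumably the 80-wire
			 * cable-report bit; if it's clear, cap the drive
			 * at UDMA2 (Ultra/33).
			 */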
1787 if (drvp->UDMA_mode > 2 &&
1788 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1789 drvp->UDMA_mode = 2;
1790 if (drvp->UDMA_mode > 4) {
1791 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1792 } else {
1793 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1794 if (drvp->UDMA_mode > 2) {
1795 ideconf |= PIIX_CONFIG_UDMA66(channel,
1796 drive);
1797 } else {
1798 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1799 drive);
1800 }
1801 }
1802 }
1803 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1804 /* setup Ultra/66 */
1805 if (drvp->UDMA_mode > 2 &&
1806 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1807 drvp->UDMA_mode = 2;
1808 if (drvp->UDMA_mode > 2)
1809 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1810 else
1811 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1812 }
1813 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1814 (drvp->drive_flags & DRIVE_UDMA)) {
1815 /* use Ultra/DMA */
1816 drvp->drive_flags &= ~DRIVE_DMA;
1817			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1818 udmareg |= PIIX_UDMATIM_SET(
1819 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1820 } else {
1821 /* use Multiword DMA */
1822 drvp->drive_flags &= ~DRIVE_UDMA;
1823 if (drive == 0) {
1824 idetim |= piix_setup_idetim_timings(
1825 drvp->DMA_mode, 1, channel);
1826 } else {
1827 sidetim |= piix_setup_sidetim_timings(
1828 drvp->DMA_mode, 1, channel);
1829				idetim = PIIX_IDETIM_SET(idetim,
1830 PIIX_IDETIM_SITRE, channel);
1831 }
1832 }
1833 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1834
1835 pio: /* use PIO mode */
1836 idetim |= piix_setup_idetim_drvs(drvp);
1837 if (drive == 0) {
1838 idetim |= piix_setup_idetim_timings(
1839 drvp->PIO_mode, 0, channel);
1840 } else {
1841 sidetim |= piix_setup_sidetim_timings(
1842 drvp->PIO_mode, 0, channel);
1843			idetim = PIIX_IDETIM_SET(idetim,
1844 PIIX_IDETIM_SITRE, channel);
1845 }
1846 }
1847 if (idedma_ctl != 0) {
1848 /* Add software bits in status register */
1849 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1850 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1851 idedma_ctl);
1852 }
1853 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1854 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1855 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1856 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1857 pciide_print_modes(cp);
1858 }
1859
1860
1861 /* setup ISP and RTC fields, based on mode */
1862 static u_int32_t
1863 piix_setup_idetim_timings(mode, dma, channel)
1864 u_int8_t mode;
1865 u_int8_t dma;
1866 u_int8_t channel;
1867 {
1868
1869 if (dma)
1870 return PIIX_IDETIM_SET(0,
1871 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1872 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1873 channel);
1874 else
1875 return PIIX_IDETIM_SET(0,
1876 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1877 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1878 channel);
1879 }
1880
1881 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1882 static u_int32_t
1883 piix_setup_idetim_drvs(drvp)
1884 struct ata_drive_datas *drvp;
1885 {
1886 u_int32_t ret = 0;
1887 struct channel_softc *chp = drvp->chnl_softc;
1888 u_int8_t channel = chp->channel;
1889 u_int8_t drive = drvp->drive;
1890
1891 /*
1892	 * If the drive is using UDMA, the timing setups are independent,
1893	 * so just check DMA and PIO here.
1894 */
1895 if (drvp->drive_flags & DRIVE_DMA) {
1896 /* if mode = DMA mode 0, use compatible timings */
1897 if ((drvp->drive_flags & DRIVE_DMA) &&
1898 drvp->DMA_mode == 0) {
1899 drvp->PIO_mode = 0;
1900 return ret;
1901 }
1902 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1903 /*
1904		 * If PIO and DMA timings are the same, use fast timings for
1905		 * PIO too; else use compat timings for PIO.
1906 */
1907 if ((piix_isp_pio[drvp->PIO_mode] !=
1908 piix_isp_dma[drvp->DMA_mode]) ||
1909 (piix_rtc_pio[drvp->PIO_mode] !=
1910 piix_rtc_dma[drvp->DMA_mode]))
1911 drvp->PIO_mode = 0;
1912 /* if PIO mode <= 2, use compat timings for PIO */
1913 if (drvp->PIO_mode <= 2) {
1914 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1915 channel);
1916 return ret;
1917 }
1918 }
1919
1920 /*
1921 * Now setup PIO modes. If mode < 2, use compat timings.
1922 * Else enable fast timings. Enable IORDY and prefetch/post
1923 * if PIO mode >= 3.
1924 */
1925
1926 if (drvp->PIO_mode < 2)
1927 return ret;
1928
1929 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1930 if (drvp->PIO_mode >= 3) {
1931 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1932 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1933 }
1934 return ret;
1935 }
1936
1937 /* setup values in SIDETIM registers, based on mode */
1938 static u_int32_t
1939 piix_setup_sidetim_timings(mode, dma, channel)
1940 u_int8_t mode;
1941 u_int8_t dma;
1942 u_int8_t channel;
1943 {
1944 if (dma)
1945 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1946 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1947 else
1948 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1949 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1950 }
1951
1952 void
1953 amd7x6_chip_map(sc, pa)
1954 struct pciide_softc *sc;
1955 struct pci_attach_args *pa;
1956 {
1957 struct pciide_channel *cp;
1958 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1959 int channel;
1960 pcireg_t chanenable;
1961 bus_size_t cmdsize, ctlsize;
1962
1963 if (pciide_chipen(sc, pa) == 0)
1964 return;
1965 printf("%s: bus-master DMA support present",
1966 sc->sc_wdcdev.sc_dev.dv_xname);
1967 pciide_mapreg_dma(sc, pa);
1968 printf("\n");
1969 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1970 WDC_CAPABILITY_MODE;
1971 if (sc->sc_dma_ok) {
1972 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1973 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1974 sc->sc_wdcdev.irqack = pciide_irqack;
1975 }
1976 sc->sc_wdcdev.PIO_cap = 4;
1977 sc->sc_wdcdev.DMA_cap = 2;
1978
1979 switch (sc->sc_pp->ide_product) {
1980 case PCI_PRODUCT_AMD_PBC766_IDE:
1981 case PCI_PRODUCT_AMD_PBC768_IDE:
1982 case PCI_PRODUCT_AMD_PBC8111_IDE:
1983 sc->sc_wdcdev.UDMA_cap = 5;
1984 break;
1985 default:
1986 sc->sc_wdcdev.UDMA_cap = 4;
1987 }
1988 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1989 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1990 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1991 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1992
1993 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1994 DEBUG_PROBE);
1995 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1996 cp = &sc->pciide_channels[channel];
1997 if (pciide_chansetup(sc, channel, interface) == 0)
1998 continue;
1999
2000 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2001 printf("%s: %s channel ignored (disabled)\n",
2002 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2003 continue;
2004 }
2005 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2006 pciide_pci_intr);
2007
2008 if (pciide_chan_candisable(cp))
2009 chanenable &= ~AMD7X6_CHAN_EN(channel);
2010 pciide_map_compat_intr(pa, cp, channel, interface);
2011 if (cp->hw_ok == 0)
2012 continue;
2013
2014 amd7x6_setup_channel(&cp->wdc_channel);
2015 }
2016 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2017 chanenable);
2018 return;
2019 }
2020
2021 void
2022 amd7x6_setup_channel(chp)
2023 struct channel_softc *chp;
2024 {
2025 u_int32_t udmatim_reg, datatim_reg;
2026 u_int8_t idedma_ctl;
2027 int mode, drive;
2028 struct ata_drive_datas *drvp;
2029 struct pciide_channel *cp = (struct pciide_channel*)chp;
2030 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2031 #ifndef PCIIDE_AMD756_ENABLEDMA
2032 int rev = PCI_REVISION(
2033 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2034 #endif
2035
2036 idedma_ctl = 0;
2037 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2038 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2039 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2040 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2041
2042 /* setup DMA if needed */
2043 pciide_channel_dma_setup(cp);
2044
2045 for (drive = 0; drive < 2; drive++) {
2046 drvp = &chp->ch_drive[drive];
2047 /* If no drive, skip */
2048 if ((drvp->drive_flags & DRIVE) == 0)
2049 continue;
2050 /* add timing values, setup DMA if needed */
2051 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2052 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2053 mode = drvp->PIO_mode;
2054 goto pio;
2055 }
2056 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2057 (drvp->drive_flags & DRIVE_UDMA)) {
2058 /* use Ultra/DMA */
2059 drvp->drive_flags &= ~DRIVE_DMA;
2060 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2061 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2062 AMD7X6_UDMA_TIME(chp->channel, drive,
2063 amd7x6_udma_tim[drvp->UDMA_mode]);
2064 /* can use PIO timings, MW DMA unused */
2065 mode = drvp->PIO_mode;
2066 } else {
2067 /* use Multiword DMA, but only if revision is OK */
2068 drvp->drive_flags &= ~DRIVE_UDMA;
2069 #ifndef PCIIDE_AMD756_ENABLEDMA
2070 /*
2071			 * The workaround doesn't seem to be necessary
2072			 * with all drives, so it can be disabled with the
2073			 * PCIIDE_AMD756_ENABLEDMA option. The bug causes a
2074			 * hard hang if triggered.
2075 */
2076 if (sc->sc_pp->ide_product ==
2077 PCI_PRODUCT_AMD_PBC756_IDE &&
2078 AMD756_CHIPREV_DISABLEDMA(rev)) {
2079 printf("%s:%d:%d: multi-word DMA disabled due "
2080 "to chip revision\n",
2081 sc->sc_wdcdev.sc_dev.dv_xname,
2082 chp->channel, drive);
2083 mode = drvp->PIO_mode;
2084 drvp->drive_flags &= ~DRIVE_DMA;
2085 goto pio;
2086 }
2087 #endif
2088 /* mode = min(pio, dma+2) */
2089			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2090 mode = drvp->PIO_mode;
2091 else
2092 mode = drvp->DMA_mode + 2;
2093 }
2094 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2095
2096 pio: /* setup PIO mode */
2097 if (mode <= 2) {
2098 drvp->DMA_mode = 0;
2099 drvp->PIO_mode = 0;
2100 mode = 0;
2101 } else {
2102 drvp->PIO_mode = mode;
2103 drvp->DMA_mode = mode - 2;
2104 }
2105 datatim_reg |=
2106 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2107 amd7x6_pio_set[mode]) |
2108 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2109 amd7x6_pio_rec[mode]);
2110 }
2111 if (idedma_ctl != 0) {
2112 /* Add software bits in status register */
2113 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2114 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2115 idedma_ctl);
2116 }
2117 pciide_print_modes(cp);
2118 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2119 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2120 }
2121
2122 void
2123 apollo_chip_map(sc, pa)
2124 struct pciide_softc *sc;
2125 struct pci_attach_args *pa;
2126 {
2127 struct pciide_channel *cp;
2128 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2129 int channel;
2130 u_int32_t ideconf;
2131 bus_size_t cmdsize, ctlsize;
2132 pcitag_t pcib_tag;
2133 pcireg_t pcib_id, pcib_class;
2134
2135 if (pciide_chipen(sc, pa) == 0)
2136 return;
2137 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2138 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2139 /* and read ID and rev of the ISA bridge */
2140 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2141 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2142 printf(": VIA Technologies ");
2143 switch (PCI_PRODUCT(pcib_id)) {
2144 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2145 printf("VT82C586 (Apollo VP) ");
2146		if (PCI_REVISION(pcib_class) >= 0x02) {
2147 printf("ATA33 controller\n");
2148 sc->sc_wdcdev.UDMA_cap = 2;
2149 } else {
2150 printf("controller\n");
2151 sc->sc_wdcdev.UDMA_cap = 0;
2152 }
2153 break;
2154 case PCI_PRODUCT_VIATECH_VT82C596A:
2155 printf("VT82C596A (Apollo Pro) ");
2156 if (PCI_REVISION(pcib_class) >= 0x12) {
2157 printf("ATA66 controller\n");
2158 sc->sc_wdcdev.UDMA_cap = 4;
2159 } else {
2160 printf("ATA33 controller\n");
2161 sc->sc_wdcdev.UDMA_cap = 2;
2162 }
2163 break;
2164 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2165 printf("VT82C686A (Apollo KX133) ");
2166 if (PCI_REVISION(pcib_class) >= 0x40) {
2167 printf("ATA100 controller\n");
2168 sc->sc_wdcdev.UDMA_cap = 5;
2169 } else {
2170 printf("ATA66 controller\n");
2171 sc->sc_wdcdev.UDMA_cap = 4;
2172 }
2173 break;
2174 case PCI_PRODUCT_VIATECH_VT8233:
2175 printf("VT8233 ATA100 controller\n");
2176 sc->sc_wdcdev.UDMA_cap = 5;
2177 break;
2178 default:
2179 printf("unknown ATA controller\n");
2180 sc->sc_wdcdev.UDMA_cap = 0;
2181 }
2182
2183 printf("%s: bus-master DMA support present",
2184 sc->sc_wdcdev.sc_dev.dv_xname);
2185 pciide_mapreg_dma(sc, pa);
2186 printf("\n");
2187 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2188 WDC_CAPABILITY_MODE;
2189 if (sc->sc_dma_ok) {
2190 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2191 sc->sc_wdcdev.irqack = pciide_irqack;
2192 if (sc->sc_wdcdev.UDMA_cap > 0)
2193 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2194 }
2195 sc->sc_wdcdev.PIO_cap = 4;
2196 sc->sc_wdcdev.DMA_cap = 2;
2197 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2198 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2199 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2200
2201 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2202 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2203 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2204 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2205 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2206 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2207 DEBUG_PROBE);
2208
2209 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2210 cp = &sc->pciide_channels[channel];
2211 if (pciide_chansetup(sc, channel, interface) == 0)
2212 continue;
2213
2214 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2215 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2216 printf("%s: %s channel ignored (disabled)\n",
2217 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2218 continue;
2219 }
2220 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2221 pciide_pci_intr);
2222 if (cp->hw_ok == 0)
2223 continue;
2224 if (pciide_chan_candisable(cp)) {
2225 ideconf &= ~APO_IDECONF_EN(channel);
2226 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2227 ideconf);
2228 }
2229 pciide_map_compat_intr(pa, cp, channel, interface);
2230
2231 if (cp->hw_ok == 0)
2232 continue;
2233 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2234 }
2235 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2236 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2237 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2238 }
2239
2240 void
2241 apollo_setup_channel(chp)
2242 struct channel_softc *chp;
2243 {
2244 u_int32_t udmatim_reg, datatim_reg;
2245 u_int8_t idedma_ctl;
2246 int mode, drive;
2247 struct ata_drive_datas *drvp;
2248 struct pciide_channel *cp = (struct pciide_channel*)chp;
2249 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2250
2251 idedma_ctl = 0;
2252 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2253 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2254 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2255 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2256
2257 /* setup DMA if needed */
2258 pciide_channel_dma_setup(cp);
2259
2260 for (drive = 0; drive < 2; drive++) {
2261 drvp = &chp->ch_drive[drive];
2262 /* If no drive, skip */
2263 if ((drvp->drive_flags & DRIVE) == 0)
2264 continue;
2265 /* add timing values, setup DMA if needed */
2266 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2267 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2268 mode = drvp->PIO_mode;
2269 goto pio;
2270 }
2271 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2272 (drvp->drive_flags & DRIVE_UDMA)) {
2273 /* use Ultra/DMA */
2274 drvp->drive_flags &= ~DRIVE_DMA;
2275 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2276 APO_UDMA_EN_MTH(chp->channel, drive);
2277 if (sc->sc_wdcdev.UDMA_cap == 5) {
2278 /* 686b */
2279 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2280 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2281 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2282 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2283 /* 596b or 686a */
2284 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2285 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2286 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2287 } else {
2288 /* 596a or 586b */
2289 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2290 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2291 }
2292 /* can use PIO timings, MW DMA unused */
2293 mode = drvp->PIO_mode;
2294 } else {
2295 /* use Multiword DMA */
2296 drvp->drive_flags &= ~DRIVE_UDMA;
2297 /* mode = min(pio, dma+2) */
2298			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2299 mode = drvp->PIO_mode;
2300 else
2301 mode = drvp->DMA_mode + 2;
2302 }
2303 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2304
2305 pio: /* setup PIO mode */
2306 if (mode <= 2) {
2307 drvp->DMA_mode = 0;
2308 drvp->PIO_mode = 0;
2309 mode = 0;
2310 } else {
2311 drvp->PIO_mode = mode;
2312 drvp->DMA_mode = mode - 2;
2313 }
2314 datatim_reg |=
2315 APO_DATATIM_PULSE(chp->channel, drive,
2316 apollo_pio_set[mode]) |
2317 APO_DATATIM_RECOV(chp->channel, drive,
2318 apollo_pio_rec[mode]);
2319 }
2320 if (idedma_ctl != 0) {
2321 /* Add software bits in status register */
2322 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2323 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2324 idedma_ctl);
2325 }
2326 pciide_print_modes(cp);
2327 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2328 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2329 }
2330
2331 void
2332 cmd_channel_map(pa, sc, channel)
2333 struct pci_attach_args *pa;
2334 struct pciide_softc *sc;
2335 int channel;
2336 {
2337 struct pciide_channel *cp = &sc->pciide_channels[channel];
2338 bus_size_t cmdsize, ctlsize;
2339 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2340 int interface, one_channel;
2341
2342 /*
2343 * The 0648/0649 can be told to identify as a RAID controller.
2344	 * In this case, we have to fake the interface value.
2345 */
2346 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2347 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2348 PCIIDE_INTERFACE_SETTABLE(1);
2349 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2350 CMD_CONF_DSA1)
2351 interface |= PCIIDE_INTERFACE_PCI(0) |
2352 PCIIDE_INTERFACE_PCI(1);
2353 } else {
2354 interface = PCI_INTERFACE(pa->pa_class);
2355 }
2356
2357 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2358 cp->name = PCIIDE_CHANNEL_NAME(channel);
2359 cp->wdc_channel.channel = channel;
2360 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2361
2362 /*
2363	 * Older CMD64x chips don't have independent channels
2364 */
2365 switch (sc->sc_pp->ide_product) {
2366 case PCI_PRODUCT_CMDTECH_649:
2367 one_channel = 0;
2368 break;
2369 default:
2370 one_channel = 1;
2371 break;
2372 }
2373
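	/*
	 * When the channels are not independent, let the second channel
	 * share the first channel's command queue so that requests on both
	 * channels are serialized.
	 */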
2374 if (channel > 0 && one_channel) {
2375 cp->wdc_channel.ch_queue =
2376 sc->pciide_channels[0].wdc_channel.ch_queue;
2377 } else {
2378 cp->wdc_channel.ch_queue =
2379 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2380 }
2381 if (cp->wdc_channel.ch_queue == NULL) {
2382		printf("%s: %s channel: "
2383		    "can't allocate memory for command queue\n",
2384 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2385 return;
2386 }
2387
2388 printf("%s: %s channel %s to %s mode\n",
2389 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2390 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2391 "configured" : "wired",
2392 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2393 "native-PCI" : "compatibility");
2394
2395 /*
2396 * with a CMD PCI64x, if we get here, the first channel is enabled:
2397 * there's no way to disable the first channel without disabling
2398 * the whole device
2399 */
2400 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2401 printf("%s: %s channel ignored (disabled)\n",
2402 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2403 return;
2404 }
2405
2406 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2407 if (cp->hw_ok == 0)
2408 return;
2409 if (channel == 1) {
2410 if (pciide_chan_candisable(cp)) {
2411 ctrl &= ~CMD_CTRL_2PORT;
2412 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2413 CMD_CTRL, ctrl);
2414 }
2415 }
2416 pciide_map_compat_intr(pa, cp, channel, interface);
2417 }
2418
2419 int
2420 cmd_pci_intr(arg)
2421 void *arg;
2422 {
2423 struct pciide_softc *sc = arg;
2424 struct pciide_channel *cp;
2425 struct channel_softc *wdc_cp;
2426 int i, rv, crv;
2427 u_int32_t priirq, secirq;
2428
2429 rv = 0;
2430 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2431 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2432 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2433 cp = &sc->pciide_channels[i];
2434 wdc_cp = &cp->wdc_channel;
2435		/* If it's a compat channel, skip it. */
2436 if (cp->compat)
2437 continue;
2438 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2439 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2440 crv = wdcintr(wdc_cp);
2441 if (crv == 0)
2442 printf("%s:%d: bogus intr\n",
2443 sc->sc_wdcdev.sc_dev.dv_xname, i);
2444 else
2445 rv = 1;
2446 }
2447 }
2448 return rv;
2449 }
2450
2451 void
2452 cmd_chip_map(sc, pa)
2453 struct pciide_softc *sc;
2454 struct pci_attach_args *pa;
2455 {
2456 int channel;
2457
2458 /*
2459 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2460	 * and the base address registers can be disabled at
2461	 * the hardware level. In this case, the device is wired
2462 * in compat mode and its first channel is always enabled,
2463 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2464 * In fact, it seems that the first channel of the CMD PCI0640
2465 * can't be disabled.
2466 */
2467
2468 #ifdef PCIIDE_CMD064x_DISABLE
2469 if (pciide_chipen(sc, pa) == 0)
2470 return;
2471 #endif
2472
2473 printf("%s: hardware does not support DMA\n",
2474 sc->sc_wdcdev.sc_dev.dv_xname);
2475 sc->sc_dma_ok = 0;
2476
2477 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2478 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2479 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2480
2481 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2482 cmd_channel_map(pa, sc, channel);
2483 }
2484 }
2485
2486 void
2487 cmd0643_9_chip_map(sc, pa)
2488 struct pciide_softc *sc;
2489 struct pci_attach_args *pa;
2490 {
2491 struct pciide_channel *cp;
2492 int channel;
2493 pcireg_t rev = PCI_REVISION(pa->pa_class);
2494
2495 /*
2496 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2497	 * and the base address registers can be disabled at
2498	 * the hardware level. In this case, the device is wired
2499 * in compat mode and its first channel is always enabled,
2500 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2501 * In fact, it seems that the first channel of the CMD PCI0640
2502 * can't be disabled.
2503 */
2504
2505 #ifdef PCIIDE_CMD064x_DISABLE
2506 if (pciide_chipen(sc, pa) == 0)
2507 return;
2508 #endif
2509 printf("%s: bus-master DMA support present",
2510 sc->sc_wdcdev.sc_dev.dv_xname);
2511 pciide_mapreg_dma(sc, pa);
2512 printf("\n");
2513 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2514 WDC_CAPABILITY_MODE;
2515 if (sc->sc_dma_ok) {
2516 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2517 switch (sc->sc_pp->ide_product) {
2518 case PCI_PRODUCT_CMDTECH_649:
2519 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2520 sc->sc_wdcdev.UDMA_cap = 5;
2521 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2522 break;
2523 case PCI_PRODUCT_CMDTECH_648:
2524 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2525 sc->sc_wdcdev.UDMA_cap = 4;
2526 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2527 break;
2528 case PCI_PRODUCT_CMDTECH_646:
2529 if (rev >= CMD0646U2_REV) {
2530 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2531 sc->sc_wdcdev.UDMA_cap = 2;
2532 } else if (rev >= CMD0646U_REV) {
2533 /*
2534 * Linux's driver claims that the 646U is broken
2535 * with UDMA. Only enable it if we know what we're
2536 * doing
2537 */
2538 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2539 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2540 sc->sc_wdcdev.UDMA_cap = 2;
2541 #endif
2542 /* explicitly disable UDMA */
2543 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2544 CMD_UDMATIM(0), 0);
2545 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2546 CMD_UDMATIM(1), 0);
2547 }
2548 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2549 break;
2550 default:
2551 sc->sc_wdcdev.irqack = pciide_irqack;
2552 }
2553 }
2554
2555 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2556 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2557 sc->sc_wdcdev.PIO_cap = 4;
2558 sc->sc_wdcdev.DMA_cap = 2;
2559 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2560
2561 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2562 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2563 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2564 DEBUG_PROBE);
2565
2566 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2567 cp = &sc->pciide_channels[channel];
2568 cmd_channel_map(pa, sc, channel);
2569 if (cp->hw_ok == 0)
2570 continue;
2571 cmd0643_9_setup_channel(&cp->wdc_channel);
2572 }
2573 /*
2574 * note - this also makes sure we clear the irq disable and reset
2575 * bits
2576 */
2577 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2578 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2579 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2580 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2581 DEBUG_PROBE);
2582 }
2583
2584 void
2585 cmd0643_9_setup_channel(chp)
2586 struct channel_softc *chp;
2587 {
2588 struct ata_drive_datas *drvp;
2589 u_int8_t tim;
2590 u_int32_t idedma_ctl, udma_reg;
2591 int drive;
2592 struct pciide_channel *cp = (struct pciide_channel*)chp;
2593 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2594
2595 idedma_ctl = 0;
2596 /* setup DMA if needed */
2597 pciide_channel_dma_setup(cp);
2598
2599 for (drive = 0; drive < 2; drive++) {
2600 drvp = &chp->ch_drive[drive];
2601 /* If no drive, skip */
2602 if ((drvp->drive_flags & DRIVE) == 0)
2603 continue;
2604 /* add timing values, setup DMA if needed */
2605 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2606 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2607 if (drvp->drive_flags & DRIVE_UDMA) {
2608 /* UltraDMA on a 646U2, 0648 or 0649 */
2609 drvp->drive_flags &= ~DRIVE_DMA;
2610 udma_reg = pciide_pci_read(sc->sc_pc,
2611 sc->sc_tag, CMD_UDMATIM(chp->channel));
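				/*
				 * CMD_BICSR_80 presumably reports an 80-wire
				 * cable; without it, cap the drive at UDMA2.
				 */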
2612 if (drvp->UDMA_mode > 2 &&
2613 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2614 CMD_BICSR) &
2615 CMD_BICSR_80(chp->channel)) == 0)
2616 drvp->UDMA_mode = 2;
2617 if (drvp->UDMA_mode > 2)
2618 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2619 else if (sc->sc_wdcdev.UDMA_cap > 2)
2620 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2621 udma_reg |= CMD_UDMATIM_UDMA(drive);
2622 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2623 CMD_UDMATIM_TIM_OFF(drive));
2624 udma_reg |=
2625 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2626 CMD_UDMATIM_TIM_OFF(drive));
2627 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2628 CMD_UDMATIM(chp->channel), udma_reg);
2629 } else {
2630 /*
2631 * use Multiword DMA.
2632 * Timings will be used for both PIO and DMA,
2633 * so adjust DMA mode if needed
2634 * if we have a 0646U2/8/9, turn off UDMA
2635 */
2636 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2637 udma_reg = pciide_pci_read(sc->sc_pc,
2638 sc->sc_tag,
2639 CMD_UDMATIM(chp->channel));
2640 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2641 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2642 CMD_UDMATIM(chp->channel),
2643 udma_reg);
2644 }
2645 if (drvp->PIO_mode >= 3 &&
2646 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2647 drvp->DMA_mode = drvp->PIO_mode - 2;
2648 }
2649 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2650 }
2651 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2652 }
2653 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2654 CMD_DATA_TIM(chp->channel, drive), tim);
2655 }
2656 if (idedma_ctl != 0) {
2657 /* Add software bits in status register */
2658 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2659 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2660 idedma_ctl);
2661 }
2662 pciide_print_modes(cp);
2663 }
2664
2665 void
2666 cmd646_9_irqack(chp)
2667 struct channel_softc *chp;
2668 {
2669 u_int32_t priirq, secirq;
2670 struct pciide_channel *cp = (struct pciide_channel*)chp;
2671 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2672
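	/*
	 * Reading the channel's CONF/ARTTIM23 register and writing the same
	 * value back presumably clears the chip's interrupt latch before the
	 * generic pciide ack below.
	 */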
2673 if (chp->channel == 0) {
2674 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2675 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2676 } else {
2677 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2678 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2679 }
2680 pciide_irqack(chp);
2681 }
2682
2683 void
2684 cy693_chip_map(sc, pa)
2685 struct pciide_softc *sc;
2686 struct pci_attach_args *pa;
2687 {
2688 struct pciide_channel *cp;
2689 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2690 bus_size_t cmdsize, ctlsize;
2691
2692 if (pciide_chipen(sc, pa) == 0)
2693 return;
2694 /*
2695 * this chip has 2 PCI IDE functions, one for primary and one for
2696 * secondary. So we need to call pciide_mapregs_compat() with
2697 * the real channel
2698 */
2699 if (pa->pa_function == 1) {
2700 sc->sc_cy_compatchan = 0;
2701 } else if (pa->pa_function == 2) {
2702 sc->sc_cy_compatchan = 1;
2703 } else {
2704 printf("%s: unexpected PCI function %d\n",
2705 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2706 return;
2707 }
2708 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2709 printf("%s: bus-master DMA support present",
2710 sc->sc_wdcdev.sc_dev.dv_xname);
2711 pciide_mapreg_dma(sc, pa);
2712 } else {
2713 printf("%s: hardware does not support DMA",
2714 sc->sc_wdcdev.sc_dev.dv_xname);
2715 sc->sc_dma_ok = 0;
2716 }
2717 printf("\n");
2718
2719 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2720 if (sc->sc_cy_handle == NULL) {
2721 printf("%s: unable to map hyperCache control registers\n",
2722 sc->sc_wdcdev.sc_dev.dv_xname);
2723 sc->sc_dma_ok = 0;
2724 }
2725
2726 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2727 WDC_CAPABILITY_MODE;
2728 if (sc->sc_dma_ok) {
2729 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2730 sc->sc_wdcdev.irqack = pciide_irqack;
2731 }
2732 sc->sc_wdcdev.PIO_cap = 4;
2733 sc->sc_wdcdev.DMA_cap = 2;
2734 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2735
2736 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2737 sc->sc_wdcdev.nchannels = 1;
2738
2739 /* Only one channel for this chip; if we are here it's enabled */
2740 cp = &sc->pciide_channels[0];
2741 sc->wdc_chanarray[0] = &cp->wdc_channel;
2742 cp->name = PCIIDE_CHANNEL_NAME(0);
2743 cp->wdc_channel.channel = 0;
2744 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2745 cp->wdc_channel.ch_queue =
2746 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2747 if (cp->wdc_channel.ch_queue == NULL) {
2748		printf("%s: primary channel: "
2749		    "can't allocate memory for command queue\n",
2750 sc->sc_wdcdev.sc_dev.dv_xname);
2751 return;
2752 }
2753 printf("%s: primary channel %s to ",
2754 sc->sc_wdcdev.sc_dev.dv_xname,
2755 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2756 "configured" : "wired");
2757 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2758 printf("native-PCI");
2759 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2760 pciide_pci_intr);
2761 } else {
2762 printf("compatibility");
2763 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2764 &cmdsize, &ctlsize);
2765 }
2766 printf(" mode\n");
2767 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2768 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2769 wdcattach(&cp->wdc_channel);
2770 if (pciide_chan_candisable(cp)) {
2771 pci_conf_write(sc->sc_pc, sc->sc_tag,
2772 PCI_COMMAND_STATUS_REG, 0);
2773 }
2774 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2775 if (cp->hw_ok == 0)
2776 return;
2777 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2778 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2779 cy693_setup_channel(&cp->wdc_channel);
2780 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2781 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2782 }
2783
2784 void
2785 cy693_setup_channel(chp)
2786 struct channel_softc *chp;
2787 {
2788 struct ata_drive_datas *drvp;
2789 int drive;
2790 u_int32_t cy_cmd_ctrl;
2791 u_int32_t idedma_ctl;
2792 struct pciide_channel *cp = (struct pciide_channel*)chp;
2793 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2794 int dma_mode = -1;
2795
2796 cy_cmd_ctrl = idedma_ctl = 0;
2797
2798 /* setup DMA if needed */
2799 pciide_channel_dma_setup(cp);
2800
2801 for (drive = 0; drive < 2; drive++) {
2802 drvp = &chp->ch_drive[drive];
2803 /* If no drive, skip */
2804 if ((drvp->drive_flags & DRIVE) == 0)
2805 continue;
2806 /* add timing values, setup DMA if needed */
2807 if (drvp->drive_flags & DRIVE_DMA) {
2808 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2809 /* use Multiword DMA */
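			/*
			 * This chip appears to have a single DMA timing per
			 * channel, so keep the slowest DMA mode requested by
			 * either drive and apply it to both below.
			 */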
2810 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2811 dma_mode = drvp->DMA_mode;
2812 }
2813 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2814 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2815 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2816 CY_CMD_CTRL_IOW_REC_OFF(drive));
2817 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2818 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2819 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2820 CY_CMD_CTRL_IOR_REC_OFF(drive));
2821 }
2822 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2823 chp->ch_drive[0].DMA_mode = dma_mode;
2824 chp->ch_drive[1].DMA_mode = dma_mode;
2825
2826 if (dma_mode == -1)
2827 dma_mode = 0;
2828
2829 if (sc->sc_cy_handle != NULL) {
2830 /* Note: `multiple' is implied. */
2831 cy82c693_write(sc->sc_cy_handle,
2832 (sc->sc_cy_compatchan == 0) ?
2833 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2834 }
2835
2836 pciide_print_modes(cp);
2837
2838 if (idedma_ctl != 0) {
2839 /* Add software bits in status register */
2840 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2841 IDEDMA_CTL, idedma_ctl);
2842 }
2843 }
2844
2845 static int
2846 sis_hostbr_match(pa)
2847 struct pci_attach_args *pa;
2848 {
2849 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2850 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2851 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2852 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2853 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2854 }
2855
2856 void
2857 sis_chip_map(sc, pa)
2858 struct pciide_softc *sc;
2859 struct pci_attach_args *pa;
2860 {
2861 struct pciide_channel *cp;
2862 int channel;
2863 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2864 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2865 pcireg_t rev = PCI_REVISION(pa->pa_class);
2866 bus_size_t cmdsize, ctlsize;
2867 pcitag_t pchb_tag;
2868 pcireg_t pchb_id, pchb_class;
2869
2870 if (pciide_chipen(sc, pa) == 0)
2871 return;
2872 printf("%s: bus-master DMA support present",
2873 sc->sc_wdcdev.sc_dev.dv_xname);
2874 pciide_mapreg_dma(sc, pa);
2875 printf("\n");
2876
2877 /* get a PCI tag for the host bridge (function 0 of the same device) */
2878 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2879 /* and read ID and rev of the ISA bridge */
2880 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2881 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2882
2883 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2884 WDC_CAPABILITY_MODE;
2885 if (sc->sc_dma_ok) {
2886 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2887 sc->sc_wdcdev.irqack = pciide_irqack;
2888 /*
2889		 * controllers associated with a rev 0x2 530 Host to PCI Bridge
2890 * have problems with UDMA (info provided by Christos)
2891 */
2892 if (rev >= 0xd0 &&
2893 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2894 PCI_REVISION(pchb_class) >= 0x03))
2895 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2896 }
2897
2898 sc->sc_wdcdev.PIO_cap = 4;
2899 sc->sc_wdcdev.DMA_cap = 2;
2900 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2901 /*
2902		 * Use UDMA/100 if one of the newer SiS host bridges
2903		 * (645/650/730/735) is present, UDMA/33 on other chipsets.
2904 */
2905 sc->sc_wdcdev.UDMA_cap =
2906 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2907 sc->sc_wdcdev.set_modes = sis_setup_channel;
2908
2909 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2910 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2911
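	/*
	 * Judging by the bit names, this selects the timing register set
	 * and the FIFO size in the SIS_MISC register.
	 */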
2912 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2913 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2914 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2915
2916 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2917 cp = &sc->pciide_channels[channel];
2918 if (pciide_chansetup(sc, channel, interface) == 0)
2919 continue;
2920 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2921 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2922 printf("%s: %s channel ignored (disabled)\n",
2923 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2924 continue;
2925 }
2926 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2927 pciide_pci_intr);
2928 if (cp->hw_ok == 0)
2929 continue;
2930 if (pciide_chan_candisable(cp)) {
2931 if (channel == 0)
2932 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2933 else
2934 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2935 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2936 sis_ctr0);
2937 }
2938 pciide_map_compat_intr(pa, cp, channel, interface);
2939 if (cp->hw_ok == 0)
2940 continue;
2941 sis_setup_channel(&cp->wdc_channel);
2942 }
2943 }
2944
2945 void
2946 sis_setup_channel(chp)
2947 struct channel_softc *chp;
2948 {
2949 struct ata_drive_datas *drvp;
2950 int drive;
2951 u_int32_t sis_tim;
2952 u_int32_t idedma_ctl;
2953 struct pciide_channel *cp = (struct pciide_channel*)chp;
2954 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2955
2956 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2957 "channel %d 0x%x\n", chp->channel,
2958 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2959 DEBUG_PROBE);
2960 sis_tim = 0;
2961 idedma_ctl = 0;
2962 /* setup DMA if needed */
2963 pciide_channel_dma_setup(cp);
2964
2965 for (drive = 0; drive < 2; drive++) {
2966 drvp = &chp->ch_drive[drive];
2967 /* If no drive, skip */
2968 if ((drvp->drive_flags & DRIVE) == 0)
2969 continue;
2970 /* add timing values, setup DMA if needed */
2971 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2972 (drvp->drive_flags & DRIVE_UDMA) == 0)
2973 goto pio;
2974
2975 if (drvp->drive_flags & DRIVE_UDMA) {
2976 /* use Ultra/DMA */
2977 drvp->drive_flags &= ~DRIVE_DMA;
2978 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2979 SIS_TIM_UDMA_TIME_OFF(drive);
2980 sis_tim |= SIS_TIM_UDMA_EN(drive);
2981 } else {
2982 /*
2983 * use Multiword DMA
2984 * Timings will be used for both PIO and DMA,
2985 * so adjust DMA mode if needed
2986 */
2987 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2988 drvp->PIO_mode = drvp->DMA_mode + 2;
2989 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2990 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2991 drvp->PIO_mode - 2 : 0;
2992 if (drvp->DMA_mode == 0)
2993 drvp->PIO_mode = 0;
2994 }
2995 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2996 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2997 SIS_TIM_ACT_OFF(drive);
2998 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2999 SIS_TIM_REC_OFF(drive);
3000 }
3001 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3002 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3003 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3004 if (idedma_ctl != 0) {
3005 /* Add software bits in status register */
3006 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3007 IDEDMA_CTL, idedma_ctl);
3008 }
3009 pciide_print_modes(cp);
3010 }
3011
3012 void
3013 acer_chip_map(sc, pa)
3014 struct pciide_softc *sc;
3015 struct pci_attach_args *pa;
3016 {
3017 struct pciide_channel *cp;
3018 int channel;
3019 pcireg_t cr, interface;
3020 bus_size_t cmdsize, ctlsize;
3021 pcireg_t rev = PCI_REVISION(pa->pa_class);
3022
3023 if (pciide_chipen(sc, pa) == 0)
3024 return;
3025 printf("%s: bus-master DMA support present",
3026 sc->sc_wdcdev.sc_dev.dv_xname);
3027 pciide_mapreg_dma(sc, pa);
3028 printf("\n");
3029 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3030 WDC_CAPABILITY_MODE;
3031 if (sc->sc_dma_ok) {
3032 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3033 if (rev >= 0x20) {
3034 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3035 if (rev >= 0xC4)
3036 sc->sc_wdcdev.UDMA_cap = 5;
3037 else if (rev >= 0xC2)
3038 sc->sc_wdcdev.UDMA_cap = 4;
3039 else
3040 sc->sc_wdcdev.UDMA_cap = 2;
3041 }
3042 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3043 sc->sc_wdcdev.irqack = pciide_irqack;
3044 }
3045
3046 sc->sc_wdcdev.PIO_cap = 4;
3047 sc->sc_wdcdev.DMA_cap = 2;
3048 sc->sc_wdcdev.set_modes = acer_setup_channel;
3049 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3050 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3051
3052 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3053 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3054 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3055
3056 /* Enable "microsoft register bits" R/W. */
3057 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3058 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3059 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3060 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3061 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3062 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3063 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3064 ~ACER_CHANSTATUSREGS_RO);
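	/*
	 * Turn on the channel status enable bits in the PCI programming
	 * interface byte (hence the PCI_INTERFACE_SHIFT).
	 */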
3065 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3066 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3067 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3068 /* Don't use cr, re-read the real register content instead */
3069 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3070 PCI_CLASS_REG));
3071
3072 /* From linux: enable "Cable Detection" */
3073 if (rev >= 0xC2) {
3074 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3075 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3076 | ACER_0x4B_CDETECT);
3077 }
3078
3079 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3080 cp = &sc->pciide_channels[channel];
3081 if (pciide_chansetup(sc, channel, interface) == 0)
3082 continue;
3083 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3084 printf("%s: %s channel ignored (disabled)\n",
3085 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3086 continue;
3087 }
3088		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3089 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3090 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3091 if (cp->hw_ok == 0)
3092 continue;
3093 if (pciide_chan_candisable(cp)) {
3094 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3095 pci_conf_write(sc->sc_pc, sc->sc_tag,
3096 PCI_CLASS_REG, cr);
3097 }
3098 pciide_map_compat_intr(pa, cp, channel, interface);
3099 acer_setup_channel(&cp->wdc_channel);
3100 }
3101 }
3102
3103 void
3104 acer_setup_channel(chp)
3105 struct channel_softc *chp;
3106 {
3107 struct ata_drive_datas *drvp;
3108 int drive;
3109 u_int32_t acer_fifo_udma;
3110 u_int32_t idedma_ctl;
3111 struct pciide_channel *cp = (struct pciide_channel*)chp;
3112 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3113
3114 idedma_ctl = 0;
3115 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3116 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3117 acer_fifo_udma), DEBUG_PROBE);
3118 /* setup DMA if needed */
3119 pciide_channel_dma_setup(cp);
3120
3121 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3122	    DRIVE_UDMA) {	/* check for 80-pin cable */
3123 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3124 ACER_0x4A_80PIN(chp->channel)) {
3125 if (chp->ch_drive[0].UDMA_mode > 2)
3126 chp->ch_drive[0].UDMA_mode = 2;
3127 if (chp->ch_drive[1].UDMA_mode > 2)
3128 chp->ch_drive[1].UDMA_mode = 2;
3129 }
3130 }
3131
3132 for (drive = 0; drive < 2; drive++) {
3133 drvp = &chp->ch_drive[drive];
3134 /* If no drive, skip */
3135 if ((drvp->drive_flags & DRIVE) == 0)
3136 continue;
3137 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3138 "channel %d drive %d 0x%x\n", chp->channel, drive,
3139 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3140 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3141 /* clear FIFO/DMA mode */
3142 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3143 ACER_UDMA_EN(chp->channel, drive) |
3144 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3145
3146 /* add timing values, setup DMA if needed */
3147 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3148 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3149 acer_fifo_udma |=
3150 ACER_FTH_OPL(chp->channel, drive, 0x1);
3151 goto pio;
3152 }
3153
3154 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3155 if (drvp->drive_flags & DRIVE_UDMA) {
3156 /* use Ultra/DMA */
3157 drvp->drive_flags &= ~DRIVE_DMA;
3158 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3159 acer_fifo_udma |=
3160 ACER_UDMA_TIM(chp->channel, drive,
3161 acer_udma[drvp->UDMA_mode]);
3162 /* XXX disable if one drive < UDMA3 ? */
3163 if (drvp->UDMA_mode >= 3) {
3164 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3165 ACER_0x4B,
3166 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3167 ACER_0x4B) | ACER_0x4B_UDMA66);
3168 }
3169 } else {
3170 /*
3171 * use Multiword DMA
3172 * Timings will be used for both PIO and DMA,
3173 * so adjust DMA mode if needed
3174 */
3175 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3176 drvp->PIO_mode = drvp->DMA_mode + 2;
3177 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3178 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3179 drvp->PIO_mode - 2 : 0;
3180 if (drvp->DMA_mode == 0)
3181 drvp->PIO_mode = 0;
3182 }
3183 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3184 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3185 ACER_IDETIM(chp->channel, drive),
3186 acer_pio[drvp->PIO_mode]);
3187 }
3188 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3189 acer_fifo_udma), DEBUG_PROBE);
3190 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3191 if (idedma_ctl != 0) {
3192 /* Add software bits in status register */
3193 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3194 IDEDMA_CTL, idedma_ctl);
3195 }
3196 pciide_print_modes(cp);
3197 }
3198
3199 int
3200 acer_pci_intr(arg)
3201 void *arg;
3202 {
3203 struct pciide_softc *sc = arg;
3204 struct pciide_channel *cp;
3205 struct channel_softc *wdc_cp;
3206 int i, rv, crv;
3207 u_int32_t chids;
3208
3209 rv = 0;
3210 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3211 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3212 cp = &sc->pciide_channels[i];
3213 wdc_cp = &cp->wdc_channel;
3214		/* If it's a compat channel, skip it. */
3215 if (cp->compat)
3216 continue;
3217 if (chids & ACER_CHIDS_INT(i)) {
3218 crv = wdcintr(wdc_cp);
3219 if (crv == 0)
3220 printf("%s:%d: bogus intr\n",
3221 sc->sc_wdcdev.sc_dev.dv_xname, i);
3222 else
3223 rv = 1;
3224 }
3225 }
3226 return rv;
3227 }
3228
3229 void
3230 hpt_chip_map(sc, pa)
3231 struct pciide_softc *sc;
3232 struct pci_attach_args *pa;
3233 {
3234 struct pciide_channel *cp;
3235 int i, compatchan, revision;
3236 pcireg_t interface;
3237 bus_size_t cmdsize, ctlsize;
3238
3239 if (pciide_chipen(sc, pa) == 0)
3240 return;
3241 revision = PCI_REVISION(pa->pa_class);
3242 printf(": Triones/Highpoint ");
3243 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3244 printf("HPT374 IDE Controller\n");
3245 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3246 if (revision == HPT370_REV)
3247 printf("HPT370 IDE Controller\n");
3248 else if (revision == HPT370A_REV)
3249 printf("HPT370A IDE Controller\n");
3250 else if (revision == HPT366_REV)
3251 printf("HPT366 IDE Controller\n");
3252 else
3253 printf("unknown HPT IDE controller rev %d\n", revision);
3254 } else
3255 printf("unknown HPT IDE controller 0x%x\n",
3256 sc->sc_pp->ide_product);
3257
3258 /*
3259 * when the chip is in native mode it identifies itself as a
3260	 * 'misc mass storage'. Fake the interface in this case.
3261 */
3262 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3263 interface = PCI_INTERFACE(pa->pa_class);
3264 } else {
3265 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3266 PCIIDE_INTERFACE_PCI(0);
3267 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3268 (revision == HPT370_REV || revision == HPT370A_REV)) ||
3269 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3270 interface |= PCIIDE_INTERFACE_PCI(1);
3271 }
3272
3273 printf("%s: bus-master DMA support present",
3274 sc->sc_wdcdev.sc_dev.dv_xname);
3275 pciide_mapreg_dma(sc, pa);
3276 printf("\n");
3277 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3278 WDC_CAPABILITY_MODE;
3279 if (sc->sc_dma_ok) {
3280 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3281 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3282 sc->sc_wdcdev.irqack = pciide_irqack;
3283 }
3284 sc->sc_wdcdev.PIO_cap = 4;
3285 sc->sc_wdcdev.DMA_cap = 2;
3286
3287 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3288 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3289 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3290 revision == HPT366_REV) {
3291 sc->sc_wdcdev.UDMA_cap = 4;
3292 /*
3293 * The 366 has 2 PCI IDE functions, one for primary and one
3294 * for secondary. So we need to call pciide_mapregs_compat()
3295 * with the real channel
3296 */
3297 if (pa->pa_function == 0) {
3298 compatchan = 0;
3299 } else if (pa->pa_function == 1) {
3300 compatchan = 1;
3301 } else {
3302 printf("%s: unexpected PCI function %d\n",
3303 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3304 return;
3305 }
3306 sc->sc_wdcdev.nchannels = 1;
3307 } else {
3308 sc->sc_wdcdev.nchannels = 2;
3309 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3310 sc->sc_wdcdev.UDMA_cap = 6;
3311 else
3312 sc->sc_wdcdev.UDMA_cap = 5;
3313 }
3314 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3315 cp = &sc->pciide_channels[i];
3316 if (sc->sc_wdcdev.nchannels > 1) {
3317 compatchan = i;
3318			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3319 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3320 printf("%s: %s channel ignored (disabled)\n",
3321 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3322 continue;
3323 }
3324 }
3325 if (pciide_chansetup(sc, i, interface) == 0)
3326 continue;
3327 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3328 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3329 &ctlsize, hpt_pci_intr);
3330 } else {
3331 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3332 &cmdsize, &ctlsize);
3333 }
3334 if (cp->hw_ok == 0)
3335 return;
3336 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3337 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3338 wdcattach(&cp->wdc_channel);
3339 hpt_setup_channel(&cp->wdc_channel);
3340 }
3341 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3342 (revision == HPT370_REV || revision == HPT370A_REV)) ||
3343 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3344 /*
3345		 * HPT370_REV and higher have a bit to disable interrupts;
3346		 * make sure to clear it.
3347 */
3348 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3349 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3350 ~HPT_CSEL_IRQDIS);
3351 }
3352 /* set clocks, etc (mandatory on 374, optional otherwise) */
3353 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3354 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3355 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3356 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3357 return;
3358 }
3359
3360 void
3361 hpt_setup_channel(chp)
3362 struct channel_softc *chp;
3363 {
3364 struct ata_drive_datas *drvp;
3365 int drive;
3366 int cable;
3367 u_int32_t before, after;
3368 u_int32_t idedma_ctl;
3369 struct pciide_channel *cp = (struct pciide_channel*)chp;
3370 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3371
3372 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3373
3374 /* setup DMA if needed */
3375 pciide_channel_dma_setup(cp);
3376
3377 idedma_ctl = 0;
3378
3379 /* Per drive settings */
3380 for (drive = 0; drive < 2; drive++) {
3381 drvp = &chp->ch_drive[drive];
3382 /* If no drive, skip */
3383 if ((drvp->drive_flags & DRIVE) == 0)
3384 continue;
3385 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3386 HPT_IDETIM(chp->channel, drive));
3387
3388 /* add timing values, setup DMA if needed */
3389 if (drvp->drive_flags & DRIVE_UDMA) {
3390 /* use Ultra/DMA */
3391 drvp->drive_flags &= ~DRIVE_DMA;
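			/*
			 * HPT_CSEL_CBLID set presumably means no 80-wire
			 * cable was detected, so cap the drive at UDMA2.
			 */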
3392 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3393 drvp->UDMA_mode > 2)
3394 drvp->UDMA_mode = 2;
3395 after = (sc->sc_wdcdev.nchannels == 2) ?
3396 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3397 hpt374_udma[drvp->UDMA_mode] :
3398 hpt370_udma[drvp->UDMA_mode]) :
3399 hpt366_udma[drvp->UDMA_mode];
3400 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3401 } else if (drvp->drive_flags & DRIVE_DMA) {
3402 /*
3403 * use Multiword DMA.
3404 * Timings will be used for both PIO and DMA, so adjust
3405 * DMA mode if needed
3406 */
3407 if (drvp->PIO_mode >= 3 &&
3408 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3409 drvp->DMA_mode = drvp->PIO_mode - 2;
3410 }
3411 after = (sc->sc_wdcdev.nchannels == 2) ?
3412 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3413 hpt374_dma[drvp->DMA_mode] :
3414 hpt370_dma[drvp->DMA_mode]) :
3415 hpt366_dma[drvp->DMA_mode];
3416 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3417 } else {
3418 /* PIO only */
3419 after = (sc->sc_wdcdev.nchannels == 2) ?
3420 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3421 hpt374_pio[drvp->PIO_mode] :
3422 hpt370_pio[drvp->PIO_mode]) :
3423 hpt366_pio[drvp->PIO_mode];
3424 }
3425 pci_conf_write(sc->sc_pc, sc->sc_tag,
3426 HPT_IDETIM(chp->channel, drive), after);
3427 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3428 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3429 after, before), DEBUG_PROBE);
3430 }
3431 if (idedma_ctl != 0) {
3432 /* Add software bits in status register */
3433 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3434 IDEDMA_CTL, idedma_ctl);
3435 }
3436 pciide_print_modes(cp);
3437 }
3438
3439 int
3440 hpt_pci_intr(arg)
3441 void *arg;
3442 {
3443 struct pciide_softc *sc = arg;
3444 struct pciide_channel *cp;
3445 struct channel_softc *wdc_cp;
3446 int rv = 0;
3447 int dmastat, i, crv;
3448
3449 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
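		/*
		 * Only service a channel when its bus-master status shows a
		 * pending interrupt with the DMA engine no longer active.
		 */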
3450 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3451 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3452		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3453 IDEDMA_CTL_INTR)
3454 continue;
3455 cp = &sc->pciide_channels[i];
3456 wdc_cp = &cp->wdc_channel;
3457 crv = wdcintr(wdc_cp);
3458 if (crv == 0) {
3459 printf("%s:%d: bogus intr\n",
3460 sc->sc_wdcdev.sc_dev.dv_xname, i);
3461 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3462 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3463 } else
3464 rv = 1;
3465 }
3466 return rv;
3467 }
3468
3469
3470 /* Macros to test product */
3471 #define PDC_IS_262(sc) \
3472 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3473 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3474 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3475 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3476 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3477 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3478 #define PDC_IS_265(sc) \
3479 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3480 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3481 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3482 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3483 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3484 #define PDC_IS_268(sc) \
3485 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3486 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3487 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
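/*
 * Roughly: PDC_IS_262 matches the Ultra/66 and newer parts (UDMA4-capable),
 * PDC_IS_265 the Ultra/100 and newer parts (UDMA5-capable), and PDC_IS_268
 * the TX2/133 family, which uses a different programming interface
 * (pdc20268_setup_channel below).
 */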
3488
3489 void
3490 pdc202xx_chip_map(sc, pa)
3491 struct pciide_softc *sc;
3492 struct pci_attach_args *pa;
3493 {
3494 struct pciide_channel *cp;
3495 int channel;
3496 pcireg_t interface, st, mode;
3497 bus_size_t cmdsize, ctlsize;
3498
3499 if (!PDC_IS_268(sc)) {
3500 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3501 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3502 st), DEBUG_PROBE);
3503 }
3504 if (pciide_chipen(sc, pa) == 0)
3505 return;
3506
3507 /* turn off RAID mode */
3508 if (!PDC_IS_268(sc))
3509 st &= ~PDC2xx_STATE_IDERAID;
3510
3511 /*
3512 	 * We can't rely on the PCI_CLASS_REG content if the chip was in RAID
3513 	 * mode; we have to fake the interface.
3514 */
3515 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3516 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3517 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3518
3519 printf("%s: bus-master DMA support present",
3520 sc->sc_wdcdev.sc_dev.dv_xname);
3521 pciide_mapreg_dma(sc, pa);
3522 printf("\n");
3523 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3524 WDC_CAPABILITY_MODE;
3525 if (sc->sc_dma_ok) {
3526 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3527 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3528 sc->sc_wdcdev.irqack = pciide_irqack;
3529 }
3530 sc->sc_wdcdev.PIO_cap = 4;
3531 sc->sc_wdcdev.DMA_cap = 2;
3532 if (PDC_IS_265(sc))
3533 sc->sc_wdcdev.UDMA_cap = 5;
3534 else if (PDC_IS_262(sc))
3535 sc->sc_wdcdev.UDMA_cap = 4;
3536 else
3537 sc->sc_wdcdev.UDMA_cap = 2;
3538 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3539 pdc20268_setup_channel : pdc202xx_setup_channel;
3540 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3541 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3542
3543 if (!PDC_IS_268(sc)) {
3544 /* setup failsafe defaults */
3545 mode = 0;
3546 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3547 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3548 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3549 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3550 for (channel = 0;
3551 channel < sc->sc_wdcdev.nchannels;
3552 channel++) {
3553 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3554 "drive 0 initial timings 0x%x, now 0x%x\n",
3555 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3556 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3557 DEBUG_PROBE);
3558 pci_conf_write(sc->sc_pc, sc->sc_tag,
3559 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3560 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3561 "drive 1 initial timings 0x%x, now 0x%x\n",
3562 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3563 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3564 pci_conf_write(sc->sc_pc, sc->sc_tag,
3565 PDC2xx_TIM(channel, 1), mode);
3566 }
3567
3568 mode = PDC2xx_SCR_DMA;
3569 if (PDC_IS_262(sc)) {
3570 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3571 } else {
3572 /* the BIOS set it up this way */
3573 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3574 }
3575 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3576 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3577 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3578 "now 0x%x\n",
3579 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3580 PDC2xx_SCR),
3581 mode), DEBUG_PROBE);
3582 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3583 PDC2xx_SCR, mode);
3584
3585 /* controller initial state register is OK even without BIOS */
3586 /* Set DMA mode to IDE DMA compatibility */
3587 mode =
3588 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3589 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3590 DEBUG_PROBE);
3591 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3592 mode | 0x1);
3593 mode =
3594 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3595 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3596 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3597 mode | 0x1);
3598 }
3599
3600 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3601 cp = &sc->pciide_channels[channel];
3602 if (pciide_chansetup(sc, channel, interface) == 0)
3603 continue;
3604 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3605 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3606 printf("%s: %s channel ignored (disabled)\n",
3607 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3608 continue;
3609 }
3610 if (PDC_IS_265(sc))
3611 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3612 pdc20265_pci_intr);
3613 else
3614 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3615 pdc202xx_pci_intr);
3616 if (cp->hw_ok == 0)
3617 continue;
3618 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3619 st &= ~(PDC_IS_262(sc) ?
3620 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3621 pciide_map_compat_intr(pa, cp, channel, interface);
3622 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3623 }
3624 if (!PDC_IS_268(sc)) {
3625 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3626 "0x%x\n", st), DEBUG_PROBE);
3627 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3628 }
3629 return;
3630 }
3631
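/*
 * Per-channel setup for pre-20268 Promise chips: drive timings are built
 * from the pdc2xx_* tables and written to the PDC2xx_TIM registers; on
 * PDC_IS_262() chips the PDC262_U66 enable and the ATAPI UDMA bit are
 * adjusted per channel as well.
 */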
3632 void
3633 pdc202xx_setup_channel(chp)
3634 struct channel_softc *chp;
3635 {
3636 struct ata_drive_datas *drvp;
3637 int drive;
3638 pcireg_t mode, st;
3639 u_int32_t idedma_ctl, scr, atapi;
3640 struct pciide_channel *cp = (struct pciide_channel*)chp;
3641 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3642 int channel = chp->channel;
3643
3644 /* setup DMA if needed */
3645 pciide_channel_dma_setup(cp);
3646
3647 idedma_ctl = 0;
3648 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3649 sc->sc_wdcdev.sc_dev.dv_xname,
3650 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3651 DEBUG_PROBE);
3652
3653 /* Per channel settings */
3654 if (PDC_IS_262(sc)) {
3655 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3656 PDC262_U66);
3657 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3658 /* Trim UDMA mode */
3659 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3660 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3661 chp->ch_drive[0].UDMA_mode <= 2) ||
3662 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3663 chp->ch_drive[1].UDMA_mode <= 2)) {
3664 if (chp->ch_drive[0].UDMA_mode > 2)
3665 chp->ch_drive[0].UDMA_mode = 2;
3666 if (chp->ch_drive[1].UDMA_mode > 2)
3667 chp->ch_drive[1].UDMA_mode = 2;
3668 }
3669 /* Set U66 if needed */
3670 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3671 chp->ch_drive[0].UDMA_mode > 2) ||
3672 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3673 chp->ch_drive[1].UDMA_mode > 2))
3674 scr |= PDC262_U66_EN(channel);
3675 else
3676 scr &= ~PDC262_U66_EN(channel);
3677 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3678 PDC262_U66, scr);
3679 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3680 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3681 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3682 PDC262_ATAPI(channel))), DEBUG_PROBE);
3683 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3684 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3685 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3686 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3687 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3688 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3689 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3690 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3691 atapi = 0;
3692 else
3693 atapi = PDC262_ATAPI_UDMA;
3694 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3695 PDC262_ATAPI(channel), atapi);
3696 }
3697 }
3698 for (drive = 0; drive < 2; drive++) {
3699 drvp = &chp->ch_drive[drive];
3700 /* If no drive, skip */
3701 if ((drvp->drive_flags & DRIVE) == 0)
3702 continue;
3703 mode = 0;
3704 if (drvp->drive_flags & DRIVE_UDMA) {
3705 /* use Ultra/DMA */
3706 drvp->drive_flags &= ~DRIVE_DMA;
3707 mode = PDC2xx_TIM_SET_MB(mode,
3708 pdc2xx_udma_mb[drvp->UDMA_mode]);
3709 mode = PDC2xx_TIM_SET_MC(mode,
3710 pdc2xx_udma_mc[drvp->UDMA_mode]);
3711 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3712 } else if (drvp->drive_flags & DRIVE_DMA) {
3713 mode = PDC2xx_TIM_SET_MB(mode,
3714 pdc2xx_dma_mb[drvp->DMA_mode]);
3715 mode = PDC2xx_TIM_SET_MC(mode,
3716 pdc2xx_dma_mc[drvp->DMA_mode]);
3717 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3718 } else {
3719 mode = PDC2xx_TIM_SET_MB(mode,
3720 pdc2xx_dma_mb[0]);
3721 mode = PDC2xx_TIM_SET_MC(mode,
3722 pdc2xx_dma_mc[0]);
3723 }
3724 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3725 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3726 if (drvp->drive_flags & DRIVE_ATA)
3727 mode |= PDC2xx_TIM_PRE;
3728 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3729 if (drvp->PIO_mode >= 3) {
3730 mode |= PDC2xx_TIM_IORDY;
3731 if (drive == 0)
3732 mode |= PDC2xx_TIM_IORDYp;
3733 }
3734 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3735 "timings 0x%x\n",
3736 sc->sc_wdcdev.sc_dev.dv_xname,
3737 chp->channel, drive, mode), DEBUG_PROBE);
3738 pci_conf_write(sc->sc_pc, sc->sc_tag,
3739 PDC2xx_TIM(chp->channel, drive), mode);
3740 }
3741 if (idedma_ctl != 0) {
3742 /* Add software bits in status register */
3743 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3744 IDEDMA_CTL, idedma_ctl);
3745 }
3746 pciide_print_modes(cp);
3747 }
3748
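/*
 * Per-channel setup for the PDC20268 and newer chips.  These snoop the
 * SET_FEATURES command, so no timing registers are programmed here; we
 * only flag DMA-capable drives in IDEDMA_CTL and clamp UDMA modes above
 * 2 when the `u100' flag computed below is zero.
 */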
3749 void
3750 pdc20268_setup_channel(chp)
3751 struct channel_softc *chp;
3752 {
3753 struct ata_drive_datas *drvp;
3754 int drive;
3755 u_int32_t idedma_ctl;
3756 struct pciide_channel *cp = (struct pciide_channel*)chp;
3757 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3758 int u100;
3759
3760 /* setup DMA if needed */
3761 pciide_channel_dma_setup(cp);
3762
3763 idedma_ctl = 0;
3764
3765 	/* I don't know what this is for; FreeBSD does it ... */
3766 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3767 IDEDMA_CMD + 0x1, 0x0b);
3768
3769 /*
3770 	 * I don't know what this is for; FreeBSD checks it too, but it is
3771 	 * not cable-type detection.
3772 */
3773 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3774 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
3775
3776 for (drive = 0; drive < 2; drive++) {
3777 drvp = &chp->ch_drive[drive];
3778 /* If no drive, skip */
3779 if ((drvp->drive_flags & DRIVE) == 0)
3780 continue;
3781 if (drvp->drive_flags & DRIVE_UDMA) {
3782 /* use Ultra/DMA */
3783 drvp->drive_flags &= ~DRIVE_DMA;
3784 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3785 if (drvp->UDMA_mode > 2 && u100 == 0)
3786 drvp->UDMA_mode = 2;
3787 } else if (drvp->drive_flags & DRIVE_DMA) {
3788 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3789 }
3790 }
3791 	/* nothing to set up for modes; the controller snoops SET_FEATURES */
3792 if (idedma_ctl != 0) {
3793 /* Add software bits in status register */
3794 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3795 IDEDMA_CTL, idedma_ctl);
3796 }
3797 pciide_print_modes(cp);
3798 }
3799
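/*
 * PCI interrupt handler for Promise chips where PDC_IS_265() is false:
 * the per-channel interrupt status is read from the SCR register.
 */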
3800 int
3801 pdc202xx_pci_intr(arg)
3802 void *arg;
3803 {
3804 struct pciide_softc *sc = arg;
3805 struct pciide_channel *cp;
3806 struct channel_softc *wdc_cp;
3807 int i, rv, crv;
3808 u_int32_t scr;
3809
3810 rv = 0;
3811 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3812 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3813 cp = &sc->pciide_channels[i];
3814 wdc_cp = &cp->wdc_channel;
3815 		/* If a compat channel, skip. */
3816 if (cp->compat)
3817 continue;
3818 if (scr & PDC2xx_SCR_INT(i)) {
3819 crv = wdcintr(wdc_cp);
3820 if (crv == 0)
3821 printf("%s:%d: bogus intr (reg 0x%x)\n",
3822 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3823 else
3824 rv = 1;
3825 }
3826 }
3827 return rv;
3828 }
3829
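/*
 * PCI interrupt handler for the Ultra/100 and newer Promise chips; see
 * the comment below on why IDEDMA_CTL is polled instead of SCR.
 */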
3830 int
3831 pdc20265_pci_intr(arg)
3832 void *arg;
3833 {
3834 struct pciide_softc *sc = arg;
3835 struct pciide_channel *cp;
3836 struct channel_softc *wdc_cp;
3837 int i, rv, crv;
3838 u_int32_t dmastat;
3839
3840 rv = 0;
3841 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3842 cp = &sc->pciide_channels[i];
3843 wdc_cp = &cp->wdc_channel;
3844 		/* If a compat channel, skip. */
3845 if (cp->compat)
3846 continue;
3847 /*
3848 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
3849 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
3850 		 * so use that instead (this takes 2 register reads instead of
3851 		 * 1, but we can't do it any other way).
3852 */
3853 dmastat = bus_space_read_1(sc->sc_dma_iot,
3854 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3855 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3856 continue;
3857 crv = wdcintr(wdc_cp);
3858 if (crv == 0)
3859 printf("%s:%d: bogus intr\n",
3860 sc->sc_wdcdev.sc_dev.dv_xname, i);
3861 else
3862 rv = 1;
3863 }
3864 return rv;
3865 }
3866
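/*
 * Chip mapping for OPTi IDE controllers.  DMA is disabled on chip
 * revisions <= 0x12 (see the XXXSCW comment below), and the secondary
 * channel is skipped when OPTI_REG_INIT_CONTROL says it is disabled.
 */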
3867 void
3868 opti_chip_map(sc, pa)
3869 struct pciide_softc *sc;
3870 struct pci_attach_args *pa;
3871 {
3872 struct pciide_channel *cp;
3873 bus_size_t cmdsize, ctlsize;
3874 pcireg_t interface;
3875 u_int8_t init_ctrl;
3876 int channel;
3877
3878 if (pciide_chipen(sc, pa) == 0)
3879 return;
3880 printf("%s: bus-master DMA support present",
3881 sc->sc_wdcdev.sc_dev.dv_xname);
3882
3883 /*
3884 * XXXSCW:
3885 * There seem to be a couple of buggy revisions/implementations
3886 * of the OPTi pciide chipset. This kludge seems to fix one of
3887 * the reported problems (PR/11644) but still fails for the
3888 * other (PR/13151), although the latter may be due to other
3889 * issues too...
3890 */
3891 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3892 printf(" but disabled due to chip rev. <= 0x12");
3893 sc->sc_dma_ok = 0;
3894 } else
3895 pciide_mapreg_dma(sc, pa);
3896
3897 printf("\n");
3898
3899 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
3900 WDC_CAPABILITY_MODE;
3901 sc->sc_wdcdev.PIO_cap = 4;
3902 if (sc->sc_dma_ok) {
3903 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3904 sc->sc_wdcdev.irqack = pciide_irqack;
3905 sc->sc_wdcdev.DMA_cap = 2;
3906 }
3907 sc->sc_wdcdev.set_modes = opti_setup_channel;
3908
3909 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3910 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3911
3912 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3913 OPTI_REG_INIT_CONTROL);
3914
3915 interface = PCI_INTERFACE(pa->pa_class);
3916
3917 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3918 cp = &sc->pciide_channels[channel];
3919 if (pciide_chansetup(sc, channel, interface) == 0)
3920 continue;
3921 if (channel == 1 &&
3922 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3923 printf("%s: %s channel ignored (disabled)\n",
3924 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3925 continue;
3926 }
3927 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3928 pciide_pci_intr);
3929 if (cp->hw_ok == 0)
3930 continue;
3931 pciide_map_compat_intr(pa, cp, channel, interface);
3932 if (cp->hw_ok == 0)
3933 continue;
3934 opti_setup_channel(&cp->wdc_channel);
3935 }
3936 }
3937
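/*
 * Per-channel setup for OPTi controllers: timings are indexed by the PCI
 * bus speed strap and by the PIO mode (or DMA mode + 5).  Both drives on
 * a channel must share the same `Address Setup Time', so the faster
 * drive is slowed down when the two disagree.
 */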
3938 void
3939 opti_setup_channel(chp)
3940 struct channel_softc *chp;
3941 {
3942 struct ata_drive_datas *drvp;
3943 struct pciide_channel *cp = (struct pciide_channel*)chp;
3944 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3945 int drive, spd;
3946 int mode[2];
3947 u_int8_t rv, mr;
3948
3949 /*
3950 * The `Delay' and `Address Setup Time' fields of the
3951 * Miscellaneous Register are always zero initially.
3952 */
3953 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3954 mr &= ~(OPTI_MISC_DELAY_MASK |
3955 OPTI_MISC_ADDR_SETUP_MASK |
3956 OPTI_MISC_INDEX_MASK);
3957
3958 /* Prime the control register before setting timing values */
3959 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3960
3961 	/* Determine the clock rate of the PCI bus the chip is attached to */
3962 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3963 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3964
3965 /* setup DMA if needed */
3966 pciide_channel_dma_setup(cp);
3967
3968 for (drive = 0; drive < 2; drive++) {
3969 drvp = &chp->ch_drive[drive];
3970 /* If no drive, skip */
3971 if ((drvp->drive_flags & DRIVE) == 0) {
3972 mode[drive] = -1;
3973 continue;
3974 }
3975
3976 if ((drvp->drive_flags & DRIVE_DMA)) {
3977 /*
3978 * Timings will be used for both PIO and DMA,
3979 * so adjust DMA mode if needed
3980 */
3981 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3982 drvp->PIO_mode = drvp->DMA_mode + 2;
3983 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3984 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3985 drvp->PIO_mode - 2 : 0;
3986 if (drvp->DMA_mode == 0)
3987 drvp->PIO_mode = 0;
3988
3989 mode[drive] = drvp->DMA_mode + 5;
3990 } else
3991 mode[drive] = drvp->PIO_mode;
3992
3993 if (drive && mode[0] >= 0 &&
3994 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3995 /*
3996 * Can't have two drives using different values
3997 * for `Address Setup Time'.
3998 * Slow down the faster drive to compensate.
3999 */
4000 int d = (opti_tim_as[spd][mode[0]] >
4001 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4002
4003 mode[d] = mode[1-d];
4004 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4005 chp->ch_drive[d].DMA_mode = 0;
4006 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4007 }
4008 }
4009
4010 for (drive = 0; drive < 2; drive++) {
4011 int m;
4012 if ((m = mode[drive]) < 0)
4013 continue;
4014
4015 /* Set the Address Setup Time and select appropriate index */
4016 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4017 rv |= OPTI_MISC_INDEX(drive);
4018 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4019
4020 /* Set the pulse width and recovery timing parameters */
4021 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4022 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4023 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4024 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4025
4026 /* Set the Enhanced Mode register appropriately */
4027 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4028 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4029 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4030 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4031 }
4032
4033 /* Finally, enable the timings */
4034 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4035
4036 pciide_print_modes(cp);
4037 }
4038
4039 #define ACARD_IS_850(sc) \
4040 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4041
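/*
 * Chip mapping for Acard ATP850/ATP860 controllers.  The ATP850 is
 * limited to UDMA 2, newer chips to UDMA 4; on the latter the
 * ATP860_CTRL_INT bit is also cleared in ATP8x0_CTRL after the channels
 * are attached.
 */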
4042 void
4043 acard_chip_map(sc, pa)
4044 struct pciide_softc *sc;
4045 struct pci_attach_args *pa;
4046 {
4047 struct pciide_channel *cp;
4048 int i;
4049 pcireg_t interface;
4050 bus_size_t cmdsize, ctlsize;
4051
4052 if (pciide_chipen(sc, pa) == 0)
4053 return;
4054
4055 /*
4056 	 * When the chip is in native mode it identifies itself as a
4057 	 * 'misc mass storage' device; fake the interface in that case.
4058 */
4059 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4060 interface = PCI_INTERFACE(pa->pa_class);
4061 } else {
4062 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4063 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4064 }
4065
4066 printf("%s: bus-master DMA support present",
4067 sc->sc_wdcdev.sc_dev.dv_xname);
4068 pciide_mapreg_dma(sc, pa);
4069 printf("\n");
4070 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4071 WDC_CAPABILITY_MODE;
4072
4073 if (sc->sc_dma_ok) {
4074 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4075 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4076 sc->sc_wdcdev.irqack = pciide_irqack;
4077 }
4078 sc->sc_wdcdev.PIO_cap = 4;
4079 sc->sc_wdcdev.DMA_cap = 2;
4080 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4081
4082 sc->sc_wdcdev.set_modes = acard_setup_channel;
4083 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4084 sc->sc_wdcdev.nchannels = 2;
4085
4086 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4087 cp = &sc->pciide_channels[i];
4088 if (pciide_chansetup(sc, i, interface) == 0)
4089 continue;
4090 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4091 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4092 &ctlsize, pciide_pci_intr);
4093 } else {
4094 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4095 &cmdsize, &ctlsize);
4096 }
4097 if (cp->hw_ok == 0)
4098 return;
4099 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4100 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4101 wdcattach(&cp->wdc_channel);
4102 acard_setup_channel(&cp->wdc_channel);
4103 }
4104 if (!ACARD_IS_850(sc)) {
4105 u_int32_t reg;
4106 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4107 reg &= ~ATP860_CTRL_INT;
4108 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4109 }
4110 }
4111
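/*
 * Per-channel setup for Acard controllers: active/recovery timings come
 * from the acard_{act,rec}_* tables and go into the chip-specific
 * IDETIME and UDMA registers; on ATP860 and newer, UDMA modes above 2
 * are clamped according to the ATP860_CTRL_80P() cable bit.
 */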
4112 void
4113 acard_setup_channel(chp)
4114 struct channel_softc *chp;
4115 {
4116 struct ata_drive_datas *drvp;
4117 struct pciide_channel *cp = (struct pciide_channel*)chp;
4118 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4119 int channel = chp->channel;
4120 int drive;
4121 u_int32_t idetime, udma_mode;
4122 u_int32_t idedma_ctl;
4123
4124 /* setup DMA if needed */
4125 pciide_channel_dma_setup(cp);
4126
4127 if (ACARD_IS_850(sc)) {
4128 idetime = 0;
4129 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4130 udma_mode &= ~ATP850_UDMA_MASK(channel);
4131 } else {
4132 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4133 idetime &= ~ATP860_SETTIME_MASK(channel);
4134 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4135 udma_mode &= ~ATP860_UDMA_MASK(channel);
4136
4137 		/* check for an 80-pin cable */
4138 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4139 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4140 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4141 & ATP860_CTRL_80P(chp->channel)) {
4142 if (chp->ch_drive[0].UDMA_mode > 2)
4143 chp->ch_drive[0].UDMA_mode = 2;
4144 if (chp->ch_drive[1].UDMA_mode > 2)
4145 chp->ch_drive[1].UDMA_mode = 2;
4146 }
4147 }
4148 }
4149
4150 idedma_ctl = 0;
4151
4152 /* Per drive settings */
4153 for (drive = 0; drive < 2; drive++) {
4154 drvp = &chp->ch_drive[drive];
4155 /* If no drive, skip */
4156 if ((drvp->drive_flags & DRIVE) == 0)
4157 continue;
4158 /* add timing values, setup DMA if needed */
4159 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4160 (drvp->drive_flags & DRIVE_UDMA)) {
4161 /* use Ultra/DMA */
4162 if (ACARD_IS_850(sc)) {
4163 idetime |= ATP850_SETTIME(drive,
4164 acard_act_udma[drvp->UDMA_mode],
4165 acard_rec_udma[drvp->UDMA_mode]);
4166 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4167 acard_udma_conf[drvp->UDMA_mode]);
4168 } else {
4169 idetime |= ATP860_SETTIME(channel, drive,
4170 acard_act_udma[drvp->UDMA_mode],
4171 acard_rec_udma[drvp->UDMA_mode]);
4172 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4173 acard_udma_conf[drvp->UDMA_mode]);
4174 }
4175 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4176 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4177 (drvp->drive_flags & DRIVE_DMA)) {
4178 /* use Multiword DMA */
4179 drvp->drive_flags &= ~DRIVE_UDMA;
4180 if (ACARD_IS_850(sc)) {
4181 idetime |= ATP850_SETTIME(drive,
4182 acard_act_dma[drvp->DMA_mode],
4183 acard_rec_dma[drvp->DMA_mode]);
4184 } else {
4185 idetime |= ATP860_SETTIME(channel, drive,
4186 acard_act_dma[drvp->DMA_mode],
4187 acard_rec_dma[drvp->DMA_mode]);
4188 }
4189 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4190 } else {
4191 /* PIO only */
4192 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4193 if (ACARD_IS_850(sc)) {
4194 idetime |= ATP850_SETTIME(drive,
4195 acard_act_pio[drvp->PIO_mode],
4196 acard_rec_pio[drvp->PIO_mode]);
4197 } else {
4198 idetime |= ATP860_SETTIME(channel, drive,
4199 acard_act_pio[drvp->PIO_mode],
4200 acard_rec_pio[drvp->PIO_mode]);
4201 }
4202 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4203 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4204 | ATP8x0_CTRL_EN(channel));
4205 }
4206 }
4207
4208 if (idedma_ctl != 0) {
4209 /* Add software bits in status register */
4210 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4211 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4212 }
4213 pciide_print_modes(cp);
4214
4215 if (ACARD_IS_850(sc)) {
4216 pci_conf_write(sc->sc_pc, sc->sc_tag,
4217 ATP850_IDETIME(channel), idetime);
4218 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4219 } else {
4220 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4221 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4222 }
4223 }
4224
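/*
 * PCI interrupt handler for Acard controllers: interrupts that arrive
 * while no command is outstanding (WDCF_IRQ_WAIT clear) are drained and
 * acknowledged by writing the DMA status back.
 */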
4225 int
4226 acard_pci_intr(arg)
4227 void *arg;
4228 {
4229 struct pciide_softc *sc = arg;
4230 struct pciide_channel *cp;
4231 struct channel_softc *wdc_cp;
4232 int rv = 0;
4233 int dmastat, i, crv;
4234
4235 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4236 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4237 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4238 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4239 continue;
4240 cp = &sc->pciide_channels[i];
4241 wdc_cp = &cp->wdc_channel;
4242 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4243 (void)wdcintr(wdc_cp);
4244 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4245 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4246 continue;
4247 }
4248 crv = wdcintr(wdc_cp);
4249 if (crv == 0)
4250 printf("%s:%d: bogus intr\n",
4251 sc->sc_wdcdev.sc_dev.dv_xname, i);
4252 else if (crv == 1)
4253 rv = 1;
4254 else if (rv == 0)
4255 rv = crv;
4256 }
4257 return rv;
4258 }
4259
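/*
 * Match function for pci_find_device(): true for a Winbond 83c553
 * function 0 at revision <= 0x05, on which DMA must be disabled.
 */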
4260 static int
4261 sl82c105_bugchk(struct pci_attach_args *pa)
4262 {
4263
4264 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4265 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4266 return (0);
4267
4268 if (PCI_REVISION(pa->pa_class) <= 0x05)
4269 return (1);
4270
4271 return (0);
4272 }
4273
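/*
 * Chip mapping for the SL82C105 IDE controller: channels can be disabled
 * individually via the IDECSR register, and DMA is turned off when the
 * controller is part of a buggy 83c553 southbridge (see
 * sl82c105_bugchk() above).
 */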
4274 void
4275 sl82c105_chip_map(sc, pa)
4276 struct pciide_softc *sc;
4277 struct pci_attach_args *pa;
4278 {
4279 struct pciide_channel *cp;
4280 bus_size_t cmdsize, ctlsize;
4281 pcireg_t interface, idecr;
4282 int channel;
4283
4284 if (pciide_chipen(sc, pa) == 0)
4285 return;
4286
4287 printf("%s: bus-master DMA support present",
4288 sc->sc_wdcdev.sc_dev.dv_xname);
4289
4290 /*
4291 * Check to see if we're part of the Winbond 83c553 Southbridge.
4292 * If so, we need to disable DMA on rev. <= 5 of that chip.
4293 */
4294 if (pci_find_device(pa, sl82c105_bugchk)) {
4295 printf(" but disabled due to 83c553 rev. <= 0x05");
4296 sc->sc_dma_ok = 0;
4297 } else
4298 pciide_mapreg_dma(sc, pa);
4299 printf("\n");
4300
4301 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4302 WDC_CAPABILITY_MODE;
4303 sc->sc_wdcdev.PIO_cap = 4;
4304 if (sc->sc_dma_ok) {
4305 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4306 sc->sc_wdcdev.irqack = pciide_irqack;
4307 sc->sc_wdcdev.DMA_cap = 2;
4308 }
4309 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4310
4311 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4312 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4313
4314 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4315
4316 interface = PCI_INTERFACE(pa->pa_class);
4317
4318 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4319 cp = &sc->pciide_channels[channel];
4320 if (pciide_chansetup(sc, channel, interface) == 0)
4321 continue;
4322 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4323 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4324 printf("%s: %s channel ignored (disabled)\n",
4325 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4326 continue;
4327 }
4328 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4329 pciide_pci_intr);
4330 if (cp->hw_ok == 0)
4331 continue;
4332 pciide_map_compat_intr(pa, cp, channel, interface);
4333 if (cp->hw_ok == 0)
4334 continue;
4335 sl82c105_setup_channel(&cp->wdc_channel);
4336 }
4337 }
4338
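/*
 * Per-drive setup for the SL82C105: command on/off timings come from the
 * symph_mw_dma_times/symph_pio_times tables; multiword DMA is only kept
 * when its timings are compatible with a PIO mode of at least 3.
 */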
4339 void
4340 sl82c105_setup_channel(chp)
4341 struct channel_softc *chp;
4342 {
4343 struct ata_drive_datas *drvp;
4344 struct pciide_channel *cp = (struct pciide_channel*)chp;
4345 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4346 int pxdx_reg, drive;
4347 pcireg_t pxdx;
4348
4349 /* Set up DMA if needed. */
4350 pciide_channel_dma_setup(cp);
4351
4352 for (drive = 0; drive < 2; drive++) {
4353 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4354 : SYMPH_P1D0CR) + (drive * 4);
4355
4356 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4357
4358 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4359 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4360
4361 drvp = &chp->ch_drive[drive];
4362 /* If no drive, skip. */
4363 if ((drvp->drive_flags & DRIVE) == 0) {
4364 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4365 continue;
4366 }
4367
4368 if (drvp->drive_flags & DRIVE_DMA) {
4369 /*
4370 * Timings will be used for both PIO and DMA,
4371 * so adjust DMA mode if needed.
4372 */
4373 if (drvp->PIO_mode >= 3) {
4374 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4375 drvp->DMA_mode = drvp->PIO_mode - 2;
4376 if (drvp->DMA_mode < 1) {
4377 /*
4378 * Can't mix both PIO and DMA.
4379 * Disable DMA.
4380 */
4381 drvp->drive_flags &= ~DRIVE_DMA;
4382 }
4383 } else {
4384 /*
4385 * Can't mix both PIO and DMA. Disable
4386 * DMA.
4387 */
4388 drvp->drive_flags &= ~DRIVE_DMA;
4389 }
4390 }
4391
4392 if (drvp->drive_flags & DRIVE_DMA) {
4393 /* Use multi-word DMA. */
4394 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4395 PxDx_CMD_ON_SHIFT;
4396 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4397 } else {
4398 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4399 PxDx_CMD_ON_SHIFT;
4400 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4401 }
4402
4403 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4404
4405 /* ...and set the mode for this drive. */
4406 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4407 }
4408
4409 pciide_print_modes(cp);
4410 }
4411
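/*
 * Chip mapping for ServerWorks OSB4/CSB5 IDE: the UDMA capability
 * depends on the product and revision, and bits in PCI function 0 of
 * the same device (config register 0x64) are adjusted once the channels
 * are attached.
 */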
4412 void
4413 serverworks_chip_map(sc, pa)
4414 struct pciide_softc *sc;
4415 struct pci_attach_args *pa;
4416 {
4417 struct pciide_channel *cp;
4418 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4419 pcitag_t pcib_tag;
4420 int channel;
4421 bus_size_t cmdsize, ctlsize;
4422
4423 if (pciide_chipen(sc, pa) == 0)
4424 return;
4425
4426 printf("%s: bus-master DMA support present",
4427 sc->sc_wdcdev.sc_dev.dv_xname);
4428 pciide_mapreg_dma(sc, pa);
4429 printf("\n");
4430 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4431 WDC_CAPABILITY_MODE;
4432
4433 if (sc->sc_dma_ok) {
4434 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4435 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4436 sc->sc_wdcdev.irqack = pciide_irqack;
4437 }
4438 sc->sc_wdcdev.PIO_cap = 4;
4439 sc->sc_wdcdev.DMA_cap = 2;
4440 switch (sc->sc_pp->ide_product) {
4441 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4442 sc->sc_wdcdev.UDMA_cap = 2;
4443 break;
4444 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4445 if (PCI_REVISION(pa->pa_class) < 0x92)
4446 sc->sc_wdcdev.UDMA_cap = 4;
4447 else
4448 sc->sc_wdcdev.UDMA_cap = 5;
4449 break;
4450 }
4451
4452 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4453 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4454 sc->sc_wdcdev.nchannels = 2;
4455
4456 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4457 cp = &sc->pciide_channels[channel];
4458 if (pciide_chansetup(sc, channel, interface) == 0)
4459 continue;
4460 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4461 serverworks_pci_intr);
4462 if (cp->hw_ok == 0)
4463 return;
4464 pciide_map_compat_intr(pa, cp, channel, interface);
4465 if (cp->hw_ok == 0)
4466 return;
4467 serverworks_setup_channel(&cp->wdc_channel);
4468 }
4469
4470 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4471 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4472 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4473 }
4474
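/*
 * Per-channel setup for ServerWorks IDE: PIO/DMA/UDMA timings are packed
 * per drive into config registers 0x40, 0x44, 0x48 and 0x54 (0x48 is
 * left alone on the OSB4); UDMA modes above 2 are clamped when the
 * subsystem ID does not report an 80-pin cable for the channel.
 */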
4475 void
4476 serverworks_setup_channel(chp)
4477 struct channel_softc *chp;
4478 {
4479 struct ata_drive_datas *drvp;
4480 struct pciide_channel *cp = (struct pciide_channel*)chp;
4481 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4482 int channel = chp->channel;
4483 int drive, unit;
4484 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4485 u_int32_t idedma_ctl;
4486 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4487 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4488
4489 /* setup DMA if needed */
4490 pciide_channel_dma_setup(cp);
4491
4492 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4493 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4494 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4495 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4496
4497 pio_time &= ~(0xffff << (16 * channel));
4498 dma_time &= ~(0xffff << (16 * channel));
4499 pio_mode &= ~(0xff << (8 * channel + 16));
4500 udma_mode &= ~(0xff << (8 * channel + 16));
4501 udma_mode &= ~(3 << (2 * channel));
4502
4503 idedma_ctl = 0;
4504
4505 /* Per drive settings */
4506 for (drive = 0; drive < 2; drive++) {
4507 drvp = &chp->ch_drive[drive];
4508 /* If no drive, skip */
4509 if ((drvp->drive_flags & DRIVE) == 0)
4510 continue;
4511 unit = drive + 2 * channel;
4512 /* add timing values, setup DMA if needed */
4513 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4514 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4515 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4516 (drvp->drive_flags & DRIVE_UDMA)) {
4517 /* use Ultra/DMA, check for 80-pin cable */
4518 if (drvp->UDMA_mode > 2 &&
4519 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4520 drvp->UDMA_mode = 2;
4521 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4522 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4523 udma_mode |= 1 << unit;
4524 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4525 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4526 (drvp->drive_flags & DRIVE_DMA)) {
4527 /* use Multiword DMA */
4528 drvp->drive_flags &= ~DRIVE_UDMA;
4529 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4530 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4531 } else {
4532 /* PIO only */
4533 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4534 }
4535 }
4536
4537 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4538 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4539 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4540 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4541 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4542
4543 if (idedma_ctl != 0) {
4544 /* Add software bits in status register */
4545 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4546 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4547 }
4548 pciide_print_modes(cp);
4549 }
4550
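/*
 * PCI interrupt handler for ServerWorks IDE; same logic as
 * hpt_pci_intr() above.
 */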
4551 int
4552 serverworks_pci_intr(arg)
4553 void *arg;
4554 {
4555 struct pciide_softc *sc = arg;
4556 struct pciide_channel *cp;
4557 struct channel_softc *wdc_cp;
4558 int rv = 0;
4559 int dmastat, i, crv;
4560
4561 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4562 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4563 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4564 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4565 IDEDMA_CTL_INTR)
4566 continue;
4567 cp = &sc->pciide_channels[i];
4568 wdc_cp = &cp->wdc_channel;
4569 crv = wdcintr(wdc_cp);
4570 if (crv == 0) {
4571 printf("%s:%d: bogus intr\n",
4572 sc->sc_wdcdev.sc_dev.dv_xname, i);
4573 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4574 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4575 } else
4576 rv = 1;
4577 }
4578 return rv;
4579 }
4580