/* $NetBSD: pciide.c,v 1.160 2002/07/22 20:56:57 bouyer Exp $ */


/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Manuel Bouyer.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou
 *      for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.160 2002/07/22 20:56:57 bouyer Exp $");

#ifndef WDCDEBUG
#define WDCDEBUG
#endif

#define DEBUG_DMA   0x01
#define DEBUG_XFERS  0x02
#define DEBUG_FUNCS  0x08
#define DEBUG_PROBE  0x10
#ifdef WDCDEBUG
int wdcdebug_pciide_mask = 0;
#define WDCDEBUG_PRINT(args, level) \
	if (wdcdebug_pciide_mask & (level)) printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/pciide_acard_reg.h>
#include <dev/pci/pciide_sl82c105_reg.h>
#include <dev/pci/cy82c693var.h>

#include "opt_pciide.h"

/* inlines for reading/writing 8-bit PCI registers */
static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
    int));
static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
    int, u_int8_t));

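/*
 * Read one byte of PCI configuration space: fetch the 32-bit dword that
 * contains it and shift the wanted byte lane down (e.g. register 0x42 is
 * bits 23:16 of the dword at 0x40).  The write side does the matching
 * read-modify-write of the containing dword.
 */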
static __inline u_int8_t
pciide_pci_read(pc, pa, reg)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
{

	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

static __inline void
pciide_pci_write(pc, pa, reg, val)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
	u_int8_t val;
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}

void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));

void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void piix_setup_channel __P((struct channel_softc*));
void piix3_4_setup_channel __P((struct channel_softc*));
static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));

void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void amd7x6_setup_channel __P((struct channel_softc*));

void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void apollo_setup_channel __P((struct channel_softc*));

void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_setup_channel __P((struct channel_softc*));
void cmd_channel_map __P((struct pci_attach_args *,
    struct pciide_softc *, int));
int cmd_pci_intr __P((void *));
void cmd646_9_irqack __P((struct channel_softc *));

void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cy693_setup_channel __P((struct channel_softc*));

void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void sis_setup_channel __P((struct channel_softc*));
static int sis_hostbr_match __P((struct pci_attach_args *));

void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void acer_setup_channel __P((struct channel_softc*));
int acer_pci_intr __P((void *));

void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void pdc202xx_setup_channel __P((struct channel_softc*));
void pdc20268_setup_channel __P((struct channel_softc*));
int pdc202xx_pci_intr __P((void *));
int pdc20265_pci_intr __P((void *));

void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void opti_setup_channel __P((struct channel_softc*));

void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void hpt_setup_channel __P((struct channel_softc*));
int hpt_pci_intr __P((void *));

void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void acard_setup_channel __P((struct channel_softc*));
int acard_pci_intr __P((void *));

void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void serverworks_setup_channel __P((struct channel_softc*));
int serverworks_pci_intr __P((void *));

void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void sl82c105_setup_channel __P((struct channel_softc*));

void pciide_channel_dma_setup __P((struct pciide_channel *));
int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
int pciide_dma_init __P((void*, int, int, void *, size_t, int));
void pciide_dma_start __P((void*, int, int));
int pciide_dma_finish __P((void*, int, int, int));
void pciide_irqack __P((struct channel_softc *));
void pciide_print_modes __P((struct pciide_channel *));

struct pciide_product_desc {
	u_int32_t ide_product;
	int ide_flags;
	const char *ide_name;
	/* map and setup chip, probe drives */
	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
#define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARS ignore upper word */
/* Default product description for devices not known to this driver */
const struct pciide_product_desc default_product_desc = {
	0,
	0,
	"Generic PCI IDE controller",
	default_chip_map,
};

const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_82092AA,
	  0,
	  "Intel 82092AA IDE controller",
	  default_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371FB_IDE,
	  0,
	  "Intel 82371FB IDE controller (PIIX)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371SB_IDE,
	  0,
	  "Intel 82371SB IDE Interface (PIIX3)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371AB_IDE,
	  0,
	  "Intel 82371AB IDE controller (PIIX4)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82440MX_IDE,
	  0,
	  "Intel 82440MX IDE controller",
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801AA_IDE,
	  0,
	  "Intel 82801AA IDE Controller (ICH)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801AB_IDE,
	  0,
	  "Intel 82801AB IDE Controller (ICH0)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801BA_IDE,
	  0,
	  "Intel 82801BA IDE Controller (ICH2)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801BAM_IDE,
	  0,
	  "Intel 82801BAM IDE Controller (ICH2)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801CA_IDE_1,
	  0,
	  "Intel 82801CA IDE Controller",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801CA_IDE_2,
	  0,
	  "Intel 82801CA IDE Controller",
	  piix_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,
	  0,
	  "Advanced Micro Devices AMD756 IDE Controller",
	  amd7x6_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC766_IDE,
	  0,
	  "Advanced Micro Devices AMD766 IDE Controller",
	  amd7x6_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC768_IDE,
	  0,
	  "Advanced Micro Devices AMD768 IDE Controller",
	  amd7x6_chip_map
	},
	{ PCI_PRODUCT_AMD_PBC8111_IDE,
	  0,
	  "Advanced Micro Devices AMD8111 IDE Controller",
	  amd7x6_chip_map
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  0,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  0,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  0,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
	  0,
	  NULL,
	  apollo_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
	  0,
	  NULL,
	  apollo_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693,
	  IDE_16BIT_IOSPACE,
	  "Cypress 82C693 IDE Controller",
	  cy693_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5597_IDE,
	  0,
	  "Silicon Integrated System 5597/5598 IDE controller",
	  sis_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,
	  0,
	  "Acer Labs M5229 UDMA IDE Controller",
	  acer_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_ULTRA33,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA66,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100X,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100TX2,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA133,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra133/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621,
	  0,
	  "OPTi 82c621 PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82C568,
	  0,
	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82D568,
	  0,
	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366,
	  IDE_PCI_CLASS_OVERRIDE,
	  NULL,
	  hpt_chip_map,
	},
	{ PCI_PRODUCT_TRIONES_HPT374,
	  IDE_PCI_CLASS_OVERRIDE,
	  NULL,
	  hpt_chip_map
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_acard_products[] = {
	{ PCI_PRODUCT_ACARD_ATP850U,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Acard ATP850U Ultra33 IDE Controller",
	  acard_chip_map,
	},
	{ PCI_PRODUCT_ACARD_ATP860,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Acard ATP860 Ultra66 IDE Controller",
	  acard_chip_map,
	},
	{ PCI_PRODUCT_ACARD_ATP860A,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Acard ATP860-A Ultra66 IDE Controller",
	  acard_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

const struct pciide_product_desc pciide_serverworks_products[] = {
	{ PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
	  0,
	  "ServerWorks OSB4 IDE Controller",
	  serverworks_chip_map,
	},
	{ PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
	  0,
	  "ServerWorks CSB5 IDE Controller",
	  serverworks_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_symphony_products[] = {
	{ PCI_PRODUCT_SYMPHONY_82C105,
	  0,
	  "Symphony Labs 82C105 IDE controller",
	  sl82c105_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_winbond_products[] = {
	{ PCI_PRODUCT_WINBOND_W83C553F_1,
	  0,
	  "Winbond W83C553F IDE controller",
	  sl82c105_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products },
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
	{ PCI_VENDOR_VIATECH, pciide_via_products },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
	{ PCI_VENDOR_SIS, pciide_sis_products },
	{ PCI_VENDOR_ALI, pciide_acer_products },
	{ PCI_VENDOR_PROMISE, pciide_promise_products },
	{ PCI_VENDOR_AMD, pciide_amd_products },
	{ PCI_VENDOR_OPTI, pciide_opti_products },
	{ PCI_VENDOR_TRIONES, pciide_triones_products },
	{ PCI_VENDOR_ACARD, pciide_acard_products },
	{ PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
	{ PCI_VENDOR_SYMPHONY, pciide_symphony_products },
	{ PCI_VENDOR_WINBOND, pciide_winbond_products },
	{ 0, NULL }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01
#define PCIIDE_OPTIONS_NODMA	0x02

int pciide_match __P((struct device *, struct cfdata *, void *));
void pciide_attach __P((struct device *, struct device *, void *));

struct cfattach pciide_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};
int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
int pciide_mapregs_compat __P((struct pci_attach_args *,
    struct pciide_channel *, int, bus_size_t *, bus_size_t *));
int pciide_mapregs_native __P((struct pci_attach_args *,
    struct pciide_channel *, bus_size_t *, bus_size_t *,
    int (*pci_intr) __P((void *))));
void pciide_mapreg_dma __P((struct pciide_softc *,
    struct pci_attach_args *));
int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
void pciide_mapchan __P((struct pci_attach_args *,
    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
    int (*pci_intr) __P((void *))));
int pciide_chan_candisable __P((struct pciide_channel *));
void pciide_map_compat_intr __P((struct pci_attach_args *,
    struct pciide_channel *, int, int));
int pciide_compat_intr __P((void *));
int pciide_pci_intr __P((void *));
const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));

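/*
 * Find the product descriptor for a PCI ID: locate the vendor's product
 * table, then scan it for a matching product.  Returns NULL if the device
 * is not known to this driver.
 */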
const struct pciide_product_desc *
pciide_lookup_product(id)
	u_int32_t id;
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;

	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if ((pp = vp->ide_products) == NULL)
		return NULL;

	for (; pp->chip_map != NULL; pp++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (pp->chip_map == NULL)
		return NULL;
	return pp;
}

int
pciide_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		return (1);
	}

	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers.  Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
		return (1);
	}

	return (0);
}

void
pciide_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	struct pciide_softc *sc = (struct pciide_softc *)self;
	pcireg_t csr;
	char devinfo[256];
	const char *displaydev;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL) {
		sc->sc_pp = &default_product_desc;
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
		displaydev = devinfo;
	} else
		displaydev = sc->sc_pp->ide_name;

	/* if displaydev == NULL, printf is done in chip-specific map */
	if (displaydev)
		printf(": %s (rev. 0x%02x)\n", displaydev,
		    PCI_REVISION(pa->pa_class));

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
#ifdef WDCDEBUG
	if (wdcdebug_pciide_mask & DEBUG_PROBE)
		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
	sc->sc_pp->chip_map(sc, pa);

	if (sc->sc_dma_ok) {
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	}
	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}

/* tell whether the chip is enabled or not */
int
pciide_chipen(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t csr;
	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		printf("%s: device disabled (at %s)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
		    "device" : "bridge");
		return 0;
	}
	return 1;
}

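/*
 * Map the command and control register blocks of a compatibility-mode
 * channel at their fixed legacy I/O addresses.
 */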
int
pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan;
	bus_size_t *cmdsizep, *ctlsizep;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	wdc_cp->cmd_iot = pa->pa_iot;
	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s channel cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;
	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}

	return (1);
}

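/*
 * Map the command and control register blocks of a native-PCI channel from
 * the channel's BARs, and establish the (shared) native-PCI interrupt the
 * first time a channel is mapped.
 */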
int
pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr) __P((void *));
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;

	cp->compat = 0;

	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return 0;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc);
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return 0;
		}
	}
	cp->ih = sc->sc_pci_ih;
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    PCI_MAPREG_TYPE_IO, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
		printf("%s: couldn't map %s channel cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}

	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    PCI_MAPREG_TYPE_IO, 0,
	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
		printf("%s: couldn't map %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return 0;
	}
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register, the control register is at offset 2.  Pass the generic
	 * code a handle for only one byte at the right offset.
	 */
	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
	    &wdc_cp->ctl_ioh) != 0) {
		printf("%s: unable to subregion %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
		return 0;
	}
	return (1);
}

void
pciide_mapreg_dma(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done.  If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", but unused (registers at unsafe address "
			    "%#lx)", (unsigned long)addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}

		if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
		    PCIIDE_OPTIONS_NODMA) {
			printf(", but unused (forced off by config file)");
			sc->sc_dma_ok = 0;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", but unsupported register maptype (0x%x)", maptype);
	}
}

int
pciide_compat_intr(arg)
	void *arg;
{
	struct pciide_channel *cp = arg;

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p\n", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}

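/*
 * Native-PCI interrupt handler: poll every native channel that is waiting
 * for an interrupt and let wdcintr() decide whether it was ours.
 */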
int
pciide_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel, skip. */
		if (cp->compat)
			continue;
		/* If this channel is not waiting for an intr, skip. */
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

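/*
 * Per-channel DMA setup: allocate the DMA descriptor table for each drive
 * that negotiated (U)DMA, and fall back to PIO (clear the DMA flags) if the
 * controller has no usable DMA or the allocation fails.
 */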
void
pciide_channel_dma_setup(cp)
	struct pciide_channel *cp;
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

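/*
 * Allocate and map the physical region descriptor table for one drive, and
 * create the DMA maps used for the table itself and for data transfers.
 */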
int
pciide_dma_table_setup(sc, channel, drive)
	struct pciide_softc *sc;
	int channel, drive;
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return 0;

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
	    (unsigned long)seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		printf("%s:%d: unable to create table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to load table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
	    DEBUG_PROBE);
	/* Create an xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		printf("%s:%d: unable to create xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	return 0;
}

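/*
 * Load the data buffer into the xfer DMA map, build the physical region
 * descriptor table from the resulting segments and program the bus-master
 * registers (status, table address, direction) for the coming transfer.
 */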
int
pciide_dma_init(v, channel, drive, databuf, datalen, flags)
	void *v;
	int channel, drive;
	void *databuf;
	size_t datalen;
	int flags;
{
	struct pciide_softc *sc = v;
	int error, seg;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (error) {
		printf("%s:%d: unable to load xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Maps are ready. Start DMA function */
#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif

	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0);
	/* remember flags */
	dma_maps->dma_flags = flags;
	return 0;
}

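/* Kick off a bus-master transfer previously set up by pciide_dma_init(). */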
void
pciide_dma_start(v, channel, drive)
	void *v;
	int channel, drive;
{
	struct pciide_softc *sc = v;

	WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
}

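/*
 * Stop the bus-master engine, unload the data buffer map and convert the
 * controller status into WDC_DMAST_* flags for the caller.
 */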
int
pciide_dma_finish(v, channel, drive, force)
	void *v;
	int channel, drive;
	int force;
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	if ((status & IDEDMA_CTL_ERR) != 0) {
		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
		error |= WDC_DMAST_ERR;
	}

	if ((status & IDEDMA_CTL_INTR) == 0) {
		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}

void
pciide_irqack(chp)
	struct channel_softc *chp;
{
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/* clear status bits in IDE DMA registers */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
}

/* some common code used by several chip_map */
int
pciide_chansetup(sc, channel, interface)
	struct pciide_softc *sc;
	int channel;
	pcireg_t interface;
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.channel = channel;
	cp->wdc_channel.wdc = &sc->sc_wdcdev;
	cp->wdc_channel.ch_queue =
	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		printf("%s: %s channel: "
		    "can't allocate memory for command queue\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}
	printf("%s: %s channel %s to %s mode\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");
	return 1;
}

/* some common code used by several chip channel_map */
void
pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	pcireg_t interface;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr) __P((void *));
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
		    pci_intr);
	else
		cp->hw_ok = pciide_mapregs_compat(pa, cp,
		    wdc_cp->channel, cmdsizep, ctlsizep);

	if (cp->hw_ok == 0)
		return;
	wdc_cp->data32iot = wdc_cp->cmd_iot;
	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
	wdcattach(wdc_cp);
}

/*
 * Generic code to determine whether a channel can be disabled.  Returns 1
 * if the channel can be disabled, 0 if not.
 */
int
pciide_chan_candisable(cp)
	struct pciide_channel *cp;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
		printf("%s: disabling %s channel (no drives)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
		return 1;
	}
	return 0;
}

/*
 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
 * Set hw_ok=0 on failure
 */
void
pciide_map_compat_intr(pa, cp, compatchan, interface)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan, interface;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (cp->hw_ok == 0)
		return;
	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
		return;

#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
	    pa, compatchan, pciide_compat_intr, cp);
	if (cp->ih == NULL) {
#endif
		printf("%s: no compatibility interrupt for use by %s "
		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	}
#endif
}

void
pciide_print_modes(cp)
	struct pciide_channel *cp;
{
	wdc_print_modes(&cp->wdc_channel);
}

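/*
 * Generic chip_map used for controllers without chip-specific support:
 * map the channels, verify that each one really belongs to this controller,
 * and set up DMA maps for drives that negotiated DMA.
 */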
void
default_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t csr;
	int channel, drive;
	struct ata_drive_datas *drvp;
	u_int8_t idedma_ctl;
	bus_size_t cmdsize, ctlsize;
	char *failreason;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
		printf("%s: bus-master DMA support present",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pp == &default_product_desc &&
		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
		    PCIIDE_OPTIONS_DMA) == 0) {
			printf(", but unused (no driver support)");
			sc->sc_dma_ok = 0;
		} else {
			pciide_mapreg_dma(sc, pa);
			if (sc->sc_dma_ok != 0)
				printf(", used without full driver "
				    "support");
		}
	} else {
		printf("%s: hardware does not support DMA",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		sc->sc_dma_ok = 0;
	}
	printf("\n");
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 0;
	sc->sc_wdcdev.DMA_cap = 0;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, pciide_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp,
			    channel, &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			continue;
		/*
		 * Check to see if something appears to be there.
		 */
		failreason = NULL;
		if (!wdcprobe(&cp->wdc_channel)) {
			failreason = "not responding; disabled or no drives?";
			goto next;
		}
		/*
		 * Now, make sure it's actually attributable to this PCI IDE
		 * channel by trying to access the channel again while the
		 * PCI IDE controller's I/O space is disabled.  (If the
		 * channel no longer appears to be there, it belongs to
		 * this controller.)  YUCK!
		 */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
		    csr & ~PCI_COMMAND_IO_ENABLE);
		if (wdcprobe(&cp->wdc_channel))
			failreason = "other hardware responding at addresses";
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG, csr);
next:
		if (failreason) {
			printf("%s: %s channel ignored (%s)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
			    failreason);
			cp->hw_ok = 0;
			bus_space_unmap(cp->wdc_channel.cmd_iot,
			    cp->wdc_channel.cmd_ioh, cmdsize);
			if (interface & PCIIDE_INTERFACE_PCI(channel))
				bus_space_unmap(cp->wdc_channel.ctl_iot,
				    cp->ctl_baseioh, ctlsize);
			else
				bus_space_unmap(cp->wdc_channel.ctl_iot,
				    cp->wdc_channel.ctl_ioh, ctlsize);
		} else {
			pciide_map_compat_intr(pa, cp, channel, interface);
		}
		if (cp->hw_ok) {
			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
			wdcattach(&cp->wdc_channel);
		}
	}

	if (sc->sc_dma_ok == 0)
		return;

	/* Allocate DMA maps */
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		idedma_ctl = 0;
		cp = &sc->pciide_channels[channel];
		for (drive = 0; drive < 2; drive++) {
			drvp = &cp->wdc_channel.ch_drive[drive];
			/* If no drive, skip */
			if ((drvp->drive_flags & DRIVE) == 0)
				continue;
			if ((drvp->drive_flags & DRIVE_DMA) == 0)
				continue;
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup */
				printf("%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive);
				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
			}
			printf("%s:%d:%d: using DMA data transfers\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    channel, drive);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		if (idedma_ctl != 0) {
			/* Add software bits in status register */
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
			    idedma_ctl);
		}
	}
}

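/*
 * chip_map for the Intel PIIX/ICH family: advertise the (U)DMA capabilities
 * of the exact product, then configure each channel; these controllers are
 * handled as compat-only pciide devices.
 */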
void
piix_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		switch(sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82440MX_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
		case PCI_PRODUCT_INTEL_82801BA_IDE:
		case PCI_PRODUCT_INTEL_82801BAM_IDE:
		case PCI_PRODUCT_INTEL_82801CA_IDE_1:
		case PCI_PRODUCT_INTEL_82801CA_IDE_2:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		}
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	switch(sc->sc_pp->ide_product) {
	case PCI_PRODUCT_INTEL_82801AA_IDE:
		sc->sc_wdcdev.UDMA_cap = 4;
		break;
	case PCI_PRODUCT_INTEL_82801BA_IDE:
	case PCI_PRODUCT_INTEL_82801BAM_IDE:
	case PCI_PRODUCT_INTEL_82801CA_IDE_1:
	case PCI_PRODUCT_INTEL_82801CA_IDE_2:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	default:
		sc->sc_wdcdev.UDMA_cap = 2;
	}
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		/* PIIX is compat-only */
		if (pciide_chansetup(sc, channel, 0) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* PIIX are compat-only pciide devices */
		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
		}
		pciide_map_compat_intr(pa, cp, channel, 0);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}

	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}

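/*
 * Per-channel mode setup for the original PIIX, which shares one set of
 * timings between the master and the slave drive and therefore has to pick
 * the best common mode.
 */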
void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess with the drives' modes: the PIIX can't have
	 * different timings for master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives support DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		drvp[0].DMA_mode = mode[0];
		drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if its mode is not compatible
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If neither drive uses DMA, take the lower mode, unless
	 * one of them is below PIO mode 2
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are set up */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we are here, none of the drives is using DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller. Enable
	 * it per-drive
	 */
	for (drive = 0; drive < 2; drive++) {
		/* If no drive, skip */
		if ((drvp[drive].drive_flags & DRIVE) == 0)
			continue;
		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
		if (drvp[drive].drive_flags & DRIVE_DMA)
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pciide_print_modes(cp);
}

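/*
 * Per-channel mode setup for the PIIX3/PIIX4/ICH variants: program IDETIM,
 * SIDETIM, the Ultra-DMA control/timing register and the chip config
 * register according to the modes negotiated for each drive.
 */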
1734 void
1735 piix3_4_setup_channel(chp)
1736 struct channel_softc *chp;
1737 {
1738 struct ata_drive_datas *drvp;
1739 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1740 struct pciide_channel *cp = (struct pciide_channel*)chp;
1741 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1742 int drive;
1743 int channel = chp->channel;
1744
1745 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1746 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1747 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1748 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1749 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1750 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1751 PIIX_SIDETIM_RTC_MASK(channel));
1752
1753 idedma_ctl = 0;
1754 /* If channel disabled, no need to go further */
1755 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1756 return;
1757 /* set up new idetim: Enable IDE registers decode */
1758 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1759
1760 /* setup DMA if needed */
1761 pciide_channel_dma_setup(cp);
1762
1763 for (drive = 0; drive < 2; drive++) {
1764 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1765 PIIX_UDMATIM_SET(0x3, channel, drive));
1766 drvp = &chp->ch_drive[drive];
1767 /* If no drive, skip */
1768 if ((drvp->drive_flags & DRIVE) == 0)
1769 continue;
1770 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1771 (drvp->drive_flags & DRIVE_UDMA) == 0))
1772 goto pio;
1773
1774 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1775 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1776 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1777 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1778 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1779 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1780 ideconf |= PIIX_CONFIG_PINGPONG;
1781 }
1782 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1783 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1784 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1785 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1786 /* setup Ultra/100 */
1787 if (drvp->UDMA_mode > 2 &&
1788 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1789 drvp->UDMA_mode = 2;
1790 if (drvp->UDMA_mode > 4) {
1791 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1792 } else {
1793 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1794 if (drvp->UDMA_mode > 2) {
1795 ideconf |= PIIX_CONFIG_UDMA66(channel,
1796 drive);
1797 } else {
1798 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1799 drive);
1800 }
1801 }
1802 }
1803 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1804 /* setup Ultra/66 */
1805 if (drvp->UDMA_mode > 2 &&
1806 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1807 drvp->UDMA_mode = 2;
1808 if (drvp->UDMA_mode > 2)
1809 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1810 else
1811 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1812 }
1813 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1814 (drvp->drive_flags & DRIVE_UDMA)) {
1815 /* use Ultra/DMA */
1816 drvp->drive_flags &= ~DRIVE_DMA;
1817 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1818 udmareg |= PIIX_UDMATIM_SET(
1819 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1820 } else {
1821 /* use Multiword DMA */
1822 drvp->drive_flags &= ~DRIVE_UDMA;
1823 if (drive == 0) {
1824 idetim |= piix_setup_idetim_timings(
1825 drvp->DMA_mode, 1, channel);
1826 } else {
1827 sidetim |= piix_setup_sidetim_timings(
1828 drvp->DMA_mode, 1, channel);
1829 				idetim = PIIX_IDETIM_SET(idetim,
1830 PIIX_IDETIM_SITRE, channel);
1831 }
1832 }
1833 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1834
1835 pio: /* use PIO mode */
1836 idetim |= piix_setup_idetim_drvs(drvp);
1837 if (drive == 0) {
1838 idetim |= piix_setup_idetim_timings(
1839 drvp->PIO_mode, 0, channel);
1840 } else {
1841 sidetim |= piix_setup_sidetim_timings(
1842 drvp->PIO_mode, 0, channel);
1843 			idetim = PIIX_IDETIM_SET(idetim,
1844 PIIX_IDETIM_SITRE, channel);
1845 }
1846 }
1847 if (idedma_ctl != 0) {
1848 /* Add software bits in status register */
1849 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1850 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1851 idedma_ctl);
1852 }
1853 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1854 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1855 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1856 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1857 pciide_print_modes(cp);
1858 }
1859
1860
1861 /* setup ISP and RTC fields, based on mode */
1862 static u_int32_t
1863 piix_setup_idetim_timings(mode, dma, channel)
1864 u_int8_t mode;
1865 u_int8_t dma;
1866 u_int8_t channel;
1867 {
1868
1869 if (dma)
1870 return PIIX_IDETIM_SET(0,
1871 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1872 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1873 channel);
1874 else
1875 return PIIX_IDETIM_SET(0,
1876 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1877 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1878 channel);
1879 }
1880
1881 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1882 static u_int32_t
1883 piix_setup_idetim_drvs(drvp)
1884 struct ata_drive_datas *drvp;
1885 {
1886 u_int32_t ret = 0;
1887 struct channel_softc *chp = drvp->chnl_softc;
1888 u_int8_t channel = chp->channel;
1889 u_int8_t drive = drvp->drive;
1890
1891 /*
1892 	 * If the drive is using UDMA, the timing setups are independent,
1893 	 * so just check DMA and PIO here.
1894 */
1895 if (drvp->drive_flags & DRIVE_DMA) {
1896 /* if mode = DMA mode 0, use compatible timings */
1897 if ((drvp->drive_flags & DRIVE_DMA) &&
1898 drvp->DMA_mode == 0) {
1899 drvp->PIO_mode = 0;
1900 return ret;
1901 }
1902 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1903 /*
1904 		 * If the PIO and DMA timings are the same, use fast timings
1905 		 * for PIO too; otherwise fall back to compatible timings.
1906 */
1907 if ((piix_isp_pio[drvp->PIO_mode] !=
1908 piix_isp_dma[drvp->DMA_mode]) ||
1909 (piix_rtc_pio[drvp->PIO_mode] !=
1910 piix_rtc_dma[drvp->DMA_mode]))
1911 drvp->PIO_mode = 0;
1912 /* if PIO mode <= 2, use compat timings for PIO */
1913 if (drvp->PIO_mode <= 2) {
1914 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1915 channel);
1916 return ret;
1917 }
1918 }
1919
1920 /*
1921 * Now setup PIO modes. If mode < 2, use compat timings.
1922 * Else enable fast timings. Enable IORDY and prefetch/post
1923 * if PIO mode >= 3.
1924 */
1925
1926 if (drvp->PIO_mode < 2)
1927 return ret;
1928
1929 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1930 if (drvp->PIO_mode >= 3) {
1931 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1932 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1933 }
1934 return ret;
1935 }
1936
1937 /* setup values in SIDETIM registers, based on mode */
1938 static u_int32_t
1939 piix_setup_sidetim_timings(mode, dma, channel)
1940 u_int8_t mode;
1941 u_int8_t dma;
1942 u_int8_t channel;
1943 {
1944 if (dma)
1945 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1946 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1947 else
1948 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1949 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1950 }
1951
1952 void
1953 amd7x6_chip_map(sc, pa)
1954 struct pciide_softc *sc;
1955 struct pci_attach_args *pa;
1956 {
1957 struct pciide_channel *cp;
1958 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1959 int channel;
1960 pcireg_t chanenable;
1961 bus_size_t cmdsize, ctlsize;
1962
1963 if (pciide_chipen(sc, pa) == 0)
1964 return;
1965 printf("%s: bus-master DMA support present",
1966 sc->sc_wdcdev.sc_dev.dv_xname);
1967 pciide_mapreg_dma(sc, pa);
1968 printf("\n");
1969 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1970 WDC_CAPABILITY_MODE;
1971 if (sc->sc_dma_ok) {
1972 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1973 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1974 sc->sc_wdcdev.irqack = pciide_irqack;
1975 }
1976 sc->sc_wdcdev.PIO_cap = 4;
1977 sc->sc_wdcdev.DMA_cap = 2;
1978
1979 switch (sc->sc_pp->ide_product) {
1980 case PCI_PRODUCT_AMD_PBC766_IDE:
1981 case PCI_PRODUCT_AMD_PBC768_IDE:
1982 case PCI_PRODUCT_AMD_PBC8111_IDE:
1983 sc->sc_wdcdev.UDMA_cap = 5;
1984 break;
1985 default:
1986 sc->sc_wdcdev.UDMA_cap = 4;
1987 }
1988 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1989 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1990 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1991 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1992
1993 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1994 DEBUG_PROBE);
1995 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1996 cp = &sc->pciide_channels[channel];
1997 if (pciide_chansetup(sc, channel, interface) == 0)
1998 continue;
1999
2000 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2001 printf("%s: %s channel ignored (disabled)\n",
2002 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2003 continue;
2004 }
2005 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2006 pciide_pci_intr);
2007
2008 if (pciide_chan_candisable(cp))
2009 chanenable &= ~AMD7X6_CHAN_EN(channel);
2010 pciide_map_compat_intr(pa, cp, channel, interface);
2011 if (cp->hw_ok == 0)
2012 continue;
2013
2014 amd7x6_setup_channel(&cp->wdc_channel);
2015 }
2016 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2017 chanenable);
2018 return;
2019 }
2020
2021 void
2022 amd7x6_setup_channel(chp)
2023 struct channel_softc *chp;
2024 {
2025 u_int32_t udmatim_reg, datatim_reg;
2026 u_int8_t idedma_ctl;
2027 int mode, drive;
2028 struct ata_drive_datas *drvp;
2029 struct pciide_channel *cp = (struct pciide_channel*)chp;
2030 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2031 #ifndef PCIIDE_AMD756_ENABLEDMA
2032 int rev = PCI_REVISION(
2033 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2034 #endif
2035
2036 idedma_ctl = 0;
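	/* fetch the current timing registers and clear this channel's fields */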
2037 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2038 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2039 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2040 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2041
2042 /* setup DMA if needed */
2043 pciide_channel_dma_setup(cp);
2044
2045 for (drive = 0; drive < 2; drive++) {
2046 drvp = &chp->ch_drive[drive];
2047 /* If no drive, skip */
2048 if ((drvp->drive_flags & DRIVE) == 0)
2049 continue;
2050 /* add timing values, setup DMA if needed */
2051 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2052 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2053 mode = drvp->PIO_mode;
2054 goto pio;
2055 }
2056 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2057 (drvp->drive_flags & DRIVE_UDMA)) {
2058 /* use Ultra/DMA */
2059 drvp->drive_flags &= ~DRIVE_DMA;
2060 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2061 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2062 AMD7X6_UDMA_TIME(chp->channel, drive,
2063 amd7x6_udma_tim[drvp->UDMA_mode]);
2064 /* can use PIO timings, MW DMA unused */
2065 mode = drvp->PIO_mode;
2066 } else {
2067 /* use Multiword DMA, but only if revision is OK */
2068 drvp->drive_flags &= ~DRIVE_UDMA;
2069 #ifndef PCIIDE_AMD756_ENABLEDMA
2070 			/*
2071 			 * The bug worked around here doesn't seem to trigger
2072 			 * with all drives, so the workaround (disabling MW
2073 			 * DMA) can be turned off with PCIIDE_AMD756_ENABLEDMA.
2074 			 * The bug causes a hard hang if triggered.
2075 			 */
2076 if (sc->sc_pp->ide_product ==
2077 PCI_PRODUCT_AMD_PBC756_IDE &&
2078 AMD756_CHIPREV_DISABLEDMA(rev)) {
2079 printf("%s:%d:%d: multi-word DMA disabled due "
2080 "to chip revision\n",
2081 sc->sc_wdcdev.sc_dev.dv_xname,
2082 chp->channel, drive);
2083 mode = drvp->PIO_mode;
2084 drvp->drive_flags &= ~DRIVE_DMA;
2085 goto pio;
2086 }
2087 #endif
2088 /* mode = min(pio, dma+2) */
2089 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2090 mode = drvp->PIO_mode;
2091 else
2092 mode = drvp->DMA_mode + 2;
2093 }
2094 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2095
2096 pio: /* setup PIO mode */
2097 if (mode <= 2) {
2098 drvp->DMA_mode = 0;
2099 drvp->PIO_mode = 0;
2100 mode = 0;
2101 } else {
2102 drvp->PIO_mode = mode;
2103 drvp->DMA_mode = mode - 2;
2104 }
2105 datatim_reg |=
2106 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2107 amd7x6_pio_set[mode]) |
2108 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2109 amd7x6_pio_rec[mode]);
2110 }
2111 if (idedma_ctl != 0) {
2112 /* Add software bits in status register */
2113 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2114 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2115 idedma_ctl);
2116 }
2117 pciide_print_modes(cp);
2118 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2119 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2120 }
2121
2122 void
2123 apollo_chip_map(sc, pa)
2124 struct pciide_softc *sc;
2125 struct pci_attach_args *pa;
2126 {
2127 struct pciide_channel *cp;
2128 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2129 int channel;
2130 u_int32_t ideconf;
2131 bus_size_t cmdsize, ctlsize;
2132 pcitag_t pcib_tag;
2133 pcireg_t pcib_id, pcib_class;
2134
2135 if (pciide_chipen(sc, pa) == 0)
2136 return;
2137 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2138 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2139 /* and read ID and rev of the ISA bridge */
2140 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2141 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2142 printf(": VIA Technologies ");
2143 switch (PCI_PRODUCT(pcib_id)) {
2144 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2145 printf("VT82C586 (Apollo VP) ");
2146 		if (PCI_REVISION(pcib_class) >= 0x02) {
2147 printf("ATA33 controller\n");
2148 sc->sc_wdcdev.UDMA_cap = 2;
2149 } else {
2150 printf("controller\n");
2151 sc->sc_wdcdev.UDMA_cap = 0;
2152 }
2153 break;
2154 case PCI_PRODUCT_VIATECH_VT82C596A:
2155 printf("VT82C596A (Apollo Pro) ");
2156 if (PCI_REVISION(pcib_class) >= 0x12) {
2157 printf("ATA66 controller\n");
2158 sc->sc_wdcdev.UDMA_cap = 4;
2159 } else {
2160 printf("ATA33 controller\n");
2161 sc->sc_wdcdev.UDMA_cap = 2;
2162 }
2163 break;
2164 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2165 printf("VT82C686A (Apollo KX133) ");
2166 if (PCI_REVISION(pcib_class) >= 0x40) {
2167 printf("ATA100 controller\n");
2168 sc->sc_wdcdev.UDMA_cap = 5;
2169 } else {
2170 printf("ATA66 controller\n");
2171 sc->sc_wdcdev.UDMA_cap = 4;
2172 }
2173 break;
2174 case PCI_PRODUCT_VIATECH_VT8231:
2175 printf("VT8231 ATA100 controller\n");
2176 sc->sc_wdcdev.UDMA_cap = 5;
2177 break;
2178 case PCI_PRODUCT_VIATECH_VT8233:
2179 printf("VT8233 ATA100 controller\n");
2180 sc->sc_wdcdev.UDMA_cap = 5;
2181 break;
2182 case PCI_PRODUCT_VIATECH_VT8233A:
2183 printf("VT8233A ATA133 controller\n");
2184 		/* XXX use ATA100 until ATA133 is supported */
2185 sc->sc_wdcdev.UDMA_cap = 5;
2186 break;
2187 default:
2188 printf("unknown ATA controller\n");
2189 sc->sc_wdcdev.UDMA_cap = 0;
2190 }
2191
2192 printf("%s: bus-master DMA support present",
2193 sc->sc_wdcdev.sc_dev.dv_xname);
2194 pciide_mapreg_dma(sc, pa);
2195 printf("\n");
2196 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2197 WDC_CAPABILITY_MODE;
2198 if (sc->sc_dma_ok) {
2199 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2200 sc->sc_wdcdev.irqack = pciide_irqack;
2201 if (sc->sc_wdcdev.UDMA_cap > 0)
2202 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2203 }
2204 sc->sc_wdcdev.PIO_cap = 4;
2205 sc->sc_wdcdev.DMA_cap = 2;
2206 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2207 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2208 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2209
2210 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2211 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2212 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2213 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2214 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2215 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2216 DEBUG_PROBE);
2217
2218 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2219 cp = &sc->pciide_channels[channel];
2220 if (pciide_chansetup(sc, channel, interface) == 0)
2221 continue;
2222
2223 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2224 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2225 printf("%s: %s channel ignored (disabled)\n",
2226 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2227 continue;
2228 }
2229 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2230 pciide_pci_intr);
2231 if (cp->hw_ok == 0)
2232 continue;
2233 if (pciide_chan_candisable(cp)) {
2234 ideconf &= ~APO_IDECONF_EN(channel);
2235 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2236 ideconf);
2237 }
2238 pciide_map_compat_intr(pa, cp, channel, interface);
2239
2240 if (cp->hw_ok == 0)
2241 continue;
2242 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2243 }
2244 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2245 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2246 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2247 }
2248
2249 void
2250 apollo_setup_channel(chp)
2251 struct channel_softc *chp;
2252 {
2253 u_int32_t udmatim_reg, datatim_reg;
2254 u_int8_t idedma_ctl;
2255 int mode, drive;
2256 struct ata_drive_datas *drvp;
2257 struct pciide_channel *cp = (struct pciide_channel*)chp;
2258 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2259
2260 idedma_ctl = 0;
2261 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2262 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2263 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2264 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2265
2266 /* setup DMA if needed */
2267 pciide_channel_dma_setup(cp);
2268
2269 for (drive = 0; drive < 2; drive++) {
2270 drvp = &chp->ch_drive[drive];
2271 /* If no drive, skip */
2272 if ((drvp->drive_flags & DRIVE) == 0)
2273 continue;
2274 /* add timing values, setup DMA if needed */
2275 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2276 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2277 mode = drvp->PIO_mode;
2278 goto pio;
2279 }
2280 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2281 (drvp->drive_flags & DRIVE_UDMA)) {
2282 /* use Ultra/DMA */
2283 drvp->drive_flags &= ~DRIVE_DMA;
2284 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2285 APO_UDMA_EN_MTH(chp->channel, drive);
2286 if (sc->sc_wdcdev.UDMA_cap == 5) {
2287 /* 686b */
2288 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2289 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2290 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2291 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2292 /* 596b or 686a */
2293 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2294 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2295 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2296 } else {
2297 /* 596a or 586b */
2298 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2299 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2300 }
2301 /* can use PIO timings, MW DMA unused */
2302 mode = drvp->PIO_mode;
2303 } else {
2304 /* use Multiword DMA */
2305 drvp->drive_flags &= ~DRIVE_UDMA;
2306 /* mode = min(pio, dma+2) */
2307 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2308 mode = drvp->PIO_mode;
2309 else
2310 mode = drvp->DMA_mode + 2;
2311 }
2312 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2313
2314 pio: /* setup PIO mode */
2315 if (mode <= 2) {
2316 drvp->DMA_mode = 0;
2317 drvp->PIO_mode = 0;
2318 mode = 0;
2319 } else {
2320 drvp->PIO_mode = mode;
2321 drvp->DMA_mode = mode - 2;
2322 }
2323 datatim_reg |=
2324 APO_DATATIM_PULSE(chp->channel, drive,
2325 apollo_pio_set[mode]) |
2326 APO_DATATIM_RECOV(chp->channel, drive,
2327 apollo_pio_rec[mode]);
2328 }
2329 if (idedma_ctl != 0) {
2330 /* Add software bits in status register */
2331 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2332 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2333 idedma_ctl);
2334 }
2335 pciide_print_modes(cp);
2336 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2337 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2338 }
2339
2340 void
2341 cmd_channel_map(pa, sc, channel)
2342 struct pci_attach_args *pa;
2343 struct pciide_softc *sc;
2344 int channel;
2345 {
2346 struct pciide_channel *cp = &sc->pciide_channels[channel];
2347 bus_size_t cmdsize, ctlsize;
2348 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2349 int interface, one_channel;
2350
2351 /*
2352 * The 0648/0649 can be told to identify as a RAID controller.
2353 	 * In this case, we have to fake the interface value.
2354 */
2355 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2356 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2357 PCIIDE_INTERFACE_SETTABLE(1);
2358 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2359 CMD_CONF_DSA1)
2360 interface |= PCIIDE_INTERFACE_PCI(0) |
2361 PCIIDE_INTERFACE_PCI(1);
2362 } else {
2363 interface = PCI_INTERFACE(pa->pa_class);
2364 }
2365
2366 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2367 cp->name = PCIIDE_CHANNEL_NAME(channel);
2368 cp->wdc_channel.channel = channel;
2369 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2370
2371 /*
2372 	 * Older CMD64X chips don't have independent channels.
2373 */
2374 switch (sc->sc_pp->ide_product) {
2375 case PCI_PRODUCT_CMDTECH_649:
2376 one_channel = 0;
2377 break;
2378 default:
2379 one_channel = 1;
2380 break;
2381 }
2382
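	/* chips without independent channels share the first channel's queue */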
2383 if (channel > 0 && one_channel) {
2384 cp->wdc_channel.ch_queue =
2385 sc->pciide_channels[0].wdc_channel.ch_queue;
2386 } else {
2387 cp->wdc_channel.ch_queue =
2388 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2389 }
2390 if (cp->wdc_channel.ch_queue == NULL) {
2391 		printf("%s: %s channel: "
2392 		    "can't allocate memory for command queue\n",
2393 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2394 return;
2395 }
2396
2397 printf("%s: %s channel %s to %s mode\n",
2398 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2399 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2400 "configured" : "wired",
2401 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2402 "native-PCI" : "compatibility");
2403
2404 /*
2405 * with a CMD PCI64x, if we get here, the first channel is enabled:
2406 * there's no way to disable the first channel without disabling
2407 * the whole device
2408 */
2409 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2410 printf("%s: %s channel ignored (disabled)\n",
2411 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2412 return;
2413 }
2414
2415 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2416 if (cp->hw_ok == 0)
2417 return;
2418 if (channel == 1) {
2419 if (pciide_chan_candisable(cp)) {
2420 ctrl &= ~CMD_CTRL_2PORT;
2421 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2422 CMD_CTRL, ctrl);
2423 }
2424 }
2425 pciide_map_compat_intr(pa, cp, channel, interface);
2426 }
2427
2428 int
2429 cmd_pci_intr(arg)
2430 void *arg;
2431 {
2432 struct pciide_softc *sc = arg;
2433 struct pciide_channel *cp;
2434 struct channel_softc *wdc_cp;
2435 int i, rv, crv;
2436 u_int32_t priirq, secirq;
2437
2438 rv = 0;
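	/*
	 * The interrupt pending bits live in CMD_CONF (primary channel)
	 * and CMD_ARTTIM23 (secondary channel).
	 */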
2439 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2440 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2441 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2442 cp = &sc->pciide_channels[i];
2443 wdc_cp = &cp->wdc_channel;
2444 /* If a compat channel skip. */
2445 if (cp->compat)
2446 continue;
2447 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2448 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2449 crv = wdcintr(wdc_cp);
2450 if (crv == 0)
2451 printf("%s:%d: bogus intr\n",
2452 sc->sc_wdcdev.sc_dev.dv_xname, i);
2453 else
2454 rv = 1;
2455 }
2456 }
2457 return rv;
2458 }
2459
2460 void
2461 cmd_chip_map(sc, pa)
2462 struct pciide_softc *sc;
2463 struct pci_attach_args *pa;
2464 {
2465 int channel;
2466
2467 /*
2468 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2469 	 * and the base address registers can be disabled at the
2470 	 * hardware level. In this case, the device is wired
2471 * in compat mode and its first channel is always enabled,
2472 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2473 * In fact, it seems that the first channel of the CMD PCI0640
2474 * can't be disabled.
2475 */
2476
2477 #ifdef PCIIDE_CMD064x_DISABLE
2478 if (pciide_chipen(sc, pa) == 0)
2479 return;
2480 #endif
2481
2482 printf("%s: hardware does not support DMA\n",
2483 sc->sc_wdcdev.sc_dev.dv_xname);
2484 sc->sc_dma_ok = 0;
2485
2486 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2487 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2488 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2489
2490 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2491 cmd_channel_map(pa, sc, channel);
2492 }
2493 }
2494
2495 void
2496 cmd0643_9_chip_map(sc, pa)
2497 struct pciide_softc *sc;
2498 struct pci_attach_args *pa;
2499 {
2500 struct pciide_channel *cp;
2501 int channel;
2502 pcireg_t rev = PCI_REVISION(pa->pa_class);
2503
2504 /*
2505 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2506 	 * and the base address registers can be disabled at the
2507 	 * hardware level. In this case, the device is wired
2508 * in compat mode and its first channel is always enabled,
2509 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2510 * In fact, it seems that the first channel of the CMD PCI0640
2511 * can't be disabled.
2512 */
2513
2514 #ifdef PCIIDE_CMD064x_DISABLE
2515 if (pciide_chipen(sc, pa) == 0)
2516 return;
2517 #endif
2518 printf("%s: bus-master DMA support present",
2519 sc->sc_wdcdev.sc_dev.dv_xname);
2520 pciide_mapreg_dma(sc, pa);
2521 printf("\n");
2522 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2523 WDC_CAPABILITY_MODE;
2524 if (sc->sc_dma_ok) {
2525 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2526 switch (sc->sc_pp->ide_product) {
2527 case PCI_PRODUCT_CMDTECH_649:
2528 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2529 sc->sc_wdcdev.UDMA_cap = 5;
2530 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2531 break;
2532 case PCI_PRODUCT_CMDTECH_648:
2533 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2534 sc->sc_wdcdev.UDMA_cap = 4;
2535 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2536 break;
2537 case PCI_PRODUCT_CMDTECH_646:
2538 if (rev >= CMD0646U2_REV) {
2539 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2540 sc->sc_wdcdev.UDMA_cap = 2;
2541 } else if (rev >= CMD0646U_REV) {
2542 /*
2543 * Linux's driver claims that the 646U is broken
2544 * with UDMA. Only enable it if we know what we're
2545 * doing
2546 */
2547 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2548 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2549 sc->sc_wdcdev.UDMA_cap = 2;
2550 #endif
2551 /* explicitly disable UDMA */
2552 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2553 CMD_UDMATIM(0), 0);
2554 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2555 CMD_UDMATIM(1), 0);
2556 }
2557 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2558 break;
2559 default:
2560 sc->sc_wdcdev.irqack = pciide_irqack;
2561 }
2562 }
2563
2564 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2565 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2566 sc->sc_wdcdev.PIO_cap = 4;
2567 sc->sc_wdcdev.DMA_cap = 2;
2568 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2569
2570 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2571 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2572 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2573 DEBUG_PROBE);
2574
2575 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2576 cp = &sc->pciide_channels[channel];
2577 cmd_channel_map(pa, sc, channel);
2578 if (cp->hw_ok == 0)
2579 continue;
2580 cmd0643_9_setup_channel(&cp->wdc_channel);
2581 }
2582 /*
2583 * note - this also makes sure we clear the irq disable and reset
2584 * bits
2585 */
2586 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2587 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2588 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2589 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2590 DEBUG_PROBE);
2591 }
2592
2593 void
2594 cmd0643_9_setup_channel(chp)
2595 struct channel_softc *chp;
2596 {
2597 struct ata_drive_datas *drvp;
2598 u_int8_t tim;
2599 u_int32_t idedma_ctl, udma_reg;
2600 int drive;
2601 struct pciide_channel *cp = (struct pciide_channel*)chp;
2602 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2603
2604 idedma_ctl = 0;
2605 /* setup DMA if needed */
2606 pciide_channel_dma_setup(cp);
2607
2608 for (drive = 0; drive < 2; drive++) {
2609 drvp = &chp->ch_drive[drive];
2610 /* If no drive, skip */
2611 if ((drvp->drive_flags & DRIVE) == 0)
2612 continue;
2613 /* add timing values, setup DMA if needed */
2614 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2615 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2616 if (drvp->drive_flags & DRIVE_UDMA) {
2617 /* UltraDMA on a 646U2, 0648 or 0649 */
2618 drvp->drive_flags &= ~DRIVE_DMA;
2619 udma_reg = pciide_pci_read(sc->sc_pc,
2620 sc->sc_tag, CMD_UDMATIM(chp->channel));
2621 if (drvp->UDMA_mode > 2 &&
2622 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2623 CMD_BICSR) &
2624 CMD_BICSR_80(chp->channel)) == 0)
2625 drvp->UDMA_mode = 2;
2626 if (drvp->UDMA_mode > 2)
2627 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2628 else if (sc->sc_wdcdev.UDMA_cap > 2)
2629 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2630 udma_reg |= CMD_UDMATIM_UDMA(drive);
2631 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2632 CMD_UDMATIM_TIM_OFF(drive));
2633 udma_reg |=
2634 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2635 CMD_UDMATIM_TIM_OFF(drive));
2636 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2637 CMD_UDMATIM(chp->channel), udma_reg);
2638 } else {
2639 				/*
2640 				 * Use Multiword DMA.
2641 				 * Timings will be used for both PIO and DMA,
2642 				 * so adjust the DMA mode if needed.
2643 				 * If we have a 0646U2/8/9, turn off UDMA.
2644 				 */
2645 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2646 udma_reg = pciide_pci_read(sc->sc_pc,
2647 sc->sc_tag,
2648 CMD_UDMATIM(chp->channel));
2649 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2650 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2651 CMD_UDMATIM(chp->channel),
2652 udma_reg);
2653 }
2654 if (drvp->PIO_mode >= 3 &&
2655 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2656 drvp->DMA_mode = drvp->PIO_mode - 2;
2657 }
2658 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2659 }
2660 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2661 }
2662 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2663 CMD_DATA_TIM(chp->channel, drive), tim);
2664 }
2665 if (idedma_ctl != 0) {
2666 /* Add software bits in status register */
2667 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2668 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2669 idedma_ctl);
2670 }
2671 pciide_print_modes(cp);
2672 }
2673
2674 void
2675 cmd646_9_irqack(chp)
2676 struct channel_softc *chp;
2677 {
2678 u_int32_t priirq, secirq;
2679 struct pciide_channel *cp = (struct pciide_channel*)chp;
2680 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2681
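	/*
	 * Read the pending-interrupt register and write the value back;
	 * this acknowledges the channel's interrupt.
	 */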
2682 if (chp->channel == 0) {
2683 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2684 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2685 } else {
2686 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2687 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2688 }
2689 pciide_irqack(chp);
2690 }
2691
2692 void
2693 cy693_chip_map(sc, pa)
2694 struct pciide_softc *sc;
2695 struct pci_attach_args *pa;
2696 {
2697 struct pciide_channel *cp;
2698 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2699 bus_size_t cmdsize, ctlsize;
2700
2701 if (pciide_chipen(sc, pa) == 0)
2702 return;
2703 /*
2704 * this chip has 2 PCI IDE functions, one for primary and one for
2705 * secondary. So we need to call pciide_mapregs_compat() with
2706 * the real channel
2707 */
2708 if (pa->pa_function == 1) {
2709 sc->sc_cy_compatchan = 0;
2710 } else if (pa->pa_function == 2) {
2711 sc->sc_cy_compatchan = 1;
2712 } else {
2713 printf("%s: unexpected PCI function %d\n",
2714 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2715 return;
2716 }
2717 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2718 printf("%s: bus-master DMA support present",
2719 sc->sc_wdcdev.sc_dev.dv_xname);
2720 pciide_mapreg_dma(sc, pa);
2721 } else {
2722 printf("%s: hardware does not support DMA",
2723 sc->sc_wdcdev.sc_dev.dv_xname);
2724 sc->sc_dma_ok = 0;
2725 }
2726 printf("\n");
2727
2728 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2729 if (sc->sc_cy_handle == NULL) {
2730 printf("%s: unable to map hyperCache control registers\n",
2731 sc->sc_wdcdev.sc_dev.dv_xname);
2732 sc->sc_dma_ok = 0;
2733 }
2734
2735 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2736 WDC_CAPABILITY_MODE;
2737 if (sc->sc_dma_ok) {
2738 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2739 sc->sc_wdcdev.irqack = pciide_irqack;
2740 }
2741 sc->sc_wdcdev.PIO_cap = 4;
2742 sc->sc_wdcdev.DMA_cap = 2;
2743 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2744
2745 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2746 sc->sc_wdcdev.nchannels = 1;
2747
2748 /* Only one channel for this chip; if we are here it's enabled */
2749 cp = &sc->pciide_channels[0];
2750 sc->wdc_chanarray[0] = &cp->wdc_channel;
2751 cp->name = PCIIDE_CHANNEL_NAME(0);
2752 cp->wdc_channel.channel = 0;
2753 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2754 cp->wdc_channel.ch_queue =
2755 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2756 if (cp->wdc_channel.ch_queue == NULL) {
2757 		printf("%s: primary channel: "
2758 		    "can't allocate memory for command queue\n",
2759 		    sc->sc_wdcdev.sc_dev.dv_xname);
2760 return;
2761 }
2762 printf("%s: primary channel %s to ",
2763 sc->sc_wdcdev.sc_dev.dv_xname,
2764 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2765 "configured" : "wired");
2766 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2767 printf("native-PCI");
2768 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2769 pciide_pci_intr);
2770 } else {
2771 printf("compatibility");
2772 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2773 &cmdsize, &ctlsize);
2774 }
2775 printf(" mode\n");
2776 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2777 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2778 wdcattach(&cp->wdc_channel);
2779 if (pciide_chan_candisable(cp)) {
2780 pci_conf_write(sc->sc_pc, sc->sc_tag,
2781 PCI_COMMAND_STATUS_REG, 0);
2782 }
2783 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2784 if (cp->hw_ok == 0)
2785 return;
2786 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2787 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2788 cy693_setup_channel(&cp->wdc_channel);
2789 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2790 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2791 }
2792
2793 void
2794 cy693_setup_channel(chp)
2795 struct channel_softc *chp;
2796 {
2797 struct ata_drive_datas *drvp;
2798 int drive;
2799 u_int32_t cy_cmd_ctrl;
2800 u_int32_t idedma_ctl;
2801 struct pciide_channel *cp = (struct pciide_channel*)chp;
2802 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2803 int dma_mode = -1;
2804
2805 cy_cmd_ctrl = idedma_ctl = 0;
2806
2807 /* setup DMA if needed */
2808 pciide_channel_dma_setup(cp);
2809
2810 for (drive = 0; drive < 2; drive++) {
2811 drvp = &chp->ch_drive[drive];
2812 /* If no drive, skip */
2813 if ((drvp->drive_flags & DRIVE) == 0)
2814 continue;
2815 /* add timing values, setup DMA if needed */
2816 if (drvp->drive_flags & DRIVE_DMA) {
2817 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2818 /* use Multiword DMA */
2819 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2820 dma_mode = drvp->DMA_mode;
2821 }
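		/* the same pulse/recovery values are used for IOR and IOW */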
2822 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2823 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2824 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2825 CY_CMD_CTRL_IOW_REC_OFF(drive));
2826 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2827 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2828 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2829 CY_CMD_CTRL_IOR_REC_OFF(drive));
2830 }
2831 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
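	/* a single DMA mode is programmed per channel; both drives share it */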
2832 chp->ch_drive[0].DMA_mode = dma_mode;
2833 chp->ch_drive[1].DMA_mode = dma_mode;
2834
2835 if (dma_mode == -1)
2836 dma_mode = 0;
2837
2838 if (sc->sc_cy_handle != NULL) {
2839 /* Note: `multiple' is implied. */
2840 cy82c693_write(sc->sc_cy_handle,
2841 (sc->sc_cy_compatchan == 0) ?
2842 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2843 }
2844
2845 pciide_print_modes(cp);
2846
2847 if (idedma_ctl != 0) {
2848 /* Add software bits in status register */
2849 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2850 IDEDMA_CTL, idedma_ctl);
2851 }
2852 }
2853
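/*
 * Match host bridges found in SiS chipsets whose IDE controller is
 * UDMA/100-capable (see the UDMA_cap selection in sis_chip_map()).
 */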
2854 static int
2855 sis_hostbr_match(pa)
2856 struct pci_attach_args *pa;
2857 {
2858 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2859 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2860 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2861 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2862 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2863 }
2864
2865 void
2866 sis_chip_map(sc, pa)
2867 struct pciide_softc *sc;
2868 struct pci_attach_args *pa;
2869 {
2870 struct pciide_channel *cp;
2871 int channel;
2872 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2873 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2874 pcireg_t rev = PCI_REVISION(pa->pa_class);
2875 bus_size_t cmdsize, ctlsize;
2876 pcitag_t pchb_tag;
2877 pcireg_t pchb_id, pchb_class;
2878
2879 if (pciide_chipen(sc, pa) == 0)
2880 return;
2881 printf("%s: bus-master DMA support present",
2882 sc->sc_wdcdev.sc_dev.dv_xname);
2883 pciide_mapreg_dma(sc, pa);
2884 printf("\n");
2885
2886 /* get a PCI tag for the host bridge (function 0 of the same device) */
2887 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2888 /* and read ID and rev of the ISA bridge */
2889 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2890 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2891
2892 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2893 WDC_CAPABILITY_MODE;
2894 if (sc->sc_dma_ok) {
2895 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2896 sc->sc_wdcdev.irqack = pciide_irqack;
2897 /*
2898 		 * Controllers associated with a rev 0x2 530 Host-to-PCI
2899 		 * bridge have problems with UDMA (info provided by Christos).
2900 */
2901 if (rev >= 0xd0 &&
2902 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2903 PCI_REVISION(pchb_class) >= 0x03))
2904 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2905 }
2906
2907 sc->sc_wdcdev.PIO_cap = 4;
2908 sc->sc_wdcdev.DMA_cap = 2;
2909 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2910 /*
2911 		 * Use UDMA/100 on chipsets with a SiS 645/650/730/735 host
2912 		 * bridge, and UDMA/33 on other chipsets.
2913 */
2914 sc->sc_wdcdev.UDMA_cap =
2915 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2916 sc->sc_wdcdev.set_modes = sis_setup_channel;
2917
2918 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2919 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2920
2921 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2922 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2923 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2924
2925 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2926 cp = &sc->pciide_channels[channel];
2927 if (pciide_chansetup(sc, channel, interface) == 0)
2928 continue;
2929 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2930 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2931 printf("%s: %s channel ignored (disabled)\n",
2932 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2933 continue;
2934 }
2935 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2936 pciide_pci_intr);
2937 if (cp->hw_ok == 0)
2938 continue;
2939 if (pciide_chan_candisable(cp)) {
2940 if (channel == 0)
2941 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2942 else
2943 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2944 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2945 sis_ctr0);
2946 }
2947 pciide_map_compat_intr(pa, cp, channel, interface);
2948 if (cp->hw_ok == 0)
2949 continue;
2950 sis_setup_channel(&cp->wdc_channel);
2951 }
2952 }
2953
2954 void
2955 sis_setup_channel(chp)
2956 struct channel_softc *chp;
2957 {
2958 struct ata_drive_datas *drvp;
2959 int drive;
2960 u_int32_t sis_tim;
2961 u_int32_t idedma_ctl;
2962 struct pciide_channel *cp = (struct pciide_channel*)chp;
2963 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2964
2965 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2966 "channel %d 0x%x\n", chp->channel,
2967 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2968 DEBUG_PROBE);
2969 sis_tim = 0;
2970 idedma_ctl = 0;
2971 /* setup DMA if needed */
2972 pciide_channel_dma_setup(cp);
2973
2974 for (drive = 0; drive < 2; drive++) {
2975 drvp = &chp->ch_drive[drive];
2976 /* If no drive, skip */
2977 if ((drvp->drive_flags & DRIVE) == 0)
2978 continue;
2979 /* add timing values, setup DMA if needed */
2980 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2981 (drvp->drive_flags & DRIVE_UDMA) == 0)
2982 goto pio;
2983
2984 if (drvp->drive_flags & DRIVE_UDMA) {
2985 /* use Ultra/DMA */
2986 drvp->drive_flags &= ~DRIVE_DMA;
2987 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2988 SIS_TIM_UDMA_TIME_OFF(drive);
2989 sis_tim |= SIS_TIM_UDMA_EN(drive);
2990 } else {
2991 /*
2992 * use Multiword DMA
2993 * Timings will be used for both PIO and DMA,
2994 * so adjust DMA mode if needed
2995 */
2996 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2997 drvp->PIO_mode = drvp->DMA_mode + 2;
2998 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2999 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3000 drvp->PIO_mode - 2 : 0;
3001 if (drvp->DMA_mode == 0)
3002 drvp->PIO_mode = 0;
3003 }
3004 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3005 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3006 SIS_TIM_ACT_OFF(drive);
3007 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3008 SIS_TIM_REC_OFF(drive);
3009 }
3010 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3011 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3012 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3013 if (idedma_ctl != 0) {
3014 /* Add software bits in status register */
3015 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3016 IDEDMA_CTL, idedma_ctl);
3017 }
3018 pciide_print_modes(cp);
3019 }
3020
3021 void
3022 acer_chip_map(sc, pa)
3023 struct pciide_softc *sc;
3024 struct pci_attach_args *pa;
3025 {
3026 struct pciide_channel *cp;
3027 int channel;
3028 pcireg_t cr, interface;
3029 bus_size_t cmdsize, ctlsize;
3030 pcireg_t rev = PCI_REVISION(pa->pa_class);
3031
3032 if (pciide_chipen(sc, pa) == 0)
3033 return;
3034 printf("%s: bus-master DMA support present",
3035 sc->sc_wdcdev.sc_dev.dv_xname);
3036 pciide_mapreg_dma(sc, pa);
3037 printf("\n");
3038 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3039 WDC_CAPABILITY_MODE;
3040 if (sc->sc_dma_ok) {
3041 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3042 if (rev >= 0x20) {
3043 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3044 if (rev >= 0xC4)
3045 sc->sc_wdcdev.UDMA_cap = 5;
3046 else if (rev >= 0xC2)
3047 sc->sc_wdcdev.UDMA_cap = 4;
3048 else
3049 sc->sc_wdcdev.UDMA_cap = 2;
3050 }
3051 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3052 sc->sc_wdcdev.irqack = pciide_irqack;
3053 }
3054
3055 sc->sc_wdcdev.PIO_cap = 4;
3056 sc->sc_wdcdev.DMA_cap = 2;
3057 sc->sc_wdcdev.set_modes = acer_setup_channel;
3058 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3059 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3060
3061 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3062 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3063 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3064
3065 /* Enable "microsoft register bits" R/W. */
3066 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3067 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3068 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3069 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3070 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3071 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3072 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3073 ~ACER_CHANSTATUSREGS_RO);
3074 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3075 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3076 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3077 /* Don't use cr, re-read the real register content instead */
3078 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3079 PCI_CLASS_REG));
3080
3081 /* From linux: enable "Cable Detection" */
3082 if (rev >= 0xC2) {
3083 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3084 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3085 | ACER_0x4B_CDETECT);
3086 }
3087
3088 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3089 cp = &sc->pciide_channels[channel];
3090 if (pciide_chansetup(sc, channel, interface) == 0)
3091 continue;
3092 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3093 printf("%s: %s channel ignored (disabled)\n",
3094 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3095 continue;
3096 }
3097 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3098 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3099 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3100 if (cp->hw_ok == 0)
3101 continue;
3102 if (pciide_chan_candisable(cp)) {
3103 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3104 pci_conf_write(sc->sc_pc, sc->sc_tag,
3105 PCI_CLASS_REG, cr);
3106 }
3107 pciide_map_compat_intr(pa, cp, channel, interface);
3108 acer_setup_channel(&cp->wdc_channel);
3109 }
3110 }
3111
3112 void
3113 acer_setup_channel(chp)
3114 struct channel_softc *chp;
3115 {
3116 struct ata_drive_datas *drvp;
3117 int drive;
3118 u_int32_t acer_fifo_udma;
3119 u_int32_t idedma_ctl;
3120 struct pciide_channel *cp = (struct pciide_channel*)chp;
3121 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3122
3123 idedma_ctl = 0;
3124 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3125 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3126 acer_fifo_udma), DEBUG_PROBE);
3127 /* setup DMA if needed */
3128 pciide_channel_dma_setup(cp);
3129
3130 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3131 	    DRIVE_UDMA) {	/* check 80-pin cable */
3132 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3133 ACER_0x4A_80PIN(chp->channel)) {
3134 if (chp->ch_drive[0].UDMA_mode > 2)
3135 chp->ch_drive[0].UDMA_mode = 2;
3136 if (chp->ch_drive[1].UDMA_mode > 2)
3137 chp->ch_drive[1].UDMA_mode = 2;
3138 }
3139 }
3140
3141 for (drive = 0; drive < 2; drive++) {
3142 drvp = &chp->ch_drive[drive];
3143 /* If no drive, skip */
3144 if ((drvp->drive_flags & DRIVE) == 0)
3145 continue;
3146 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3147 "channel %d drive %d 0x%x\n", chp->channel, drive,
3148 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3149 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3150 /* clear FIFO/DMA mode */
3151 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3152 ACER_UDMA_EN(chp->channel, drive) |
3153 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3154
3155 /* add timing values, setup DMA if needed */
3156 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3157 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3158 acer_fifo_udma |=
3159 ACER_FTH_OPL(chp->channel, drive, 0x1);
3160 goto pio;
3161 }
3162
3163 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3164 if (drvp->drive_flags & DRIVE_UDMA) {
3165 /* use Ultra/DMA */
3166 drvp->drive_flags &= ~DRIVE_DMA;
3167 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3168 acer_fifo_udma |=
3169 ACER_UDMA_TIM(chp->channel, drive,
3170 acer_udma[drvp->UDMA_mode]);
3171 /* XXX disable if one drive < UDMA3 ? */
3172 if (drvp->UDMA_mode >= 3) {
3173 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3174 ACER_0x4B,
3175 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3176 ACER_0x4B) | ACER_0x4B_UDMA66);
3177 }
3178 } else {
3179 /*
3180 * use Multiword DMA
3181 * Timings will be used for both PIO and DMA,
3182 * so adjust DMA mode if needed
3183 */
3184 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3185 drvp->PIO_mode = drvp->DMA_mode + 2;
3186 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3187 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3188 drvp->PIO_mode - 2 : 0;
3189 if (drvp->DMA_mode == 0)
3190 drvp->PIO_mode = 0;
3191 }
3192 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3193 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3194 ACER_IDETIM(chp->channel, drive),
3195 acer_pio[drvp->PIO_mode]);
3196 }
3197 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3198 acer_fifo_udma), DEBUG_PROBE);
3199 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3200 if (idedma_ctl != 0) {
3201 /* Add software bits in status register */
3202 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3203 IDEDMA_CTL, idedma_ctl);
3204 }
3205 pciide_print_modes(cp);
3206 }
3207
3208 int
3209 acer_pci_intr(arg)
3210 void *arg;
3211 {
3212 struct pciide_softc *sc = arg;
3213 struct pciide_channel *cp;
3214 struct channel_softc *wdc_cp;
3215 int i, rv, crv;
3216 u_int32_t chids;
3217
3218 rv = 0;
3219 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3220 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3221 cp = &sc->pciide_channels[i];
3222 wdc_cp = &cp->wdc_channel;
3223 /* If a compat channel skip. */
3224 if (cp->compat)
3225 continue;
3226 if (chids & ACER_CHIDS_INT(i)) {
3227 crv = wdcintr(wdc_cp);
3228 if (crv == 0)
3229 printf("%s:%d: bogus intr\n",
3230 sc->sc_wdcdev.sc_dev.dv_xname, i);
3231 else
3232 rv = 1;
3233 }
3234 }
3235 return rv;
3236 }
3237
3238 void
3239 hpt_chip_map(sc, pa)
3240 struct pciide_softc *sc;
3241 struct pci_attach_args *pa;
3242 {
3243 struct pciide_channel *cp;
3244 int i, compatchan, revision;
3245 pcireg_t interface;
3246 bus_size_t cmdsize, ctlsize;
3247
3248 if (pciide_chipen(sc, pa) == 0)
3249 return;
3250 revision = PCI_REVISION(pa->pa_class);
3251 printf(": Triones/Highpoint ");
3252 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3253 printf("HPT374 IDE Controller\n");
3254 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3255 if (revision == HPT370_REV)
3256 printf("HPT370 IDE Controller\n");
3257 else if (revision == HPT370A_REV)
3258 printf("HPT370A IDE Controller\n");
3259 else if (revision == HPT366_REV)
3260 printf("HPT366 IDE Controller\n");
3261 else
3262 printf("unknown HPT IDE controller rev %d\n", revision);
3263 } else
3264 printf("unknown HPT IDE controller 0x%x\n",
3265 sc->sc_pp->ide_product);
3266
3267 /*
3268 	 * When the chip is in native mode it identifies itself as
3269 	 * 'misc mass storage'. Fake the interface value in this case.
3270 */
3271 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3272 interface = PCI_INTERFACE(pa->pa_class);
3273 } else {
3274 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3275 PCIIDE_INTERFACE_PCI(0);
3276 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3277 (revision == HPT370_REV || revision == HPT370A_REV)) ||
3278 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3279 interface |= PCIIDE_INTERFACE_PCI(1);
3280 }
3281
3282 printf("%s: bus-master DMA support present",
3283 sc->sc_wdcdev.sc_dev.dv_xname);
3284 pciide_mapreg_dma(sc, pa);
3285 printf("\n");
3286 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3287 WDC_CAPABILITY_MODE;
3288 if (sc->sc_dma_ok) {
3289 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3290 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3291 sc->sc_wdcdev.irqack = pciide_irqack;
3292 }
3293 sc->sc_wdcdev.PIO_cap = 4;
3294 sc->sc_wdcdev.DMA_cap = 2;
3295
3296 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3297 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3298 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3299 revision == HPT366_REV) {
3300 sc->sc_wdcdev.UDMA_cap = 4;
3301 /*
3302 * The 366 has 2 PCI IDE functions, one for primary and one
3303 * for secondary. So we need to call pciide_mapregs_compat()
3304 * with the real channel
3305 */
3306 if (pa->pa_function == 0) {
3307 compatchan = 0;
3308 } else if (pa->pa_function == 1) {
3309 compatchan = 1;
3310 } else {
3311 printf("%s: unexpected PCI function %d\n",
3312 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3313 return;
3314 }
3315 sc->sc_wdcdev.nchannels = 1;
3316 } else {
3317 sc->sc_wdcdev.nchannels = 2;
3318 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3319 sc->sc_wdcdev.UDMA_cap = 6;
3320 else
3321 sc->sc_wdcdev.UDMA_cap = 5;
3322 }
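	/*
	 * Map and attach the channels; two-channel chips also have a
	 * per-channel enable bit to check first.
	 */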
3323 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3324 cp = &sc->pciide_channels[i];
3325 if (sc->sc_wdcdev.nchannels > 1) {
3326 compatchan = i;
3327 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3328 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3329 printf("%s: %s channel ignored (disabled)\n",
3330 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3331 continue;
3332 }
3333 }
3334 if (pciide_chansetup(sc, i, interface) == 0)
3335 continue;
3336 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3337 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3338 &ctlsize, hpt_pci_intr);
3339 } else {
3340 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3341 &cmdsize, &ctlsize);
3342 }
3343 if (cp->hw_ok == 0)
3344 return;
3345 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3346 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3347 wdcattach(&cp->wdc_channel);
3348 hpt_setup_channel(&cp->wdc_channel);
3349 }
3350 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3351 (revision == HPT370_REV || revision == HPT370A_REV)) ||
3352 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3353 /*
3354 		 * HPT370_REV and higher have a bit to disable interrupts;
3355 		 * make sure to clear it.
3356 */
3357 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3358 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3359 ~HPT_CSEL_IRQDIS);
3360 }
3361 /* set clocks, etc (mandatory on 374, optional otherwise) */
3362 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3363 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3364 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3365 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3366 return;
3367 }
3368
3369 void
3370 hpt_setup_channel(chp)
3371 struct channel_softc *chp;
3372 {
3373 struct ata_drive_datas *drvp;
3374 int drive;
3375 int cable;
3376 u_int32_t before, after;
3377 u_int32_t idedma_ctl;
3378 struct pciide_channel *cp = (struct pciide_channel*)chp;
3379 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3380
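	/* HPT_CSEL holds the cable-detect bits checked below to cap UDMA */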
3381 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3382
3383 /* setup DMA if needed */
3384 pciide_channel_dma_setup(cp);
3385
3386 idedma_ctl = 0;
3387
3388 /* Per drive settings */
3389 for (drive = 0; drive < 2; drive++) {
3390 drvp = &chp->ch_drive[drive];
3391 /* If no drive, skip */
3392 if ((drvp->drive_flags & DRIVE) == 0)
3393 continue;
3394 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3395 HPT_IDETIM(chp->channel, drive));
3396
3397 /* add timing values, setup DMA if needed */
3398 if (drvp->drive_flags & DRIVE_UDMA) {
3399 /* use Ultra/DMA */
3400 drvp->drive_flags &= ~DRIVE_DMA;
3401 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3402 drvp->UDMA_mode > 2)
3403 drvp->UDMA_mode = 2;
3404 after = (sc->sc_wdcdev.nchannels == 2) ?
3405 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3406 hpt374_udma[drvp->UDMA_mode] :
3407 hpt370_udma[drvp->UDMA_mode]) :
3408 hpt366_udma[drvp->UDMA_mode];
3409 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3410 } else if (drvp->drive_flags & DRIVE_DMA) {
3411 /*
3412 * use Multiword DMA.
3413 * Timings will be used for both PIO and DMA, so adjust
3414 * DMA mode if needed
3415 */
3416 if (drvp->PIO_mode >= 3 &&
3417 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3418 drvp->DMA_mode = drvp->PIO_mode - 2;
3419 }
3420 after = (sc->sc_wdcdev.nchannels == 2) ?
3421 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3422 hpt374_dma[drvp->DMA_mode] :
3423 hpt370_dma[drvp->DMA_mode]) :
3424 hpt366_dma[drvp->DMA_mode];
3425 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3426 } else {
3427 /* PIO only */
3428 after = (sc->sc_wdcdev.nchannels == 2) ?
3429 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3430 hpt374_pio[drvp->PIO_mode] :
3431 hpt370_pio[drvp->PIO_mode]) :
3432 hpt366_pio[drvp->PIO_mode];
3433 }
3434 pci_conf_write(sc->sc_pc, sc->sc_tag,
3435 HPT_IDETIM(chp->channel, drive), after);
3436 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3437 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3438 after, before), DEBUG_PROBE);
3439 }
3440 if (idedma_ctl != 0) {
3441 /* Add software bits in status register */
3442 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3443 IDEDMA_CTL, idedma_ctl);
3444 }
3445 pciide_print_modes(cp);
3446 }
3447
3448 int
3449 hpt_pci_intr(arg)
3450 void *arg;
3451 {
3452 struct pciide_softc *sc = arg;
3453 struct pciide_channel *cp;
3454 struct channel_softc *wdc_cp;
3455 int rv = 0;
3456 int dmastat, i, crv;
3457
3458 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
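		/* only service channels showing an interrupt and no DMA activity */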
3459 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3460 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3461 		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3462 IDEDMA_CTL_INTR)
3463 continue;
3464 cp = &sc->pciide_channels[i];
3465 wdc_cp = &cp->wdc_channel;
3466 crv = wdcintr(wdc_cp);
3467 if (crv == 0) {
3468 printf("%s:%d: bogus intr\n",
3469 sc->sc_wdcdev.sc_dev.dv_xname, i);
3470 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3471 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3472 } else
3473 rv = 1;
3474 }
3475 return rv;
3476 }
3477
3478
3479 /* Macros to test product */
3480 #define PDC_IS_262(sc) \
3481 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3482 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3483 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3484 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3485 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3486 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3487 #define PDC_IS_265(sc) \
3488 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3489 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3490 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3491 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3492 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3493 #define PDC_IS_268(sc) \
3494 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3495 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3496 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3497
3498 void
3499 pdc202xx_chip_map(sc, pa)
3500 struct pciide_softc *sc;
3501 struct pci_attach_args *pa;
3502 {
3503 struct pciide_channel *cp;
3504 int channel;
3505 pcireg_t interface, st, mode;
3506 bus_size_t cmdsize, ctlsize;
3507
3508 if (!PDC_IS_268(sc)) {
3509 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3510 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3511 st), DEBUG_PROBE);
3512 }
3513 if (pciide_chipen(sc, pa) == 0)
3514 return;
3515
3516 /* turn off RAID mode */
3517 if (!PDC_IS_268(sc))
3518 st &= ~PDC2xx_STATE_IDERAID;
3519
3520 /*
3521 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
3522 * mode; we have to fake the interface.
3523 */
3524 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3525 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3526 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3527
3528 printf("%s: bus-master DMA support present",
3529 sc->sc_wdcdev.sc_dev.dv_xname);
3530 pciide_mapreg_dma(sc, pa);
3531 printf("\n");
3532 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3533 WDC_CAPABILITY_MODE;
3534 if (sc->sc_dma_ok) {
3535 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3536 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3537 sc->sc_wdcdev.irqack = pciide_irqack;
3538 }
3539 sc->sc_wdcdev.PIO_cap = 4;
3540 sc->sc_wdcdev.DMA_cap = 2;
3541 if (PDC_IS_265(sc))
3542 sc->sc_wdcdev.UDMA_cap = 5;
3543 else if (PDC_IS_262(sc))
3544 sc->sc_wdcdev.UDMA_cap = 4;
3545 else
3546 sc->sc_wdcdev.UDMA_cap = 2;
3547 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3548 pdc20268_setup_channel : pdc202xx_setup_channel;
3549 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3550 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3551
3552 if (!PDC_IS_268(sc)) {
3553 /* setup failsafe defaults */
3554 mode = 0;
3555 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3556 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3557 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3558 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3559 for (channel = 0;
3560 channel < sc->sc_wdcdev.nchannels;
3561 channel++) {
3562 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3563 "drive 0 initial timings 0x%x, now 0x%x\n",
3564 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3565 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3566 DEBUG_PROBE);
3567 pci_conf_write(sc->sc_pc, sc->sc_tag,
3568 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3569 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3570 "drive 1 initial timings 0x%x, now 0x%x\n",
3571 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3572 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3573 pci_conf_write(sc->sc_pc, sc->sc_tag,
3574 PDC2xx_TIM(channel, 1), mode);
3575 }
3576
3577 mode = PDC2xx_SCR_DMA;
3578 if (PDC_IS_262(sc)) {
3579 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3580 } else {
3581 /* the BIOS set it up this way */
3582 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3583 }
3584 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3585 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3586 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3587 "now 0x%x\n",
3588 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3589 PDC2xx_SCR),
3590 mode), DEBUG_PROBE);
3591 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3592 PDC2xx_SCR, mode);
3593
3594 /* controller initial state register is OK even without BIOS */
3595 /* Set DMA mode to IDE DMA compatibility */
3596 mode =
3597 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3598 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3599 DEBUG_PROBE);
3600 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3601 mode | 0x1);
3602 mode =
3603 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3604 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3605 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3606 mode | 0x1);
3607 }
3608
3609 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3610 cp = &sc->pciide_channels[channel];
3611 if (pciide_chansetup(sc, channel, interface) == 0)
3612 continue;
3613 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3614 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3615 printf("%s: %s channel ignored (disabled)\n",
3616 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3617 continue;
3618 }
3619 if (PDC_IS_265(sc))
3620 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3621 pdc20265_pci_intr);
3622 else
3623 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3624 pdc202xx_pci_intr);
3625 if (cp->hw_ok == 0)
3626 continue;
3627 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3628 st &= ~(PDC_IS_262(sc) ?
3629 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3630 pciide_map_compat_intr(pa, cp, channel, interface);
3631 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3632 }
3633 if (!PDC_IS_268(sc)) {
3634 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3635 "0x%x\n", st), DEBUG_PROBE);
3636 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3637 }
3638 return;
3639 }
3640
3641 void
3642 pdc202xx_setup_channel(chp)
3643 struct channel_softc *chp;
3644 {
3645 struct ata_drive_datas *drvp;
3646 int drive;
3647 pcireg_t mode, st;
3648 u_int32_t idedma_ctl, scr, atapi;
3649 struct pciide_channel *cp = (struct pciide_channel*)chp;
3650 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3651 int channel = chp->channel;
3652
3653 /* setup DMA if needed */
3654 pciide_channel_dma_setup(cp);
3655
3656 idedma_ctl = 0;
3657 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3658 sc->sc_wdcdev.sc_dev.dv_xname,
3659 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3660 DEBUG_PROBE);
3661
3662 /* Per channel settings */
3663 if (PDC_IS_262(sc)) {
3664 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3665 PDC262_U66);
3666 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3667 /* Trim UDMA mode */
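/*
 * Clamp both drives to UDMA2 when the 80P state bit is set for this
 * channel (which apparently flags a cable unfit for Ultra/66), or when
 * either drive is already at UDMA2 or below, presumably because both
 * drives share the channel's Ultra/66 clocking.
 */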
3668 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3669 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3670 chp->ch_drive[0].UDMA_mode <= 2) ||
3671 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3672 chp->ch_drive[1].UDMA_mode <= 2)) {
3673 if (chp->ch_drive[0].UDMA_mode > 2)
3674 chp->ch_drive[0].UDMA_mode = 2;
3675 if (chp->ch_drive[1].UDMA_mode > 2)
3676 chp->ch_drive[1].UDMA_mode = 2;
3677 }
3678 /* Set U66 if needed */
3679 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3680 chp->ch_drive[0].UDMA_mode > 2) ||
3681 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3682 chp->ch_drive[1].UDMA_mode > 2))
3683 scr |= PDC262_U66_EN(channel);
3684 else
3685 scr &= ~PDC262_U66_EN(channel);
3686 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3687 PDC262_U66, scr);
3688 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3689 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3690 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3691 PDC262_ATAPI(channel))), DEBUG_PROBE);
3692 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3693 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
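/*
 * PDC262_ATAPI_UDMA lives in a per-channel register, so it affects
 * both drives: leave it clear when one drive runs Ultra/DMA while the
 * other only does plain multiword DMA, and set it otherwise.
 */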
3694 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3695 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3696 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3697 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3698 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3699 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3700 atapi = 0;
3701 else
3702 atapi = PDC262_ATAPI_UDMA;
3703 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3704 PDC262_ATAPI(channel), atapi);
3705 }
3706 }
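/*
 * Build each drive's timing word: MB/MC come from the Ultra/DMA or
 * multiword DMA tables (failsafe DMA values for PIO-only drives),
 * PA/PB from the PIO tables, and the PRE, SYNC/ERRDY and IORDY bits
 * are added as appropriate.
 */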
3707 for (drive = 0; drive < 2; drive++) {
3708 drvp = &chp->ch_drive[drive];
3709 /* If no drive, skip */
3710 if ((drvp->drive_flags & DRIVE) == 0)
3711 continue;
3712 mode = 0;
3713 if (drvp->drive_flags & DRIVE_UDMA) {
3714 /* use Ultra/DMA */
3715 drvp->drive_flags &= ~DRIVE_DMA;
3716 mode = PDC2xx_TIM_SET_MB(mode,
3717 pdc2xx_udma_mb[drvp->UDMA_mode]);
3718 mode = PDC2xx_TIM_SET_MC(mode,
3719 pdc2xx_udma_mc[drvp->UDMA_mode]);
3720 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3721 } else if (drvp->drive_flags & DRIVE_DMA) {
3722 mode = PDC2xx_TIM_SET_MB(mode,
3723 pdc2xx_dma_mb[drvp->DMA_mode]);
3724 mode = PDC2xx_TIM_SET_MC(mode,
3725 pdc2xx_dma_mc[drvp->DMA_mode]);
3726 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3727 } else {
3728 mode = PDC2xx_TIM_SET_MB(mode,
3729 pdc2xx_dma_mb[0]);
3730 mode = PDC2xx_TIM_SET_MC(mode,
3731 pdc2xx_dma_mc[0]);
3732 }
3733 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3734 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3735 if (drvp->drive_flags & DRIVE_ATA)
3736 mode |= PDC2xx_TIM_PRE;
3737 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3738 if (drvp->PIO_mode >= 3) {
3739 mode |= PDC2xx_TIM_IORDY;
3740 if (drive == 0)
3741 mode |= PDC2xx_TIM_IORDYp;
3742 }
3743 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3744 "timings 0x%x\n",
3745 sc->sc_wdcdev.sc_dev.dv_xname,
3746 chp->channel, drive, mode), DEBUG_PROBE);
3747 pci_conf_write(sc->sc_pc, sc->sc_tag,
3748 PDC2xx_TIM(chp->channel, drive), mode);
3749 }
3750 if (idedma_ctl != 0) {
3751 /* Add software bits in status register */
3752 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3753 IDEDMA_CTL, idedma_ctl);
3754 }
3755 pciide_print_modes(cp);
3756 }
3757
3758 void
3759 pdc20268_setup_channel(chp)
3760 struct channel_softc *chp;
3761 {
3762 struct ata_drive_datas *drvp;
3763 int drive;
3764 u_int32_t idedma_ctl;
3765 struct pciide_channel *cp = (struct pciide_channel*)chp;
3766 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3767 int u100;
3768
3769 /* setup DMA if needed */
3770 pciide_channel_dma_setup(cp);
3771
3772 idedma_ctl = 0;
3773
3774 /* I don't know what this is for; FreeBSD does it ... */
3775 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3776 IDEDMA_CMD + 0x1, 0x0b);
3777
3778 /*
3779 * I don't know what this is for; FreeBSD checks this ... this is not
3780 * cable type detection.
3781 */
3782 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3783 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
3784
3785 for (drive = 0; drive < 2; drive++) {
3786 drvp = &chp->ch_drive[drive];
3787 /* If no drive, skip */
3788 if ((drvp->drive_flags & DRIVE) == 0)
3789 continue;
3790 if (drvp->drive_flags & DRIVE_UDMA) {
3791 /* use Ultra/DMA */
3792 drvp->drive_flags &= ~DRIVE_DMA;
3793 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3794 if (drvp->UDMA_mode > 2 && u100 == 0)
3795 drvp->UDMA_mode = 2;
3796 } else if (drvp->drive_flags & DRIVE_DMA) {
3797 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3798 }
3799 }
3800 /* nothing to do to set up modes; the controller snoops the SET_FEATURES command */
3801 if (idedma_ctl != 0) {
3802 /* Add software bits in status register */
3803 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3804 IDEDMA_CTL, idedma_ctl);
3805 }
3806 pciide_print_modes(cp);
3807 }
3808
3809 int
3810 pdc202xx_pci_intr(arg)
3811 void *arg;
3812 {
3813 struct pciide_softc *sc = arg;
3814 struct pciide_channel *cp;
3815 struct channel_softc *wdc_cp;
3816 int i, rv, crv;
3817 u_int32_t scr;
3818
3819 rv = 0;
3820 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3821 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3822 cp = &sc->pciide_channels[i];
3823 wdc_cp = &cp->wdc_channel;
3824 /* If a compat channel, skip. */
3825 if (cp->compat)
3826 continue;
3827 if (scr & PDC2xx_SCR_INT(i)) {
3828 crv = wdcintr(wdc_cp);
3829 if (crv == 0)
3830 printf("%s:%d: bogus intr (reg 0x%x)\n",
3831 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3832 else
3833 rv = 1;
3834 }
3835 }
3836 return rv;
3837 }
3838
3839 int
3840 pdc20265_pci_intr(arg)
3841 void *arg;
3842 {
3843 struct pciide_softc *sc = arg;
3844 struct pciide_channel *cp;
3845 struct channel_softc *wdc_cp;
3846 int i, rv, crv;
3847 u_int32_t dmastat;
3848
3849 rv = 0;
3850 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3851 cp = &sc->pciide_channels[i];
3852 wdc_cp = &cp->wdc_channel;
3853 /* If a compat channel, skip. */
3854 if (cp->compat)
3855 continue;
3856 /*
3857 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously.
3858 * However, it asserts INT in IDEDMA_CTL even for non-DMA ops,
3859 * so use that instead (requires 2 register reads instead of 1,
3860 * but we can't do it any other way).
3861 */
3862 dmastat = bus_space_read_1(sc->sc_dma_iot,
3863 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3864 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3865 continue;
3866 crv = wdcintr(wdc_cp);
3867 if (crv == 0)
3868 printf("%s:%d: bogus intr\n",
3869 sc->sc_wdcdev.sc_dev.dv_xname, i);
3870 else
3871 rv = 1;
3872 }
3873 return rv;
3874 }
3875
3876 void
3877 opti_chip_map(sc, pa)
3878 struct pciide_softc *sc;
3879 struct pci_attach_args *pa;
3880 {
3881 struct pciide_channel *cp;
3882 bus_size_t cmdsize, ctlsize;
3883 pcireg_t interface;
3884 u_int8_t init_ctrl;
3885 int channel;
3886
3887 if (pciide_chipen(sc, pa) == 0)
3888 return;
3889 printf("%s: bus-master DMA support present",
3890 sc->sc_wdcdev.sc_dev.dv_xname);
3891
3892 /*
3893 * XXXSCW:
3894 * There seem to be a couple of buggy revisions/implementations
3895 * of the OPTi pciide chipset. This kludge seems to fix one of
3896 * the reported problems (PR/11644) but still fails for the
3897 * other (PR/13151), although the latter may be due to other
3898 * issues too...
3899 */
3900 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3901 printf(" but disabled due to chip rev. <= 0x12");
3902 sc->sc_dma_ok = 0;
3903 } else
3904 pciide_mapreg_dma(sc, pa);
3905
3906 printf("\n");
3907
3908 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
3909 WDC_CAPABILITY_MODE;
3910 sc->sc_wdcdev.PIO_cap = 4;
3911 if (sc->sc_dma_ok) {
3912 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3913 sc->sc_wdcdev.irqack = pciide_irqack;
3914 sc->sc_wdcdev.DMA_cap = 2;
3915 }
3916 sc->sc_wdcdev.set_modes = opti_setup_channel;
3917
3918 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3919 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3920
3921 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3922 OPTI_REG_INIT_CONTROL);
3923
3924 interface = PCI_INTERFACE(pa->pa_class);
3925
3926 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3927 cp = &sc->pciide_channels[channel];
3928 if (pciide_chansetup(sc, channel, interface) == 0)
3929 continue;
3930 if (channel == 1 &&
3931 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3932 printf("%s: %s channel ignored (disabled)\n",
3933 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3934 continue;
3935 }
3936 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3937 pciide_pci_intr);
3938 if (cp->hw_ok == 0)
3939 continue;
3940 pciide_map_compat_intr(pa, cp, channel, interface);
3941 if (cp->hw_ok == 0)
3942 continue;
3943 opti_setup_channel(&cp->wdc_channel);
3944 }
3945 }
3946
3947 void
3948 opti_setup_channel(chp)
3949 struct channel_softc *chp;
3950 {
3951 struct ata_drive_datas *drvp;
3952 struct pciide_channel *cp = (struct pciide_channel*)chp;
3953 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3954 int drive, spd;
3955 int mode[2];
3956 u_int8_t rv, mr;
3957
3958 /*
3959 * The `Delay' and `Address Setup Time' fields of the
3960 * Miscellaneous Register are always zero initially.
3961 */
3962 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3963 mr &= ~(OPTI_MISC_DELAY_MASK |
3964 OPTI_MISC_ADDR_SETUP_MASK |
3965 OPTI_MISC_INDEX_MASK);
3966
3967 /* Prime the control register before setting timing values */
3968 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3969
3970 /* Determine the clock rate of the PCI bus the chip is attached to */
3971 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3972 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3973
3974 /* setup DMA if needed */
3975 pciide_channel_dma_setup(cp);
3976
3977 for (drive = 0; drive < 2; drive++) {
3978 drvp = &chp->ch_drive[drive];
3979 /* If no drive, skip */
3980 if ((drvp->drive_flags & DRIVE) == 0) {
3981 mode[drive] = -1;
3982 continue;
3983 }
3984
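/*
 * The opti_tim_* tables are apparently indexed with PIO modes 0-4 at
 * entries 0-4 and multiword DMA modes following at entry 5, hence the
 * DMA_mode + 5 below.
 */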
3985 if ((drvp->drive_flags & DRIVE_DMA)) {
3986 /*
3987 * Timings will be used for both PIO and DMA,
3988 * so adjust DMA mode if needed
3989 */
3990 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3991 drvp->PIO_mode = drvp->DMA_mode + 2;
3992 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3993 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3994 drvp->PIO_mode - 2 : 0;
3995 if (drvp->DMA_mode == 0)
3996 drvp->PIO_mode = 0;
3997
3998 mode[drive] = drvp->DMA_mode + 5;
3999 } else
4000 mode[drive] = drvp->PIO_mode;
4001
4002 if (drive && mode[0] >= 0 &&
4003 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4004 /*
4005 * Can't have two drives using different values
4006 * for `Address Setup Time'.
4007 * Slow down the faster drive to compensate.
4008 */
4009 int d = (opti_tim_as[spd][mode[0]] >
4010 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4011
4012 mode[d] = mode[1-d];
4013 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4014 chp->ch_drive[d].DMA_mode = 0;
4015 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4016 }
4017 }
4018
4019 for (drive = 0; drive < 2; drive++) {
4020 int m;
4021 if ((m = mode[drive]) < 0)
4022 continue;
4023
4024 /* Set the Address Setup Time and select appropriate index */
4025 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4026 rv |= OPTI_MISC_INDEX(drive);
4027 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4028
4029 /* Set the pulse width and recovery timing parameters */
4030 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4031 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4032 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4033 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4034
4035 /* Set the Enhanced Mode register appropriately */
4036 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4037 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4038 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4039 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4040 }
4041
4042 /* Finally, enable the timings */
4043 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4044
4045 pciide_print_modes(cp);
4046 }
4047
4048 #define ACARD_IS_850(sc) \
4049 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
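/*
 * The ATP850 is treated specially below: it tops out at UDMA2, uses a
 * per-channel ATP850_IDETIME register, and gets no cable check, while
 * the ATP86x parts go up to UDMA4 and pack both channels into single
 * ATP860_IDETIME/ATP860_UDMA registers.
 */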
4050
4051 void
4052 acard_chip_map(sc, pa)
4053 struct pciide_softc *sc;
4054 struct pci_attach_args *pa;
4055 {
4056 struct pciide_channel *cp;
4057 int i;
4058 pcireg_t interface;
4059 bus_size_t cmdsize, ctlsize;
4060
4061 if (pciide_chipen(sc, pa) == 0)
4062 return;
4063
4064 /*
4065 * When the chip is in native mode it identifies itself as
4066 * 'misc mass storage' rather than IDE; fake the interface in that case.
4067 */
4068 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4069 interface = PCI_INTERFACE(pa->pa_class);
4070 } else {
4071 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4072 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4073 }
4074
4075 printf("%s: bus-master DMA support present",
4076 sc->sc_wdcdev.sc_dev.dv_xname);
4077 pciide_mapreg_dma(sc, pa);
4078 printf("\n");
4079 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4080 WDC_CAPABILITY_MODE;
4081
4082 if (sc->sc_dma_ok) {
4083 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4084 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4085 sc->sc_wdcdev.irqack = pciide_irqack;
4086 }
4087 sc->sc_wdcdev.PIO_cap = 4;
4088 sc->sc_wdcdev.DMA_cap = 2;
4089 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4090
4091 sc->sc_wdcdev.set_modes = acard_setup_channel;
4092 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4093 sc->sc_wdcdev.nchannels = 2;
4094
4095 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4096 cp = &sc->pciide_channels[i];
4097 if (pciide_chansetup(sc, i, interface) == 0)
4098 continue;
4099 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4100 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4101 &ctlsize, pciide_pci_intr);
4102 } else {
4103 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4104 &cmdsize, &ctlsize);
4105 }
4106 if (cp->hw_ok == 0)
4107 return;
4108 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4109 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4110 wdcattach(&cp->wdc_channel);
4111 acard_setup_channel(&cp->wdc_channel);
4112 }
4113 if (!ACARD_IS_850(sc)) {
4114 u_int32_t reg;
4115 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4116 reg &= ~ATP860_CTRL_INT;
4117 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4118 }
4119 }
4120
4121 void
4122 acard_setup_channel(chp)
4123 struct channel_softc *chp;
4124 {
4125 struct ata_drive_datas *drvp;
4126 struct pciide_channel *cp = (struct pciide_channel*)chp;
4127 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4128 int channel = chp->channel;
4129 int drive;
4130 u_int32_t idetime, udma_mode;
4131 u_int32_t idedma_ctl;
4132
4133 /* setup DMA if needed */
4134 pciide_channel_dma_setup(cp);
4135
4136 if (ACARD_IS_850(sc)) {
4137 idetime = 0;
4138 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4139 udma_mode &= ~ATP850_UDMA_MASK(channel);
4140 } else {
4141 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4142 idetime &= ~ATP860_SETTIME_MASK(channel);
4143 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4144 udma_mode &= ~ATP860_UDMA_MASK(channel);
4145
4146 /* Check for an 80-conductor cable */
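/*
 * The 80P bit being set apparently means the cable cannot carry modes
 * above UDMA2, so both drives are clamped.
 */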
4147 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4148 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4149 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4150 & ATP860_CTRL_80P(chp->channel)) {
4151 if (chp->ch_drive[0].UDMA_mode > 2)
4152 chp->ch_drive[0].UDMA_mode = 2;
4153 if (chp->ch_drive[1].UDMA_mode > 2)
4154 chp->ch_drive[1].UDMA_mode = 2;
4155 }
4156 }
4157 }
4158
4159 idedma_ctl = 0;
4160
4161 /* Per drive settings */
4162 for (drive = 0; drive < 2; drive++) {
4163 drvp = &chp->ch_drive[drive];
4164 /* If no drive, skip */
4165 if ((drvp->drive_flags & DRIVE) == 0)
4166 continue;
4167 /* add timing values, setup DMA if needed */
4168 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4169 (drvp->drive_flags & DRIVE_UDMA)) {
4170 /* use Ultra/DMA */
4171 if (ACARD_IS_850(sc)) {
4172 idetime |= ATP850_SETTIME(drive,
4173 acard_act_udma[drvp->UDMA_mode],
4174 acard_rec_udma[drvp->UDMA_mode]);
4175 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4176 acard_udma_conf[drvp->UDMA_mode]);
4177 } else {
4178 idetime |= ATP860_SETTIME(channel, drive,
4179 acard_act_udma[drvp->UDMA_mode],
4180 acard_rec_udma[drvp->UDMA_mode]);
4181 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4182 acard_udma_conf[drvp->UDMA_mode]);
4183 }
4184 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4185 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4186 (drvp->drive_flags & DRIVE_DMA)) {
4187 /* use Multiword DMA */
4188 drvp->drive_flags &= ~DRIVE_UDMA;
4189 if (ACARD_IS_850(sc)) {
4190 idetime |= ATP850_SETTIME(drive,
4191 acard_act_dma[drvp->DMA_mode],
4192 acard_rec_dma[drvp->DMA_mode]);
4193 } else {
4194 idetime |= ATP860_SETTIME(channel, drive,
4195 acard_act_dma[drvp->DMA_mode],
4196 acard_rec_dma[drvp->DMA_mode]);
4197 }
4198 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4199 } else {
4200 /* PIO only */
4201 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4202 if (ACARD_IS_850(sc)) {
4203 idetime |= ATP850_SETTIME(drive,
4204 acard_act_pio[drvp->PIO_mode],
4205 acard_rec_pio[drvp->PIO_mode]);
4206 } else {
4207 idetime |= ATP860_SETTIME(channel, drive,
4208 acard_act_pio[drvp->PIO_mode],
4209 acard_rec_pio[drvp->PIO_mode]);
4210 }
4211 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4212 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4213 | ATP8x0_CTRL_EN(channel));
4214 }
4215 }
4216
4217 if (idedma_ctl != 0) {
4218 /* Add software bits in status register */
4219 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4220 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4221 }
4222 pciide_print_modes(cp);
4223
4224 if (ACARD_IS_850(sc)) {
4225 pci_conf_write(sc->sc_pc, sc->sc_tag,
4226 ATP850_IDETIME(channel), idetime);
4227 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4228 } else {
4229 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4230 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4231 }
4232 }
4233
4234 int
4235 acard_pci_intr(arg)
4236 void *arg;
4237 {
4238 struct pciide_softc *sc = arg;
4239 struct pciide_channel *cp;
4240 struct channel_softc *wdc_cp;
4241 int rv = 0;
4242 int dmastat, i, crv;
4243
4244 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4245 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4246 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4247 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4248 continue;
4249 cp = &sc->pciide_channels[i];
4250 wdc_cp = &cp->wdc_channel;
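/*
 * If the channel is not expecting an interrupt, call wdcintr() anyway
 * and write the DMA status back to clear the stray interrupt.
 */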
4251 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4252 (void)wdcintr(wdc_cp);
4253 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4254 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4255 continue;
4256 }
4257 crv = wdcintr(wdc_cp);
4258 if (crv == 0)
4259 printf("%s:%d: bogus intr\n",
4260 sc->sc_wdcdev.sc_dev.dv_xname, i);
4261 else if (crv == 1)
4262 rv = 1;
4263 else if (rv == 0)
4264 rv = crv;
4265 }
4266 return rv;
4267 }
4268
4269 static int
4270 sl82c105_bugchk(struct pci_attach_args *pa)
4271 {
4272
4273 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4274 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4275 return (0);
4276
4277 if (PCI_REVISION(pa->pa_class) <= 0x05)
4278 return (1);
4279
4280 return (0);
4281 }
4282
4283 void
4284 sl82c105_chip_map(sc, pa)
4285 struct pciide_softc *sc;
4286 struct pci_attach_args *pa;
4287 {
4288 struct pciide_channel *cp;
4289 bus_size_t cmdsize, ctlsize;
4290 pcireg_t interface, idecr;
4291 int channel;
4292
4293 if (pciide_chipen(sc, pa) == 0)
4294 return;
4295
4296 printf("%s: bus-master DMA support present",
4297 sc->sc_wdcdev.sc_dev.dv_xname);
4298
4299 /*
4300 * Check to see if we're part of the Winbond 83c553 Southbridge.
4301 * If so, we need to disable DMA on rev. <= 5 of that chip.
4302 */
4303 if (pci_find_device(pa, sl82c105_bugchk)) {
4304 printf(" but disabled due to 83c553 rev. <= 0x05");
4305 sc->sc_dma_ok = 0;
4306 } else
4307 pciide_mapreg_dma(sc, pa);
4308 printf("\n");
4309
4310 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4311 WDC_CAPABILITY_MODE;
4312 sc->sc_wdcdev.PIO_cap = 4;
4313 if (sc->sc_dma_ok) {
4314 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4315 sc->sc_wdcdev.irqack = pciide_irqack;
4316 sc->sc_wdcdev.DMA_cap = 2;
4317 }
4318 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4319
4320 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4321 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4322
4323 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4324
4325 interface = PCI_INTERFACE(pa->pa_class);
4326
4327 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4328 cp = &sc->pciide_channels[channel];
4329 if (pciide_chansetup(sc, channel, interface) == 0)
4330 continue;
4331 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4332 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4333 printf("%s: %s channel ignored (disabled)\n",
4334 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4335 continue;
4336 }
4337 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4338 pciide_pci_intr);
4339 if (cp->hw_ok == 0)
4340 continue;
4341 pciide_map_compat_intr(pa, cp, channel, interface);
4342 if (cp->hw_ok == 0)
4343 continue;
4344 sl82c105_setup_channel(&cp->wdc_channel);
4345 }
4346 }
4347
4348 void
4349 sl82c105_setup_channel(chp)
4350 struct channel_softc *chp;
4351 {
4352 struct ata_drive_datas *drvp;
4353 struct pciide_channel *cp = (struct pciide_channel*)chp;
4354 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4355 int pxdx_reg, drive;
4356 pcireg_t pxdx;
4357
4358 /* Set up DMA if needed. */
4359 pciide_channel_dma_setup(cp);
4360
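/*
 * Each drive has its own control register (SYMPH_P0D0CR/SYMPH_P1D0CR
 * for drive 0, apparently 4 bytes higher for drive 1) holding the
 * CMD_ON/CMD_OFF timing fields and the PWEN/RDYEN/RAEN enable bits.
 */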
4361 for (drive = 0; drive < 2; drive++) {
4362 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4363 : SYMPH_P1D0CR) + (drive * 4);
4364
4365 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4366
4367 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4368 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4369
4370 drvp = &chp->ch_drive[drive];
4371 /* If no drive, skip. */
4372 if ((drvp->drive_flags & DRIVE) == 0) {
4373 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4374 continue;
4375 }
4376
4377 if (drvp->drive_flags & DRIVE_DMA) {
4378 /*
4379 * Timings will be used for both PIO and DMA,
4380 * so adjust DMA mode if needed.
4381 */
4382 if (drvp->PIO_mode >= 3) {
4383 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4384 drvp->DMA_mode = drvp->PIO_mode - 2;
4385 if (drvp->DMA_mode < 1) {
4386 /*
4387 * Can't mix both PIO and DMA.
4388 * Disable DMA.
4389 */
4390 drvp->drive_flags &= ~DRIVE_DMA;
4391 }
4392 } else {
4393 /*
4394 * Can't mix both PIO and DMA. Disable
4395 * DMA.
4396 */
4397 drvp->drive_flags &= ~DRIVE_DMA;
4398 }
4399 }
4400
4401 if (drvp->drive_flags & DRIVE_DMA) {
4402 /* Use multi-word DMA. */
4403 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4404 PxDx_CMD_ON_SHIFT;
4405 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4406 } else {
4407 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4408 PxDx_CMD_ON_SHIFT;
4409 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4410 }
4411
4412 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4413
4414 /* ...and set the mode for this drive. */
4415 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4416 }
4417
4418 pciide_print_modes(cp);
4419 }
4420
4421 void
4422 serverworks_chip_map(sc, pa)
4423 struct pciide_softc *sc;
4424 struct pci_attach_args *pa;
4425 {
4426 struct pciide_channel *cp;
4427 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4428 pcitag_t pcib_tag;
4429 int channel;
4430 bus_size_t cmdsize, ctlsize;
4431
4432 if (pciide_chipen(sc, pa) == 0)
4433 return;
4434
4435 printf("%s: bus-master DMA support present",
4436 sc->sc_wdcdev.sc_dev.dv_xname);
4437 pciide_mapreg_dma(sc, pa);
4438 printf("\n");
4439 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4440 WDC_CAPABILITY_MODE;
4441
4442 if (sc->sc_dma_ok) {
4443 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4444 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4445 sc->sc_wdcdev.irqack = pciide_irqack;
4446 }
4447 sc->sc_wdcdev.PIO_cap = 4;
4448 sc->sc_wdcdev.DMA_cap = 2;
4449 switch (sc->sc_pp->ide_product) {
4450 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4451 sc->sc_wdcdev.UDMA_cap = 2;
4452 break;
4453 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4454 if (PCI_REVISION(pa->pa_class) < 0x92)
4455 sc->sc_wdcdev.UDMA_cap = 4;
4456 else
4457 sc->sc_wdcdev.UDMA_cap = 5;
4458 break;
4459 }
4460
4461 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4462 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4463 sc->sc_wdcdev.nchannels = 2;
4464
4465 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4466 cp = &sc->pciide_channels[channel];
4467 if (pciide_chansetup(sc, channel, interface) == 0)
4468 continue;
4469 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4470 serverworks_pci_intr);
4471 if (cp->hw_ok == 0)
4472 return;
4473 pciide_map_compat_intr(pa, cp, channel, interface);
4474 if (cp->hw_ok == 0)
4475 return;
4476 serverworks_setup_channel(&cp->wdc_channel);
4477 }
4478
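/*
 * Tweak an undocumented bit pair in config register 0x64 of PCI
 * function 0 of this device: clear 0x2000 and set 0x4000.
 */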
4479 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4480 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4481 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4482 }
4483
4484 void
4485 serverworks_setup_channel(chp)
4486 struct channel_softc *chp;
4487 {
4488 struct ata_drive_datas *drvp;
4489 struct pciide_channel *cp = (struct pciide_channel*)chp;
4490 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4491 int channel = chp->channel;
4492 int drive, unit;
4493 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4494 u_int32_t idedma_ctl;
4495 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4496 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
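/*
 * Register layout, as inferred from the masks and shifts below:
 * 0x40 and 0x44 hold one raw timing byte per drive (taken from the
 * pio_modes[]/dma_modes[] tables), 0x48 and 0x54 hold one mode nibble
 * per drive starting at bit 16, and the low bits of 0x54 are the
 * per-drive UDMA enables.
 */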
4497
4498 /* setup DMA if needed */
4499 pciide_channel_dma_setup(cp);
4500
4501 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4502 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4503 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4504 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4505
4506 pio_time &= ~(0xffff << (16 * channel));
4507 dma_time &= ~(0xffff << (16 * channel));
4508 pio_mode &= ~(0xff << (8 * channel + 16));
4509 udma_mode &= ~(0xff << (8 * channel + 16));
4510 udma_mode &= ~(3 << (2 * channel));
4511
4512 idedma_ctl = 0;
4513
4514 /* Per drive settings */
4515 for (drive = 0; drive < 2; drive++) {
4516 drvp = &chp->ch_drive[drive];
4517 /* If no drive, skip */
4518 if ((drvp->drive_flags & DRIVE) == 0)
4519 continue;
4520 unit = drive + 2 * channel;
4521 /* add timing values, setup DMA if needed */
4522 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4523 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4524 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4525 (drvp->drive_flags & DRIVE_UDMA)) {
4526 /* use Ultra/DMA, check for 80-pin cable */
4527 if (drvp->UDMA_mode > 2 &&
4528 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4529 drvp->UDMA_mode = 2;
4530 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4531 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4532 udma_mode |= 1 << unit;
4533 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4534 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4535 (drvp->drive_flags & DRIVE_DMA)) {
4536 /* use Multiword DMA */
4537 drvp->drive_flags &= ~DRIVE_UDMA;
4538 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4539 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4540 } else {
4541 /* PIO only */
4542 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4543 }
4544 }
4545
4546 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4547 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4548 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4549 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4550 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4551
4552 if (idedma_ctl != 0) {
4553 /* Add software bits in status register */
4554 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4555 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4556 }
4557 pciide_print_modes(cp);
4558 }
4559
4560 int
4561 serverworks_pci_intr(arg)
4562 void *arg;
4563 {
4564 struct pciide_softc *sc = arg;
4565 struct pciide_channel *cp;
4566 struct channel_softc *wdc_cp;
4567 int rv = 0;
4568 int dmastat, i, crv;
4569
4570 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4571 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4572 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4573 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4574 IDEDMA_CTL_INTR)
4575 continue;
4576 cp = &sc->pciide_channels[i];
4577 wdc_cp = &cp->wdc_channel;
4578 crv = wdcintr(wdc_cp);
4579 if (crv == 0) {
4580 printf("%s:%d: bogus intr\n",
4581 sc->sc_wdcdev.sc_dev.dv_xname, i);
4582 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4583 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4584 } else
4585 rv = 1;
4586 }
4587 return rv;
4588 }
4589