  1 /*	$NetBSD: pciide.c,v 1.142 2002/01/14 01:35:39 augustss Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.142 2002/01/14 01:35:39 augustss Exp $");
81
82 #ifndef WDCDEBUG
83 #define WDCDEBUG
84 #endif
85
86 #define DEBUG_DMA 0x01
87 #define DEBUG_XFERS 0x02
88 #define DEBUG_FUNCS 0x08
89 #define DEBUG_PROBE 0x10
90 #ifdef WDCDEBUG
91 int wdcdebug_pciide_mask = 0;
92 #define WDCDEBUG_PRINT(args, level) \
93 if (wdcdebug_pciide_mask & (level)) printf args
94 #else
95 #define WDCDEBUG_PRINT(args, level)
96 #endif
97 #include <sys/param.h>
98 #include <sys/systm.h>
99 #include <sys/device.h>
100 #include <sys/malloc.h>
101
102 #include <uvm/uvm_extern.h>
103
104 #include <machine/endian.h>
105
106 #include <dev/pci/pcireg.h>
107 #include <dev/pci/pcivar.h>
108 #include <dev/pci/pcidevs.h>
109 #include <dev/pci/pciidereg.h>
110 #include <dev/pci/pciidevar.h>
111 #include <dev/pci/pciide_piix_reg.h>
112 #include <dev/pci/pciide_amd_reg.h>
113 #include <dev/pci/pciide_apollo_reg.h>
114 #include <dev/pci/pciide_cmd_reg.h>
115 #include <dev/pci/pciide_cy693_reg.h>
116 #include <dev/pci/pciide_sis_reg.h>
117 #include <dev/pci/pciide_acer_reg.h>
118 #include <dev/pci/pciide_pdc202xx_reg.h>
119 #include <dev/pci/pciide_opti_reg.h>
120 #include <dev/pci/pciide_hpt_reg.h>
121 #include <dev/pci/pciide_acard_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
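	/* Read the aligned 32-bit config word and shift the requested byte into the low 8 bits. */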
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
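	/*
	 * Config space is only addressable as aligned 32-bit words: read the
	 * enclosing word, replace the target byte lane, and write it back.
	 */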
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 static int sis_hostbr_match __P(( struct pci_attach_args *));
187
188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void acer_setup_channel __P((struct channel_softc*));
190 int acer_pci_intr __P((void *));
191 static int acer_isabr_match __P(( struct pci_attach_args *));
192
193 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void pdc202xx_setup_channel __P((struct channel_softc*));
195 void pdc20268_setup_channel __P((struct channel_softc*));
196 int pdc202xx_pci_intr __P((void *));
197 int pdc20265_pci_intr __P((void *));
198
199 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
200 void opti_setup_channel __P((struct channel_softc*));
201
202 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void hpt_setup_channel __P((struct channel_softc*));
204 int hpt_pci_intr __P((void *));
205
206 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
207 void acard_setup_channel __P((struct channel_softc*));
208 int acard_pci_intr __P((void *));
209
210 #ifdef PCIIDE_WINBOND_ENABLE
211 void winbond_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
212 #endif
213
214 void pciide_channel_dma_setup __P((struct pciide_channel *));
215 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
216 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
217 void pciide_dma_start __P((void*, int, int));
218 int pciide_dma_finish __P((void*, int, int, int));
219 void pciide_irqack __P((struct channel_softc *));
220 void pciide_print_modes __P((struct pciide_channel *));
221
222 struct pciide_product_desc {
223 u_int32_t ide_product;
224 int ide_flags;
225 const char *ide_name;
226 /* map and setup chip, probe drives */
227 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
228 };
229
230 /* Flags for ide_flags */
231 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
 232 #define IDE_16BIT_IOSPACE	0x0002 /* I/O space BARs ignore upper word */
233
 234 /* Default product description for devices not known to this driver */
235 const struct pciide_product_desc default_product_desc = {
236 0,
237 0,
238 "Generic PCI IDE controller",
239 default_chip_map,
240 };
241
242 const struct pciide_product_desc pciide_intel_products[] = {
243 { PCI_PRODUCT_INTEL_82092AA,
244 0,
245 "Intel 82092AA IDE controller",
246 default_chip_map,
247 },
248 { PCI_PRODUCT_INTEL_82371FB_IDE,
249 0,
250 "Intel 82371FB IDE controller (PIIX)",
251 piix_chip_map,
252 },
253 { PCI_PRODUCT_INTEL_82371SB_IDE,
254 0,
255 "Intel 82371SB IDE Interface (PIIX3)",
256 piix_chip_map,
257 },
258 { PCI_PRODUCT_INTEL_82371AB_IDE,
259 0,
260 "Intel 82371AB IDE controller (PIIX4)",
261 piix_chip_map,
262 },
263 { PCI_PRODUCT_INTEL_82440MX_IDE,
264 0,
265 "Intel 82440MX IDE controller",
266 piix_chip_map
267 },
268 { PCI_PRODUCT_INTEL_82801AA_IDE,
269 0,
270 "Intel 82801AA IDE Controller (ICH)",
271 piix_chip_map,
272 },
273 { PCI_PRODUCT_INTEL_82801AB_IDE,
274 0,
275 "Intel 82801AB IDE Controller (ICH0)",
276 piix_chip_map,
277 },
278 { PCI_PRODUCT_INTEL_82801BA_IDE,
279 0,
280 "Intel 82801BA IDE Controller (ICH2)",
281 piix_chip_map,
282 },
283 { PCI_PRODUCT_INTEL_82801BAM_IDE,
284 0,
285 "Intel 82801BAM IDE Controller (ICH2)",
286 piix_chip_map,
287 },
288 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
289 0,
 290	  "Intel 82801CA IDE Controller",
291 piix_chip_map,
292 },
293 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
294 0,
 295	  "Intel 82801CA IDE Controller",
296 piix_chip_map,
297 },
298 { 0,
299 0,
300 NULL,
301 NULL
302 }
303 };
304
305 const struct pciide_product_desc pciide_amd_products[] = {
306 { PCI_PRODUCT_AMD_PBC756_IDE,
307 0,
308 "Advanced Micro Devices AMD756 IDE Controller",
309 amd7x6_chip_map
310 },
311 { PCI_PRODUCT_AMD_PBC766_IDE,
312 0,
313 "Advanced Micro Devices AMD766 IDE Controller",
314 amd7x6_chip_map
315 },
316 { 0,
317 0,
318 NULL,
319 NULL
320 }
321 };
322
323 const struct pciide_product_desc pciide_cmd_products[] = {
324 { PCI_PRODUCT_CMDTECH_640,
325 0,
326 "CMD Technology PCI0640",
327 cmd_chip_map
328 },
329 { PCI_PRODUCT_CMDTECH_643,
330 0,
331 "CMD Technology PCI0643",
332 cmd0643_9_chip_map,
333 },
334 { PCI_PRODUCT_CMDTECH_646,
335 0,
336 "CMD Technology PCI0646",
337 cmd0643_9_chip_map,
338 },
339 { PCI_PRODUCT_CMDTECH_648,
340 IDE_PCI_CLASS_OVERRIDE,
341 "CMD Technology PCI0648",
342 cmd0643_9_chip_map,
343 },
344 { PCI_PRODUCT_CMDTECH_649,
345 IDE_PCI_CLASS_OVERRIDE,
346 "CMD Technology PCI0649",
347 cmd0643_9_chip_map,
348 },
349 { 0,
350 0,
351 NULL,
352 NULL
353 }
354 };
355
356 const struct pciide_product_desc pciide_via_products[] = {
357 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
358 0,
359 NULL,
360 apollo_chip_map,
361 },
362 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
363 0,
364 NULL,
365 apollo_chip_map,
366 },
367 { 0,
368 0,
369 NULL,
370 NULL
371 }
372 };
373
374 const struct pciide_product_desc pciide_cypress_products[] = {
375 { PCI_PRODUCT_CONTAQ_82C693,
376 IDE_16BIT_IOSPACE,
377 "Cypress 82C693 IDE Controller",
378 cy693_chip_map,
379 },
380 { 0,
381 0,
382 NULL,
383 NULL
384 }
385 };
386
387 const struct pciide_product_desc pciide_sis_products[] = {
388 { PCI_PRODUCT_SIS_5597_IDE,
389 0,
390 "Silicon Integrated System 5597/5598 IDE controller",
391 sis_chip_map,
392 },
393 { 0,
394 0,
395 NULL,
396 NULL
397 }
398 };
399
400 const struct pciide_product_desc pciide_acer_products[] = {
401 { PCI_PRODUCT_ALI_M5229,
402 0,
403 "Acer Labs M5229 UDMA IDE Controller",
404 acer_chip_map,
405 },
406 { 0,
407 0,
408 NULL,
409 NULL
410 }
411 };
412
413 const struct pciide_product_desc pciide_promise_products[] = {
414 { PCI_PRODUCT_PROMISE_ULTRA33,
415 IDE_PCI_CLASS_OVERRIDE,
416 "Promise Ultra33/ATA Bus Master IDE Accelerator",
417 pdc202xx_chip_map,
418 },
419 { PCI_PRODUCT_PROMISE_ULTRA66,
420 IDE_PCI_CLASS_OVERRIDE,
421 "Promise Ultra66/ATA Bus Master IDE Accelerator",
422 pdc202xx_chip_map,
423 },
424 { PCI_PRODUCT_PROMISE_ULTRA100,
425 IDE_PCI_CLASS_OVERRIDE,
426 "Promise Ultra100/ATA Bus Master IDE Accelerator",
427 pdc202xx_chip_map,
428 },
429 { PCI_PRODUCT_PROMISE_ULTRA100X,
430 IDE_PCI_CLASS_OVERRIDE,
431 "Promise Ultra100/ATA Bus Master IDE Accelerator",
432 pdc202xx_chip_map,
433 },
434 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
435 IDE_PCI_CLASS_OVERRIDE,
436 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
437 pdc202xx_chip_map,
438 },
439 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
440 IDE_PCI_CLASS_OVERRIDE,
441 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
442 pdc202xx_chip_map,
443 },
444 { PCI_PRODUCT_PROMISE_ULTRA133,
445 IDE_PCI_CLASS_OVERRIDE,
446 "Promise Ultra133/ATA Bus Master IDE Accelerator",
447 pdc202xx_chip_map,
448 },
449 { 0,
450 0,
451 NULL,
452 NULL
453 }
454 };
455
456 const struct pciide_product_desc pciide_opti_products[] = {
457 { PCI_PRODUCT_OPTI_82C621,
458 0,
459 "OPTi 82c621 PCI IDE controller",
460 opti_chip_map,
461 },
462 { PCI_PRODUCT_OPTI_82C568,
463 0,
464 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
465 opti_chip_map,
466 },
467 { PCI_PRODUCT_OPTI_82D568,
468 0,
469 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
470 opti_chip_map,
471 },
472 { 0,
473 0,
474 NULL,
475 NULL
476 }
477 };
478
479 const struct pciide_product_desc pciide_triones_products[] = {
480 { PCI_PRODUCT_TRIONES_HPT366,
481 IDE_PCI_CLASS_OVERRIDE,
482 NULL,
483 hpt_chip_map,
484 },
485 { 0,
486 0,
487 NULL,
488 NULL
489 }
490 };
491
492 const struct pciide_product_desc pciide_acard_products[] = {
493 { PCI_PRODUCT_ACARD_ATP850U,
494 IDE_PCI_CLASS_OVERRIDE,
495 "Acard ATP850U Ultra33 IDE Controller",
496 acard_chip_map,
497 },
498 { PCI_PRODUCT_ACARD_ATP860,
499 IDE_PCI_CLASS_OVERRIDE,
500 "Acard ATP860 Ultra66 IDE Controller",
501 acard_chip_map,
502 },
503 { PCI_PRODUCT_ACARD_ATP860A,
504 IDE_PCI_CLASS_OVERRIDE,
505 "Acard ATP860-A Ultra66 IDE Controller",
506 acard_chip_map,
507 },
508 { 0,
509 0,
510 NULL,
511 NULL
512 }
513 };
514
515 #ifdef PCIIDE_SERVERWORKS_ENABLE
516 const struct pciide_product_desc pciide_serverworks_products[] = {
517 { PCI_PRODUCT_SERVERWORKS_IDE,
518 0,
519 "ServerWorks ROSB4 IDE Controller",
520 piix_chip_map,
521 },
522 { 0,
523 0,
524 NULL,
525 }
526 };
527 #endif
528
529 #ifdef PCIIDE_WINBOND_ENABLE
530 const struct pciide_product_desc pciide_winbond_products[] = {
531 { PCI_PRODUCT_WINBOND_W83C553F_1,
532 0,
533 "Winbond W83C553F IDE controller",
534 winbond_chip_map,
535 },
536 { 0,
537 0,
538 NULL,
539 }
540 };
541 #endif
542
543 struct pciide_vendor_desc {
544 u_int32_t ide_vendor;
545 const struct pciide_product_desc *ide_products;
546 };
547
548 const struct pciide_vendor_desc pciide_vendors[] = {
549 { PCI_VENDOR_INTEL, pciide_intel_products },
550 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
551 { PCI_VENDOR_VIATECH, pciide_via_products },
552 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
553 { PCI_VENDOR_SIS, pciide_sis_products },
554 { PCI_VENDOR_ALI, pciide_acer_products },
555 { PCI_VENDOR_PROMISE, pciide_promise_products },
556 { PCI_VENDOR_AMD, pciide_amd_products },
557 { PCI_VENDOR_OPTI, pciide_opti_products },
558 { PCI_VENDOR_TRIONES, pciide_triones_products },
559 { PCI_VENDOR_ACARD, pciide_acard_products },
560 #ifdef PCIIDE_SERVERWORKS_ENABLE
561 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
562 #endif
563 #ifdef PCIIDE_WINBOND_ENABLE
564 { PCI_VENDOR_WINBOND, pciide_winbond_products },
565 #endif
566 { 0, NULL }
567 };
568
569 /* options passed via the 'flags' config keyword */
570 #define PCIIDE_OPTIONS_DMA 0x01
571 #define PCIIDE_OPTIONS_NODMA 0x02
572
573 int pciide_match __P((struct device *, struct cfdata *, void *));
574 void pciide_attach __P((struct device *, struct device *, void *));
575
576 struct cfattach pciide_ca = {
577 sizeof(struct pciide_softc), pciide_match, pciide_attach
578 };
579 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
580 int pciide_mapregs_compat __P(( struct pci_attach_args *,
581 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
582 int pciide_mapregs_native __P((struct pci_attach_args *,
583 struct pciide_channel *, bus_size_t *, bus_size_t *,
584 int (*pci_intr) __P((void *))));
585 void pciide_mapreg_dma __P((struct pciide_softc *,
586 struct pci_attach_args *));
587 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
588 void pciide_mapchan __P((struct pci_attach_args *,
589 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
590 int (*pci_intr) __P((void *))));
591 int pciide_chan_candisable __P((struct pciide_channel *));
592 void pciide_map_compat_intr __P(( struct pci_attach_args *,
593 struct pciide_channel *, int, int));
594 int pciide_compat_intr __P((void *));
595 int pciide_pci_intr __P((void *));
596 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
597
598 const struct pciide_product_desc *
599 pciide_lookup_product(id)
600 u_int32_t id;
601 {
602 const struct pciide_product_desc *pp;
603 const struct pciide_vendor_desc *vp;
604
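	/*
	 * Two-level lookup: find the vendor table first, then scan that
	 * vendor's product list; both tables end with a NULL sentinel entry.
	 */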
605 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
606 if (PCI_VENDOR(id) == vp->ide_vendor)
607 break;
608
609 if ((pp = vp->ide_products) == NULL)
610 return NULL;
611
612 for (; pp->chip_map != NULL; pp++)
613 if (PCI_PRODUCT(id) == pp->ide_product)
614 break;
615
616 if (pp->chip_map == NULL)
617 return NULL;
618 return pp;
619 }
620
621 int
622 pciide_match(parent, match, aux)
623 struct device *parent;
624 struct cfdata *match;
625 void *aux;
626 {
627 struct pci_attach_args *pa = aux;
628 const struct pciide_product_desc *pp;
629
630 /*
631 * Check the ID register to see that it's a PCI IDE controller.
632 * If it is, we assume that we can deal with it; it _should_
633 * work in a standardized way...
634 */
635 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
636 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
637 return (1);
638 }
639
640 /*
 641	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
 642	 * controllers. Let's see if we can deal with them anyway.
643 */
644 pp = pciide_lookup_product(pa->pa_id);
645 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
646 return (1);
647 }
648
649 return (0);
650 }
651
652 void
653 pciide_attach(parent, self, aux)
654 struct device *parent, *self;
655 void *aux;
656 {
657 struct pci_attach_args *pa = aux;
658 pci_chipset_tag_t pc = pa->pa_pc;
659 pcitag_t tag = pa->pa_tag;
660 struct pciide_softc *sc = (struct pciide_softc *)self;
661 pcireg_t csr;
662 char devinfo[256];
663 const char *displaydev;
664
665 sc->sc_pp = pciide_lookup_product(pa->pa_id);
666 if (sc->sc_pp == NULL) {
667 sc->sc_pp = &default_product_desc;
668 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
669 displaydev = devinfo;
670 } else
671 displaydev = sc->sc_pp->ide_name;
672
673 /* if displaydev == NULL, printf is done in chip-specific map */
674 if (displaydev)
675 printf(": %s (rev. 0x%02x)\n", displaydev,
676 PCI_REVISION(pa->pa_class));
677
678 sc->sc_pc = pa->pa_pc;
679 sc->sc_tag = pa->pa_tag;
680 #ifdef WDCDEBUG
681 if (wdcdebug_pciide_mask & DEBUG_PROBE)
682 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
683 #endif
684 sc->sc_pp->chip_map(sc, pa);
685
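	/*
	 * The chip-specific map routine decides whether DMA is usable
	 * (sc_dma_ok); only then turn on PCI bus mastering.
	 */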
686 if (sc->sc_dma_ok) {
687 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
688 csr |= PCI_COMMAND_MASTER_ENABLE;
689 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
690 }
691 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
692 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
693 }
694
 695 /* tell whether the chip is enabled or not */
696 int
697 pciide_chipen(sc, pa)
698 struct pciide_softc *sc;
699 struct pci_attach_args *pa;
700 {
701 pcireg_t csr;
702 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
703 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
704 PCI_COMMAND_STATUS_REG);
705 printf("%s: device disabled (at %s)\n",
706 sc->sc_wdcdev.sc_dev.dv_xname,
707 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
708 "device" : "bridge");
709 return 0;
710 }
711 return 1;
712 }
713
714 int
715 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
716 struct pci_attach_args *pa;
717 struct pciide_channel *cp;
718 int compatchan;
719 bus_size_t *cmdsizep, *ctlsizep;
720 {
721 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
722 struct channel_softc *wdc_cp = &cp->wdc_channel;
723
724 cp->compat = 1;
725 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
726 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
727
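	/*
	 * Compatibility channels decode the fixed legacy command/control
	 * addresses (0x1f0/0x3f6 for the primary, 0x170/0x376 for the
	 * secondary channel).
	 */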
728 wdc_cp->cmd_iot = pa->pa_iot;
729 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
730 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
731 printf("%s: couldn't map %s channel cmd regs\n",
732 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
733 return (0);
734 }
735
736 wdc_cp->ctl_iot = pa->pa_iot;
737 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
738 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
739 printf("%s: couldn't map %s channel ctl regs\n",
740 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
741 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
742 PCIIDE_COMPAT_CMD_SIZE);
743 return (0);
744 }
745
746 return (1);
747 }
748
749 int
750 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
751 struct pci_attach_args * pa;
752 struct pciide_channel *cp;
753 bus_size_t *cmdsizep, *ctlsizep;
754 int (*pci_intr) __P((void *));
755 {
756 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
757 struct channel_softc *wdc_cp = &cp->wdc_channel;
758 const char *intrstr;
759 pci_intr_handle_t intrhandle;
760
761 cp->compat = 0;
762
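	/*
	 * Both channels of a native-mode controller share one PCI interrupt,
	 * so establish the handler only once and reuse it for each channel.
	 */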
763 if (sc->sc_pci_ih == NULL) {
764 if (pci_intr_map(pa, &intrhandle) != 0) {
765 printf("%s: couldn't map native-PCI interrupt\n",
766 sc->sc_wdcdev.sc_dev.dv_xname);
767 return 0;
768 }
769 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
770 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
771 intrhandle, IPL_BIO, pci_intr, sc);
772 if (sc->sc_pci_ih != NULL) {
773 printf("%s: using %s for native-PCI interrupt\n",
774 sc->sc_wdcdev.sc_dev.dv_xname,
775 intrstr ? intrstr : "unknown interrupt");
776 } else {
777 printf("%s: couldn't establish native-PCI interrupt",
778 sc->sc_wdcdev.sc_dev.dv_xname);
779 if (intrstr != NULL)
780 printf(" at %s", intrstr);
781 printf("\n");
782 return 0;
783 }
784 }
785 cp->ih = sc->sc_pci_ih;
786 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
787 PCI_MAPREG_TYPE_IO, 0,
788 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
789 printf("%s: couldn't map %s channel cmd regs\n",
790 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
791 return 0;
792 }
793
794 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
795 PCI_MAPREG_TYPE_IO, 0,
796 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
797 printf("%s: couldn't map %s channel ctl regs\n",
798 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
799 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
800 return 0;
801 }
802 /*
 803	 * In native mode, 4 bytes of I/O space are mapped for the control
 804	 * register; the control register itself is at offset 2. Pass the generic
 805	 * code a handle for only one byte at the right offset.
806 */
807 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
808 &wdc_cp->ctl_ioh) != 0) {
809 printf("%s: unable to subregion %s channel ctl regs\n",
810 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
811 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
 812 		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
813 return 0;
814 }
815 return (1);
816 }
817
818 void
819 pciide_mapreg_dma(sc, pa)
820 struct pciide_softc *sc;
821 struct pci_attach_args *pa;
822 {
823 pcireg_t maptype;
824 bus_addr_t addr;
825
826 /*
827 * Map DMA registers
828 *
829 * Note that sc_dma_ok is the right variable to test to see if
830 * DMA can be done. If the interface doesn't support DMA,
831 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
832 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
833 * non-zero if the interface supports DMA and the registers
834 * could be mapped.
835 *
836 * XXX Note that despite the fact that the Bus Master IDE specs
837 * XXX say that "The bus master IDE function uses 16 bytes of IO
838 * XXX space," some controllers (at least the United
839 * XXX Microelectronics UM8886BF) place it in memory space.
840 */
841 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
842 PCIIDE_REG_BUS_MASTER_DMA);
843
844 switch (maptype) {
845 case PCI_MAPREG_TYPE_IO:
846 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
847 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
848 &addr, NULL, NULL) == 0);
849 if (sc->sc_dma_ok == 0) {
850 printf(", but unused (couldn't query registers)");
851 break;
852 }
853 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
854 && addr >= 0x10000) {
855 sc->sc_dma_ok = 0;
856 printf(", but unused (registers at unsafe address "
857 "%#lx)", (unsigned long)addr);
858 break;
859 }
860 /* FALLTHROUGH */
861
862 case PCI_MAPREG_MEM_TYPE_32BIT:
863 sc->sc_dma_ok = (pci_mapreg_map(pa,
864 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
865 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
866 sc->sc_dmat = pa->pa_dmat;
867 if (sc->sc_dma_ok == 0) {
868 printf(", but unused (couldn't map registers)");
869 } else {
870 sc->sc_wdcdev.dma_arg = sc;
871 sc->sc_wdcdev.dma_init = pciide_dma_init;
872 sc->sc_wdcdev.dma_start = pciide_dma_start;
873 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
874 }
875
876 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
877 PCIIDE_OPTIONS_NODMA) {
878 printf(", but unused (forced off by config file)");
879 sc->sc_dma_ok = 0;
880 }
881 break;
882
883 default:
884 sc->sc_dma_ok = 0;
885 printf(", but unsupported register maptype (0x%x)", maptype);
886 }
887 }
888
889 int
890 pciide_compat_intr(arg)
891 void *arg;
892 {
893 struct pciide_channel *cp = arg;
894
895 #ifdef DIAGNOSTIC
896 /* should only be called for a compat channel */
897 if (cp->compat == 0)
898 panic("pciide compat intr called for non-compat chan %p\n", cp);
899 #endif
900 return (wdcintr(&cp->wdc_channel));
901 }
902
903 int
904 pciide_pci_intr(arg)
905 void *arg;
906 {
907 struct pciide_softc *sc = arg;
908 struct pciide_channel *cp;
909 struct channel_softc *wdc_cp;
910 int i, rv, crv;
911
912 rv = 0;
913 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
914 cp = &sc->pciide_channels[i];
915 wdc_cp = &cp->wdc_channel;
916
 917 		/* If a compat channel, skip. */
918 if (cp->compat)
919 continue;
 920 		/* if this channel is not waiting for an intr, skip */
921 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
922 continue;
923
924 crv = wdcintr(wdc_cp);
925 if (crv == 0)
926 ; /* leave rv alone */
927 else if (crv == 1)
928 rv = 1; /* claim the intr */
929 else if (rv == 0) /* crv should be -1 in this case */
930 rv = crv; /* if we've done no better, take it */
931 }
932 return (rv);
933 }
934
935 void
936 pciide_channel_dma_setup(cp)
937 struct pciide_channel *cp;
938 {
939 int drive;
940 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
941 struct ata_drive_datas *drvp;
942
943 for (drive = 0; drive < 2; drive++) {
944 drvp = &cp->wdc_channel.ch_drive[drive];
945 /* If no drive, skip */
946 if ((drvp->drive_flags & DRIVE) == 0)
947 continue;
948 /* setup DMA if needed */
949 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
950 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
951 sc->sc_dma_ok == 0) {
952 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
953 continue;
954 }
955 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
956 != 0) {
957 /* Abort DMA setup */
958 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
959 continue;
960 }
961 }
962 }
963
964 int
965 pciide_dma_table_setup(sc, channel, drive)
966 struct pciide_softc *sc;
967 int channel, drive;
968 {
969 bus_dma_segment_t seg;
970 int error, rseg;
971 const bus_size_t dma_table_size =
972 sizeof(struct idedma_table) * NIDEDMA_TABLES;
973 struct pciide_dma_maps *dma_maps =
974 &sc->pciide_channels[channel].dma_maps[drive];
975
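	/*
	 * The "table" is the bus-master IDE physical region descriptor (PRD)
	 * list: one base address/byte count pair per segment of a transfer.
	 */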
976 /* If table was already allocated, just return */
977 if (dma_maps->dma_table)
978 return 0;
979
980 /* Allocate memory for the DMA tables and map it */
981 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
982 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
983 BUS_DMA_NOWAIT)) != 0) {
984 printf("%s:%d: unable to allocate table DMA for "
985 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
986 channel, drive, error);
987 return error;
988 }
989 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
990 dma_table_size,
991 (caddr_t *)&dma_maps->dma_table,
992 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
 993 		printf("%s:%d: unable to map table DMA for "
994 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
995 channel, drive, error);
996 return error;
997 }
998 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
999 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1000 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1001
1002 /* Create and load table DMA map for this disk */
1003 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1004 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1005 &dma_maps->dmamap_table)) != 0) {
1006 printf("%s:%d: unable to create table DMA map for "
1007 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1008 channel, drive, error);
1009 return error;
1010 }
1011 if ((error = bus_dmamap_load(sc->sc_dmat,
1012 dma_maps->dmamap_table,
1013 dma_maps->dma_table,
1014 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1015 printf("%s:%d: unable to load table DMA map for "
1016 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1017 channel, drive, error);
1018 return error;
1019 }
1020 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1021 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1022 DEBUG_PROBE);
 1023 	/* Create an xfer DMA map for this drive */
1024 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1025 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1026 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1027 &dma_maps->dmamap_xfer)) != 0) {
1028 printf("%s:%d: unable to create xfer DMA map for "
1029 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1030 channel, drive, error);
1031 return error;
1032 }
1033 return 0;
1034 }
1035
1036 int
1037 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1038 void *v;
1039 int channel, drive;
1040 void *databuf;
1041 size_t datalen;
1042 int flags;
1043 {
1044 struct pciide_softc *sc = v;
1045 int error, seg;
1046 struct pciide_dma_maps *dma_maps =
1047 &sc->pciide_channels[channel].dma_maps[drive];
1048
1049 error = bus_dmamap_load(sc->sc_dmat,
1050 dma_maps->dmamap_xfer,
1051 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1052 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1053 if (error) {
 1054 		printf("%s:%d: unable to load xfer DMA map for "
1055 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1056 channel, drive, error);
1057 return error;
1058 }
1059
1060 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1061 dma_maps->dmamap_xfer->dm_mapsize,
1062 (flags & WDC_DMA_READ) ?
1063 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1064
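	/* Fill in one PRD entry per DMA segment; the end-of-table bit is set on the last entry below. */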
1065 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1066 #ifdef DIAGNOSTIC
1067 /* A segment must not cross a 64k boundary */
1068 {
1069 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1070 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1071 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1072 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1073 printf("pciide_dma: segment %d physical addr 0x%lx"
1074 " len 0x%lx not properly aligned\n",
1075 seg, phys, len);
1076 panic("pciide_dma: buf align");
1077 }
1078 }
1079 #endif
1080 dma_maps->dma_table[seg].base_addr =
1081 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1082 dma_maps->dma_table[seg].byte_count =
1083 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1084 IDEDMA_BYTE_COUNT_MASK);
1085 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1086 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1087 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1088
1089 }
 1090 	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
1091 htole32(IDEDMA_BYTE_COUNT_EOT);
1092
1093 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1094 dma_maps->dmamap_table->dm_mapsize,
1095 BUS_DMASYNC_PREWRITE);
1096
1097 /* Maps are ready. Start DMA function */
1098 #ifdef DIAGNOSTIC
1099 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1100 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1101 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1102 panic("pciide_dma_init: table align");
1103 }
1104 #endif
1105
1106 /* Clear status bits */
1107 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1108 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1109 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1110 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1111 /* Write table addr */
1112 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1113 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1114 dma_maps->dmamap_table->dm_segs[0].ds_addr);
 1115 	/* set transfer direction: on a drive read the bus master writes to host memory */
1116 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1117 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1118 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1119 /* remember flags */
1120 dma_maps->dma_flags = flags;
1121 return 0;
1122 }
1123
1124 void
1125 pciide_dma_start(v, channel, drive)
1126 void *v;
1127 int channel, drive;
1128 {
1129 struct pciide_softc *sc = v;
1130
1131 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1132 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1133 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1134 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1135 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1136 }
1137
1138 int
1139 pciide_dma_finish(v, channel, drive, force)
1140 void *v;
1141 int channel, drive;
1142 int force;
1143 {
1144 struct pciide_softc *sc = v;
1145 u_int8_t status;
1146 int error = 0;
1147 struct pciide_dma_maps *dma_maps =
1148 &sc->pciide_channels[channel].dma_maps[drive];
1149
1150 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1151 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1152 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1153 DEBUG_XFERS);
1154
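	/* Unless forced, only complete the transfer if the controller actually raised an interrupt. */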
1155 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1156 return WDC_DMAST_NOIRQ;
1157
1158 /* stop DMA channel */
1159 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1160 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1161 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1162 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1163
1164 /* Unload the map of the data buffer */
1165 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1166 dma_maps->dmamap_xfer->dm_mapsize,
1167 (dma_maps->dma_flags & WDC_DMA_READ) ?
1168 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1169 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1170
1171 if ((status & IDEDMA_CTL_ERR) != 0) {
1172 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1173 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1174 error |= WDC_DMAST_ERR;
1175 }
1176
1177 if ((status & IDEDMA_CTL_INTR) == 0) {
1178 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1179 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1180 drive, status);
1181 error |= WDC_DMAST_NOIRQ;
1182 }
1183
1184 if ((status & IDEDMA_CTL_ACT) != 0) {
1185 /* data underrun, may be a valid condition for ATAPI */
1186 error |= WDC_DMAST_UNDER;
1187 }
1188 return error;
1189 }
1190
1191 void
1192 pciide_irqack(chp)
1193 struct channel_softc *chp;
1194 {
1195 struct pciide_channel *cp = (struct pciide_channel*)chp;
1196 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1197
1198 /* clear status bits in IDE DMA registers */
1199 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1200 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1201 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1202 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1203 }
1204
 1205 /* common code used by several chip_map functions */
1206 int
1207 pciide_chansetup(sc, channel, interface)
1208 struct pciide_softc *sc;
1209 int channel;
1210 pcireg_t interface;
1211 {
1212 struct pciide_channel *cp = &sc->pciide_channels[channel];
1213 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1214 cp->name = PCIIDE_CHANNEL_NAME(channel);
1215 cp->wdc_channel.channel = channel;
1216 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1217 cp->wdc_channel.ch_queue =
1218 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1219 if (cp->wdc_channel.ch_queue == NULL) {
 1220 		printf("%s %s channel: "
 1221 		    "can't allocate memory for command queue\n",
1222 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1223 return 0;
1224 }
1225 printf("%s: %s channel %s to %s mode\n",
1226 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1227 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1228 "configured" : "wired",
1229 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1230 "native-PCI" : "compatibility");
1231 return 1;
1232 }
1233
 1234 /* common code used by several chip channel_map functions */
1235 void
1236 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1237 struct pci_attach_args *pa;
1238 struct pciide_channel *cp;
1239 pcireg_t interface;
1240 bus_size_t *cmdsizep, *ctlsizep;
1241 int (*pci_intr) __P((void *));
1242 {
1243 struct channel_softc *wdc_cp = &cp->wdc_channel;
1244
1245 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1246 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1247 pci_intr);
1248 else
1249 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1250 wdc_cp->channel, cmdsizep, ctlsizep);
1251
1252 if (cp->hw_ok == 0)
1253 return;
1254 wdc_cp->data32iot = wdc_cp->cmd_iot;
1255 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1256 wdcattach(wdc_cp);
1257 }
1258
1259 /*
 1260  * Generic code to determine whether a channel can be disabled. Returns 1
 1261  * if the channel can be disabled, 0 if not.
1262 */
1263 int
1264 pciide_chan_candisable(cp)
1265 struct pciide_channel *cp;
1266 {
1267 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1268 struct channel_softc *wdc_cp = &cp->wdc_channel;
1269
1270 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1271 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1272 printf("%s: disabling %s channel (no drives)\n",
1273 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1274 cp->hw_ok = 0;
1275 return 1;
1276 }
1277 return 0;
1278 }
1279
1280 /*
1281 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1282 * Set hw_ok=0 on failure
1283 */
1284 void
1285 pciide_map_compat_intr(pa, cp, compatchan, interface)
1286 struct pci_attach_args *pa;
1287 struct pciide_channel *cp;
1288 int compatchan, interface;
1289 {
1290 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1291 struct channel_softc *wdc_cp = &cp->wdc_channel;
1292
1293 if (cp->hw_ok == 0)
1294 return;
1295 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1296 return;
1297
1298 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1299 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1300 pa, compatchan, pciide_compat_intr, cp);
1301 if (cp->ih == NULL) {
1302 #endif
1303 printf("%s: no compatibility interrupt for use by %s "
1304 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1305 cp->hw_ok = 0;
1306 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1307 }
1308 #endif
1309 }
1310
1311 void
1312 pciide_print_modes(cp)
1313 struct pciide_channel *cp;
1314 {
1315 wdc_print_modes(&cp->wdc_channel);
1316 }
1317
1318 void
1319 default_chip_map(sc, pa)
1320 struct pciide_softc *sc;
1321 struct pci_attach_args *pa;
1322 {
1323 struct pciide_channel *cp;
1324 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1325 pcireg_t csr;
1326 int channel, drive;
1327 struct ata_drive_datas *drvp;
1328 u_int8_t idedma_ctl;
1329 bus_size_t cmdsize, ctlsize;
1330 char *failreason;
1331
1332 if (pciide_chipen(sc, pa) == 0)
1333 return;
1334
1335 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1336 printf("%s: bus-master DMA support present",
1337 sc->sc_wdcdev.sc_dev.dv_xname);
1338 if (sc->sc_pp == &default_product_desc &&
1339 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1340 PCIIDE_OPTIONS_DMA) == 0) {
1341 printf(", but unused (no driver support)");
1342 sc->sc_dma_ok = 0;
1343 } else {
1344 pciide_mapreg_dma(sc, pa);
1345 if (sc->sc_dma_ok != 0)
1346 printf(", used without full driver "
1347 "support");
1348 }
1349 } else {
1350 printf("%s: hardware does not support DMA",
1351 sc->sc_wdcdev.sc_dev.dv_xname);
1352 sc->sc_dma_ok = 0;
1353 }
1354 printf("\n");
1355 if (sc->sc_dma_ok) {
1356 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1357 sc->sc_wdcdev.irqack = pciide_irqack;
1358 }
1359 sc->sc_wdcdev.PIO_cap = 0;
1360 sc->sc_wdcdev.DMA_cap = 0;
1361
1362 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1363 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1364 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1365
1366 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1367 cp = &sc->pciide_channels[channel];
1368 if (pciide_chansetup(sc, channel, interface) == 0)
1369 continue;
1370 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1371 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1372 &ctlsize, pciide_pci_intr);
1373 } else {
1374 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1375 channel, &cmdsize, &ctlsize);
1376 }
1377 if (cp->hw_ok == 0)
1378 continue;
1379 /*
1380 * Check to see if something appears to be there.
1381 */
1382 failreason = NULL;
1383 if (!wdcprobe(&cp->wdc_channel)) {
1384 failreason = "not responding; disabled or no drives?";
1385 goto next;
1386 }
1387 /*
1388 * Now, make sure it's actually attributable to this PCI IDE
1389 * channel by trying to access the channel again while the
1390 * PCI IDE controller's I/O space is disabled. (If the
1391 * channel no longer appears to be there, it belongs to
1392 * this controller.) YUCK!
1393 */
1394 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1395 PCI_COMMAND_STATUS_REG);
1396 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1397 csr & ~PCI_COMMAND_IO_ENABLE);
1398 if (wdcprobe(&cp->wdc_channel))
1399 failreason = "other hardware responding at addresses";
1400 pci_conf_write(sc->sc_pc, sc->sc_tag,
1401 PCI_COMMAND_STATUS_REG, csr);
1402 next:
1403 if (failreason) {
1404 printf("%s: %s channel ignored (%s)\n",
1405 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1406 failreason);
1407 cp->hw_ok = 0;
1408 bus_space_unmap(cp->wdc_channel.cmd_iot,
1409 cp->wdc_channel.cmd_ioh, cmdsize);
1410 bus_space_unmap(cp->wdc_channel.ctl_iot,
1411 cp->wdc_channel.ctl_ioh, ctlsize);
1412 } else {
1413 pciide_map_compat_intr(pa, cp, channel, interface);
1414 }
1415 if (cp->hw_ok) {
1416 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1417 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1418 wdcattach(&cp->wdc_channel);
1419 }
1420 }
1421
1422 if (sc->sc_dma_ok == 0)
1423 return;
1424
1425 /* Allocate DMA maps */
1426 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1427 idedma_ctl = 0;
1428 cp = &sc->pciide_channels[channel];
1429 for (drive = 0; drive < 2; drive++) {
1430 drvp = &cp->wdc_channel.ch_drive[drive];
1431 /* If no drive, skip */
1432 if ((drvp->drive_flags & DRIVE) == 0)
1433 continue;
1434 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1435 continue;
 1436 			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
 1437 				/* Abort DMA setup; fall back to PIO for this drive */
 1438 				printf("%s:%d:%d: can't allocate DMA maps, "
 1439 				    "using PIO transfers\n",
 1440 				    sc->sc_wdcdev.sc_dev.dv_xname,
 1441 				    channel, drive);
 1442 				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
 1443 			}
1444 printf("%s:%d:%d: using DMA data transfers\n",
1445 sc->sc_wdcdev.sc_dev.dv_xname,
1446 channel, drive);
1447 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1448 }
1449 if (idedma_ctl != 0) {
1450 /* Add software bits in status register */
1451 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1452 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1453 idedma_ctl);
1454 }
1455 }
1456 }
1457
1458 void
1459 piix_chip_map(sc, pa)
1460 struct pciide_softc *sc;
1461 struct pci_attach_args *pa;
1462 {
1463 struct pciide_channel *cp;
1464 int channel;
1465 u_int32_t idetim;
1466 bus_size_t cmdsize, ctlsize;
1467
1468 if (pciide_chipen(sc, pa) == 0)
1469 return;
1470
1471 printf("%s: bus-master DMA support present",
1472 sc->sc_wdcdev.sc_dev.dv_xname);
1473 pciide_mapreg_dma(sc, pa);
1474 printf("\n");
1475 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1476 WDC_CAPABILITY_MODE;
1477 if (sc->sc_dma_ok) {
1478 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1479 sc->sc_wdcdev.irqack = pciide_irqack;
1480 switch(sc->sc_pp->ide_product) {
1481 case PCI_PRODUCT_INTEL_82371AB_IDE:
1482 case PCI_PRODUCT_INTEL_82440MX_IDE:
1483 case PCI_PRODUCT_INTEL_82801AA_IDE:
1484 case PCI_PRODUCT_INTEL_82801AB_IDE:
1485 case PCI_PRODUCT_INTEL_82801BA_IDE:
1486 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1487 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1488 }
1489 }
1490 sc->sc_wdcdev.PIO_cap = 4;
1491 sc->sc_wdcdev.DMA_cap = 2;
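	/*
	 * Highest Ultra-DMA mode per chip: the 82801AA (ICH) does UDMA/66
	 * (mode 4), the 82801BA/BAM (ICH2) do UDMA/100 (mode 5), everything
	 * else that supports UDMA tops out at UDMA/33 (mode 2).
	 */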
1492 switch(sc->sc_pp->ide_product) {
1493 case PCI_PRODUCT_INTEL_82801AA_IDE:
1494 sc->sc_wdcdev.UDMA_cap = 4;
1495 break;
1496 case PCI_PRODUCT_INTEL_82801BA_IDE:
1497 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1498 sc->sc_wdcdev.UDMA_cap = 5;
1499 break;
1500 default:
1501 sc->sc_wdcdev.UDMA_cap = 2;
1502 }
1503 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1504 sc->sc_wdcdev.set_modes = piix_setup_channel;
1505 else
1506 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1507 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1508 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1509
1510 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1511 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1512 DEBUG_PROBE);
1513 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1514 WDCDEBUG_PRINT((", sidetim=0x%x",
1515 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1516 DEBUG_PROBE);
1517 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
 1518 			WDCDEBUG_PRINT((", udmareg 0x%x",
1519 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1520 DEBUG_PROBE);
1521 }
1522 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1523 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1524 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1525 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1526 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1527 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1528 DEBUG_PROBE);
1529 }
1530
1531 }
1532 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1533
1534 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1535 cp = &sc->pciide_channels[channel];
1536 /* PIIX is compat-only */
1537 if (pciide_chansetup(sc, channel, 0) == 0)
1538 continue;
1539 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1540 if ((PIIX_IDETIM_READ(idetim, channel) &
1541 PIIX_IDETIM_IDE) == 0) {
1542 printf("%s: %s channel ignored (disabled)\n",
1543 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1544 continue;
1545 }
 1546 		/* PIIXs are compat-only pciide devices */
1547 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1548 if (cp->hw_ok == 0)
1549 continue;
1550 if (pciide_chan_candisable(cp)) {
1551 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1552 channel);
1553 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1554 idetim);
1555 }
1556 pciide_map_compat_intr(pa, cp, channel, 0);
1557 if (cp->hw_ok == 0)
1558 continue;
1559 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1560 }
1561
1562 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1563 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1564 DEBUG_PROBE);
1565 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1566 WDCDEBUG_PRINT((", sidetim=0x%x",
1567 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1568 DEBUG_PROBE);
1569 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
 1570 			WDCDEBUG_PRINT((", udmareg 0x%x",
1571 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1572 DEBUG_PROBE);
1573 }
1574 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1575 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1576 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1577 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1578 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1579 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1580 DEBUG_PROBE);
1581 }
1582 }
1583 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1584 }
1585
1586 void
1587 piix_setup_channel(chp)
1588 struct channel_softc *chp;
1589 {
1590 u_int8_t mode[2], drive;
1591 u_int32_t oidetim, idetim, idedma_ctl;
1592 struct pciide_channel *cp = (struct pciide_channel*)chp;
1593 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1594 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1595
1596 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1597 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1598 idedma_ctl = 0;
1599
1600 /* set up new idetim: Enable IDE registers decode */
1601 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1602 chp->channel);
1603
1604 /* setup DMA */
1605 pciide_channel_dma_setup(cp);
1606
1607 /*
 1608 	 * Here we have to juggle the drives' modes: the PIIX can't use
 1609 	 * different timings for master and slave drives, so we need to
 1610 	 * find the best combination.
1611 */
1612
 1613 	/* If both drives support DMA, take the lower mode */
1614 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1615 (drvp[1].drive_flags & DRIVE_DMA)) {
1616 mode[0] = mode[1] =
1617 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1618 drvp[0].DMA_mode = mode[0];
1619 drvp[1].DMA_mode = mode[1];
1620 goto ok;
1621 }
1622 /*
1623 * If only one drive supports DMA, use its mode, and
 1624 	 * put the other one in PIO mode 0 if its mode is not compatible.
1625 */
1626 if (drvp[0].drive_flags & DRIVE_DMA) {
1627 mode[0] = drvp[0].DMA_mode;
1628 mode[1] = drvp[1].PIO_mode;
1629 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1630 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1631 mode[1] = drvp[1].PIO_mode = 0;
1632 goto ok;
1633 }
1634 if (drvp[1].drive_flags & DRIVE_DMA) {
1635 mode[1] = drvp[1].DMA_mode;
1636 mode[0] = drvp[0].PIO_mode;
1637 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1638 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1639 mode[0] = drvp[0].PIO_mode = 0;
1640 goto ok;
1641 }
1642 /*
 1643 	 * If neither drive is using DMA, take the lower mode, unless
 1644 	 * one of them is below PIO mode 2.
1645 */
1646 if (drvp[0].PIO_mode < 2) {
1647 mode[0] = drvp[0].PIO_mode = 0;
1648 mode[1] = drvp[1].PIO_mode;
1649 } else if (drvp[1].PIO_mode < 2) {
1650 mode[1] = drvp[1].PIO_mode = 0;
1651 mode[0] = drvp[0].PIO_mode;
1652 } else {
1653 mode[0] = mode[1] =
1654 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1655 drvp[0].PIO_mode = mode[0];
1656 drvp[1].PIO_mode = mode[1];
1657 }
 1658 ok:	/* The modes are set up */
1659 for (drive = 0; drive < 2; drive++) {
1660 if (drvp[drive].drive_flags & DRIVE_DMA) {
1661 idetim |= piix_setup_idetim_timings(
1662 mode[drive], 1, chp->channel);
1663 goto end;
1664 }
1665 }
 1666 	/* If we get here, neither drive is using DMA */
1667 if (mode[0] >= 2)
1668 idetim |= piix_setup_idetim_timings(
1669 mode[0], 0, chp->channel);
1670 else
1671 idetim |= piix_setup_idetim_timings(
1672 mode[1], 0, chp->channel);
1673 end: /*
1674 * timing mode is now set up in the controller. Enable
1675 * it per-drive
1676 */
1677 for (drive = 0; drive < 2; drive++) {
1678 /* If no drive, skip */
1679 if ((drvp[drive].drive_flags & DRIVE) == 0)
1680 continue;
1681 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1682 if (drvp[drive].drive_flags & DRIVE_DMA)
1683 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1684 }
1685 if (idedma_ctl != 0) {
1686 /* Add software bits in status register */
1687 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1688 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1689 idedma_ctl);
1690 }
1691 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1692 pciide_print_modes(cp);
1693 }
1694
1695 void
1696 piix3_4_setup_channel(chp)
1697 struct channel_softc *chp;
1698 {
1699 struct ata_drive_datas *drvp;
1700 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1701 struct pciide_channel *cp = (struct pciide_channel*)chp;
1702 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1703 int drive;
1704 int channel = chp->channel;
1705
1706 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1707 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1708 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1709 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1710 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1711 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1712 PIIX_SIDETIM_RTC_MASK(channel));
1713
1714 idedma_ctl = 0;
1715 /* If channel disabled, no need to go further */
1716 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1717 return;
1718 /* set up new idetim: Enable IDE registers decode */
1719 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1720
1721 /* setup DMA if needed */
1722 pciide_channel_dma_setup(cp);
1723
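	/*
	 * Unlike the original PIIX, the PIIX3/4 and ICH parts have a slave
	 * timing register (SIDETIM) and per-drive Ultra-DMA control, so each
	 * drive on a channel can be programmed independently.
	 */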
1724 for (drive = 0; drive < 2; drive++) {
1725 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1726 PIIX_UDMATIM_SET(0x3, channel, drive));
1727 drvp = &chp->ch_drive[drive];
1728 /* If no drive, skip */
1729 if ((drvp->drive_flags & DRIVE) == 0)
1730 continue;
1731 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1732 (drvp->drive_flags & DRIVE_UDMA) == 0))
1733 goto pio;
1734
1735 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1736 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1737 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1738 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1739 ideconf |= PIIX_CONFIG_PINGPONG;
1740 }
1741 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1742 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1743 /* setup Ultra/100 */
1744 if (drvp->UDMA_mode > 2 &&
1745 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1746 drvp->UDMA_mode = 2;
1747 if (drvp->UDMA_mode > 4) {
1748 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1749 } else {
1750 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1751 if (drvp->UDMA_mode > 2) {
1752 ideconf |= PIIX_CONFIG_UDMA66(channel,
1753 drive);
1754 } else {
1755 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1756 drive);
1757 }
1758 }
1759 }
1760 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1761 /* setup Ultra/66 */
1762 if (drvp->UDMA_mode > 2 &&
1763 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1764 drvp->UDMA_mode = 2;
1765 if (drvp->UDMA_mode > 2)
1766 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1767 else
1768 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1769 }
1770 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1771 (drvp->drive_flags & DRIVE_UDMA)) {
1772 /* use Ultra/DMA */
1773 drvp->drive_flags &= ~DRIVE_DMA;
 1774 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1775 udmareg |= PIIX_UDMATIM_SET(
1776 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1777 } else {
1778 /* use Multiword DMA */
1779 drvp->drive_flags &= ~DRIVE_UDMA;
1780 if (drive == 0) {
1781 idetim |= piix_setup_idetim_timings(
1782 drvp->DMA_mode, 1, channel);
1783 } else {
1784 sidetim |= piix_setup_sidetim_timings(
1785 drvp->DMA_mode, 1, channel);
1786 				idetim = PIIX_IDETIM_SET(idetim,
1787 PIIX_IDETIM_SITRE, channel);
1788 }
1789 }
1790 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1791
1792 pio: /* use PIO mode */
1793 idetim |= piix_setup_idetim_drvs(drvp);
1794 if (drive == 0) {
1795 idetim |= piix_setup_idetim_timings(
1796 drvp->PIO_mode, 0, channel);
1797 } else {
1798 sidetim |= piix_setup_sidetim_timings(
1799 drvp->PIO_mode, 0, channel);
1800 			idetim = PIIX_IDETIM_SET(idetim,
1801 PIIX_IDETIM_SITRE, channel);
1802 }
1803 }
1804 if (idedma_ctl != 0) {
1805 /* Add software bits in status register */
1806 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1807 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1808 idedma_ctl);
1809 }
1810 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1811 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1812 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1813 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1814 pciide_print_modes(cp);
1815 }
1816
1817
1818 /* setup ISP and RTC fields, based on mode */
1819 static u_int32_t
1820 piix_setup_idetim_timings(mode, dma, channel)
1821 u_int8_t mode;
1822 u_int8_t dma;
1823 u_int8_t channel;
1824 {
1825
1826 if (dma)
1827 return PIIX_IDETIM_SET(0,
1828 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1829 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1830 channel);
1831 else
1832 return PIIX_IDETIM_SET(0,
1833 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1834 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1835 channel);
1836 }
1837
1838 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1839 static u_int32_t
1840 piix_setup_idetim_drvs(drvp)
1841 struct ata_drive_datas *drvp;
1842 {
1843 u_int32_t ret = 0;
1844 struct channel_softc *chp = drvp->chnl_softc;
1845 u_int8_t channel = chp->channel;
1846 u_int8_t drive = drvp->drive;
1847
1848 /*
1849 	 * If the drive is using UDMA, the timing setup is independent,
1850 	 * so just check DMA and PIO here.
1851 */
1852 if (drvp->drive_flags & DRIVE_DMA) {
1853 		/* if DMA mode 0 is in use, use compatible timings */
1854 if ((drvp->drive_flags & DRIVE_DMA) &&
1855 drvp->DMA_mode == 0) {
1856 drvp->PIO_mode = 0;
1857 return ret;
1858 }
1859 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1860 /*
1861 		 * If PIO and DMA timings are the same, use fast timings for
1862 		 * PIO too; otherwise use compat timings for PIO.
1863 */
1864 if ((piix_isp_pio[drvp->PIO_mode] !=
1865 piix_isp_dma[drvp->DMA_mode]) ||
1866 (piix_rtc_pio[drvp->PIO_mode] !=
1867 piix_rtc_dma[drvp->DMA_mode]))
1868 drvp->PIO_mode = 0;
1869 /* if PIO mode <= 2, use compat timings for PIO */
1870 if (drvp->PIO_mode <= 2) {
1871 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1872 channel);
1873 return ret;
1874 }
1875 }
1876
1877 /*
1878 * Now setup PIO modes. If mode < 2, use compat timings.
1879 * Else enable fast timings. Enable IORDY and prefetch/post
1880 * if PIO mode >= 3.
1881 */
1882
1883 if (drvp->PIO_mode < 2)
1884 return ret;
1885
1886 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1887 if (drvp->PIO_mode >= 3) {
1888 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1889 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1890 }
1891 return ret;
1892 }
1893
1894 /* setup values in SIDETIM registers, based on mode */
1895 static u_int32_t
1896 piix_setup_sidetim_timings(mode, dma, channel)
1897 u_int8_t mode;
1898 u_int8_t dma;
1899 u_int8_t channel;
1900 {
1901 if (dma)
1902 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1903 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1904 else
1905 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1906 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1907 }
1908
1909 void
1910 amd7x6_chip_map(sc, pa)
1911 struct pciide_softc *sc;
1912 struct pci_attach_args *pa;
1913 {
1914 struct pciide_channel *cp;
1915 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1916 int channel;
1917 pcireg_t chanenable;
1918 bus_size_t cmdsize, ctlsize;
1919
1920 if (pciide_chipen(sc, pa) == 0)
1921 return;
1922 printf("%s: bus-master DMA support present",
1923 sc->sc_wdcdev.sc_dev.dv_xname);
1924 pciide_mapreg_dma(sc, pa);
1925 printf("\n");
1926 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1927 WDC_CAPABILITY_MODE;
1928 if (sc->sc_dma_ok) {
1929 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1930 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1931 sc->sc_wdcdev.irqack = pciide_irqack;
1932 }
1933 sc->sc_wdcdev.PIO_cap = 4;
1934 sc->sc_wdcdev.DMA_cap = 2;
1935
1936 if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1937 sc->sc_wdcdev.UDMA_cap = 5;
1938 else
1939 sc->sc_wdcdev.UDMA_cap = 4;
1940 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1941 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1942 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1943 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1944
1945 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1946 DEBUG_PROBE);
1947 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1948 cp = &sc->pciide_channels[channel];
1949 if (pciide_chansetup(sc, channel, interface) == 0)
1950 continue;
1951
1952 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1953 printf("%s: %s channel ignored (disabled)\n",
1954 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1955 continue;
1956 }
1957 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1958 pciide_pci_intr);
1959
1960 if (pciide_chan_candisable(cp))
1961 chanenable &= ~AMD7X6_CHAN_EN(channel);
1962 pciide_map_compat_intr(pa, cp, channel, interface);
1963 if (cp->hw_ok == 0)
1964 continue;
1965
1966 amd7x6_setup_channel(&cp->wdc_channel);
1967 }
1968 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1969 chanenable);
1970 return;
1971 }
1972
1973 void
1974 amd7x6_setup_channel(chp)
1975 struct channel_softc *chp;
1976 {
1977 u_int32_t udmatim_reg, datatim_reg;
1978 u_int8_t idedma_ctl;
1979 int mode, drive;
1980 struct ata_drive_datas *drvp;
1981 struct pciide_channel *cp = (struct pciide_channel*)chp;
1982 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1983 #ifndef PCIIDE_AMD756_ENABLEDMA
1984 int rev = PCI_REVISION(
1985 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1986 #endif
1987
1988 idedma_ctl = 0;
1989 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1990 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1991 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1992 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1993
1994 /* setup DMA if needed */
1995 pciide_channel_dma_setup(cp);
1996
1997 for (drive = 0; drive < 2; drive++) {
1998 drvp = &chp->ch_drive[drive];
1999 /* If no drive, skip */
2000 if ((drvp->drive_flags & DRIVE) == 0)
2001 continue;
2002 /* add timing values, setup DMA if needed */
2003 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2004 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2005 mode = drvp->PIO_mode;
2006 goto pio;
2007 }
2008 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2009 (drvp->drive_flags & DRIVE_UDMA)) {
2010 /* use Ultra/DMA */
2011 drvp->drive_flags &= ~DRIVE_DMA;
2012 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2013 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2014 AMD7X6_UDMA_TIME(chp->channel, drive,
2015 amd7x6_udma_tim[drvp->UDMA_mode]);
2016 /* can use PIO timings, MW DMA unused */
2017 mode = drvp->PIO_mode;
2018 } else {
2019 /* use Multiword DMA, but only if revision is OK */
2020 drvp->drive_flags &= ~DRIVE_UDMA;
2021 #ifndef PCIIDE_AMD756_ENABLEDMA
2022 /*
2023 * The workaround doesn't seem to be necessary
2024 * with all drives, so it can be disabled by
2025 			 * defining PCIIDE_AMD756_ENABLEDMA. The underlying bug
2026 			 * causes a hard hang if triggered.
2027 */
2028 if (sc->sc_pp->ide_product ==
2029 PCI_PRODUCT_AMD_PBC756_IDE &&
2030 AMD756_CHIPREV_DISABLEDMA(rev)) {
2031 printf("%s:%d:%d: multi-word DMA disabled due "
2032 "to chip revision\n",
2033 sc->sc_wdcdev.sc_dev.dv_xname,
2034 chp->channel, drive);
2035 mode = drvp->PIO_mode;
2036 drvp->drive_flags &= ~DRIVE_DMA;
2037 goto pio;
2038 }
2039 #endif
2040 /* mode = min(pio, dma+2) */
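			/*
			 * Added note: one pulse/recovery timing per drive is
			 * shared by PIO and multi-word DMA, with DMA mode n
			 * driven from the PIO mode n+2 timings, so pick the
			 * slower of the two.
			 */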
2041 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2042 mode = drvp->PIO_mode;
2043 else
2044 mode = drvp->DMA_mode + 2;
2045 }
2046 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2047
2048 pio: /* setup PIO mode */
2049 if (mode <= 2) {
2050 drvp->DMA_mode = 0;
2051 drvp->PIO_mode = 0;
2052 mode = 0;
2053 } else {
2054 drvp->PIO_mode = mode;
2055 drvp->DMA_mode = mode - 2;
2056 }
2057 datatim_reg |=
2058 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2059 amd7x6_pio_set[mode]) |
2060 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2061 amd7x6_pio_rec[mode]);
2062 }
2063 if (idedma_ctl != 0) {
2064 /* Add software bits in status register */
2065 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2066 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2067 idedma_ctl);
2068 }
2069 pciide_print_modes(cp);
2070 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2071 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2072 }
2073
2074 void
2075 apollo_chip_map(sc, pa)
2076 struct pciide_softc *sc;
2077 struct pci_attach_args *pa;
2078 {
2079 struct pciide_channel *cp;
2080 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2081 int channel;
2082 u_int32_t ideconf;
2083 bus_size_t cmdsize, ctlsize;
2084 pcitag_t pcib_tag;
2085 pcireg_t pcib_id, pcib_class;
2086
2087 if (pciide_chipen(sc, pa) == 0)
2088 return;
2089 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2090 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2091 /* and read ID and rev of the ISA bridge */
2092 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2093 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2094 printf(": VIA Technologies ");
2095 switch (PCI_PRODUCT(pcib_id)) {
2096 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2097 printf("VT82C586 (Apollo VP) ");
2098 		if (PCI_REVISION(pcib_class) >= 0x02) {
2099 printf("ATA33 controller\n");
2100 sc->sc_wdcdev.UDMA_cap = 2;
2101 } else {
2102 printf("controller\n");
2103 sc->sc_wdcdev.UDMA_cap = 0;
2104 }
2105 break;
2106 case PCI_PRODUCT_VIATECH_VT82C596A:
2107 printf("VT82C596A (Apollo Pro) ");
2108 if (PCI_REVISION(pcib_class) >= 0x12) {
2109 printf("ATA66 controller\n");
2110 sc->sc_wdcdev.UDMA_cap = 4;
2111 } else {
2112 printf("ATA33 controller\n");
2113 sc->sc_wdcdev.UDMA_cap = 2;
2114 }
2115 break;
2116 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2117 printf("VT82C686A (Apollo KX133) ");
2118 if (PCI_REVISION(pcib_class) >= 0x40) {
2119 printf("ATA100 controller\n");
2120 sc->sc_wdcdev.UDMA_cap = 5;
2121 } else {
2122 printf("ATA66 controller\n");
2123 sc->sc_wdcdev.UDMA_cap = 4;
2124 }
2125 break;
2126 case PCI_PRODUCT_VIATECH_VT8233:
2127 printf("VT8233 ATA100 controller\n");
2128 sc->sc_wdcdev.UDMA_cap = 5;
2129 break;
2130 default:
2131 printf("unknown ATA controller\n");
2132 sc->sc_wdcdev.UDMA_cap = 0;
2133 }
2134
2135 printf("%s: bus-master DMA support present",
2136 sc->sc_wdcdev.sc_dev.dv_xname);
2137 pciide_mapreg_dma(sc, pa);
2138 printf("\n");
2139 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2140 WDC_CAPABILITY_MODE;
2141 if (sc->sc_dma_ok) {
2142 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2143 sc->sc_wdcdev.irqack = pciide_irqack;
2144 if (sc->sc_wdcdev.UDMA_cap > 0)
2145 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2146 }
2147 sc->sc_wdcdev.PIO_cap = 4;
2148 sc->sc_wdcdev.DMA_cap = 2;
2149 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2150 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2151 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2152
2153 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2154 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2155 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2156 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2157 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2158 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2159 DEBUG_PROBE);
2160
2161 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2162 cp = &sc->pciide_channels[channel];
2163 if (pciide_chansetup(sc, channel, interface) == 0)
2164 continue;
2165
2166 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2167 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2168 printf("%s: %s channel ignored (disabled)\n",
2169 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2170 continue;
2171 }
2172 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2173 pciide_pci_intr);
2174 if (cp->hw_ok == 0)
2175 continue;
2176 if (pciide_chan_candisable(cp)) {
2177 ideconf &= ~APO_IDECONF_EN(channel);
2178 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2179 ideconf);
2180 }
2181 pciide_map_compat_intr(pa, cp, channel, interface);
2182
2183 if (cp->hw_ok == 0)
2184 continue;
2185 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2186 }
2187 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2188 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2189 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2190 }
2191
2192 void
2193 apollo_setup_channel(chp)
2194 struct channel_softc *chp;
2195 {
2196 u_int32_t udmatim_reg, datatim_reg;
2197 u_int8_t idedma_ctl;
2198 int mode, drive;
2199 struct ata_drive_datas *drvp;
2200 struct pciide_channel *cp = (struct pciide_channel*)chp;
2201 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2202
2203 idedma_ctl = 0;
2204 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2205 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2206 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2207 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2208
2209 /* setup DMA if needed */
2210 pciide_channel_dma_setup(cp);
2211
2212 for (drive = 0; drive < 2; drive++) {
2213 drvp = &chp->ch_drive[drive];
2214 /* If no drive, skip */
2215 if ((drvp->drive_flags & DRIVE) == 0)
2216 continue;
2217 /* add timing values, setup DMA if needed */
2218 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2219 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2220 mode = drvp->PIO_mode;
2221 goto pio;
2222 }
2223 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2224 (drvp->drive_flags & DRIVE_UDMA)) {
2225 /* use Ultra/DMA */
2226 drvp->drive_flags &= ~DRIVE_DMA;
2227 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2228 APO_UDMA_EN_MTH(chp->channel, drive);
2229 if (sc->sc_wdcdev.UDMA_cap == 5) {
2230 /* 686b */
2231 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2232 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2233 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2234 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2235 /* 596b or 686a */
2236 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2237 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2238 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2239 } else {
2240 /* 596a or 586b */
2241 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2242 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2243 }
2244 /* can use PIO timings, MW DMA unused */
2245 mode = drvp->PIO_mode;
2246 } else {
2247 /* use Multiword DMA */
2248 drvp->drive_flags &= ~DRIVE_UDMA;
2249 /* mode = min(pio, dma+2) */
2250 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2251 mode = drvp->PIO_mode;
2252 else
2253 mode = drvp->DMA_mode + 2;
2254 }
2255 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2256
2257 pio: /* setup PIO mode */
2258 if (mode <= 2) {
2259 drvp->DMA_mode = 0;
2260 drvp->PIO_mode = 0;
2261 mode = 0;
2262 } else {
2263 drvp->PIO_mode = mode;
2264 drvp->DMA_mode = mode - 2;
2265 }
2266 datatim_reg |=
2267 APO_DATATIM_PULSE(chp->channel, drive,
2268 apollo_pio_set[mode]) |
2269 APO_DATATIM_RECOV(chp->channel, drive,
2270 apollo_pio_rec[mode]);
2271 }
2272 if (idedma_ctl != 0) {
2273 /* Add software bits in status register */
2274 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2275 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2276 idedma_ctl);
2277 }
2278 pciide_print_modes(cp);
2279 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2280 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2281 }
2282
2283 void
2284 cmd_channel_map(pa, sc, channel)
2285 struct pci_attach_args *pa;
2286 struct pciide_softc *sc;
2287 int channel;
2288 {
2289 struct pciide_channel *cp = &sc->pciide_channels[channel];
2290 bus_size_t cmdsize, ctlsize;
2291 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2292 int interface, one_channel;
2293
2294 /*
2295 * The 0648/0649 can be told to identify as a RAID controller.
2296 	 * In this case, we have to fake the interface register.
2297 */
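	/*
	 * Added note: in that case both channels are reported as settable,
	 * and as native-PCI when CMD_CONF_DSA1 is set (which apparently
	 * indicates the chip is decoding the native-mode addresses).
	 */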
2298 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2299 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2300 PCIIDE_INTERFACE_SETTABLE(1);
2301 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2302 CMD_CONF_DSA1)
2303 interface |= PCIIDE_INTERFACE_PCI(0) |
2304 PCIIDE_INTERFACE_PCI(1);
2305 } else {
2306 interface = PCI_INTERFACE(pa->pa_class);
2307 }
2308
2309 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2310 cp->name = PCIIDE_CHANNEL_NAME(channel);
2311 cp->wdc_channel.channel = channel;
2312 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2313
2314 /*
2315 	 * Older CMD64X chips don't have independent channels.
2316 */
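	/*
	 * Added note: on those chips both channels share channel 0's queue
	 * below, so requests on the two channels are serialized.
	 */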
2317 switch (sc->sc_pp->ide_product) {
2318 case PCI_PRODUCT_CMDTECH_649:
2319 one_channel = 0;
2320 break;
2321 default:
2322 one_channel = 1;
2323 break;
2324 }
2325
2326 if (channel > 0 && one_channel) {
2327 cp->wdc_channel.ch_queue =
2328 sc->pciide_channels[0].wdc_channel.ch_queue;
2329 } else {
2330 cp->wdc_channel.ch_queue =
2331 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2332 }
2333 if (cp->wdc_channel.ch_queue == NULL) {
2334 printf("%s %s channel: "
2335 		    "can't allocate memory for command queue\n",
2336 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2337 return;
2338 }
2339
2340 printf("%s: %s channel %s to %s mode\n",
2341 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2342 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2343 "configured" : "wired",
2344 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2345 "native-PCI" : "compatibility");
2346
2347 /*
2348 * with a CMD PCI64x, if we get here, the first channel is enabled:
2349 * there's no way to disable the first channel without disabling
2350 * the whole device
2351 */
2352 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2353 printf("%s: %s channel ignored (disabled)\n",
2354 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2355 return;
2356 }
2357
2358 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2359 if (cp->hw_ok == 0)
2360 return;
2361 if (channel == 1) {
2362 if (pciide_chan_candisable(cp)) {
2363 ctrl &= ~CMD_CTRL_2PORT;
2364 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2365 CMD_CTRL, ctrl);
2366 }
2367 }
2368 pciide_map_compat_intr(pa, cp, channel, interface);
2369 }
2370
2371 int
2372 cmd_pci_intr(arg)
2373 void *arg;
2374 {
2375 struct pciide_softc *sc = arg;
2376 struct pciide_channel *cp;
2377 struct channel_softc *wdc_cp;
2378 int i, rv, crv;
2379 u_int32_t priirq, secirq;
2380
2381 rv = 0;
2382 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2383 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2384 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2385 cp = &sc->pciide_channels[i];
2386 wdc_cp = &cp->wdc_channel;
2387 		/* If it's a compat channel, skip it. */
2388 if (cp->compat)
2389 continue;
2390 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2391 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2392 crv = wdcintr(wdc_cp);
2393 if (crv == 0)
2394 printf("%s:%d: bogus intr\n",
2395 sc->sc_wdcdev.sc_dev.dv_xname, i);
2396 else
2397 rv = 1;
2398 }
2399 }
2400 return rv;
2401 }
2402
2403 void
2404 cmd_chip_map(sc, pa)
2405 struct pciide_softc *sc;
2406 struct pci_attach_args *pa;
2407 {
2408 int channel;
2409
2410 /*
2411 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2412 	 * and the base address registers can be disabled at
2413 	 * the hardware level. In this case, the device is wired
2414 * in compat mode and its first channel is always enabled,
2415 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2416 * In fact, it seems that the first channel of the CMD PCI0640
2417 * can't be disabled.
2418 */
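	/*
	 * Added note: hence the pciide_chipen() check below is only compiled
	 * in when PCIIDE_CMD064x_DISABLE is defined.
	 */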
2419
2420 #ifdef PCIIDE_CMD064x_DISABLE
2421 if (pciide_chipen(sc, pa) == 0)
2422 return;
2423 #endif
2424
2425 printf("%s: hardware does not support DMA\n",
2426 sc->sc_wdcdev.sc_dev.dv_xname);
2427 sc->sc_dma_ok = 0;
2428
2429 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2430 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2431 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2432
2433 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2434 cmd_channel_map(pa, sc, channel);
2435 }
2436 }
2437
2438 void
2439 cmd0643_9_chip_map(sc, pa)
2440 struct pciide_softc *sc;
2441 struct pci_attach_args *pa;
2442 {
2443 struct pciide_channel *cp;
2444 int channel;
2445 int rev = PCI_REVISION(
2446 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2447
2448 /*
2449 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2450 	 * and the base address registers can be disabled at
2451 	 * the hardware level. In this case, the device is wired
2452 * in compat mode and its first channel is always enabled,
2453 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2454 * In fact, it seems that the first channel of the CMD PCI0640
2455 * can't be disabled.
2456 */
2457
2458 #ifdef PCIIDE_CMD064x_DISABLE
2459 if (pciide_chipen(sc, pa) == 0)
2460 return;
2461 #endif
2462 printf("%s: bus-master DMA support present",
2463 sc->sc_wdcdev.sc_dev.dv_xname);
2464 pciide_mapreg_dma(sc, pa);
2465 printf("\n");
2466 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2467 WDC_CAPABILITY_MODE;
2468 if (sc->sc_dma_ok) {
2469 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2470 switch (sc->sc_pp->ide_product) {
2471 case PCI_PRODUCT_CMDTECH_649:
2472 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2473 sc->sc_wdcdev.UDMA_cap = 5;
2474 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2475 break;
2476 case PCI_PRODUCT_CMDTECH_648:
2477 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2478 sc->sc_wdcdev.UDMA_cap = 4;
2479 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2480 break;
2481 case PCI_PRODUCT_CMDTECH_646:
2482 if (rev >= CMD0646U2_REV) {
2483 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2484 sc->sc_wdcdev.UDMA_cap = 2;
2485 } else if (rev >= CMD0646U_REV) {
2486 /*
2487 * Linux's driver claims that the 646U is broken
2488 * with UDMA. Only enable it if we know what we're
2489 * doing
2490 */
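			/*
			 * Added note: the UDMA timing registers are zeroed
			 * below in either case; if UDMA was enabled above
			 * they are reprogrammed per drive in
			 * cmd0643_9_setup_channel().
			 */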
2491 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2492 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2493 sc->sc_wdcdev.UDMA_cap = 2;
2494 #endif
2495 /* explicitly disable UDMA */
2496 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2497 CMD_UDMATIM(0), 0);
2498 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2499 CMD_UDMATIM(1), 0);
2500 }
2501 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2502 break;
2503 default:
2504 sc->sc_wdcdev.irqack = pciide_irqack;
2505 }
2506 }
2507
2508 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2509 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2510 sc->sc_wdcdev.PIO_cap = 4;
2511 sc->sc_wdcdev.DMA_cap = 2;
2512 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2513
2514 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2515 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2516 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2517 DEBUG_PROBE);
2518
2519 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2520 cp = &sc->pciide_channels[channel];
2521 cmd_channel_map(pa, sc, channel);
2522 if (cp->hw_ok == 0)
2523 continue;
2524 cmd0643_9_setup_channel(&cp->wdc_channel);
2525 }
2526 /*
2527 * note - this also makes sure we clear the irq disable and reset
2528 * bits
2529 */
2530 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2531 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2532 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2533 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2534 DEBUG_PROBE);
2535 }
2536
2537 void
2538 cmd0643_9_setup_channel(chp)
2539 struct channel_softc *chp;
2540 {
2541 struct ata_drive_datas *drvp;
2542 u_int8_t tim;
2543 u_int32_t idedma_ctl, udma_reg;
2544 int drive;
2545 struct pciide_channel *cp = (struct pciide_channel*)chp;
2546 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2547
2548 idedma_ctl = 0;
2549 /* setup DMA if needed */
2550 pciide_channel_dma_setup(cp);
2551
2552 for (drive = 0; drive < 2; drive++) {
2553 drvp = &chp->ch_drive[drive];
2554 /* If no drive, skip */
2555 if ((drvp->drive_flags & DRIVE) == 0)
2556 continue;
2557 /* add timing values, setup DMA if needed */
2558 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2559 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2560 if (drvp->drive_flags & DRIVE_UDMA) {
2561 /* UltraDMA on a 646U2, 0648 or 0649 */
2562 drvp->drive_flags &= ~DRIVE_DMA;
2563 udma_reg = pciide_pci_read(sc->sc_pc,
2564 sc->sc_tag, CMD_UDMATIM(chp->channel));
2565 if (drvp->UDMA_mode > 2 &&
2566 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2567 CMD_BICSR) &
2568 CMD_BICSR_80(chp->channel)) == 0)
2569 drvp->UDMA_mode = 2;
2570 if (drvp->UDMA_mode > 2)
2571 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2572 else if (sc->sc_wdcdev.UDMA_cap > 2)
2573 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2574 udma_reg |= CMD_UDMATIM_UDMA(drive);
2575 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2576 CMD_UDMATIM_TIM_OFF(drive));
2577 udma_reg |=
2578 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2579 CMD_UDMATIM_TIM_OFF(drive));
2580 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2581 CMD_UDMATIM(chp->channel), udma_reg);
2582 } else {
2583 /*
2584 				 * Use Multiword DMA.
2585 				 * Timings will be used for both PIO and DMA,
2586 				 * so adjust the DMA mode if needed.
2587 				 * If we have a 0646U2/8/9, turn off UDMA.
2588 */
2589 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2590 udma_reg = pciide_pci_read(sc->sc_pc,
2591 sc->sc_tag,
2592 CMD_UDMATIM(chp->channel));
2593 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2594 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2595 CMD_UDMATIM(chp->channel),
2596 udma_reg);
2597 }
2598 if (drvp->PIO_mode >= 3 &&
2599 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2600 drvp->DMA_mode = drvp->PIO_mode - 2;
2601 }
2602 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2603 }
2604 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2605 }
2606 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2607 CMD_DATA_TIM(chp->channel, drive), tim);
2608 }
2609 if (idedma_ctl != 0) {
2610 /* Add software bits in status register */
2611 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2612 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2613 idedma_ctl);
2614 }
2615 pciide_print_modes(cp);
2616 }
2617
2618 void
2619 cmd646_9_irqack(chp)
2620 struct channel_softc *chp;
2621 {
2622 u_int32_t priirq, secirq;
2623 struct pciide_channel *cp = (struct pciide_channel*)chp;
2624 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2625
2626 if (chp->channel == 0) {
2627 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2628 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2629 } else {
2630 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2631 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2632 }
2633 pciide_irqack(chp);
2634 }
2635
2636 void
2637 cy693_chip_map(sc, pa)
2638 struct pciide_softc *sc;
2639 struct pci_attach_args *pa;
2640 {
2641 struct pciide_channel *cp;
2642 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2643 bus_size_t cmdsize, ctlsize;
2644
2645 if (pciide_chipen(sc, pa) == 0)
2646 return;
2647 /*
2648 * this chip has 2 PCI IDE functions, one for primary and one for
2649 * secondary. So we need to call pciide_mapregs_compat() with
2650 * the real channel
2651 */
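	/*
	 * Added note: function 1 is thus mapped to the primary channel
	 * (compat channel 0) and function 2 to the secondary one; any other
	 * function number is unexpected.
	 */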
2652 if (pa->pa_function == 1) {
2653 sc->sc_cy_compatchan = 0;
2654 } else if (pa->pa_function == 2) {
2655 sc->sc_cy_compatchan = 1;
2656 } else {
2657 printf("%s: unexpected PCI function %d\n",
2658 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2659 return;
2660 }
2661 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2662 printf("%s: bus-master DMA support present",
2663 sc->sc_wdcdev.sc_dev.dv_xname);
2664 pciide_mapreg_dma(sc, pa);
2665 } else {
2666 printf("%s: hardware does not support DMA",
2667 sc->sc_wdcdev.sc_dev.dv_xname);
2668 sc->sc_dma_ok = 0;
2669 }
2670 printf("\n");
2671
2672 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2673 if (sc->sc_cy_handle == NULL) {
2674 printf("%s: unable to map hyperCache control registers\n",
2675 sc->sc_wdcdev.sc_dev.dv_xname);
2676 sc->sc_dma_ok = 0;
2677 }
2678
2679 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2680 WDC_CAPABILITY_MODE;
2681 if (sc->sc_dma_ok) {
2682 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2683 sc->sc_wdcdev.irqack = pciide_irqack;
2684 }
2685 sc->sc_wdcdev.PIO_cap = 4;
2686 sc->sc_wdcdev.DMA_cap = 2;
2687 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2688
2689 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2690 sc->sc_wdcdev.nchannels = 1;
2691
2692 /* Only one channel for this chip; if we are here it's enabled */
2693 cp = &sc->pciide_channels[0];
2694 sc->wdc_chanarray[0] = &cp->wdc_channel;
2695 cp->name = PCIIDE_CHANNEL_NAME(0);
2696 cp->wdc_channel.channel = 0;
2697 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2698 cp->wdc_channel.ch_queue =
2699 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2700 if (cp->wdc_channel.ch_queue == NULL) {
2701 printf("%s primary channel: "
2702 		    "can't allocate memory for command queue\n",
2703 sc->sc_wdcdev.sc_dev.dv_xname);
2704 return;
2705 }
2706 printf("%s: primary channel %s to ",
2707 sc->sc_wdcdev.sc_dev.dv_xname,
2708 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2709 "configured" : "wired");
2710 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2711 printf("native-PCI");
2712 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2713 pciide_pci_intr);
2714 } else {
2715 printf("compatibility");
2716 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2717 &cmdsize, &ctlsize);
2718 }
2719 printf(" mode\n");
2720 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2721 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2722 wdcattach(&cp->wdc_channel);
2723 if (pciide_chan_candisable(cp)) {
2724 pci_conf_write(sc->sc_pc, sc->sc_tag,
2725 PCI_COMMAND_STATUS_REG, 0);
2726 }
2727 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2728 if (cp->hw_ok == 0)
2729 return;
2730 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2731 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2732 cy693_setup_channel(&cp->wdc_channel);
2733 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2734 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2735 }
2736
2737 void
2738 cy693_setup_channel(chp)
2739 struct channel_softc *chp;
2740 {
2741 struct ata_drive_datas *drvp;
2742 int drive;
2743 u_int32_t cy_cmd_ctrl;
2744 u_int32_t idedma_ctl;
2745 struct pciide_channel *cp = (struct pciide_channel*)chp;
2746 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2747 int dma_mode = -1;
2748
2749 cy_cmd_ctrl = idedma_ctl = 0;
2750
2751 /* setup DMA if needed */
2752 pciide_channel_dma_setup(cp);
2753
2754 for (drive = 0; drive < 2; drive++) {
2755 drvp = &chp->ch_drive[drive];
2756 /* If no drive, skip */
2757 if ((drvp->drive_flags & DRIVE) == 0)
2758 continue;
2759 /* add timing values, setup DMA if needed */
2760 if (drvp->drive_flags & DRIVE_DMA) {
2761 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2762 /* use Multiword DMA */
2763 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2764 dma_mode = drvp->DMA_mode;
2765 }
2766 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2767 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2768 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2769 CY_CMD_CTRL_IOW_REC_OFF(drive));
2770 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2771 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2772 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2773 CY_CMD_CTRL_IOR_REC_OFF(drive));
2774 }
2775 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2776 chp->ch_drive[0].DMA_mode = dma_mode;
2777 chp->ch_drive[1].DMA_mode = dma_mode;
2778
2779 if (dma_mode == -1)
2780 dma_mode = 0;
2781
2782 if (sc->sc_cy_handle != NULL) {
2783 /* Note: `multiple' is implied. */
2784 cy82c693_write(sc->sc_cy_handle,
2785 (sc->sc_cy_compatchan == 0) ?
2786 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2787 }
2788
2789 pciide_print_modes(cp);
2790
2791 if (idedma_ctl != 0) {
2792 /* Add software bits in status register */
2793 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2794 IDEDMA_CTL, idedma_ctl);
2795 }
2796 }
2797
2798 static int
2799 sis_hostbr_match(pa)
2800 struct pci_attach_args *pa;
2801 {
2802 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2803 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2804 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2805 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2806 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2807 }
2808
2809 void
2810 sis_chip_map(sc, pa)
2811 struct pciide_softc *sc;
2812 struct pci_attach_args *pa;
2813 {
2814 struct pciide_channel *cp;
2815 int channel;
2816 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2817 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2818 pcireg_t rev = PCI_REVISION(pa->pa_class);
2819 bus_size_t cmdsize, ctlsize;
2820 pcitag_t pchb_tag;
2821 pcireg_t pchb_id, pchb_class;
2822
2823 if (pciide_chipen(sc, pa) == 0)
2824 return;
2825 printf("%s: bus-master DMA support present",
2826 sc->sc_wdcdev.sc_dev.dv_xname);
2827 pciide_mapreg_dma(sc, pa);
2828 printf("\n");
2829
2830 /* get a PCI tag for the host bridge (function 0 of the same device) */
2831 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2832 	/* and read the ID and revision of the host bridge */
2833 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2834 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2835
2836 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2837 WDC_CAPABILITY_MODE;
2838 if (sc->sc_dma_ok) {
2839 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2840 sc->sc_wdcdev.irqack = pciide_irqack;
2841 /*
2842 		 * Controllers associated with a rev 0x2 530 Host to PCI
2843 		 * Bridge have problems with UDMA (info provided by Christos).
2844 */
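		/*
		 * Added note: so only advertise UDMA for IDE revisions
		 * >= 0xd0, and even then not when the host bridge is a 530
		 * at a revision below 0x03.
		 */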
2845 if (rev >= 0xd0 &&
2846 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2847 PCI_REVISION(pchb_class) >= 0x03))
2848 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2849 }
2850
2851 sc->sc_wdcdev.PIO_cap = 4;
2852 sc->sc_wdcdev.DMA_cap = 2;
2853 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2854 /*
2855 		 * Use UDMA/100 on chipsets with a SiS 645/650/730/735 host
2856 		 * bridge and UDMA/33 on other chipsets.
2857 */
2858 sc->sc_wdcdev.UDMA_cap =
2859 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2860 sc->sc_wdcdev.set_modes = sis_setup_channel;
2861
2862 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2863 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2864
2865 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2866 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2867 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2868
2869 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2870 cp = &sc->pciide_channels[channel];
2871 if (pciide_chansetup(sc, channel, interface) == 0)
2872 continue;
2873 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2874 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2875 printf("%s: %s channel ignored (disabled)\n",
2876 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2877 continue;
2878 }
2879 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2880 pciide_pci_intr);
2881 if (cp->hw_ok == 0)
2882 continue;
2883 if (pciide_chan_candisable(cp)) {
2884 if (channel == 0)
2885 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2886 else
2887 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2888 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2889 sis_ctr0);
2890 }
2891 pciide_map_compat_intr(pa, cp, channel, interface);
2892 if (cp->hw_ok == 0)
2893 continue;
2894 sis_setup_channel(&cp->wdc_channel);
2895 }
2896 }
2897
2898 void
2899 sis_setup_channel(chp)
2900 struct channel_softc *chp;
2901 {
2902 struct ata_drive_datas *drvp;
2903 int drive;
2904 u_int32_t sis_tim;
2905 u_int32_t idedma_ctl;
2906 struct pciide_channel *cp = (struct pciide_channel*)chp;
2907 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2908
2909 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2910 "channel %d 0x%x\n", chp->channel,
2911 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2912 DEBUG_PROBE);
2913 sis_tim = 0;
2914 idedma_ctl = 0;
2915 /* setup DMA if needed */
2916 pciide_channel_dma_setup(cp);
2917
2918 for (drive = 0; drive < 2; drive++) {
2919 drvp = &chp->ch_drive[drive];
2920 /* If no drive, skip */
2921 if ((drvp->drive_flags & DRIVE) == 0)
2922 continue;
2923 /* add timing values, setup DMA if needed */
2924 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2925 (drvp->drive_flags & DRIVE_UDMA) == 0)
2926 goto pio;
2927
2928 if (drvp->drive_flags & DRIVE_UDMA) {
2929 /* use Ultra/DMA */
2930 drvp->drive_flags &= ~DRIVE_DMA;
2931 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2932 SIS_TIM_UDMA_TIME_OFF(drive);
2933 sis_tim |= SIS_TIM_UDMA_EN(drive);
2934 } else {
2935 /*
2936 			 * Use Multiword DMA.
2937 			 * Timings will be used for both PIO and DMA,
2938 			 * so adjust the DMA mode if needed.
2939 */
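			/*
			 * Added note: the single timing register per drive
			 * covers both modes, pairing DMA mode n with PIO mode
			 * n+2; DMA mode 0 falls back to compatible (PIO 0)
			 * timings.
			 */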
2940 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2941 drvp->PIO_mode = drvp->DMA_mode + 2;
2942 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2943 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2944 drvp->PIO_mode - 2 : 0;
2945 if (drvp->DMA_mode == 0)
2946 drvp->PIO_mode = 0;
2947 }
2948 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2949 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2950 SIS_TIM_ACT_OFF(drive);
2951 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2952 SIS_TIM_REC_OFF(drive);
2953 }
2954 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2955 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2956 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2957 if (idedma_ctl != 0) {
2958 /* Add software bits in status register */
2959 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2960 IDEDMA_CTL, idedma_ctl);
2961 }
2962 pciide_print_modes(cp);
2963 }
2964
2965 static int
2966 acer_isabr_match(pa)
2967 struct pci_attach_args *pa;
2968 {
2969 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI) &&
2970 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALI_M1543));
2971 }
2972
2973 void
2974 acer_chip_map(sc, pa)
2975 struct pciide_softc *sc;
2976 struct pci_attach_args *pa;
2977 {
2978 struct pci_attach_args isa_pa;
2979 struct pciide_channel *cp;
2980 int channel;
2981 pcireg_t cr, interface;
2982 bus_size_t cmdsize, ctlsize;
2983 pcireg_t rev = PCI_REVISION(pa->pa_class);
2984
2985 if (pciide_chipen(sc, pa) == 0)
2986 return;
2987 printf("%s: bus-master DMA support present",
2988 sc->sc_wdcdev.sc_dev.dv_xname);
2989 pciide_mapreg_dma(sc, pa);
2990 printf("\n");
2991 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2992 WDC_CAPABILITY_MODE;
2993 if (sc->sc_dma_ok) {
2994 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2995 if (rev >= 0x20) {
2996 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2997 if (rev >= 0xC4)
2998 sc->sc_wdcdev.UDMA_cap = 5;
2999 else if (rev >= 0xC2)
3000 sc->sc_wdcdev.UDMA_cap = 4;
3001 else
3002 sc->sc_wdcdev.UDMA_cap = 2;
3003 }
3004 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3005 sc->sc_wdcdev.irqack = pciide_irqack;
3006 }
3007
3008 sc->sc_wdcdev.PIO_cap = 4;
3009 sc->sc_wdcdev.DMA_cap = 2;
3010 sc->sc_wdcdev.set_modes = acer_setup_channel;
3011 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3012 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3013
3014 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3015 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3016 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3017
3018 /* Enable "microsoft register bits" R/W. */
3019 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3020 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3021 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3022 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3023 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3024 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3025 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3026 ~ACER_CHANSTATUSREGS_RO);
3027 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3028 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3029 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3030 /* Don't use cr, re-read the real register content instead */
3031 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3032 PCI_CLASS_REG));
3033
3034 /* From linux: enable "Cable Detection" */
3035 if (rev >= 0xC2) {
3036 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3037 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3038 | ACER_0x4B_CDETECT);
3039 /* set south-bridge's enable bit, m1533, 0x79 */
3040 if (pci_find_device(&isa_pa, acer_isabr_match) == 0) {
3041 printf("%s: can't find PCI/ISA bridge, downgrading "
3042 "to Ultra/33\n", sc->sc_wdcdev.sc_dev.dv_xname);
3043 sc->sc_wdcdev.UDMA_cap = 2;
3044 } else {
3045 if (rev == 0xC2)
3046 /* 1543C-B0 (m1533, 0x79, bit 2) */
3047 pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
3048 ACER_0x79,
3049 pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3050 ACER_0x79)
3051 | ACER_0x79_REVC2_EN);
3052 else
3053 /* 1553/1535 (m1533, 0x79, bit 1) */
3054 pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
3055 ACER_0x79,
3056 pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3057 ACER_0x79)
3058 | ACER_0x79_EN);
3059 }
3060 }
3061
3062 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3063 cp = &sc->pciide_channels[channel];
3064 if (pciide_chansetup(sc, channel, interface) == 0)
3065 continue;
3066 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3067 printf("%s: %s channel ignored (disabled)\n",
3068 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3069 continue;
3070 }
3071 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3072 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3073 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3074 if (cp->hw_ok == 0)
3075 continue;
3076 if (pciide_chan_candisable(cp)) {
3077 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3078 pci_conf_write(sc->sc_pc, sc->sc_tag,
3079 PCI_CLASS_REG, cr);
3080 }
3081 pciide_map_compat_intr(pa, cp, channel, interface);
3082 acer_setup_channel(&cp->wdc_channel);
3083 }
3084 }
3085
3086 void
3087 acer_setup_channel(chp)
3088 struct channel_softc *chp;
3089 {
3090 struct ata_drive_datas *drvp;
3091 int drive;
3092 u_int32_t acer_fifo_udma;
3093 u_int32_t idedma_ctl;
3094 struct pciide_channel *cp = (struct pciide_channel*)chp;
3095 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3096
3097 idedma_ctl = 0;
3098 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3099 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3100 acer_fifo_udma), DEBUG_PROBE);
3101 /* setup DMA if needed */
3102 pciide_channel_dma_setup(cp);
3103
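	/*
	 * Added note: if either drive wants UDMA, check the cable first.
	 * ACER_0x4A apparently reports an 80-conductor cable per channel;
	 * without one, anything above UDMA mode 2 (Ultra/33) is capped.
	 */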
3104 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3105 	    DRIVE_UDMA) { /* check for an 80-pin cable */
3106 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3107 ACER_0x4A_80PIN(chp->channel)) {
3108 if (chp->ch_drive[0].UDMA_mode > 2)
3109 chp->ch_drive[0].UDMA_mode = 2;
3110 if (chp->ch_drive[1].UDMA_mode > 2)
3111 chp->ch_drive[1].UDMA_mode = 2;
3112 }
3113 }
3114
3115 for (drive = 0; drive < 2; drive++) {
3116 drvp = &chp->ch_drive[drive];
3117 /* If no drive, skip */
3118 if ((drvp->drive_flags & DRIVE) == 0)
3119 continue;
3120 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3121 "channel %d drive %d 0x%x\n", chp->channel, drive,
3122 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3123 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3124 /* clear FIFO/DMA mode */
3125 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3126 ACER_UDMA_EN(chp->channel, drive) |
3127 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3128
3129 /* add timing values, setup DMA if needed */
3130 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3131 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3132 acer_fifo_udma |=
3133 ACER_FTH_OPL(chp->channel, drive, 0x1);
3134 goto pio;
3135 }
3136
3137 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3138 if (drvp->drive_flags & DRIVE_UDMA) {
3139 /* use Ultra/DMA */
3140 drvp->drive_flags &= ~DRIVE_DMA;
3141 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3142 acer_fifo_udma |=
3143 ACER_UDMA_TIM(chp->channel, drive,
3144 acer_udma[drvp->UDMA_mode]);
3145 /* XXX disable if one drive < UDMA3 ? */
3146 if (drvp->UDMA_mode >= 3) {
3147 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3148 ACER_0x4B,
3149 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3150 ACER_0x4B) | ACER_0x4B_UDMA66);
3151 }
3152 } else {
3153 /*
3154 			 * Use Multiword DMA.
3155 			 * Timings will be used for both PIO and DMA,
3156 			 * so adjust the DMA mode if needed.
3157 */
3158 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3159 drvp->PIO_mode = drvp->DMA_mode + 2;
3160 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3161 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3162 drvp->PIO_mode - 2 : 0;
3163 if (drvp->DMA_mode == 0)
3164 drvp->PIO_mode = 0;
3165 }
3166 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3167 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3168 ACER_IDETIM(chp->channel, drive),
3169 acer_pio[drvp->PIO_mode]);
3170 }
3171 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3172 acer_fifo_udma), DEBUG_PROBE);
3173 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3174 if (idedma_ctl != 0) {
3175 /* Add software bits in status register */
3176 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3177 IDEDMA_CTL, idedma_ctl);
3178 }
3179 pciide_print_modes(cp);
3180 }
3181
3182 int
3183 acer_pci_intr(arg)
3184 void *arg;
3185 {
3186 struct pciide_softc *sc = arg;
3187 struct pciide_channel *cp;
3188 struct channel_softc *wdc_cp;
3189 int i, rv, crv;
3190 u_int32_t chids;
3191
3192 rv = 0;
3193 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3194 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3195 cp = &sc->pciide_channels[i];
3196 wdc_cp = &cp->wdc_channel;
3197 		/* If it's a compat channel, skip it. */
3198 if (cp->compat)
3199 continue;
3200 if (chids & ACER_CHIDS_INT(i)) {
3201 crv = wdcintr(wdc_cp);
3202 if (crv == 0)
3203 printf("%s:%d: bogus intr\n",
3204 sc->sc_wdcdev.sc_dev.dv_xname, i);
3205 else
3206 rv = 1;
3207 }
3208 }
3209 return rv;
3210 }
3211
3212 void
3213 hpt_chip_map(sc, pa)
3214 struct pciide_softc *sc;
3215 struct pci_attach_args *pa;
3216 {
3217 struct pciide_channel *cp;
3218 int i, compatchan, revision;
3219 pcireg_t interface;
3220 bus_size_t cmdsize, ctlsize;
3221
3222 if (pciide_chipen(sc, pa) == 0)
3223 return;
3224 revision = PCI_REVISION(pa->pa_class);
3225 printf(": Triones/Highpoint ");
3226 if (revision == HPT370_REV)
3227 printf("HPT370 IDE Controller\n");
3228 else if (revision == HPT370A_REV)
3229 printf("HPT370A IDE Controller\n");
3230 else if (revision == HPT366_REV)
3231 printf("HPT366 IDE Controller\n");
3232 else
3233 printf("unknown HPT IDE controller rev %d\n", revision);
3234
3235 /*
3236 	 * When the chip is in native mode it identifies itself as a
3237 	 * 'misc mass storage' device, so fake the interface register.
3238 */
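	/*
	 * Added note: claim bus-master DMA and native-PCI for channel 0,
	 * plus channel 1 on the two-channel HPT370/370A.
	 */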
3239 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3240 interface = PCI_INTERFACE(pa->pa_class);
3241 } else {
3242 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3243 PCIIDE_INTERFACE_PCI(0);
3244 if (revision == HPT370_REV || revision == HPT370A_REV)
3245 interface |= PCIIDE_INTERFACE_PCI(1);
3246 }
3247
3248 printf("%s: bus-master DMA support present",
3249 sc->sc_wdcdev.sc_dev.dv_xname);
3250 pciide_mapreg_dma(sc, pa);
3251 printf("\n");
3252 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3253 WDC_CAPABILITY_MODE;
3254 if (sc->sc_dma_ok) {
3255 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3256 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3257 sc->sc_wdcdev.irqack = pciide_irqack;
3258 }
3259 sc->sc_wdcdev.PIO_cap = 4;
3260 sc->sc_wdcdev.DMA_cap = 2;
3261
3262 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3263 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3264 if (revision == HPT366_REV) {
3265 sc->sc_wdcdev.UDMA_cap = 4;
3266 /*
3267 * The 366 has 2 PCI IDE functions, one for primary and one
3268 * for secondary. So we need to call pciide_mapregs_compat()
3269 * with the real channel
3270 */
3271 if (pa->pa_function == 0) {
3272 compatchan = 0;
3273 } else if (pa->pa_function == 1) {
3274 compatchan = 1;
3275 } else {
3276 printf("%s: unexpected PCI function %d\n",
3277 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3278 return;
3279 }
3280 sc->sc_wdcdev.nchannels = 1;
3281 } else {
3282 sc->sc_wdcdev.nchannels = 2;
3283 sc->sc_wdcdev.UDMA_cap = 5;
3284 }
3285 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3286 cp = &sc->pciide_channels[i];
3287 if (sc->sc_wdcdev.nchannels > 1) {
3288 compatchan = i;
3289 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3290 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3291 printf("%s: %s channel ignored (disabled)\n",
3292 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3293 continue;
3294 }
3295 }
3296 if (pciide_chansetup(sc, i, interface) == 0)
3297 continue;
3298 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3299 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3300 &ctlsize, hpt_pci_intr);
3301 } else {
3302 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3303 &cmdsize, &ctlsize);
3304 }
3305 if (cp->hw_ok == 0)
3306 return;
3307 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3308 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3309 wdcattach(&cp->wdc_channel);
3310 hpt_setup_channel(&cp->wdc_channel);
3311 }
3312 if (revision == HPT370_REV || revision == HPT370A_REV) {
3313 /*
3314 		 * The HPT370/370A have a bit to disable interrupts; make
3315 		 * sure to clear it.
3316 */
3317 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3318 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3319 ~HPT_CSEL_IRQDIS);
3320 }
3321 return;
3322 }
3323
3324 void
3325 hpt_setup_channel(chp)
3326 struct channel_softc *chp;
3327 {
3328 struct ata_drive_datas *drvp;
3329 int drive;
3330 int cable;
3331 u_int32_t before, after;
3332 u_int32_t idedma_ctl;
3333 struct pciide_channel *cp = (struct pciide_channel*)chp;
3334 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3335
3336 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3337
3338 /* setup DMA if needed */
3339 pciide_channel_dma_setup(cp);
3340
3341 idedma_ctl = 0;
3342
3343 /* Per drive settings */
3344 for (drive = 0; drive < 2; drive++) {
3345 drvp = &chp->ch_drive[drive];
3346 /* If no drive, skip */
3347 if ((drvp->drive_flags & DRIVE) == 0)
3348 continue;
3349 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3350 HPT_IDETIM(chp->channel, drive));
3351
3352 /* add timing values, setup DMA if needed */
3353 if (drvp->drive_flags & DRIVE_UDMA) {
3354 /* use Ultra/DMA */
3355 drvp->drive_flags &= ~DRIVE_DMA;
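			/*
			 * Added note: HPT_CSEL_CBLID set presumably means a
			 * 40-conductor cable, so cap UDMA at mode 2 (Ultra/33)
			 * in that case.
			 */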
3356 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3357 drvp->UDMA_mode > 2)
3358 drvp->UDMA_mode = 2;
3359 after = (sc->sc_wdcdev.nchannels == 2) ?
3360 hpt370_udma[drvp->UDMA_mode] :
3361 hpt366_udma[drvp->UDMA_mode];
3362 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3363 } else if (drvp->drive_flags & DRIVE_DMA) {
3364 /*
3365 			 * Use Multiword DMA.
3366 			 * Timings will be used for both PIO and DMA, so adjust
3367 			 * the DMA mode if needed.
3368 */
3369 if (drvp->PIO_mode >= 3 &&
3370 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3371 drvp->DMA_mode = drvp->PIO_mode - 2;
3372 }
3373 after = (sc->sc_wdcdev.nchannels == 2) ?
3374 hpt370_dma[drvp->DMA_mode] :
3375 hpt366_dma[drvp->DMA_mode];
3376 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3377 } else {
3378 /* PIO only */
3379 after = (sc->sc_wdcdev.nchannels == 2) ?
3380 hpt370_pio[drvp->PIO_mode] :
3381 hpt366_pio[drvp->PIO_mode];
3382 }
3383 pci_conf_write(sc->sc_pc, sc->sc_tag,
3384 HPT_IDETIM(chp->channel, drive), after);
3385 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3386 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3387 after, before), DEBUG_PROBE);
3388 }
3389 if (idedma_ctl != 0) {
3390 /* Add software bits in status register */
3391 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3392 IDEDMA_CTL, idedma_ctl);
3393 }
3394 pciide_print_modes(cp);
3395 }
3396
3397 int
3398 hpt_pci_intr(arg)
3399 void *arg;
3400 {
3401 struct pciide_softc *sc = arg;
3402 struct pciide_channel *cp;
3403 struct channel_softc *wdc_cp;
3404 int rv = 0;
3405 int dmastat, i, crv;
3406
3407 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3408 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3409 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3410 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3411 continue;
3412 cp = &sc->pciide_channels[i];
3413 wdc_cp = &cp->wdc_channel;
3414 crv = wdcintr(wdc_cp);
3415 if (crv == 0) {
3416 printf("%s:%d: bogus intr\n",
3417 sc->sc_wdcdev.sc_dev.dv_xname, i);
3418 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3419 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3420 } else
3421 rv = 1;
3422 }
3423 return rv;
3424 }
3425
3426
3427 /* Macros to test product */
3428 #define PDC_IS_262(sc) \
3429 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3430 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3431 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3432 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3433 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3434 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3435 #define PDC_IS_265(sc) \
3436 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3437 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3438 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3439 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3440 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3441 #define PDC_IS_268(sc) \
3442 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3443 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3444 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
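/*
 * Note that the product lists nest: PDC_IS_268() implies PDC_IS_265(),
 * which in turn implies PDC_IS_262().
 */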
3445
3446 void
3447 pdc202xx_chip_map(sc, pa)
3448 struct pciide_softc *sc;
3449 struct pci_attach_args *pa;
3450 {
3451 struct pciide_channel *cp;
3452 int channel;
3453 pcireg_t interface, st, mode;
3454 bus_size_t cmdsize, ctlsize;
3455
3456 if (!PDC_IS_268(sc)) {
3457 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3458 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3459 st), DEBUG_PROBE);
3460 }
3461 if (pciide_chipen(sc, pa) == 0)
3462 return;
3463
3464 /* turn off RAID mode */
3465 if (!PDC_IS_268(sc))
3466 st &= ~PDC2xx_STATE_IDERAID;
3467
3468 /*
3469 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3470 	 * RAID mode; we have to fake the interface register.
3471 */
3472 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3473 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3474 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3475
3476 printf("%s: bus-master DMA support present",
3477 sc->sc_wdcdev.sc_dev.dv_xname);
3478 pciide_mapreg_dma(sc, pa);
3479 printf("\n");
3480 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3481 WDC_CAPABILITY_MODE;
3482 if (sc->sc_dma_ok) {
3483 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3484 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3485 sc->sc_wdcdev.irqack = pciide_irqack;
3486 }
3487 sc->sc_wdcdev.PIO_cap = 4;
3488 sc->sc_wdcdev.DMA_cap = 2;
3489 if (PDC_IS_265(sc))
3490 sc->sc_wdcdev.UDMA_cap = 5;
3491 else if (PDC_IS_262(sc))
3492 sc->sc_wdcdev.UDMA_cap = 4;
3493 else
3494 sc->sc_wdcdev.UDMA_cap = 2;
3495 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3496 pdc20268_setup_channel : pdc202xx_setup_channel;
3497 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3498 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3499
3500 if (!PDC_IS_268(sc)) {
3501 /* setup failsafe defaults */
3502 mode = 0;
3503 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3504 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3505 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3506 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3507 for (channel = 0;
3508 channel < sc->sc_wdcdev.nchannels;
3509 channel++) {
3510 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3511 "drive 0 initial timings 0x%x, now 0x%x\n",
3512 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3513 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3514 DEBUG_PROBE);
3515 pci_conf_write(sc->sc_pc, sc->sc_tag,
3516 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3517 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3518 "drive 1 initial timings 0x%x, now 0x%x\n",
3519 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3520 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3521 pci_conf_write(sc->sc_pc, sc->sc_tag,
3522 PDC2xx_TIM(channel, 1), mode);
3523 }
3524
3525 mode = PDC2xx_SCR_DMA;
3526 if (PDC_IS_262(sc)) {
3527 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3528 } else {
3529 /* the BIOS set it up this way */
3530 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3531 }
3532 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3533 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3534 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3535 "now 0x%x\n",
3536 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3537 PDC2xx_SCR),
3538 mode), DEBUG_PROBE);
3539 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3540 PDC2xx_SCR, mode);
3541
3542 /* controller initial state register is OK even without BIOS */
3543 /* Set DMA mode to IDE DMA compatibility */
3544 mode =
3545 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3546 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3547 DEBUG_PROBE);
3548 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3549 mode | 0x1);
3550 mode =
3551 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3552 		WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode), DEBUG_PROBE);
3553 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3554 mode | 0x1);
3555 }
3556
3557 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3558 cp = &sc->pciide_channels[channel];
3559 if (pciide_chansetup(sc, channel, interface) == 0)
3560 continue;
3561 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3562 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3563 printf("%s: %s channel ignored (disabled)\n",
3564 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3565 continue;
3566 }
3567 if (PDC_IS_265(sc))
3568 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3569 pdc20265_pci_intr);
3570 else
3571 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3572 pdc202xx_pci_intr);
3573 if (cp->hw_ok == 0)
3574 continue;
3575 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3576 st &= ~(PDC_IS_262(sc) ?
3577 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3578 pciide_map_compat_intr(pa, cp, channel, interface);
3579 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3580 }
3581 if (!PDC_IS_268(sc)) {
3582 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3583 "0x%x\n", st), DEBUG_PROBE);
3584 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3585 }
3586 return;
3587 }
3588
3589 void
3590 pdc202xx_setup_channel(chp)
3591 struct channel_softc *chp;
3592 {
3593 struct ata_drive_datas *drvp;
3594 int drive;
3595 pcireg_t mode, st;
3596 u_int32_t idedma_ctl, scr, atapi;
3597 struct pciide_channel *cp = (struct pciide_channel*)chp;
3598 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3599 int channel = chp->channel;
3600
3601 /* setup DMA if needed */
3602 pciide_channel_dma_setup(cp);
3603
3604 idedma_ctl = 0;
3605 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3606 sc->sc_wdcdev.sc_dev.dv_xname,
3607 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3608 DEBUG_PROBE);
3609
3610 /* Per channel settings */
3611 if (PDC_IS_262(sc)) {
3612 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3613 PDC262_U66);
3614 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3615 /* Trim UDMA mode */
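		/*
		 * Both drives on a channel share the U66 clock selection
		 * programmed below, so if the state register flags this
		 * channel (PDC262_STATE_80P, presumably meaning no
		 * 80-conductor cable was seen) or either drive is already
		 * limited to UDMA2, cap both drives at UDMA2.
		 */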
3616 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3617 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3618 chp->ch_drive[0].UDMA_mode <= 2) ||
3619 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3620 chp->ch_drive[1].UDMA_mode <= 2)) {
3621 if (chp->ch_drive[0].UDMA_mode > 2)
3622 chp->ch_drive[0].UDMA_mode = 2;
3623 if (chp->ch_drive[1].UDMA_mode > 2)
3624 chp->ch_drive[1].UDMA_mode = 2;
3625 }
3626 /* Set U66 if needed */
3627 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3628 chp->ch_drive[0].UDMA_mode > 2) ||
3629 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3630 chp->ch_drive[1].UDMA_mode > 2))
3631 scr |= PDC262_U66_EN(channel);
3632 else
3633 scr &= ~PDC262_U66_EN(channel);
3634 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3635 PDC262_U66, scr);
3636 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3637 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3638 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3639 PDC262_ATAPI(channel))), DEBUG_PROBE);
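		/*
		 * With an ATAPI device on the channel, write 0 to the
		 * per-channel ATAPI register in the mixed case where one
		 * drive uses Ultra-DMA and the other plain multiword DMA;
		 * otherwise write PDC262_ATAPI_UDMA.
		 */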
3640 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3641 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3642 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3643 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3644 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3645 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3646 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3647 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3648 atapi = 0;
3649 else
3650 atapi = PDC262_ATAPI_UDMA;
3651 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3652 PDC262_ATAPI(channel), atapi);
3653 }
3654 }
3655 for (drive = 0; drive < 2; drive++) {
3656 drvp = &chp->ch_drive[drive];
3657 /* If no drive, skip */
3658 if ((drvp->drive_flags & DRIVE) == 0)
3659 continue;
3660 mode = 0;
3661 if (drvp->drive_flags & DRIVE_UDMA) {
3662 /* use Ultra/DMA */
3663 drvp->drive_flags &= ~DRIVE_DMA;
3664 mode = PDC2xx_TIM_SET_MB(mode,
3665 pdc2xx_udma_mb[drvp->UDMA_mode]);
3666 mode = PDC2xx_TIM_SET_MC(mode,
3667 pdc2xx_udma_mc[drvp->UDMA_mode]);
3668 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3669 } else if (drvp->drive_flags & DRIVE_DMA) {
3670 mode = PDC2xx_TIM_SET_MB(mode,
3671 pdc2xx_dma_mb[drvp->DMA_mode]);
3672 mode = PDC2xx_TIM_SET_MC(mode,
3673 pdc2xx_dma_mc[drvp->DMA_mode]);
3674 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3675 } else {
3676 mode = PDC2xx_TIM_SET_MB(mode,
3677 pdc2xx_dma_mb[0]);
3678 mode = PDC2xx_TIM_SET_MC(mode,
3679 pdc2xx_dma_mc[0]);
3680 }
3681 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3682 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3683 if (drvp->drive_flags & DRIVE_ATA)
3684 mode |= PDC2xx_TIM_PRE;
3685 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3686 if (drvp->PIO_mode >= 3) {
3687 mode |= PDC2xx_TIM_IORDY;
3688 if (drive == 0)
3689 mode |= PDC2xx_TIM_IORDYp;
3690 }
3691 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3692 "timings 0x%x\n",
3693 sc->sc_wdcdev.sc_dev.dv_xname,
3694 chp->channel, drive, mode), DEBUG_PROBE);
3695 pci_conf_write(sc->sc_pc, sc->sc_tag,
3696 PDC2xx_TIM(chp->channel, drive), mode);
3697 }
3698 if (idedma_ctl != 0) {
3699 /* Add software bits in status register */
3700 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3701 IDEDMA_CTL, idedma_ctl);
3702 }
3703 pciide_print_modes(cp);
3704 }
3705
3706 void
3707 pdc20268_setup_channel(chp)
3708 struct channel_softc *chp;
3709 {
3710 struct ata_drive_datas *drvp;
3711 int drive;
3712 u_int32_t idedma_ctl;
3713 struct pciide_channel *cp = (struct pciide_channel*)chp;
3714 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3715 int u100;
3716
3717 /* setup DMA if needed */
3718 pciide_channel_dma_setup(cp);
3719
3720 idedma_ctl = 0;
3721
3722 	/* I don't know what this is for; FreeBSD does it too ... */
3723 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3724 IDEDMA_CMD + 0x1, 0x0b);
3725
3726 /*
3727 	 * I don't know what this is for either; FreeBSD checks it, but it is
3728 	 * not the cable-type detect bit.
3729 */
3730 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3731 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
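	/*
	 * Treat it as an "Ultra/100 capable" indication: when the bit is
	 * set (u100 == 0), the loop below caps UDMA at mode 2.
	 */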
3732
3733 for (drive = 0; drive < 2; drive++) {
3734 drvp = &chp->ch_drive[drive];
3735 /* If no drive, skip */
3736 if ((drvp->drive_flags & DRIVE) == 0)
3737 continue;
3738 if (drvp->drive_flags & DRIVE_UDMA) {
3739 /* use Ultra/DMA */
3740 drvp->drive_flags &= ~DRIVE_DMA;
3741 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3742 if (drvp->UDMA_mode > 2 && u100 == 0)
3743 drvp->UDMA_mode = 2;
3744 } else if (drvp->drive_flags & DRIVE_DMA) {
3745 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3746 }
3747 }
3748 	/* No mode setup needed; the controller snoops the SET_FEATURES command. */
3749 if (idedma_ctl != 0) {
3750 /* Add software bits in status register */
3751 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3752 IDEDMA_CTL, idedma_ctl);
3753 }
3754 pciide_print_modes(cp);
3755 }
3756
3757 int
3758 pdc202xx_pci_intr(arg)
3759 void *arg;
3760 {
3761 struct pciide_softc *sc = arg;
3762 struct pciide_channel *cp;
3763 struct channel_softc *wdc_cp;
3764 int i, rv, crv;
3765 u_int32_t scr;
3766
3767 rv = 0;
3768 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
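	/*
	 * The shared SCR register has one interrupt-pending bit per channel
	 * (PDC2xx_SCR_INT); only channels with their bit set get wdcintr().
	 */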
3769 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3770 cp = &sc->pciide_channels[i];
3771 wdc_cp = &cp->wdc_channel;
3772 /* If a compat channel skip. */
3773 if (cp->compat)
3774 continue;
3775 if (scr & PDC2xx_SCR_INT(i)) {
3776 crv = wdcintr(wdc_cp);
3777 if (crv == 0)
3778 printf("%s:%d: bogus intr (reg 0x%x)\n",
3779 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3780 else
3781 rv = 1;
3782 }
3783 }
3784 return rv;
3785 }
3786
3787 int
3788 pdc20265_pci_intr(arg)
3789 void *arg;
3790 {
3791 struct pciide_softc *sc = arg;
3792 struct pciide_channel *cp;
3793 struct channel_softc *wdc_cp;
3794 int i, rv, crv;
3795 u_int32_t dmastat;
3796
3797 rv = 0;
3798 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3799 cp = &sc->pciide_channels[i];
3800 wdc_cp = &cp->wdc_channel;
3801 /* If a compat channel skip. */
3802 if (cp->compat)
3803 continue;
3804 /*
3805 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously.
3806 		 * However, it asserts INT in IDEDMA_CTL even for non-DMA ops,
3807 		 * so use that instead (this takes 2 register reads instead of
3808 		 * 1, but we can't do it any other way).
3809 */
3810 dmastat = bus_space_read_1(sc->sc_dma_iot,
3811 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3812 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3813 continue;
3814 crv = wdcintr(wdc_cp);
3815 if (crv == 0)
3816 printf("%s:%d: bogus intr\n",
3817 sc->sc_wdcdev.sc_dev.dv_xname, i);
3818 else
3819 rv = 1;
3820 }
3821 return rv;
3822 }
3823
3824 void
3825 opti_chip_map(sc, pa)
3826 struct pciide_softc *sc;
3827 struct pci_attach_args *pa;
3828 {
3829 struct pciide_channel *cp;
3830 bus_size_t cmdsize, ctlsize;
3831 pcireg_t interface;
3832 u_int8_t init_ctrl;
3833 int channel;
3834
3835 if (pciide_chipen(sc, pa) == 0)
3836 return;
3837 printf("%s: bus-master DMA support present",
3838 sc->sc_wdcdev.sc_dev.dv_xname);
3839
3840 /*
3841 * XXXSCW:
3842 * There seem to be a couple of buggy revisions/implementations
3843 * of the OPTi pciide chipset. This kludge seems to fix one of
3844 * the reported problems (PR/11644) but still fails for the
3845 * other (PR/13151), although the latter may be due to other
3846 * issues too...
3847 */
3848 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3849 printf(" but disabled due to chip rev. <= 0x12");
3850 sc->sc_dma_ok = 0;
3851 sc->sc_wdcdev.cap = 0;
3852 } else {
3853 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3854 pciide_mapreg_dma(sc, pa);
3855 }
3856 printf("\n");
3857
3858 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3859 sc->sc_wdcdev.PIO_cap = 4;
3860 if (sc->sc_dma_ok) {
3861 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3862 sc->sc_wdcdev.irqack = pciide_irqack;
3863 sc->sc_wdcdev.DMA_cap = 2;
3864 }
3865 sc->sc_wdcdev.set_modes = opti_setup_channel;
3866
3867 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3868 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3869
3870 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3871 OPTI_REG_INIT_CONTROL);
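	/*
	 * The init control register tells us (below) whether the second
	 * channel has been disabled.
	 */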
3872
3873 interface = PCI_INTERFACE(pa->pa_class);
3874
3875 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3876 cp = &sc->pciide_channels[channel];
3877 if (pciide_chansetup(sc, channel, interface) == 0)
3878 continue;
3879 if (channel == 1 &&
3880 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3881 printf("%s: %s channel ignored (disabled)\n",
3882 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3883 continue;
3884 }
3885 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3886 pciide_pci_intr);
3887 if (cp->hw_ok == 0)
3888 continue;
3889 pciide_map_compat_intr(pa, cp, channel, interface);
3890 if (cp->hw_ok == 0)
3891 continue;
3892 opti_setup_channel(&cp->wdc_channel);
3893 }
3894 }
3895
3896 void
3897 opti_setup_channel(chp)
3898 struct channel_softc *chp;
3899 {
3900 struct ata_drive_datas *drvp;
3901 struct pciide_channel *cp = (struct pciide_channel*)chp;
3902 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3903 int drive, spd;
3904 int mode[2];
3905 u_int8_t rv, mr;
3906
3907 /*
3908 * The `Delay' and `Address Setup Time' fields of the
3909 * Miscellaneous Register are always zero initially.
3910 */
3911 	mr = opti_read_config(chp, OPTI_REG_MISC);
3912 mr &= ~(OPTI_MISC_DELAY_MASK |
3913 OPTI_MISC_ADDR_SETUP_MASK |
3914 OPTI_MISC_INDEX_MASK);
3915
3916 /* Prime the control register before setting timing values */
3917 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3918
3919 	/* Determine the clock rate of the PCI bus the chip is attached to */
3920 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3921 spd &= OPTI_STRAP_PCI_SPEED_MASK;
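	/* spd now selects the row of the opti_tim_* tables for that clock. */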
3922
3923 /* setup DMA if needed */
3924 pciide_channel_dma_setup(cp);
3925
3926 for (drive = 0; drive < 2; drive++) {
3927 drvp = &chp->ch_drive[drive];
3928 /* If no drive, skip */
3929 if ((drvp->drive_flags & DRIVE) == 0) {
3930 mode[drive] = -1;
3931 continue;
3932 }
3933
3934 if ((drvp->drive_flags & DRIVE_DMA)) {
3935 /*
3936 * Timings will be used for both PIO and DMA,
3937 * so adjust DMA mode if needed
3938 */
3939 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3940 drvp->PIO_mode = drvp->DMA_mode + 2;
3941 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3942 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3943 drvp->PIO_mode - 2 : 0;
3944 if (drvp->DMA_mode == 0)
3945 drvp->PIO_mode = 0;
3946
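			/*
			 * DMA entries presumably follow the five PIO entries
			 * (PIO 0-4) in the opti_tim_* tables, hence the +5.
			 */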
3947 mode[drive] = drvp->DMA_mode + 5;
3948 } else
3949 mode[drive] = drvp->PIO_mode;
3950
3951 if (drive && mode[0] >= 0 &&
3952 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3953 /*
3954 * Can't have two drives using different values
3955 * for `Address Setup Time'.
3956 * Slow down the faster drive to compensate.
3957 */
3958 int d = (opti_tim_as[spd][mode[0]] >
3959 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3960
3961 mode[d] = mode[1-d];
3962 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3963 chp->ch_drive[d].DMA_mode = 0;
3964 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3965 }
3966 }
3967
3968 for (drive = 0; drive < 2; drive++) {
3969 int m;
3970 if ((m = mode[drive]) < 0)
3971 continue;
3972
3973 /* Set the Address Setup Time and select appropriate index */
3974 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3975 rv |= OPTI_MISC_INDEX(drive);
3976 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3977
3978 /* Set the pulse width and recovery timing parameters */
3979 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3980 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3981 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3982 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3983
3984 /* Set the Enhanced Mode register appropriately */
3985 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3986 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3987 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3988 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3989 }
3990
3991 /* Finally, enable the timings */
3992 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3993
3994 pciide_print_modes(cp);
3995 }
3996
3997 #define ACARD_IS_850(sc) \
3998 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
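
/*
 * The ATP850 uses the ATP850_* register layout and is limited to UDMA2
 * here; the ATP860-class parts use the ATP860_* layout and go up to UDMA4
 * (see acard_chip_map() and acard_setup_channel()).
 */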
3999
4000 void
4001 acard_chip_map(sc, pa)
4002 struct pciide_softc *sc;
4003 struct pci_attach_args *pa;
4004 {
4005 struct pciide_channel *cp;
4006 int i;
4007 pcireg_t interface;
4008 bus_size_t cmdsize, ctlsize;
4009
4010 if (pciide_chipen(sc, pa) == 0)
4011 return;
4012
4013 /*
4014 	 * When the chip is in native mode it identifies itself as a
4015 	 * 'misc mass storage' device; fake the interface in that case.
4016 */
4017 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4018 interface = PCI_INTERFACE(pa->pa_class);
4019 } else {
4020 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4021 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4022 }
4023
4024 printf("%s: bus-master DMA support present",
4025 sc->sc_wdcdev.sc_dev.dv_xname);
4026 pciide_mapreg_dma(sc, pa);
4027 printf("\n");
4028 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4029 WDC_CAPABILITY_MODE;
4030
4031 if (sc->sc_dma_ok) {
4032 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4033 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4034 sc->sc_wdcdev.irqack = pciide_irqack;
4035 }
4036 sc->sc_wdcdev.PIO_cap = 4;
4037 sc->sc_wdcdev.DMA_cap = 2;
4038 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4039
4040 sc->sc_wdcdev.set_modes = acard_setup_channel;
4041 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4042 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4043
4044 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4045 cp = &sc->pciide_channels[i];
4046 if (pciide_chansetup(sc, i, interface) == 0)
4047 continue;
4048 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4049 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4050 &ctlsize, pciide_pci_intr);
4051 } else {
4052 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4053 &cmdsize, &ctlsize);
4054 }
4055 if (cp->hw_ok == 0)
4056 return;
4057 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4058 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4059 wdcattach(&cp->wdc_channel);
4060 acard_setup_channel(&cp->wdc_channel);
4061 }
4062 if (!ACARD_IS_850(sc)) {
4063 u_int32_t reg;
4064 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4065 reg &= ~ATP860_CTRL_INT;
4066 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4067 }
4068 }
4069
4070 void
4071 acard_setup_channel(chp)
4072 struct channel_softc *chp;
4073 {
4074 struct ata_drive_datas *drvp;
4075 struct pciide_channel *cp = (struct pciide_channel*)chp;
4076 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4077 int channel = chp->channel;
4078 int drive;
4079 u_int32_t idetime, udma_mode;
4080 u_int32_t idedma_ctl;
4081
4082 /* setup DMA if needed */
4083 pciide_channel_dma_setup(cp);
4084
4085 if (ACARD_IS_850(sc)) {
4086 idetime = 0;
4087 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4088 udma_mode &= ~ATP850_UDMA_MASK(channel);
4089 } else {
4090 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4091 idetime &= ~ATP860_SETTIME_MASK(channel);
4092 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4093 udma_mode &= ~ATP860_UDMA_MASK(channel);
4094
4095 		/* check for 80-pin cable */
4096 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4097 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4098 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4099 & ATP860_CTRL_80P(chp->channel)) {
4100 if (chp->ch_drive[0].UDMA_mode > 2)
4101 chp->ch_drive[0].UDMA_mode = 2;
4102 if (chp->ch_drive[1].UDMA_mode > 2)
4103 chp->ch_drive[1].UDMA_mode = 2;
4104 }
4105 }
4106 }
4107
4108 idedma_ctl = 0;
4109
4110 /* Per drive settings */
4111 for (drive = 0; drive < 2; drive++) {
4112 drvp = &chp->ch_drive[drive];
4113 /* If no drive, skip */
4114 if ((drvp->drive_flags & DRIVE) == 0)
4115 continue;
4116 /* add timing values, setup DMA if needed */
4117 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4118 (drvp->drive_flags & DRIVE_UDMA)) {
4119 /* use Ultra/DMA */
4120 if (ACARD_IS_850(sc)) {
4121 idetime |= ATP850_SETTIME(drive,
4122 acard_act_udma[drvp->UDMA_mode],
4123 acard_rec_udma[drvp->UDMA_mode]);
4124 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4125 acard_udma_conf[drvp->UDMA_mode]);
4126 } else {
4127 idetime |= ATP860_SETTIME(channel, drive,
4128 acard_act_udma[drvp->UDMA_mode],
4129 acard_rec_udma[drvp->UDMA_mode]);
4130 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4131 acard_udma_conf[drvp->UDMA_mode]);
4132 }
4133 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4134 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4135 (drvp->drive_flags & DRIVE_DMA)) {
4136 /* use Multiword DMA */
4137 drvp->drive_flags &= ~DRIVE_UDMA;
4138 if (ACARD_IS_850(sc)) {
4139 idetime |= ATP850_SETTIME(drive,
4140 acard_act_dma[drvp->DMA_mode],
4141 acard_rec_dma[drvp->DMA_mode]);
4142 } else {
4143 idetime |= ATP860_SETTIME(channel, drive,
4144 acard_act_dma[drvp->DMA_mode],
4145 acard_rec_dma[drvp->DMA_mode]);
4146 }
4147 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4148 } else {
4149 /* PIO only */
4150 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4151 if (ACARD_IS_850(sc)) {
4152 idetime |= ATP850_SETTIME(drive,
4153 acard_act_pio[drvp->PIO_mode],
4154 acard_rec_pio[drvp->PIO_mode]);
4155 } else {
4156 idetime |= ATP860_SETTIME(channel, drive,
4157 acard_act_pio[drvp->PIO_mode],
4158 acard_rec_pio[drvp->PIO_mode]);
4159 }
4160 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4161 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4162 | ATP8x0_CTRL_EN(channel));
4163 }
4164 }
4165
4166 if (idedma_ctl != 0) {
4167 /* Add software bits in status register */
4168 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4169 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4170 }
4171 pciide_print_modes(cp);
4172
4173 if (ACARD_IS_850(sc)) {
4174 pci_conf_write(sc->sc_pc, sc->sc_tag,
4175 ATP850_IDETIME(channel), idetime);
4176 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4177 } else {
4178 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4179 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4180 }
4181 }
4182
4183 int
4184 acard_pci_intr(arg)
4185 void *arg;
4186 {
4187 struct pciide_softc *sc = arg;
4188 struct pciide_channel *cp;
4189 struct channel_softc *wdc_cp;
4190 int rv = 0;
4191 int dmastat, i, crv;
4192
4193 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4194 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4195 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4196 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4197 continue;
4198 cp = &sc->pciide_channels[i];
4199 wdc_cp = &cp->wdc_channel;
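		/*
		 * If no command on this channel is waiting for an interrupt,
		 * let wdcintr() look at it anyway, clear the IDEDMA status
		 * (writing the read value back presumably acks the interrupt
		 * bit) and don't claim the interrupt for this channel.
		 */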
4200 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4201 (void)wdcintr(wdc_cp);
4202 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4203 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4204 continue;
4205 }
4206 crv = wdcintr(wdc_cp);
4207 if (crv == 0)
4208 printf("%s:%d: bogus intr\n",
4209 sc->sc_wdcdev.sc_dev.dv_xname, i);
4210 else if (crv == 1)
4211 rv = 1;
4212 else if (rv == 0)
4213 rv = crv;
4214 }
4215 return rv;
4216 }
4217