1 /* $NetBSD: pciide.c,v 1.144 2002/01/29 21:13:17 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.144 2002/01/29 21:13:17 bouyer Exp $");
81
82 #ifndef WDCDEBUG
83 #define WDCDEBUG
84 #endif
85
86 #define DEBUG_DMA 0x01
87 #define DEBUG_XFERS 0x02
88 #define DEBUG_FUNCS 0x08
89 #define DEBUG_PROBE 0x10
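/*
 * Debug output is gated by wdcdebug_pciide_mask; setting one or more of the
 * DEBUG_* bits above (for instance from a kernel debugger) enables the
 * corresponding WDCDEBUG_PRINT() messages.
 */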
90 #ifdef WDCDEBUG
91 int wdcdebug_pciide_mask = 0;
92 #define WDCDEBUG_PRINT(args, level) \
93 if (wdcdebug_pciide_mask & (level)) printf args
94 #else
95 #define WDCDEBUG_PRINT(args, level)
96 #endif
97 #include <sys/param.h>
98 #include <sys/systm.h>
99 #include <sys/device.h>
100 #include <sys/malloc.h>
101
102 #include <uvm/uvm_extern.h>
103
104 #include <machine/endian.h>
105
106 #include <dev/pci/pcireg.h>
107 #include <dev/pci/pcivar.h>
108 #include <dev/pci/pcidevs.h>
109 #include <dev/pci/pciidereg.h>
110 #include <dev/pci/pciidevar.h>
111 #include <dev/pci/pciide_piix_reg.h>
112 #include <dev/pci/pciide_amd_reg.h>
113 #include <dev/pci/pciide_apollo_reg.h>
114 #include <dev/pci/pciide_cmd_reg.h>
115 #include <dev/pci/pciide_cy693_reg.h>
116 #include <dev/pci/pciide_sis_reg.h>
117 #include <dev/pci/pciide_acer_reg.h>
118 #include <dev/pci/pciide_pdc202xx_reg.h>
119 #include <dev/pci/pciide_opti_reg.h>
120 #include <dev/pci/pciide_hpt_reg.h>
121 #include <dev/pci/pciide_acard_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
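/* Read the aligned 32-bit config word and extract the requested byte. */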
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
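/* Read-modify-write the 32-bit config word containing the target byte. */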
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 static int sis_hostbr_match __P(( struct pci_attach_args *));
187
188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void acer_setup_channel __P((struct channel_softc*));
190 int acer_pci_intr __P((void *));
191 static int acer_isabr_match __P(( struct pci_attach_args *));
192
193 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void pdc202xx_setup_channel __P((struct channel_softc*));
195 void pdc20268_setup_channel __P((struct channel_softc*));
196 int pdc202xx_pci_intr __P((void *));
197 int pdc20265_pci_intr __P((void *));
198
199 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
200 void opti_setup_channel __P((struct channel_softc*));
201
202 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void hpt_setup_channel __P((struct channel_softc*));
204 int hpt_pci_intr __P((void *));
205
206 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
207 void acard_setup_channel __P((struct channel_softc*));
208 int acard_pci_intr __P((void *));
209
210 #ifdef PCIIDE_WINBOND_ENABLE
211 void winbond_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
212 #endif
213
214 void pciide_channel_dma_setup __P((struct pciide_channel *));
215 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
216 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
217 void pciide_dma_start __P((void*, int, int));
218 int pciide_dma_finish __P((void*, int, int, int));
219 void pciide_irqack __P((struct channel_softc *));
220 void pciide_print_modes __P((struct pciide_channel *));
221
222 struct pciide_product_desc {
223 u_int32_t ide_product;
224 int ide_flags;
225 const char *ide_name;
226 /* map and setup chip, probe drives */
227 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
228 };
229
230 /* Flags for ide_flags */
231 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
232 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
233
234 /* Default product description for devices not known to this driver */
235 const struct pciide_product_desc default_product_desc = {
236 0,
237 0,
238 "Generic PCI IDE controller",
239 default_chip_map,
240 };
241
242 const struct pciide_product_desc pciide_intel_products[] = {
243 { PCI_PRODUCT_INTEL_82092AA,
244 0,
245 "Intel 82092AA IDE controller",
246 default_chip_map,
247 },
248 { PCI_PRODUCT_INTEL_82371FB_IDE,
249 0,
250 "Intel 82371FB IDE controller (PIIX)",
251 piix_chip_map,
252 },
253 { PCI_PRODUCT_INTEL_82371SB_IDE,
254 0,
255 "Intel 82371SB IDE Interface (PIIX3)",
256 piix_chip_map,
257 },
258 { PCI_PRODUCT_INTEL_82371AB_IDE,
259 0,
260 "Intel 82371AB IDE controller (PIIX4)",
261 piix_chip_map,
262 },
263 { PCI_PRODUCT_INTEL_82440MX_IDE,
264 0,
265 "Intel 82440MX IDE controller",
266 piix_chip_map
267 },
268 { PCI_PRODUCT_INTEL_82801AA_IDE,
269 0,
270 "Intel 82801AA IDE Controller (ICH)",
271 piix_chip_map,
272 },
273 { PCI_PRODUCT_INTEL_82801AB_IDE,
274 0,
275 "Intel 82801AB IDE Controller (ICH0)",
276 piix_chip_map,
277 },
278 { PCI_PRODUCT_INTEL_82801BA_IDE,
279 0,
280 "Intel 82801BA IDE Controller (ICH2)",
281 piix_chip_map,
282 },
283 { PCI_PRODUCT_INTEL_82801BAM_IDE,
284 0,
285 "Intel 82801BAM IDE Controller (ICH2)",
286 piix_chip_map,
287 },
288 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
289 0,
290 "Intel 82201CA IDE Controller",
291 piix_chip_map,
292 },
293 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
294 0,
295 "Intel 82201CA IDE Controller",
296 piix_chip_map,
297 },
298 { 0,
299 0,
300 NULL,
301 NULL
302 }
303 };
304
305 const struct pciide_product_desc pciide_amd_products[] = {
306 { PCI_PRODUCT_AMD_PBC756_IDE,
307 0,
308 "Advanced Micro Devices AMD756 IDE Controller",
309 amd7x6_chip_map
310 },
311 { PCI_PRODUCT_AMD_PBC766_IDE,
312 0,
313 "Advanced Micro Devices AMD766 IDE Controller",
314 amd7x6_chip_map
315 },
316 { 0,
317 0,
318 NULL,
319 NULL
320 }
321 };
322
323 const struct pciide_product_desc pciide_cmd_products[] = {
324 { PCI_PRODUCT_CMDTECH_640,
325 0,
326 "CMD Technology PCI0640",
327 cmd_chip_map
328 },
329 { PCI_PRODUCT_CMDTECH_643,
330 0,
331 "CMD Technology PCI0643",
332 cmd0643_9_chip_map,
333 },
334 { PCI_PRODUCT_CMDTECH_646,
335 0,
336 "CMD Technology PCI0646",
337 cmd0643_9_chip_map,
338 },
339 { PCI_PRODUCT_CMDTECH_648,
340 IDE_PCI_CLASS_OVERRIDE,
341 "CMD Technology PCI0648",
342 cmd0643_9_chip_map,
343 },
344 { PCI_PRODUCT_CMDTECH_649,
345 IDE_PCI_CLASS_OVERRIDE,
346 "CMD Technology PCI0649",
347 cmd0643_9_chip_map,
348 },
349 { 0,
350 0,
351 NULL,
352 NULL
353 }
354 };
355
356 const struct pciide_product_desc pciide_via_products[] = {
357 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
358 0,
359 NULL,
360 apollo_chip_map,
361 },
362 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
363 0,
364 NULL,
365 apollo_chip_map,
366 },
367 { 0,
368 0,
369 NULL,
370 NULL
371 }
372 };
373
374 const struct pciide_product_desc pciide_cypress_products[] = {
375 { PCI_PRODUCT_CONTAQ_82C693,
376 IDE_16BIT_IOSPACE,
377 "Cypress 82C693 IDE Controller",
378 cy693_chip_map,
379 },
380 { 0,
381 0,
382 NULL,
383 NULL
384 }
385 };
386
387 const struct pciide_product_desc pciide_sis_products[] = {
388 { PCI_PRODUCT_SIS_5597_IDE,
389 0,
390 "Silicon Integrated System 5597/5598 IDE controller",
391 sis_chip_map,
392 },
393 { 0,
394 0,
395 NULL,
396 NULL
397 }
398 };
399
400 const struct pciide_product_desc pciide_acer_products[] = {
401 { PCI_PRODUCT_ALI_M5229,
402 0,
403 "Acer Labs M5229 UDMA IDE Controller",
404 acer_chip_map,
405 },
406 { 0,
407 0,
408 NULL,
409 NULL
410 }
411 };
412
413 const struct pciide_product_desc pciide_promise_products[] = {
414 { PCI_PRODUCT_PROMISE_ULTRA33,
415 IDE_PCI_CLASS_OVERRIDE,
416 "Promise Ultra33/ATA Bus Master IDE Accelerator",
417 pdc202xx_chip_map,
418 },
419 { PCI_PRODUCT_PROMISE_ULTRA66,
420 IDE_PCI_CLASS_OVERRIDE,
421 "Promise Ultra66/ATA Bus Master IDE Accelerator",
422 pdc202xx_chip_map,
423 },
424 { PCI_PRODUCT_PROMISE_ULTRA100,
425 IDE_PCI_CLASS_OVERRIDE,
426 "Promise Ultra100/ATA Bus Master IDE Accelerator",
427 pdc202xx_chip_map,
428 },
429 { PCI_PRODUCT_PROMISE_ULTRA100X,
430 IDE_PCI_CLASS_OVERRIDE,
431 "Promise Ultra100/ATA Bus Master IDE Accelerator",
432 pdc202xx_chip_map,
433 },
434 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
435 IDE_PCI_CLASS_OVERRIDE,
436 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
437 pdc202xx_chip_map,
438 },
439 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
440 IDE_PCI_CLASS_OVERRIDE,
441 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
442 pdc202xx_chip_map,
443 },
444 { PCI_PRODUCT_PROMISE_ULTRA133,
445 IDE_PCI_CLASS_OVERRIDE,
446 "Promise Ultra133/ATA Bus Master IDE Accelerator",
447 pdc202xx_chip_map,
448 },
449 { 0,
450 0,
451 NULL,
452 NULL
453 }
454 };
455
456 const struct pciide_product_desc pciide_opti_products[] = {
457 { PCI_PRODUCT_OPTI_82C621,
458 0,
459 "OPTi 82c621 PCI IDE controller",
460 opti_chip_map,
461 },
462 { PCI_PRODUCT_OPTI_82C568,
463 0,
464 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
465 opti_chip_map,
466 },
467 { PCI_PRODUCT_OPTI_82D568,
468 0,
469 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
470 opti_chip_map,
471 },
472 { 0,
473 0,
474 NULL,
475 NULL
476 }
477 };
478
479 const struct pciide_product_desc pciide_triones_products[] = {
480 { PCI_PRODUCT_TRIONES_HPT366,
481 IDE_PCI_CLASS_OVERRIDE,
482 NULL,
483 hpt_chip_map,
484 },
485 { 0,
486 0,
487 NULL,
488 NULL
489 }
490 };
491
492 const struct pciide_product_desc pciide_acard_products[] = {
493 { PCI_PRODUCT_ACARD_ATP850U,
494 IDE_PCI_CLASS_OVERRIDE,
495 "Acard ATP850U Ultra33 IDE Controller",
496 acard_chip_map,
497 },
498 { PCI_PRODUCT_ACARD_ATP860,
499 IDE_PCI_CLASS_OVERRIDE,
500 "Acard ATP860 Ultra66 IDE Controller",
501 acard_chip_map,
502 },
503 { PCI_PRODUCT_ACARD_ATP860A,
504 IDE_PCI_CLASS_OVERRIDE,
505 "Acard ATP860-A Ultra66 IDE Controller",
506 acard_chip_map,
507 },
508 { 0,
509 0,
510 NULL,
511 NULL
512 }
513 };
514
515 #ifdef PCIIDE_SERVERWORKS_ENABLE
516 const struct pciide_product_desc pciide_serverworks_products[] = {
517 { PCI_PRODUCT_SERVERWORKS_IDE,
518 0,
519 "ServerWorks ROSB4 IDE Controller",
520 piix_chip_map,
521 },
522 { 0,
523 0,
524 NULL,
525 }
526 };
527 #endif
528
529 #ifdef PCIIDE_WINBOND_ENABLE
530 const struct pciide_product_desc pciide_winbond_products[] = {
531 { PCI_PRODUCT_WINBOND_W83C553F_1,
532 0,
533 "Winbond W83C553F IDE controller",
534 winbond_chip_map,
535 },
536 { 0,
537 0,
538 NULL,
539 }
540 };
541 #endif
542
543 struct pciide_vendor_desc {
544 u_int32_t ide_vendor;
545 const struct pciide_product_desc *ide_products;
546 };
547
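/*
 * Master vendor table: pciide_lookup_product() walks this list and then the
 * matching vendor's product list above; a NULL ide_products/chip_map entry
 * terminates each list.
 */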
548 const struct pciide_vendor_desc pciide_vendors[] = {
549 { PCI_VENDOR_INTEL, pciide_intel_products },
550 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
551 { PCI_VENDOR_VIATECH, pciide_via_products },
552 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
553 { PCI_VENDOR_SIS, pciide_sis_products },
554 { PCI_VENDOR_ALI, pciide_acer_products },
555 { PCI_VENDOR_PROMISE, pciide_promise_products },
556 { PCI_VENDOR_AMD, pciide_amd_products },
557 { PCI_VENDOR_OPTI, pciide_opti_products },
558 { PCI_VENDOR_TRIONES, pciide_triones_products },
559 { PCI_VENDOR_ACARD, pciide_acard_products },
560 #ifdef PCIIDE_SERVERWORKS_ENABLE
561 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
562 #endif
563 #ifdef PCIIDE_WINBOND_ENABLE
564 { PCI_VENDOR_WINBOND, pciide_winbond_products },
565 #endif
566 { 0, NULL }
567 };
568
569 /* options passed via the 'flags' config keyword */
570 #define PCIIDE_OPTIONS_DMA 0x01
571 #define PCIIDE_OPTIONS_NODMA 0x02
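/*
 * These bits are tested against the device's cf_flags (see pciide_mapreg_dma()
 * and default_chip_map()), so they can be set with the "flags" keyword in the
 * kernel config file; e.g. a line along the lines of
 *	pciide* at pci? dev ? function ? flags 0x0002
 * would force DMA off (illustrative syntax; check your port's config files
 * for the exact form).
 */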
572
573 int pciide_match __P((struct device *, struct cfdata *, void *));
574 void pciide_attach __P((struct device *, struct device *, void *));
575
576 struct cfattach pciide_ca = {
577 sizeof(struct pciide_softc), pciide_match, pciide_attach
578 };
579 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
580 int pciide_mapregs_compat __P(( struct pci_attach_args *,
581 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
582 int pciide_mapregs_native __P((struct pci_attach_args *,
583 struct pciide_channel *, bus_size_t *, bus_size_t *,
584 int (*pci_intr) __P((void *))));
585 void pciide_mapreg_dma __P((struct pciide_softc *,
586 struct pci_attach_args *));
587 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
588 void pciide_mapchan __P((struct pci_attach_args *,
589 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
590 int (*pci_intr) __P((void *))));
591 int pciide_chan_candisable __P((struct pciide_channel *));
592 void pciide_map_compat_intr __P(( struct pci_attach_args *,
593 struct pciide_channel *, int, int));
594 int pciide_compat_intr __P((void *));
595 int pciide_pci_intr __P((void *));
596 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
597
598 const struct pciide_product_desc *
599 pciide_lookup_product(id)
600 u_int32_t id;
601 {
602 const struct pciide_product_desc *pp;
603 const struct pciide_vendor_desc *vp;
604
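/*
 * Two-level lookup: find the vendor first, then scan that vendor's
 * product list; both tables are terminated by NULL entries.
 */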
605 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
606 if (PCI_VENDOR(id) == vp->ide_vendor)
607 break;
608
609 if ((pp = vp->ide_products) == NULL)
610 return NULL;
611
612 for (; pp->chip_map != NULL; pp++)
613 if (PCI_PRODUCT(id) == pp->ide_product)
614 break;
615
616 if (pp->chip_map == NULL)
617 return NULL;
618 return pp;
619 }
620
621 int
622 pciide_match(parent, match, aux)
623 struct device *parent;
624 struct cfdata *match;
625 void *aux;
626 {
627 struct pci_attach_args *pa = aux;
628 const struct pciide_product_desc *pp;
629
630 /*
631 * Check the class code to see that it's a PCI IDE controller.
632 * If it is, we assume that we can deal with it; it _should_
633 * work in a standardized way...
634 */
635 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
636 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
637 return (1);
638 }
639
640 /*
641 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
642 * controllers. Let's see if we can deal with them anyway.
643 */
644 pp = pciide_lookup_product(pa->pa_id);
645 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
646 return (1);
647 }
648
649 return (0);
650 }
651
652 void
653 pciide_attach(parent, self, aux)
654 struct device *parent, *self;
655 void *aux;
656 {
657 struct pci_attach_args *pa = aux;
658 pci_chipset_tag_t pc = pa->pa_pc;
659 pcitag_t tag = pa->pa_tag;
660 struct pciide_softc *sc = (struct pciide_softc *)self;
661 pcireg_t csr;
662 char devinfo[256];
663 const char *displaydev;
664
665 sc->sc_pp = pciide_lookup_product(pa->pa_id);
666 if (sc->sc_pp == NULL) {
667 sc->sc_pp = &default_product_desc;
668 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
669 displaydev = devinfo;
670 } else
671 displaydev = sc->sc_pp->ide_name;
672
673 /* if displaydev == NULL, printf is done in chip-specific map */
674 if (displaydev)
675 printf(": %s (rev. 0x%02x)\n", displaydev,
676 PCI_REVISION(pa->pa_class));
677
678 sc->sc_pc = pa->pa_pc;
679 sc->sc_tag = pa->pa_tag;
680 #ifdef WDCDEBUG
681 if (wdcdebug_pciide_mask & DEBUG_PROBE)
682 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
683 #endif
684 sc->sc_pp->chip_map(sc, pa);
685
686 if (sc->sc_dma_ok) {
687 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
688 csr |= PCI_COMMAND_MASTER_ENABLE;
689 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
690 }
691 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
692 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
693 }
694
695 /* tell whether the chip is enabled or not */
696 int
697 pciide_chipen(sc, pa)
698 struct pciide_softc *sc;
699 struct pci_attach_args *pa;
700 {
701 pcireg_t csr;
702 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
703 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
704 PCI_COMMAND_STATUS_REG);
705 printf("%s: device disabled (at %s)\n",
706 sc->sc_wdcdev.sc_dev.dv_xname,
707 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
708 "device" : "bridge");
709 return 0;
710 }
711 return 1;
712 }
713
714 int
715 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
716 struct pci_attach_args *pa;
717 struct pciide_channel *cp;
718 int compatchan;
719 bus_size_t *cmdsizep, *ctlsizep;
720 {
721 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
722 struct channel_softc *wdc_cp = &cp->wdc_channel;
723
724 cp->compat = 1;
725 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
726 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
727
728 wdc_cp->cmd_iot = pa->pa_iot;
729 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
730 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
731 printf("%s: couldn't map %s channel cmd regs\n",
732 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
733 return (0);
734 }
735
736 wdc_cp->ctl_iot = pa->pa_iot;
737 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
738 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
739 printf("%s: couldn't map %s channel ctl regs\n",
740 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
741 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
742 PCIIDE_COMPAT_CMD_SIZE);
743 return (0);
744 }
745
746 return (1);
747 }
748
749 int
750 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
751 struct pci_attach_args * pa;
752 struct pciide_channel *cp;
753 bus_size_t *cmdsizep, *ctlsizep;
754 int (*pci_intr) __P((void *));
755 {
756 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
757 struct channel_softc *wdc_cp = &cp->wdc_channel;
758 const char *intrstr;
759 pci_intr_handle_t intrhandle;
760
761 cp->compat = 0;
762
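/*
 * Both channels of a native-mode controller share one PCI interrupt,
 * so establish the handler only once and reuse sc_pci_ih afterwards.
 */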
763 if (sc->sc_pci_ih == NULL) {
764 if (pci_intr_map(pa, &intrhandle) != 0) {
765 printf("%s: couldn't map native-PCI interrupt\n",
766 sc->sc_wdcdev.sc_dev.dv_xname);
767 return 0;
768 }
769 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
770 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
771 intrhandle, IPL_BIO, pci_intr, sc);
772 if (sc->sc_pci_ih != NULL) {
773 printf("%s: using %s for native-PCI interrupt\n",
774 sc->sc_wdcdev.sc_dev.dv_xname,
775 intrstr ? intrstr : "unknown interrupt");
776 } else {
777 printf("%s: couldn't establish native-PCI interrupt",
778 sc->sc_wdcdev.sc_dev.dv_xname);
779 if (intrstr != NULL)
780 printf(" at %s", intrstr);
781 printf("\n");
782 return 0;
783 }
784 }
785 cp->ih = sc->sc_pci_ih;
786 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
787 PCI_MAPREG_TYPE_IO, 0,
788 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
789 printf("%s: couldn't map %s channel cmd regs\n",
790 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
791 return 0;
792 }
793
794 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
795 PCI_MAPREG_TYPE_IO, 0,
796 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
797 printf("%s: couldn't map %s channel ctl regs\n",
798 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
799 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
800 return 0;
801 }
802 /*
803 * In native mode, 4 bytes of I/O space are mapped for the control
804 * register, the control register is at offset 2. Pass the generic
805 * code a handle for only one byte at the right offset.
806 */
807 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
808 &wdc_cp->ctl_ioh) != 0) {
809 printf("%s: unable to subregion %s channel ctl regs\n",
810 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
811 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
812 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
813 return 0;
814 }
815 return (1);
816 }
817
818 void
819 pciide_mapreg_dma(sc, pa)
820 struct pciide_softc *sc;
821 struct pci_attach_args *pa;
822 {
823 pcireg_t maptype;
824 bus_addr_t addr;
825
826 /*
827 * Map DMA registers
828 *
829 * Note that sc_dma_ok is the right variable to test to see if
830 * DMA can be done. If the interface doesn't support DMA,
831 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
832 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
833 * non-zero if the interface supports DMA and the registers
834 * could be mapped.
835 *
836 * XXX Note that despite the fact that the Bus Master IDE specs
837 * XXX say that "The bus master IDE function uses 16 bytes of IO
838 * XXX space," some controllers (at least the United
839 * XXX Microelectronics UM8886BF) place it in memory space.
840 */
841 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
842 PCIIDE_REG_BUS_MASTER_DMA);
843
844 switch (maptype) {
845 case PCI_MAPREG_TYPE_IO:
846 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
847 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
848 &addr, NULL, NULL) == 0);
849 if (sc->sc_dma_ok == 0) {
850 printf(", but unused (couldn't query registers)");
851 break;
852 }
853 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
854 && addr >= 0x10000) {
855 sc->sc_dma_ok = 0;
856 printf(", but unused (registers at unsafe address "
857 "%#lx)", (unsigned long)addr);
858 break;
859 }
860 /* FALLTHROUGH */
861
862 case PCI_MAPREG_MEM_TYPE_32BIT:
863 sc->sc_dma_ok = (pci_mapreg_map(pa,
864 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
865 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
866 sc->sc_dmat = pa->pa_dmat;
867 if (sc->sc_dma_ok == 0) {
868 printf(", but unused (couldn't map registers)");
869 } else {
870 sc->sc_wdcdev.dma_arg = sc;
871 sc->sc_wdcdev.dma_init = pciide_dma_init;
872 sc->sc_wdcdev.dma_start = pciide_dma_start;
873 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
874 }
875
876 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
877 PCIIDE_OPTIONS_NODMA) {
878 printf(", but unused (forced off by config file)");
879 sc->sc_dma_ok = 0;
880 }
881 break;
882
883 default:
884 sc->sc_dma_ok = 0;
885 printf(", but unsupported register maptype (0x%x)", maptype);
886 }
887 }
888
889 int
890 pciide_compat_intr(arg)
891 void *arg;
892 {
893 struct pciide_channel *cp = arg;
894
895 #ifdef DIAGNOSTIC
896 /* should only be called for a compat channel */
897 if (cp->compat == 0)
898 panic("pciide compat intr called for non-compat chan %p\n", cp);
899 #endif
900 return (wdcintr(&cp->wdc_channel));
901 }
902
903 int
904 pciide_pci_intr(arg)
905 void *arg;
906 {
907 struct pciide_softc *sc = arg;
908 struct pciide_channel *cp;
909 struct channel_softc *wdc_cp;
910 int i, rv, crv;
911
912 rv = 0;
913 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
914 cp = &sc->pciide_channels[i];
915 wdc_cp = &cp->wdc_channel;
916
917 /* If a compat channel, skip. */
918 if (cp->compat)
919 continue;
920 /* if this channel not waiting for intr, skip */
921 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
922 continue;
923
924 crv = wdcintr(wdc_cp);
925 if (crv == 0)
926 ; /* leave rv alone */
927 else if (crv == 1)
928 rv = 1; /* claim the intr */
929 else if (rv == 0) /* crv should be -1 in this case */
930 rv = crv; /* if we've done no better, take it */
931 }
932 return (rv);
933 }
934
935 void
936 pciide_channel_dma_setup(cp)
937 struct pciide_channel *cp;
938 {
939 int drive;
940 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
941 struct ata_drive_datas *drvp;
942
943 for (drive = 0; drive < 2; drive++) {
944 drvp = &cp->wdc_channel.ch_drive[drive];
945 /* If no drive, skip */
946 if ((drvp->drive_flags & DRIVE) == 0)
947 continue;
948 /* setup DMA if needed */
949 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
950 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
951 sc->sc_dma_ok == 0) {
952 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
953 continue;
954 }
955 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
956 != 0) {
957 /* Abort DMA setup */
958 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
959 continue;
960 }
961 }
962 }
963
964 int
965 pciide_dma_table_setup(sc, channel, drive)
966 struct pciide_softc *sc;
967 int channel, drive;
968 {
969 bus_dma_segment_t seg;
970 int error, rseg;
971 const bus_size_t dma_table_size =
972 sizeof(struct idedma_table) * NIDEDMA_TABLES;
973 struct pciide_dma_maps *dma_maps =
974 &sc->pciide_channels[channel].dma_maps[drive];
975
976 /* If table was already allocated, just return */
977 if (dma_maps->dma_table)
978 return 0;
979
980 /* Allocate memory for the DMA tables and map it */
981 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
982 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
983 BUS_DMA_NOWAIT)) != 0) {
984 printf("%s:%d: unable to allocate table DMA for "
985 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
986 channel, drive, error);
987 return error;
988 }
989 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
990 dma_table_size,
991 (caddr_t *)&dma_maps->dma_table,
992 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
993 printf("%s:%d: unable to map table DMA for"
994 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
995 channel, drive, error);
996 return error;
997 }
998 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
999 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1000 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1001
1002 /* Create and load table DMA map for this disk */
1003 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1004 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1005 &dma_maps->dmamap_table)) != 0) {
1006 printf("%s:%d: unable to create table DMA map for "
1007 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1008 channel, drive, error);
1009 return error;
1010 }
1011 if ((error = bus_dmamap_load(sc->sc_dmat,
1012 dma_maps->dmamap_table,
1013 dma_maps->dma_table,
1014 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1015 printf("%s:%d: unable to load table DMA map for "
1016 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1017 channel, drive, error);
1018 return error;
1019 }
1020 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1021 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1022 DEBUG_PROBE);
1023 /* Create a xfer DMA map for this drive */
1024 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1025 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1026 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1027 &dma_maps->dmamap_xfer)) != 0) {
1028 printf("%s:%d: unable to create xfer DMA map for "
1029 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1030 channel, drive, error);
1031 return error;
1032 }
1033 return 0;
1034 }
1035
1036 int
1037 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1038 void *v;
1039 int channel, drive;
1040 void *databuf;
1041 size_t datalen;
1042 int flags;
1043 {
1044 struct pciide_softc *sc = v;
1045 int error, seg;
1046 struct pciide_dma_maps *dma_maps =
1047 &sc->pciide_channels[channel].dma_maps[drive];
1048
1049 error = bus_dmamap_load(sc->sc_dmat,
1050 dma_maps->dmamap_xfer,
1051 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1052 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1053 if (error) {
1054 printf("%s:%d: unable to load xfer DMA map for"
1055 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1056 channel, drive, error);
1057 return error;
1058 }
1059
1060 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1061 dma_maps->dmamap_xfer->dm_mapsize,
1062 (flags & WDC_DMA_READ) ?
1063 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1064
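/* Build one bus-master physical region descriptor per DMA segment. */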
1065 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1066 #ifdef DIAGNOSTIC
1067 /* A segment must not cross a 64k boundary */
1068 {
1069 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1070 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1071 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1072 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1073 printf("pciide_dma: segment %d physical addr 0x%lx"
1074 " len 0x%lx not properly aligned\n",
1075 seg, phys, len);
1076 panic("pciide_dma: buf align");
1077 }
1078 }
1079 #endif
1080 dma_maps->dma_table[seg].base_addr =
1081 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1082 dma_maps->dma_table[seg].byte_count =
1083 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1084 IDEDMA_BYTE_COUNT_MASK);
1085 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1086 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1087 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1088
1089 }
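/* Mark the last physical region descriptor as end-of-table. */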
1090 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1091 htole32(IDEDMA_BYTE_COUNT_EOT);
1092
1093 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1094 dma_maps->dmamap_table->dm_mapsize,
1095 BUS_DMASYNC_PREWRITE);
1096
1097 /* Maps are ready. Start DMA function */
1098 #ifdef DIAGNOSTIC
1099 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1100 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1101 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1102 panic("pciide_dma_init: table align");
1103 }
1104 #endif
1105
1106 /* Clear status bits */
1107 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1108 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1109 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1110 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1111 /* Write table addr */
1112 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1113 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1114 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1115 /* set read/write */
1116 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1117 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1118 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1119 /* remember flags */
1120 dma_maps->dma_flags = flags;
1121 return 0;
1122 }
1123
1124 void
1125 pciide_dma_start(v, channel, drive)
1126 void *v;
1127 int channel, drive;
1128 {
1129 struct pciide_softc *sc = v;
1130
1131 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1132 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1133 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1134 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1135 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1136 }
1137
1138 int
1139 pciide_dma_finish(v, channel, drive, force)
1140 void *v;
1141 int channel, drive;
1142 int force;
1143 {
1144 struct pciide_softc *sc = v;
1145 u_int8_t status;
1146 int error = 0;
1147 struct pciide_dma_maps *dma_maps =
1148 &sc->pciide_channels[channel].dma_maps[drive];
1149
1150 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1151 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1152 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1153 DEBUG_XFERS);
1154
1155 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1156 return WDC_DMAST_NOIRQ;
1157
1158 /* stop DMA channel */
1159 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1160 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1161 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1162 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1163
1164 /* Unload the map of the data buffer */
1165 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1166 dma_maps->dmamap_xfer->dm_mapsize,
1167 (dma_maps->dma_flags & WDC_DMA_READ) ?
1168 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1169 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1170
1171 if ((status & IDEDMA_CTL_ERR) != 0) {
1172 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1173 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1174 error |= WDC_DMAST_ERR;
1175 }
1176
1177 if ((status & IDEDMA_CTL_INTR) == 0) {
1178 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1179 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1180 drive, status);
1181 error |= WDC_DMAST_NOIRQ;
1182 }
1183
1184 if ((status & IDEDMA_CTL_ACT) != 0) {
1185 /* data underrun, may be a valid condition for ATAPI */
1186 error |= WDC_DMAST_UNDER;
1187 }
1188 return error;
1189 }
1190
1191 void
1192 pciide_irqack(chp)
1193 struct channel_softc *chp;
1194 {
1195 struct pciide_channel *cp = (struct pciide_channel*)chp;
1196 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1197
1198 /* clear status bits in IDE DMA registers */
1199 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1200 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1201 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1202 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1203 }
1204
1205 /* some common code used by several chip_map */
1206 int
1207 pciide_chansetup(sc, channel, interface)
1208 struct pciide_softc *sc;
1209 int channel;
1210 pcireg_t interface;
1211 {
1212 struct pciide_channel *cp = &sc->pciide_channels[channel];
1213 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1214 cp->name = PCIIDE_CHANNEL_NAME(channel);
1215 cp->wdc_channel.channel = channel;
1216 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1217 cp->wdc_channel.ch_queue =
1218 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1219 if (cp->wdc_channel.ch_queue == NULL) {
1220 printf("%s %s channel: "
1221 "can't allocate memory for command queue",
1222 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1223 return 0;
1224 }
1225 printf("%s: %s channel %s to %s mode\n",
1226 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1227 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1228 "configured" : "wired",
1229 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1230 "native-PCI" : "compatibility");
1231 return 1;
1232 }
1233
1234 /* some common code used by several chip channel_map */
1235 void
1236 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1237 struct pci_attach_args *pa;
1238 struct pciide_channel *cp;
1239 pcireg_t interface;
1240 bus_size_t *cmdsizep, *ctlsizep;
1241 int (*pci_intr) __P((void *));
1242 {
1243 struct channel_softc *wdc_cp = &cp->wdc_channel;
1244
1245 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1246 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1247 pci_intr);
1248 else
1249 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1250 wdc_cp->channel, cmdsizep, ctlsizep);
1251
1252 if (cp->hw_ok == 0)
1253 return;
1254 wdc_cp->data32iot = wdc_cp->cmd_iot;
1255 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1256 wdcattach(wdc_cp);
1257 }
1258
1259 /*
1260 * Generic code to determine whether a channel can be disabled. Returns 1
1261 * if the channel can be disabled, 0 if not.
1262 */
1263 int
1264 pciide_chan_candisable(cp)
1265 struct pciide_channel *cp;
1266 {
1267 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1268 struct channel_softc *wdc_cp = &cp->wdc_channel;
1269
1270 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1271 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1272 printf("%s: disabling %s channel (no drives)\n",
1273 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1274 cp->hw_ok = 0;
1275 return 1;
1276 }
1277 return 0;
1278 }
1279
1280 /*
1281 * Generic code to map the compat interrupt if hw_ok=1 and this is a compat
1282 * channel. Set hw_ok=0 on failure.
1283 */
1284 void
1285 pciide_map_compat_intr(pa, cp, compatchan, interface)
1286 struct pci_attach_args *pa;
1287 struct pciide_channel *cp;
1288 int compatchan, interface;
1289 {
1290 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1291 struct channel_softc *wdc_cp = &cp->wdc_channel;
1292
1293 if (cp->hw_ok == 0)
1294 return;
1295 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1296 return;
1297
1298 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1299 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1300 pa, compatchan, pciide_compat_intr, cp);
1301 if (cp->ih == NULL) {
1302 #endif
1303 printf("%s: no compatibility interrupt for use by %s "
1304 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1305 cp->hw_ok = 0;
1306 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1307 }
1308 #endif
1309 }
1310
1311 void
1312 pciide_print_modes(cp)
1313 struct pciide_channel *cp;
1314 {
1315 wdc_print_modes(&cp->wdc_channel);
1316 }
1317
1318 void
1319 default_chip_map(sc, pa)
1320 struct pciide_softc *sc;
1321 struct pci_attach_args *pa;
1322 {
1323 struct pciide_channel *cp;
1324 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1325 pcireg_t csr;
1326 int channel, drive;
1327 struct ata_drive_datas *drvp;
1328 u_int8_t idedma_ctl;
1329 bus_size_t cmdsize, ctlsize;
1330 char *failreason;
1331
1332 if (pciide_chipen(sc, pa) == 0)
1333 return;
1334
1335 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1336 printf("%s: bus-master DMA support present",
1337 sc->sc_wdcdev.sc_dev.dv_xname);
1338 if (sc->sc_pp == &default_product_desc &&
1339 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1340 PCIIDE_OPTIONS_DMA) == 0) {
1341 printf(", but unused (no driver support)");
1342 sc->sc_dma_ok = 0;
1343 } else {
1344 pciide_mapreg_dma(sc, pa);
1345 if (sc->sc_dma_ok != 0)
1346 printf(", used without full driver "
1347 "support");
1348 }
1349 } else {
1350 printf("%s: hardware does not support DMA",
1351 sc->sc_wdcdev.sc_dev.dv_xname);
1352 sc->sc_dma_ok = 0;
1353 }
1354 printf("\n");
1355 if (sc->sc_dma_ok) {
1356 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1357 sc->sc_wdcdev.irqack = pciide_irqack;
1358 }
1359 sc->sc_wdcdev.PIO_cap = 0;
1360 sc->sc_wdcdev.DMA_cap = 0;
1361
1362 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1363 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1364 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1365
1366 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1367 cp = &sc->pciide_channels[channel];
1368 if (pciide_chansetup(sc, channel, interface) == 0)
1369 continue;
1370 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1371 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1372 &ctlsize, pciide_pci_intr);
1373 } else {
1374 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1375 channel, &cmdsize, &ctlsize);
1376 }
1377 if (cp->hw_ok == 0)
1378 continue;
1379 /*
1380 * Check to see if something appears to be there.
1381 */
1382 failreason = NULL;
1383 if (!wdcprobe(&cp->wdc_channel)) {
1384 failreason = "not responding; disabled or no drives?";
1385 goto next;
1386 }
1387 /*
1388 * Now, make sure it's actually attributable to this PCI IDE
1389 * channel by trying to access the channel again while the
1390 * PCI IDE controller's I/O space is disabled. (If the
1391 * channel no longer appears to be there, it belongs to
1392 * this controller.) YUCK!
1393 */
1394 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1395 PCI_COMMAND_STATUS_REG);
1396 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1397 csr & ~PCI_COMMAND_IO_ENABLE);
1398 if (wdcprobe(&cp->wdc_channel))
1399 failreason = "other hardware responding at addresses";
1400 pci_conf_write(sc->sc_pc, sc->sc_tag,
1401 PCI_COMMAND_STATUS_REG, csr);
1402 next:
1403 if (failreason) {
1404 printf("%s: %s channel ignored (%s)\n",
1405 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1406 failreason);
1407 cp->hw_ok = 0;
1408 bus_space_unmap(cp->wdc_channel.cmd_iot,
1409 cp->wdc_channel.cmd_ioh, cmdsize);
1410 bus_space_unmap(cp->wdc_channel.ctl_iot,
1411 cp->wdc_channel.ctl_ioh, ctlsize);
1412 } else {
1413 pciide_map_compat_intr(pa, cp, channel, interface);
1414 }
1415 if (cp->hw_ok) {
1416 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1417 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1418 wdcattach(&cp->wdc_channel);
1419 }
1420 }
1421
1422 if (sc->sc_dma_ok == 0)
1423 return;
1424
1425 /* Allocate DMA maps */
1426 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1427 idedma_ctl = 0;
1428 cp = &sc->pciide_channels[channel];
1429 for (drive = 0; drive < 2; drive++) {
1430 drvp = &cp->wdc_channel.ch_drive[drive];
1431 /* If no drive, skip */
1432 if ((drvp->drive_flags & DRIVE) == 0)
1433 continue;
1434 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1435 continue;
1436 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1437 /* Abort DMA setup */
1438 printf("%s:%d:%d: can't allocate DMA maps, "
1439 "using PIO transfers\n",
1440 sc->sc_wdcdev.sc_dev.dv_xname,
1441 channel, drive);
1442 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1443 }
1444 printf("%s:%d:%d: using DMA data transfers\n",
1445 sc->sc_wdcdev.sc_dev.dv_xname,
1446 channel, drive);
1447 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1448 }
1449 if (idedma_ctl != 0) {
1450 /* Add software bits in status register */
1451 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1452 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1453 idedma_ctl);
1454 }
1455 }
1456 }
1457
1458 void
1459 piix_chip_map(sc, pa)
1460 struct pciide_softc *sc;
1461 struct pci_attach_args *pa;
1462 {
1463 struct pciide_channel *cp;
1464 int channel;
1465 u_int32_t idetim;
1466 bus_size_t cmdsize, ctlsize;
1467
1468 if (pciide_chipen(sc, pa) == 0)
1469 return;
1470
1471 printf("%s: bus-master DMA support present",
1472 sc->sc_wdcdev.sc_dev.dv_xname);
1473 pciide_mapreg_dma(sc, pa);
1474 printf("\n");
1475 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1476 WDC_CAPABILITY_MODE;
1477 if (sc->sc_dma_ok) {
1478 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1479 sc->sc_wdcdev.irqack = pciide_irqack;
1480 switch(sc->sc_pp->ide_product) {
1481 case PCI_PRODUCT_INTEL_82371AB_IDE:
1482 case PCI_PRODUCT_INTEL_82440MX_IDE:
1483 case PCI_PRODUCT_INTEL_82801AA_IDE:
1484 case PCI_PRODUCT_INTEL_82801AB_IDE:
1485 case PCI_PRODUCT_INTEL_82801BA_IDE:
1486 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1487 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1488 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1489 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1490 }
1491 }
1492 sc->sc_wdcdev.PIO_cap = 4;
1493 sc->sc_wdcdev.DMA_cap = 2;
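/*
 * UDMA capability depends on the chip: the 82801AA (ICH) does UDMA/66,
 * the ICH2 and ICH3 parts do UDMA/100, and the rest top out at UDMA/33.
 */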
1494 switch(sc->sc_pp->ide_product) {
1495 case PCI_PRODUCT_INTEL_82801AA_IDE:
1496 sc->sc_wdcdev.UDMA_cap = 4;
1497 break;
1498 case PCI_PRODUCT_INTEL_82801BA_IDE:
1499 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1500 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1501 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1502 sc->sc_wdcdev.UDMA_cap = 5;
1503 break;
1504 default:
1505 sc->sc_wdcdev.UDMA_cap = 2;
1506 }
1507 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1508 sc->sc_wdcdev.set_modes = piix_setup_channel;
1509 else
1510 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1511 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1512 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1513
1514 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1515 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1516 DEBUG_PROBE);
1517 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1518 WDCDEBUG_PRINT((", sidetim=0x%x",
1519 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1520 DEBUG_PROBE);
1521 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1522 WDCDEBUG_PRINT((", udamreg 0x%x",
1523 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1524 DEBUG_PROBE);
1525 }
1526 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1527 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1528 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1529 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1530 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1531 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1532 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1533 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1534 DEBUG_PROBE);
1535 }
1536
1537 }
1538 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1539
1540 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1541 cp = &sc->pciide_channels[channel];
1542 /* PIIX is compat-only */
1543 if (pciide_chansetup(sc, channel, 0) == 0)
1544 continue;
1545 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1546 if ((PIIX_IDETIM_READ(idetim, channel) &
1547 PIIX_IDETIM_IDE) == 0) {
1548 printf("%s: %s channel ignored (disabled)\n",
1549 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1550 continue;
1551 }
1552 /* PIIX are compat-only pciide devices */
1553 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1554 if (cp->hw_ok == 0)
1555 continue;
1556 if (pciide_chan_candisable(cp)) {
1557 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1558 channel);
1559 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1560 idetim);
1561 }
1562 pciide_map_compat_intr(pa, cp, channel, 0);
1563 if (cp->hw_ok == 0)
1564 continue;
1565 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1566 }
1567
1568 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1569 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1570 DEBUG_PROBE);
1571 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1572 WDCDEBUG_PRINT((", sidetim=0x%x",
1573 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1574 DEBUG_PROBE);
1575 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1576 WDCDEBUG_PRINT((", udamreg 0x%x",
1577 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1578 DEBUG_PROBE);
1579 }
1580 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1581 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1582 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1583 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1584 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1585 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1586 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1587 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1588 DEBUG_PROBE);
1589 }
1590 }
1591 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1592 }
1593
1594 void
1595 piix_setup_channel(chp)
1596 struct channel_softc *chp;
1597 {
1598 u_int8_t mode[2], drive;
1599 u_int32_t oidetim, idetim, idedma_ctl;
1600 struct pciide_channel *cp = (struct pciide_channel*)chp;
1601 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1602 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1603
1604 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1605 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1606 idedma_ctl = 0;
1607
1608 /* set up new idetim: Enable IDE registers decode */
1609 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1610 chp->channel);
1611
1612 /* setup DMA */
1613 pciide_channel_dma_setup(cp);
1614
1615 /*
1616 * Here we have to mess with the drives' modes: the PIIX can't have
1617 * different timings for master and slave drives.
1618 * We need to find the best combination.
1619 */
1620
1621 /* If both drives support DMA, take the lower mode */
1622 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1623 (drvp[1].drive_flags & DRIVE_DMA)) {
1624 mode[0] = mode[1] =
1625 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1626 drvp[0].DMA_mode = mode[0];
1627 drvp[1].DMA_mode = mode[1];
1628 goto ok;
1629 }
1630 /*
1631 * If only one drive supports DMA, use its mode, and
1632 * put the other one in PIO mode 0 if its mode is not compatible
1633 */
1634 if (drvp[0].drive_flags & DRIVE_DMA) {
1635 mode[0] = drvp[0].DMA_mode;
1636 mode[1] = drvp[1].PIO_mode;
1637 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1638 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1639 mode[1] = drvp[1].PIO_mode = 0;
1640 goto ok;
1641 }
1642 if (drvp[1].drive_flags & DRIVE_DMA) {
1643 mode[1] = drvp[1].DMA_mode;
1644 mode[0] = drvp[0].PIO_mode;
1645 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1646 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1647 mode[0] = drvp[0].PIO_mode = 0;
1648 goto ok;
1649 }
1650 /*
1651 * If neither drive is using DMA, take the lower mode, unless
1652 * one of them is below PIO mode 2
1653 */
1654 if (drvp[0].PIO_mode < 2) {
1655 mode[0] = drvp[0].PIO_mode = 0;
1656 mode[1] = drvp[1].PIO_mode;
1657 } else if (drvp[1].PIO_mode < 2) {
1658 mode[1] = drvp[1].PIO_mode = 0;
1659 mode[0] = drvp[0].PIO_mode;
1660 } else {
1661 mode[0] = mode[1] =
1662 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1663 drvp[0].PIO_mode = mode[0];
1664 drvp[1].PIO_mode = mode[1];
1665 }
1666 ok: /* The modes are setup */
1667 for (drive = 0; drive < 2; drive++) {
1668 if (drvp[drive].drive_flags & DRIVE_DMA) {
1669 idetim |= piix_setup_idetim_timings(
1670 mode[drive], 1, chp->channel);
1671 goto end;
1672 }
1673 }
1674 /* If we get here, neither drive is using DMA */
1675 if (mode[0] >= 2)
1676 idetim |= piix_setup_idetim_timings(
1677 mode[0], 0, chp->channel);
1678 else
1679 idetim |= piix_setup_idetim_timings(
1680 mode[1], 0, chp->channel);
1681 end: /*
1682 * timing mode is now set up in the controller. Enable
1683 * it per-drive
1684 */
1685 for (drive = 0; drive < 2; drive++) {
1686 /* If no drive, skip */
1687 if ((drvp[drive].drive_flags & DRIVE) == 0)
1688 continue;
1689 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1690 if (drvp[drive].drive_flags & DRIVE_DMA)
1691 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1692 }
1693 if (idedma_ctl != 0) {
1694 /* Add software bits in status register */
1695 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1696 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1697 idedma_ctl);
1698 }
1699 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1700 pciide_print_modes(cp);
1701 }
1702
1703 void
1704 piix3_4_setup_channel(chp)
1705 struct channel_softc *chp;
1706 {
1707 struct ata_drive_datas *drvp;
1708 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1709 struct pciide_channel *cp = (struct pciide_channel*)chp;
1710 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1711 int drive;
1712 int channel = chp->channel;
1713
1714 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1715 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1716 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1717 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1718 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1719 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1720 PIIX_SIDETIM_RTC_MASK(channel));
1721
1722 idedma_ctl = 0;
1723 /* If channel disabled, no need to go further */
1724 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1725 return;
1726 /* set up new idetim: Enable IDE registers decode */
1727 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1728
1729 /* setup DMA if needed */
1730 pciide_channel_dma_setup(cp);
1731
1732 for (drive = 0; drive < 2; drive++) {
1733 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1734 PIIX_UDMATIM_SET(0x3, channel, drive));
1735 drvp = &chp->ch_drive[drive];
1736 /* If no drive, skip */
1737 if ((drvp->drive_flags & DRIVE) == 0)
1738 continue;
1739 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1740 (drvp->drive_flags & DRIVE_UDMA) == 0))
1741 goto pio;
1742
1743 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1744 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1745 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1746 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1747 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1748 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1749 ideconf |= PIIX_CONFIG_PINGPONG;
1750 }
1751 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1752 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1753 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1754 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2) {
1755 /* setup Ultra/100 */
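/*
 * PIIX_CONFIG_CR is presumably the cable-detect bit; without an
 * 80-conductor cable reported, cap the drive at UDMA2.
 */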
1756 if (drvp->UDMA_mode > 2 &&
1757 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1758 drvp->UDMA_mode = 2;
1759 if (drvp->UDMA_mode > 4) {
1760 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1761 } else {
1762 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1763 if (drvp->UDMA_mode > 2) {
1764 ideconf |= PIIX_CONFIG_UDMA66(channel,
1765 drive);
1766 } else {
1767 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1768 drive);
1769 }
1770 }
1771 }
1772 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1773 /* setup Ultra/66 */
1774 if (drvp->UDMA_mode > 2 &&
1775 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1776 drvp->UDMA_mode = 2;
1777 if (drvp->UDMA_mode > 2)
1778 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1779 else
1780 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1781 }
1782 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1783 (drvp->drive_flags & DRIVE_UDMA)) {
1784 /* use Ultra/DMA */
1785 drvp->drive_flags &= ~DRIVE_DMA;
1786 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1787 udmareg |= PIIX_UDMATIM_SET(
1788 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1789 } else {
1790 /* use Multiword DMA */
1791 drvp->drive_flags &= ~DRIVE_UDMA;
1792 if (drive == 0) {
1793 idetim |= piix_setup_idetim_timings(
1794 drvp->DMA_mode, 1, channel);
1795 } else {
1796 sidetim |= piix_setup_sidetim_timings(
1797 drvp->DMA_mode, 1, channel);
1798 				idetim = PIIX_IDETIM_SET(idetim,
1799 PIIX_IDETIM_SITRE, channel);
1800 }
1801 }
1802 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1803
1804 pio:		/* setup PIO mode (also reached after DMA/UDMA setup) */
1805 idetim |= piix_setup_idetim_drvs(drvp);
1806 if (drive == 0) {
1807 idetim |= piix_setup_idetim_timings(
1808 drvp->PIO_mode, 0, channel);
1809 } else {
1810 sidetim |= piix_setup_sidetim_timings(
1811 drvp->PIO_mode, 0, channel);
1812 			idetim = PIIX_IDETIM_SET(idetim,
1813 PIIX_IDETIM_SITRE, channel);
1814 }
1815 }
1816 if (idedma_ctl != 0) {
1817 /* Add software bits in status register */
1818 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1819 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1820 idedma_ctl);
1821 }
1822 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1823 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1824 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1825 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1826 pciide_print_modes(cp);
1827 }
1828
1829
1830 /* setup ISP and RTC fields, based on mode */
1831 static u_int32_t
1832 piix_setup_idetim_timings(mode, dma, channel)
1833 u_int8_t mode;
1834 u_int8_t dma;
1835 u_int8_t channel;
1836 {
1837
1838 if (dma)
1839 return PIIX_IDETIM_SET(0,
1840 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1841 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1842 channel);
1843 else
1844 return PIIX_IDETIM_SET(0,
1845 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1846 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1847 channel);
1848 }
1849
1850 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1851 static u_int32_t
1852 piix_setup_idetim_drvs(drvp)
1853 struct ata_drive_datas *drvp;
1854 {
1855 u_int32_t ret = 0;
1856 struct channel_softc *chp = drvp->chnl_softc;
1857 u_int8_t channel = chp->channel;
1858 u_int8_t drive = drvp->drive;
1859
1860 /*
1861 	 * If the drive is using UDMA, the timing setups are independent,
1862 	 * so just check DMA and PIO here.
1863 */
1864 if (drvp->drive_flags & DRIVE_DMA) {
1865 		/* if the drive is in DMA mode 0, use compatible timings */
1866 if ((drvp->drive_flags & DRIVE_DMA) &&
1867 drvp->DMA_mode == 0) {
1868 drvp->PIO_mode = 0;
1869 return ret;
1870 }
1871 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1872 /*
1873 		 * If the PIO and DMA timings are the same, use fast timings
1874 		 * for PIO too; otherwise use compatible timings.
1875 */
1876 if ((piix_isp_pio[drvp->PIO_mode] !=
1877 piix_isp_dma[drvp->DMA_mode]) ||
1878 (piix_rtc_pio[drvp->PIO_mode] !=
1879 piix_rtc_dma[drvp->DMA_mode]))
1880 drvp->PIO_mode = 0;
1881 /* if PIO mode <= 2, use compat timings for PIO */
1882 if (drvp->PIO_mode <= 2) {
1883 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1884 channel);
1885 return ret;
1886 }
1887 }
1888
1889 /*
1890 * Now setup PIO modes. If mode < 2, use compat timings.
1891 * Else enable fast timings. Enable IORDY and prefetch/post
1892 * if PIO mode >= 3.
1893 */
1894
1895 if (drvp->PIO_mode < 2)
1896 return ret;
1897
1898 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1899 if (drvp->PIO_mode >= 3) {
1900 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1901 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1902 }
1903 return ret;
1904 }
1905
1906 /* setup values in SIDETIM registers, based on mode */
1907 static u_int32_t
1908 piix_setup_sidetim_timings(mode, dma, channel)
1909 u_int8_t mode;
1910 u_int8_t dma;
1911 u_int8_t channel;
1912 {
1913 if (dma)
1914 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1915 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1916 else
1917 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1918 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1919 }
1920
1921 void
1922 amd7x6_chip_map(sc, pa)
1923 struct pciide_softc *sc;
1924 struct pci_attach_args *pa;
1925 {
1926 struct pciide_channel *cp;
1927 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1928 int channel;
1929 pcireg_t chanenable;
1930 bus_size_t cmdsize, ctlsize;
1931
1932 if (pciide_chipen(sc, pa) == 0)
1933 return;
1934 printf("%s: bus-master DMA support present",
1935 sc->sc_wdcdev.sc_dev.dv_xname);
1936 pciide_mapreg_dma(sc, pa);
1937 printf("\n");
1938 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1939 WDC_CAPABILITY_MODE;
1940 if (sc->sc_dma_ok) {
1941 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1942 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1943 sc->sc_wdcdev.irqack = pciide_irqack;
1944 }
1945 sc->sc_wdcdev.PIO_cap = 4;
1946 sc->sc_wdcdev.DMA_cap = 2;
1947
1948 if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1949 sc->sc_wdcdev.UDMA_cap = 5;
1950 else
1951 sc->sc_wdcdev.UDMA_cap = 4;
1952 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1953 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1954 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1955 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1956
1957 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1958 DEBUG_PROBE);
1959 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1960 cp = &sc->pciide_channels[channel];
1961 if (pciide_chansetup(sc, channel, interface) == 0)
1962 continue;
1963
1964 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1965 printf("%s: %s channel ignored (disabled)\n",
1966 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1967 continue;
1968 }
1969 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1970 pciide_pci_intr);
1971
1972 if (pciide_chan_candisable(cp))
1973 chanenable &= ~AMD7X6_CHAN_EN(channel);
1974 pciide_map_compat_intr(pa, cp, channel, interface);
1975 if (cp->hw_ok == 0)
1976 continue;
1977
1978 amd7x6_setup_channel(&cp->wdc_channel);
1979 }
1980 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1981 chanenable);
1982 return;
1983 }
1984
1985 void
1986 amd7x6_setup_channel(chp)
1987 struct channel_softc *chp;
1988 {
1989 u_int32_t udmatim_reg, datatim_reg;
1990 u_int8_t idedma_ctl;
1991 int mode, drive;
1992 struct ata_drive_datas *drvp;
1993 struct pciide_channel *cp = (struct pciide_channel*)chp;
1994 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1995 #ifndef PCIIDE_AMD756_ENABLEDMA
1996 int rev = PCI_REVISION(
1997 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1998 #endif
1999
2000 idedma_ctl = 0;
2001 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2002 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2003 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2004 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2005
2006 /* setup DMA if needed */
2007 pciide_channel_dma_setup(cp);
2008
2009 for (drive = 0; drive < 2; drive++) {
2010 drvp = &chp->ch_drive[drive];
2011 /* If no drive, skip */
2012 if ((drvp->drive_flags & DRIVE) == 0)
2013 continue;
2014 /* add timing values, setup DMA if needed */
2015 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2016 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2017 mode = drvp->PIO_mode;
2018 goto pio;
2019 }
2020 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2021 (drvp->drive_flags & DRIVE_UDMA)) {
2022 /* use Ultra/DMA */
2023 drvp->drive_flags &= ~DRIVE_DMA;
2024 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2025 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2026 AMD7X6_UDMA_TIME(chp->channel, drive,
2027 amd7x6_udma_tim[drvp->UDMA_mode]);
2028 /* can use PIO timings, MW DMA unused */
2029 mode = drvp->PIO_mode;
2030 } else {
2031 /* use Multiword DMA, but only if revision is OK */
2032 drvp->drive_flags &= ~DRIVE_UDMA;
2033 #ifndef PCIIDE_AMD756_ENABLEDMA
2034 /*
2035 			 * The DMA bug doesn't seem to be triggered by all
2036 			 * drives, so the workaround can be disabled by
2037 			 * defining PCIIDE_AMD756_ENABLEDMA. The bug causes a
2038 			 * hard hang if triggered.
2039 */
2040 if (sc->sc_pp->ide_product ==
2041 PCI_PRODUCT_AMD_PBC756_IDE &&
2042 AMD756_CHIPREV_DISABLEDMA(rev)) {
2043 printf("%s:%d:%d: multi-word DMA disabled due "
2044 "to chip revision\n",
2045 sc->sc_wdcdev.sc_dev.dv_xname,
2046 chp->channel, drive);
2047 mode = drvp->PIO_mode;
2048 drvp->drive_flags &= ~DRIVE_DMA;
2049 goto pio;
2050 }
2051 #endif
2052 /* mode = min(pio, dma+2) */
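			/*
			 * A single data timing register covers both PIO and
			 * multi-word DMA on this chip (PIO mode N pairs with
			 * DMA mode N-2), so pick the slower of the two.
			 */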
2053 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2054 mode = drvp->PIO_mode;
2055 else
2056 mode = drvp->DMA_mode + 2;
2057 }
2058 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2059
2060 pio: /* setup PIO mode */
2061 if (mode <= 2) {
2062 drvp->DMA_mode = 0;
2063 drvp->PIO_mode = 0;
2064 mode = 0;
2065 } else {
2066 drvp->PIO_mode = mode;
2067 drvp->DMA_mode = mode - 2;
2068 }
2069 datatim_reg |=
2070 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2071 amd7x6_pio_set[mode]) |
2072 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2073 amd7x6_pio_rec[mode]);
2074 }
2075 if (idedma_ctl != 0) {
2076 /* Add software bits in status register */
2077 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2078 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2079 idedma_ctl);
2080 }
2081 pciide_print_modes(cp);
2082 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2083 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2084 }
2085
2086 void
2087 apollo_chip_map(sc, pa)
2088 struct pciide_softc *sc;
2089 struct pci_attach_args *pa;
2090 {
2091 struct pciide_channel *cp;
2092 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2093 int channel;
2094 u_int32_t ideconf;
2095 bus_size_t cmdsize, ctlsize;
2096 pcitag_t pcib_tag;
2097 pcireg_t pcib_id, pcib_class;
2098
2099 if (pciide_chipen(sc, pa) == 0)
2100 return;
2101 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2102 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2103 /* and read ID and rev of the ISA bridge */
2104 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2105 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2106 printf(": VIA Technologies ");
2107 switch (PCI_PRODUCT(pcib_id)) {
2108 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2109 printf("VT82C586 (Apollo VP) ");
2110 		if (PCI_REVISION(pcib_class) >= 0x02) {
2111 printf("ATA33 controller\n");
2112 sc->sc_wdcdev.UDMA_cap = 2;
2113 } else {
2114 printf("controller\n");
2115 sc->sc_wdcdev.UDMA_cap = 0;
2116 }
2117 break;
2118 case PCI_PRODUCT_VIATECH_VT82C596A:
2119 printf("VT82C596A (Apollo Pro) ");
2120 if (PCI_REVISION(pcib_class) >= 0x12) {
2121 printf("ATA66 controller\n");
2122 sc->sc_wdcdev.UDMA_cap = 4;
2123 } else {
2124 printf("ATA33 controller\n");
2125 sc->sc_wdcdev.UDMA_cap = 2;
2126 }
2127 break;
2128 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2129 printf("VT82C686A (Apollo KX133) ");
2130 if (PCI_REVISION(pcib_class) >= 0x40) {
2131 printf("ATA100 controller\n");
2132 sc->sc_wdcdev.UDMA_cap = 5;
2133 } else {
2134 printf("ATA66 controller\n");
2135 sc->sc_wdcdev.UDMA_cap = 4;
2136 }
2137 break;
2138 case PCI_PRODUCT_VIATECH_VT8233:
2139 printf("VT8233 ATA100 controller\n");
2140 sc->sc_wdcdev.UDMA_cap = 5;
2141 break;
2142 default:
2143 printf("unknown ATA controller\n");
2144 sc->sc_wdcdev.UDMA_cap = 0;
2145 }
2146
2147 printf("%s: bus-master DMA support present",
2148 sc->sc_wdcdev.sc_dev.dv_xname);
2149 pciide_mapreg_dma(sc, pa);
2150 printf("\n");
2151 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2152 WDC_CAPABILITY_MODE;
2153 if (sc->sc_dma_ok) {
2154 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2155 sc->sc_wdcdev.irqack = pciide_irqack;
2156 if (sc->sc_wdcdev.UDMA_cap > 0)
2157 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2158 }
2159 sc->sc_wdcdev.PIO_cap = 4;
2160 sc->sc_wdcdev.DMA_cap = 2;
2161 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2162 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2163 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2164
2165 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2166 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2167 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2168 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2169 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2170 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2171 DEBUG_PROBE);
2172
2173 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2174 cp = &sc->pciide_channels[channel];
2175 if (pciide_chansetup(sc, channel, interface) == 0)
2176 continue;
2177
2178 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2179 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2180 printf("%s: %s channel ignored (disabled)\n",
2181 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2182 continue;
2183 }
2184 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2185 pciide_pci_intr);
2186 if (cp->hw_ok == 0)
2187 continue;
2188 if (pciide_chan_candisable(cp)) {
2189 ideconf &= ~APO_IDECONF_EN(channel);
2190 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2191 ideconf);
2192 }
2193 pciide_map_compat_intr(pa, cp, channel, interface);
2194
2195 if (cp->hw_ok == 0)
2196 continue;
2197 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2198 }
2199 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2200 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2201 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2202 }
2203
2204 void
2205 apollo_setup_channel(chp)
2206 struct channel_softc *chp;
2207 {
2208 u_int32_t udmatim_reg, datatim_reg;
2209 u_int8_t idedma_ctl;
2210 int mode, drive;
2211 struct ata_drive_datas *drvp;
2212 struct pciide_channel *cp = (struct pciide_channel*)chp;
2213 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2214
2215 idedma_ctl = 0;
2216 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2217 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2218 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2219 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2220
2221 /* setup DMA if needed */
2222 pciide_channel_dma_setup(cp);
2223
2224 for (drive = 0; drive < 2; drive++) {
2225 drvp = &chp->ch_drive[drive];
2226 /* If no drive, skip */
2227 if ((drvp->drive_flags & DRIVE) == 0)
2228 continue;
2229 /* add timing values, setup DMA if needed */
2230 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2231 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2232 mode = drvp->PIO_mode;
2233 goto pio;
2234 }
2235 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2236 (drvp->drive_flags & DRIVE_UDMA)) {
2237 /* use Ultra/DMA */
2238 drvp->drive_flags &= ~DRIVE_DMA;
2239 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2240 APO_UDMA_EN_MTH(chp->channel, drive);
2241 if (sc->sc_wdcdev.UDMA_cap == 5) {
2242 /* 686b */
2243 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2244 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2245 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2246 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2247 /* 596b or 686a */
2248 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2249 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2250 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2251 } else {
2252 /* 596a or 586b */
2253 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2254 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2255 }
2256 /* can use PIO timings, MW DMA unused */
2257 mode = drvp->PIO_mode;
2258 } else {
2259 /* use Multiword DMA */
2260 drvp->drive_flags &= ~DRIVE_UDMA;
2261 /* mode = min(pio, dma+2) */
2262 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2263 mode = drvp->PIO_mode;
2264 else
2265 mode = drvp->DMA_mode + 2;
2266 }
2267 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2268
2269 pio: /* setup PIO mode */
2270 if (mode <= 2) {
2271 drvp->DMA_mode = 0;
2272 drvp->PIO_mode = 0;
2273 mode = 0;
2274 } else {
2275 drvp->PIO_mode = mode;
2276 drvp->DMA_mode = mode - 2;
2277 }
2278 datatim_reg |=
2279 APO_DATATIM_PULSE(chp->channel, drive,
2280 apollo_pio_set[mode]) |
2281 APO_DATATIM_RECOV(chp->channel, drive,
2282 apollo_pio_rec[mode]);
2283 }
2284 if (idedma_ctl != 0) {
2285 /* Add software bits in status register */
2286 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2287 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2288 idedma_ctl);
2289 }
2290 pciide_print_modes(cp);
2291 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2292 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2293 }
2294
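/*
 * Map and attach one channel of a CMD064x-family controller; shared
 * by cmd_chip_map() and cmd0643_9_chip_map().
 */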
2295 void
2296 cmd_channel_map(pa, sc, channel)
2297 struct pci_attach_args *pa;
2298 struct pciide_softc *sc;
2299 int channel;
2300 {
2301 struct pciide_channel *cp = &sc->pciide_channels[channel];
2302 bus_size_t cmdsize, ctlsize;
2303 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2304 int interface, one_channel;
2305
2306 /*
2307 * The 0648/0649 can be told to identify as a RAID controller.
2308 	 * In this case, we have to fake the interface.
2309 */
2310 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2311 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2312 PCIIDE_INTERFACE_SETTABLE(1);
2313 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2314 CMD_CONF_DSA1)
2315 interface |= PCIIDE_INTERFACE_PCI(0) |
2316 PCIIDE_INTERFACE_PCI(1);
2317 } else {
2318 interface = PCI_INTERFACE(pa->pa_class);
2319 }
2320
2321 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2322 cp->name = PCIIDE_CHANNEL_NAME(channel);
2323 cp->wdc_channel.channel = channel;
2324 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2325
2326 /*
2327 	 * Older CMD64X controllers don't have independent channels
2328 */
2329 switch (sc->sc_pp->ide_product) {
2330 case PCI_PRODUCT_CMDTECH_649:
2331 one_channel = 0;
2332 break;
2333 default:
2334 one_channel = 1;
2335 break;
2336 }
2337
2338 if (channel > 0 && one_channel) {
2339 cp->wdc_channel.ch_queue =
2340 sc->pciide_channels[0].wdc_channel.ch_queue;
2341 } else {
2342 cp->wdc_channel.ch_queue =
2343 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2344 }
2345 if (cp->wdc_channel.ch_queue == NULL) {
2346 printf("%s %s channel: "
2347 		    "can't allocate memory for command queue\n",
2348 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2349 return;
2350 }
2351
2352 printf("%s: %s channel %s to %s mode\n",
2353 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2354 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2355 "configured" : "wired",
2356 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2357 "native-PCI" : "compatibility");
2358
2359 /*
2360 * with a CMD PCI64x, if we get here, the first channel is enabled:
2361 * there's no way to disable the first channel without disabling
2362 * the whole device
2363 */
2364 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2365 printf("%s: %s channel ignored (disabled)\n",
2366 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2367 return;
2368 }
2369
2370 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2371 if (cp->hw_ok == 0)
2372 return;
2373 if (channel == 1) {
2374 if (pciide_chan_candisable(cp)) {
2375 ctrl &= ~CMD_CTRL_2PORT;
2376 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2377 CMD_CTRL, ctrl);
2378 }
2379 }
2380 pciide_map_compat_intr(pa, cp, channel, interface);
2381 }
2382
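/*
 * Interrupt handler for CMD064x controllers in native-PCI mode: the chip
 * latches per-channel interrupt status in CMD_CONF (primary) and
 * CMD_ARTTIM23 (secondary), so only call wdcintr() for channels that
 * actually flagged an interrupt.
 */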
2383 int
2384 cmd_pci_intr(arg)
2385 void *arg;
2386 {
2387 struct pciide_softc *sc = arg;
2388 struct pciide_channel *cp;
2389 struct channel_softc *wdc_cp;
2390 int i, rv, crv;
2391 u_int32_t priirq, secirq;
2392
2393 rv = 0;
2394 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2395 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2396 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2397 cp = &sc->pciide_channels[i];
2398 wdc_cp = &cp->wdc_channel;
2399 		/* If a compat channel, skip. */
2400 if (cp->compat)
2401 continue;
2402 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2403 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2404 crv = wdcintr(wdc_cp);
2405 if (crv == 0)
2406 printf("%s:%d: bogus intr\n",
2407 sc->sc_wdcdev.sc_dev.dv_xname, i);
2408 else
2409 rv = 1;
2410 }
2411 }
2412 return rv;
2413 }
2414
2415 void
2416 cmd_chip_map(sc, pa)
2417 struct pciide_softc *sc;
2418 struct pci_attach_args *pa;
2419 {
2420 int channel;
2421
2422 /*
2423 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2424 	 * and of the base address registers can be disabled at
2425 	 * the hardware level. In this case, the device is wired
2426 * in compat mode and its first channel is always enabled,
2427 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2428 * In fact, it seems that the first channel of the CMD PCI0640
2429 * can't be disabled.
2430 */
2431
2432 #ifdef PCIIDE_CMD064x_DISABLE
2433 if (pciide_chipen(sc, pa) == 0)
2434 return;
2435 #endif
2436
2437 printf("%s: hardware does not support DMA\n",
2438 sc->sc_wdcdev.sc_dev.dv_xname);
2439 sc->sc_dma_ok = 0;
2440
2441 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2442 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2443 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2444
2445 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2446 cmd_channel_map(pa, sc, channel);
2447 }
2448 }
2449
2450 void
2451 cmd0643_9_chip_map(sc, pa)
2452 struct pciide_softc *sc;
2453 struct pci_attach_args *pa;
2454 {
2455 struct pciide_channel *cp;
2456 int channel;
2457 int rev = PCI_REVISION(
2458 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2459
2460 /*
2461 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2462 	 * and of the base address registers can be disabled at
2463 	 * the hardware level. In this case, the device is wired
2464 * in compat mode and its first channel is always enabled,
2465 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2466 * In fact, it seems that the first channel of the CMD PCI0640
2467 * can't be disabled.
2468 */
2469
2470 #ifdef PCIIDE_CMD064x_DISABLE
2471 if (pciide_chipen(sc, pa) == 0)
2472 return;
2473 #endif
2474 printf("%s: bus-master DMA support present",
2475 sc->sc_wdcdev.sc_dev.dv_xname);
2476 pciide_mapreg_dma(sc, pa);
2477 printf("\n");
2478 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2479 WDC_CAPABILITY_MODE;
2480 if (sc->sc_dma_ok) {
2481 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2482 switch (sc->sc_pp->ide_product) {
2483 case PCI_PRODUCT_CMDTECH_649:
2484 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2485 sc->sc_wdcdev.UDMA_cap = 5;
2486 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2487 break;
2488 case PCI_PRODUCT_CMDTECH_648:
2489 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2490 sc->sc_wdcdev.UDMA_cap = 4;
2491 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2492 break;
2493 case PCI_PRODUCT_CMDTECH_646:
2494 if (rev >= CMD0646U2_REV) {
2495 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2496 sc->sc_wdcdev.UDMA_cap = 2;
2497 } else if (rev >= CMD0646U_REV) {
2498 /*
2499 * Linux's driver claims that the 646U is broken
2500 * with UDMA. Only enable it if we know what we're
2501 * doing
2502 */
2503 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2504 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2505 sc->sc_wdcdev.UDMA_cap = 2;
2506 #endif
2507 /* explicitly disable UDMA */
2508 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2509 CMD_UDMATIM(0), 0);
2510 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2511 CMD_UDMATIM(1), 0);
2512 }
2513 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2514 break;
2515 default:
2516 sc->sc_wdcdev.irqack = pciide_irqack;
2517 }
2518 }
2519
2520 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2521 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2522 sc->sc_wdcdev.PIO_cap = 4;
2523 sc->sc_wdcdev.DMA_cap = 2;
2524 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2525
2526 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2527 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2528 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2529 DEBUG_PROBE);
2530
2531 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2532 cp = &sc->pciide_channels[channel];
2533 cmd_channel_map(pa, sc, channel);
2534 if (cp->hw_ok == 0)
2535 continue;
2536 cmd0643_9_setup_channel(&cp->wdc_channel);
2537 }
2538 /*
2539 * note - this also makes sure we clear the irq disable and reset
2540 * bits
2541 */
2542 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2543 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2544 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2545 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2546 DEBUG_PROBE);
2547 }
2548
2549 void
2550 cmd0643_9_setup_channel(chp)
2551 struct channel_softc *chp;
2552 {
2553 struct ata_drive_datas *drvp;
2554 u_int8_t tim;
2555 u_int32_t idedma_ctl, udma_reg;
2556 int drive;
2557 struct pciide_channel *cp = (struct pciide_channel*)chp;
2558 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2559
2560 idedma_ctl = 0;
2561 /* setup DMA if needed */
2562 pciide_channel_dma_setup(cp);
2563
2564 for (drive = 0; drive < 2; drive++) {
2565 drvp = &chp->ch_drive[drive];
2566 /* If no drive, skip */
2567 if ((drvp->drive_flags & DRIVE) == 0)
2568 continue;
2569 /* add timing values, setup DMA if needed */
2570 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2571 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2572 if (drvp->drive_flags & DRIVE_UDMA) {
2573 /* UltraDMA on a 646U2, 0648 or 0649 */
2574 drvp->drive_flags &= ~DRIVE_DMA;
2575 udma_reg = pciide_pci_read(sc->sc_pc,
2576 sc->sc_tag, CMD_UDMATIM(chp->channel));
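				/*
				 * Cap at UDMA2 if BICSR doesn't report an
				 * 80-wire cable on this channel.
				 */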
2577 if (drvp->UDMA_mode > 2 &&
2578 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2579 CMD_BICSR) &
2580 CMD_BICSR_80(chp->channel)) == 0)
2581 drvp->UDMA_mode = 2;
2582 if (drvp->UDMA_mode > 2)
2583 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2584 else if (sc->sc_wdcdev.UDMA_cap > 2)
2585 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2586 udma_reg |= CMD_UDMATIM_UDMA(drive);
2587 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2588 CMD_UDMATIM_TIM_OFF(drive));
2589 udma_reg |=
2590 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2591 CMD_UDMATIM_TIM_OFF(drive));
2592 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2593 CMD_UDMATIM(chp->channel), udma_reg);
2594 } else {
2595 /*
2596 * use Multiword DMA.
2597 * Timings will be used for both PIO and DMA,
2598 * so adjust DMA mode if needed
2599 * if we have a 0646U2/8/9, turn off UDMA
2600 */
2601 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2602 udma_reg = pciide_pci_read(sc->sc_pc,
2603 sc->sc_tag,
2604 CMD_UDMATIM(chp->channel));
2605 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2606 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2607 CMD_UDMATIM(chp->channel),
2608 udma_reg);
2609 }
2610 if (drvp->PIO_mode >= 3 &&
2611 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2612 drvp->DMA_mode = drvp->PIO_mode - 2;
2613 }
2614 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2615 }
2616 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2617 }
2618 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2619 CMD_DATA_TIM(chp->channel, drive), tim);
2620 }
2621 if (idedma_ctl != 0) {
2622 /* Add software bits in status register */
2623 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2624 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2625 idedma_ctl);
2626 }
2627 pciide_print_modes(cp);
2628 }
2629
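/*
 * Interrupt acknowledge for the 0646/0648/0649: re-write the register
 * holding the channel's interrupt bit (writing the value just read back
 * appears to clear the pending bit), then do the generic bus-master ack.
 */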
2630 void
2631 cmd646_9_irqack(chp)
2632 struct channel_softc *chp;
2633 {
2634 u_int32_t priirq, secirq;
2635 struct pciide_channel *cp = (struct pciide_channel*)chp;
2636 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2637
2638 if (chp->channel == 0) {
2639 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2640 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2641 } else {
2642 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2643 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2644 }
2645 pciide_irqack(chp);
2646 }
2647
2648 void
2649 cy693_chip_map(sc, pa)
2650 struct pciide_softc *sc;
2651 struct pci_attach_args *pa;
2652 {
2653 struct pciide_channel *cp;
2654 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2655 bus_size_t cmdsize, ctlsize;
2656
2657 if (pciide_chipen(sc, pa) == 0)
2658 return;
2659 /*
2660 * this chip has 2 PCI IDE functions, one for primary and one for
2661 * secondary. So we need to call pciide_mapregs_compat() with
2662 * the real channel
2663 */
2664 if (pa->pa_function == 1) {
2665 sc->sc_cy_compatchan = 0;
2666 } else if (pa->pa_function == 2) {
2667 sc->sc_cy_compatchan = 1;
2668 } else {
2669 printf("%s: unexpected PCI function %d\n",
2670 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2671 return;
2672 }
2673 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2674 printf("%s: bus-master DMA support present",
2675 sc->sc_wdcdev.sc_dev.dv_xname);
2676 pciide_mapreg_dma(sc, pa);
2677 } else {
2678 printf("%s: hardware does not support DMA",
2679 sc->sc_wdcdev.sc_dev.dv_xname);
2680 sc->sc_dma_ok = 0;
2681 }
2682 printf("\n");
2683
2684 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2685 if (sc->sc_cy_handle == NULL) {
2686 printf("%s: unable to map hyperCache control registers\n",
2687 sc->sc_wdcdev.sc_dev.dv_xname);
2688 sc->sc_dma_ok = 0;
2689 }
2690
2691 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2692 WDC_CAPABILITY_MODE;
2693 if (sc->sc_dma_ok) {
2694 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2695 sc->sc_wdcdev.irqack = pciide_irqack;
2696 }
2697 sc->sc_wdcdev.PIO_cap = 4;
2698 sc->sc_wdcdev.DMA_cap = 2;
2699 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2700
2701 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2702 sc->sc_wdcdev.nchannels = 1;
2703
2704 /* Only one channel for this chip; if we are here it's enabled */
2705 cp = &sc->pciide_channels[0];
2706 sc->wdc_chanarray[0] = &cp->wdc_channel;
2707 cp->name = PCIIDE_CHANNEL_NAME(0);
2708 cp->wdc_channel.channel = 0;
2709 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2710 cp->wdc_channel.ch_queue =
2711 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2712 if (cp->wdc_channel.ch_queue == NULL) {
2713 printf("%s primary channel: "
2714 		    "can't allocate memory for command queue\n",
2715 		    sc->sc_wdcdev.sc_dev.dv_xname);
2716 return;
2717 }
2718 printf("%s: primary channel %s to ",
2719 sc->sc_wdcdev.sc_dev.dv_xname,
2720 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2721 "configured" : "wired");
2722 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2723 printf("native-PCI");
2724 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2725 pciide_pci_intr);
2726 } else {
2727 printf("compatibility");
2728 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2729 &cmdsize, &ctlsize);
2730 }
2731 printf(" mode\n");
2732 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2733 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2734 wdcattach(&cp->wdc_channel);
2735 if (pciide_chan_candisable(cp)) {
2736 pci_conf_write(sc->sc_pc, sc->sc_tag,
2737 PCI_COMMAND_STATUS_REG, 0);
2738 }
2739 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2740 if (cp->hw_ok == 0)
2741 return;
2742 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2743 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2744 cy693_setup_channel(&cp->wdc_channel);
2745 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2746 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2747 }
2748
2749 void
2750 cy693_setup_channel(chp)
2751 struct channel_softc *chp;
2752 {
2753 struct ata_drive_datas *drvp;
2754 int drive;
2755 u_int32_t cy_cmd_ctrl;
2756 u_int32_t idedma_ctl;
2757 struct pciide_channel *cp = (struct pciide_channel*)chp;
2758 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2759 int dma_mode = -1;
2760
2761 cy_cmd_ctrl = idedma_ctl = 0;
2762
2763 /* setup DMA if needed */
2764 pciide_channel_dma_setup(cp);
2765
2766 for (drive = 0; drive < 2; drive++) {
2767 drvp = &chp->ch_drive[drive];
2768 /* If no drive, skip */
2769 if ((drvp->drive_flags & DRIVE) == 0)
2770 continue;
2771 /* add timing values, setup DMA if needed */
2772 if (drvp->drive_flags & DRIVE_DMA) {
2773 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2774 /* use Multiword DMA */
2775 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2776 dma_mode = drvp->DMA_mode;
2777 }
2778 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2779 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2780 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2781 CY_CMD_CTRL_IOW_REC_OFF(drive));
2782 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2783 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2784 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2785 CY_CMD_CTRL_IOR_REC_OFF(drive));
2786 }
2787 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
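	/*
	 * The chip has a single DMA timing setting per channel, so both
	 * drives get the slowest multi-word DMA mode selected above.
	 */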
2788 chp->ch_drive[0].DMA_mode = dma_mode;
2789 chp->ch_drive[1].DMA_mode = dma_mode;
2790
2791 if (dma_mode == -1)
2792 dma_mode = 0;
2793
2794 if (sc->sc_cy_handle != NULL) {
2795 /* Note: `multiple' is implied. */
2796 cy82c693_write(sc->sc_cy_handle,
2797 (sc->sc_cy_compatchan == 0) ?
2798 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2799 }
2800
2801 pciide_print_modes(cp);
2802
2803 if (idedma_ctl != 0) {
2804 /* Add software bits in status register */
2805 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2806 IDEDMA_CTL, idedma_ctl);
2807 }
2808 }
2809
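/*
 * Match the SiS host bridges whose companion IDE controller is
 * Ultra/100-capable (see the UDMA_cap selection in sis_chip_map()).
 */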
2810 static int
2811 sis_hostbr_match(pa)
2812 struct pci_attach_args *pa;
2813 {
2814 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2815 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2816 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2817 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2818 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2819 }
2820
2821 void
2822 sis_chip_map(sc, pa)
2823 struct pciide_softc *sc;
2824 struct pci_attach_args *pa;
2825 {
2826 struct pciide_channel *cp;
2827 int channel;
2828 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2829 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2830 pcireg_t rev = PCI_REVISION(pa->pa_class);
2831 bus_size_t cmdsize, ctlsize;
2832 pcitag_t pchb_tag;
2833 pcireg_t pchb_id, pchb_class;
2834
2835 if (pciide_chipen(sc, pa) == 0)
2836 return;
2837 printf("%s: bus-master DMA support present",
2838 sc->sc_wdcdev.sc_dev.dv_xname);
2839 pciide_mapreg_dma(sc, pa);
2840 printf("\n");
2841
2842 /* get a PCI tag for the host bridge (function 0 of the same device) */
2843 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2844 	/* and read ID and rev of the host bridge */
2845 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2846 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2847
2848 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2849 WDC_CAPABILITY_MODE;
2850 if (sc->sc_dma_ok) {
2851 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2852 sc->sc_wdcdev.irqack = pciide_irqack;
2853 /*
2854 		 * Controllers associated with a rev 0x2 530 Host to PCI Bridge
2855 * have problems with UDMA (info provided by Christos)
2856 */
2857 if (rev >= 0xd0 &&
2858 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2859 PCI_REVISION(pchb_class) >= 0x03))
2860 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2861 }
2862
2863 sc->sc_wdcdev.PIO_cap = 4;
2864 sc->sc_wdcdev.DMA_cap = 2;
2865 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2866 /*
2867 		 * Use UDMA/100 on chipsets with a SiS 645/650/730/735 host
2868 		 * bridge (see sis_hostbr_match()) and UDMA/33 on others.
2869 */
2870 sc->sc_wdcdev.UDMA_cap =
2871 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2872 sc->sc_wdcdev.set_modes = sis_setup_channel;
2873
2874 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2875 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2876
2877 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2878 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2879 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2880
2881 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2882 cp = &sc->pciide_channels[channel];
2883 if (pciide_chansetup(sc, channel, interface) == 0)
2884 continue;
2885 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2886 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2887 printf("%s: %s channel ignored (disabled)\n",
2888 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2889 continue;
2890 }
2891 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2892 pciide_pci_intr);
2893 if (cp->hw_ok == 0)
2894 continue;
2895 if (pciide_chan_candisable(cp)) {
2896 if (channel == 0)
2897 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2898 else
2899 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2900 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2901 sis_ctr0);
2902 }
2903 pciide_map_compat_intr(pa, cp, channel, interface);
2904 if (cp->hw_ok == 0)
2905 continue;
2906 sis_setup_channel(&cp->wdc_channel);
2907 }
2908 }
2909
2910 void
2911 sis_setup_channel(chp)
2912 struct channel_softc *chp;
2913 {
2914 struct ata_drive_datas *drvp;
2915 int drive;
2916 u_int32_t sis_tim;
2917 u_int32_t idedma_ctl;
2918 struct pciide_channel *cp = (struct pciide_channel*)chp;
2919 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2920
2921 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2922 "channel %d 0x%x\n", chp->channel,
2923 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2924 DEBUG_PROBE);
2925 sis_tim = 0;
2926 idedma_ctl = 0;
2927 /* setup DMA if needed */
2928 pciide_channel_dma_setup(cp);
2929
2930 for (drive = 0; drive < 2; drive++) {
2931 drvp = &chp->ch_drive[drive];
2932 /* If no drive, skip */
2933 if ((drvp->drive_flags & DRIVE) == 0)
2934 continue;
2935 /* add timing values, setup DMA if needed */
2936 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2937 (drvp->drive_flags & DRIVE_UDMA) == 0)
2938 goto pio;
2939
2940 if (drvp->drive_flags & DRIVE_UDMA) {
2941 /* use Ultra/DMA */
2942 drvp->drive_flags &= ~DRIVE_DMA;
2943 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2944 SIS_TIM_UDMA_TIME_OFF(drive);
2945 sis_tim |= SIS_TIM_UDMA_EN(drive);
2946 } else {
2947 /*
2948 * use Multiword DMA
2949 * Timings will be used for both PIO and DMA,
2950 * so adjust DMA mode if needed
2951 */
2952 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2953 drvp->PIO_mode = drvp->DMA_mode + 2;
2954 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2955 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2956 drvp->PIO_mode - 2 : 0;
2957 if (drvp->DMA_mode == 0)
2958 drvp->PIO_mode = 0;
2959 }
2960 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2961 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2962 SIS_TIM_ACT_OFF(drive);
2963 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2964 SIS_TIM_REC_OFF(drive);
2965 }
2966 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2967 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2968 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2969 if (idedma_ctl != 0) {
2970 /* Add software bits in status register */
2971 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2972 IDEDMA_CTL, idedma_ctl);
2973 }
2974 pciide_print_modes(cp);
2975 }
2976
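/*
 * Match the ALi M1543 PCI/ISA bridge; acer_chip_map() uses it to set
 * the south bridge's enable bit (register 0x79) for cable detection.
 */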
2977 static int
2978 acer_isabr_match(pa)
2979 struct pci_attach_args *pa;
2980 {
2981 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI) &&
2982 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALI_M1543));
2983 }
2984
2985 void
2986 acer_chip_map(sc, pa)
2987 struct pciide_softc *sc;
2988 struct pci_attach_args *pa;
2989 {
2990 struct pci_attach_args isa_pa;
2991 struct pciide_channel *cp;
2992 int channel;
2993 pcireg_t cr, interface;
2994 bus_size_t cmdsize, ctlsize;
2995 pcireg_t rev = PCI_REVISION(pa->pa_class);
2996
2997 if (pciide_chipen(sc, pa) == 0)
2998 return;
2999 printf("%s: bus-master DMA support present",
3000 sc->sc_wdcdev.sc_dev.dv_xname);
3001 pciide_mapreg_dma(sc, pa);
3002 printf("\n");
3003 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3004 WDC_CAPABILITY_MODE;
3005 if (sc->sc_dma_ok) {
3006 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3007 if (rev >= 0x20) {
3008 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3009 if (rev >= 0xC4)
3010 sc->sc_wdcdev.UDMA_cap = 5;
3011 else if (rev >= 0xC2)
3012 sc->sc_wdcdev.UDMA_cap = 4;
3013 else
3014 sc->sc_wdcdev.UDMA_cap = 2;
3015 }
3016 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3017 sc->sc_wdcdev.irqack = pciide_irqack;
3018 }
3019
3020 sc->sc_wdcdev.PIO_cap = 4;
3021 sc->sc_wdcdev.DMA_cap = 2;
3022 sc->sc_wdcdev.set_modes = acer_setup_channel;
3023 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3024 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3025
3026 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3027 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3028 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3029
3030 /* Enable "microsoft register bits" R/W. */
3031 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3032 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3033 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3034 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3035 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3036 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3037 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3038 ~ACER_CHANSTATUSREGS_RO);
3039 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3040 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3041 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3042 /* Don't use cr, re-read the real register content instead */
3043 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3044 PCI_CLASS_REG));
3045
3046 /* From linux: enable "Cable Detection" */
3047 if (rev >= 0xC2) {
3048 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3049 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3050 | ACER_0x4B_CDETECT);
3051 /* set south-bridge's enable bit, m1533, 0x79 */
3052 if (pci_find_device(&isa_pa, acer_isabr_match) == 0) {
3053 printf("%s: can't find PCI/ISA bridge, downgrading "
3054 "to Ultra/33\n", sc->sc_wdcdev.sc_dev.dv_xname);
3055 sc->sc_wdcdev.UDMA_cap = 2;
3056 } else {
3057 if (rev == 0xC2)
3058 /* 1543C-B0 (m1533, 0x79, bit 2) */
3059 pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
3060 ACER_0x79,
3061 pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3062 ACER_0x79)
3063 | ACER_0x79_REVC2_EN);
3064 else
3065 /* 1553/1535 (m1533, 0x79, bit 1) */
3066 pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
3067 ACER_0x79,
3068 pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3069 ACER_0x79)
3070 | ACER_0x79_EN);
3071 }
3072 }
3073
3074 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3075 cp = &sc->pciide_channels[channel];
3076 if (pciide_chansetup(sc, channel, interface) == 0)
3077 continue;
3078 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3079 printf("%s: %s channel ignored (disabled)\n",
3080 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3081 continue;
3082 }
3083 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3084 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3085 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3086 if (cp->hw_ok == 0)
3087 continue;
3088 if (pciide_chan_candisable(cp)) {
3089 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3090 pci_conf_write(sc->sc_pc, sc->sc_tag,
3091 PCI_CLASS_REG, cr);
3092 }
3093 pciide_map_compat_intr(pa, cp, channel, interface);
3094 acer_setup_channel(&cp->wdc_channel);
3095 }
3096 }
3097
3098 void
3099 acer_setup_channel(chp)
3100 struct channel_softc *chp;
3101 {
3102 struct ata_drive_datas *drvp;
3103 int drive;
3104 u_int32_t acer_fifo_udma;
3105 u_int32_t idedma_ctl;
3106 struct pciide_channel *cp = (struct pciide_channel*)chp;
3107 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3108
3109 idedma_ctl = 0;
3110 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3111 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3112 acer_fifo_udma), DEBUG_PROBE);
3113 /* setup DMA if needed */
3114 pciide_channel_dma_setup(cp);
3115
3116 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3117 	    DRIVE_UDMA) { /* check for 80-pin cable */
3118 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3119 ACER_0x4A_80PIN(chp->channel)) {
3120 if (chp->ch_drive[0].UDMA_mode > 2)
3121 chp->ch_drive[0].UDMA_mode = 2;
3122 if (chp->ch_drive[1].UDMA_mode > 2)
3123 chp->ch_drive[1].UDMA_mode = 2;
3124 }
3125 }
3126
3127 for (drive = 0; drive < 2; drive++) {
3128 drvp = &chp->ch_drive[drive];
3129 /* If no drive, skip */
3130 if ((drvp->drive_flags & DRIVE) == 0)
3131 continue;
3132 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3133 "channel %d drive %d 0x%x\n", chp->channel, drive,
3134 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3135 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3136 /* clear FIFO/DMA mode */
3137 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3138 ACER_UDMA_EN(chp->channel, drive) |
3139 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3140
3141 /* add timing values, setup DMA if needed */
3142 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3143 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3144 acer_fifo_udma |=
3145 ACER_FTH_OPL(chp->channel, drive, 0x1);
3146 goto pio;
3147 }
3148
3149 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3150 if (drvp->drive_flags & DRIVE_UDMA) {
3151 /* use Ultra/DMA */
3152 drvp->drive_flags &= ~DRIVE_DMA;
3153 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3154 acer_fifo_udma |=
3155 ACER_UDMA_TIM(chp->channel, drive,
3156 acer_udma[drvp->UDMA_mode]);
3157 /* XXX disable if one drive < UDMA3 ? */
3158 if (drvp->UDMA_mode >= 3) {
3159 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3160 ACER_0x4B,
3161 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3162 ACER_0x4B) | ACER_0x4B_UDMA66);
3163 }
3164 } else {
3165 /*
3166 * use Multiword DMA
3167 * Timings will be used for both PIO and DMA,
3168 * so adjust DMA mode if needed
3169 */
3170 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3171 drvp->PIO_mode = drvp->DMA_mode + 2;
3172 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3173 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3174 drvp->PIO_mode - 2 : 0;
3175 if (drvp->DMA_mode == 0)
3176 drvp->PIO_mode = 0;
3177 }
3178 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3179 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3180 ACER_IDETIM(chp->channel, drive),
3181 acer_pio[drvp->PIO_mode]);
3182 }
3183 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3184 acer_fifo_udma), DEBUG_PROBE);
3185 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3186 if (idedma_ctl != 0) {
3187 /* Add software bits in status register */
3188 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3189 IDEDMA_CTL, idedma_ctl);
3190 }
3191 pciide_print_modes(cp);
3192 }
3193
3194 int
3195 acer_pci_intr(arg)
3196 void *arg;
3197 {
3198 struct pciide_softc *sc = arg;
3199 struct pciide_channel *cp;
3200 struct channel_softc *wdc_cp;
3201 int i, rv, crv;
3202 u_int32_t chids;
3203
3204 rv = 0;
3205 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3206 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3207 cp = &sc->pciide_channels[i];
3208 wdc_cp = &cp->wdc_channel;
3209 		/* If a compat channel, skip. */
3210 if (cp->compat)
3211 continue;
3212 if (chids & ACER_CHIDS_INT(i)) {
3213 crv = wdcintr(wdc_cp);
3214 if (crv == 0)
3215 printf("%s:%d: bogus intr\n",
3216 sc->sc_wdcdev.sc_dev.dv_xname, i);
3217 else
3218 rv = 1;
3219 }
3220 }
3221 return rv;
3222 }
3223
3224 void
3225 hpt_chip_map(sc, pa)
3226 struct pciide_softc *sc;
3227 struct pci_attach_args *pa;
3228 {
3229 struct pciide_channel *cp;
3230 int i, compatchan, revision;
3231 pcireg_t interface;
3232 bus_size_t cmdsize, ctlsize;
3233
3234 if (pciide_chipen(sc, pa) == 0)
3235 return;
3236 revision = PCI_REVISION(pa->pa_class);
3237 printf(": Triones/Highpoint ");
3238 if (revision == HPT370_REV)
3239 printf("HPT370 IDE Controller\n");
3240 else if (revision == HPT370A_REV)
3241 printf("HPT370A IDE Controller\n");
3242 else if (revision == HPT366_REV)
3243 printf("HPT366 IDE Controller\n");
3244 else
3245 printf("unknown HPT IDE controller rev %d\n", revision);
3246
3247 /*
3248 	 * When the chip is in native mode it identifies itself as a
3249 	 * 'misc mass storage' device. Fake the interface in this case.
3250 */
3251 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3252 interface = PCI_INTERFACE(pa->pa_class);
3253 } else {
3254 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3255 PCIIDE_INTERFACE_PCI(0);
3256 if (revision == HPT370_REV || revision == HPT370A_REV)
3257 interface |= PCIIDE_INTERFACE_PCI(1);
3258 }
3259
3260 printf("%s: bus-master DMA support present",
3261 sc->sc_wdcdev.sc_dev.dv_xname);
3262 pciide_mapreg_dma(sc, pa);
3263 printf("\n");
3264 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3265 WDC_CAPABILITY_MODE;
3266 if (sc->sc_dma_ok) {
3267 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3268 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3269 sc->sc_wdcdev.irqack = pciide_irqack;
3270 }
3271 sc->sc_wdcdev.PIO_cap = 4;
3272 sc->sc_wdcdev.DMA_cap = 2;
3273
3274 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3275 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3276 if (revision == HPT366_REV) {
3277 sc->sc_wdcdev.UDMA_cap = 4;
3278 /*
3279 * The 366 has 2 PCI IDE functions, one for primary and one
3280 * for secondary. So we need to call pciide_mapregs_compat()
3281 * with the real channel
3282 */
3283 if (pa->pa_function == 0) {
3284 compatchan = 0;
3285 } else if (pa->pa_function == 1) {
3286 compatchan = 1;
3287 } else {
3288 printf("%s: unexpected PCI function %d\n",
3289 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3290 return;
3291 }
3292 sc->sc_wdcdev.nchannels = 1;
3293 } else {
3294 sc->sc_wdcdev.nchannels = 2;
3295 sc->sc_wdcdev.UDMA_cap = 5;
3296 }
3297 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3298 cp = &sc->pciide_channels[i];
3299 if (sc->sc_wdcdev.nchannels > 1) {
3300 compatchan = i;
3301 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3302 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3303 printf("%s: %s channel ignored (disabled)\n",
3304 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3305 continue;
3306 }
3307 }
3308 if (pciide_chansetup(sc, i, interface) == 0)
3309 continue;
3310 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3311 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3312 &ctlsize, hpt_pci_intr);
3313 } else {
3314 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3315 &cmdsize, &ctlsize);
3316 }
3317 if (cp->hw_ok == 0)
3318 return;
3319 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3320 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3321 wdcattach(&cp->wdc_channel);
3322 hpt_setup_channel(&cp->wdc_channel);
3323 }
3324 if (revision == HPT370_REV || revision == HPT370A_REV) {
3325 /*
3326 		 * The HPT370/370A have a bit to disable interrupts; make sure
3327 		 * to clear it.
3328 */
3329 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3330 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3331 ~HPT_CSEL_IRQDIS);
3332 }
3333 return;
3334 }
3335
3336 void
3337 hpt_setup_channel(chp)
3338 struct channel_softc *chp;
3339 {
3340 struct ata_drive_datas *drvp;
3341 int drive;
3342 int cable;
3343 u_int32_t before, after;
3344 u_int32_t idedma_ctl;
3345 struct pciide_channel *cp = (struct pciide_channel*)chp;
3346 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3347
3348 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3349
3350 /* setup DMA if needed */
3351 pciide_channel_dma_setup(cp);
3352
3353 idedma_ctl = 0;
3354
3355 /* Per drive settings */
3356 for (drive = 0; drive < 2; drive++) {
3357 drvp = &chp->ch_drive[drive];
3358 /* If no drive, skip */
3359 if ((drvp->drive_flags & DRIVE) == 0)
3360 continue;
3361 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3362 HPT_IDETIM(chp->channel, drive));
3363
3364 /* add timing values, setup DMA if needed */
3365 if (drvp->drive_flags & DRIVE_UDMA) {
3366 /* use Ultra/DMA */
3367 drvp->drive_flags &= ~DRIVE_DMA;
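			/*
			 * CBLID set apparently means no 80-wire cable;
			 * cap the mode at UDMA2 (Ultra/33) in that case.
			 */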
3368 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3369 drvp->UDMA_mode > 2)
3370 drvp->UDMA_mode = 2;
3371 after = (sc->sc_wdcdev.nchannels == 2) ?
3372 hpt370_udma[drvp->UDMA_mode] :
3373 hpt366_udma[drvp->UDMA_mode];
3374 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3375 } else if (drvp->drive_flags & DRIVE_DMA) {
3376 /*
3377 * use Multiword DMA.
3378 * Timings will be used for both PIO and DMA, so adjust
3379 * DMA mode if needed
3380 */
3381 if (drvp->PIO_mode >= 3 &&
3382 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3383 drvp->DMA_mode = drvp->PIO_mode - 2;
3384 }
3385 after = (sc->sc_wdcdev.nchannels == 2) ?
3386 hpt370_dma[drvp->DMA_mode] :
3387 hpt366_dma[drvp->DMA_mode];
3388 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3389 } else {
3390 /* PIO only */
3391 after = (sc->sc_wdcdev.nchannels == 2) ?
3392 hpt370_pio[drvp->PIO_mode] :
3393 hpt366_pio[drvp->PIO_mode];
3394 }
3395 pci_conf_write(sc->sc_pc, sc->sc_tag,
3396 HPT_IDETIM(chp->channel, drive), after);
3397 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3398 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3399 after, before), DEBUG_PROBE);
3400 }
3401 if (idedma_ctl != 0) {
3402 /* Add software bits in status register */
3403 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3404 IDEDMA_CTL, idedma_ctl);
3405 }
3406 pciide_print_modes(cp);
3407 }
3408
3409 int
3410 hpt_pci_intr(arg)
3411 void *arg;
3412 {
3413 struct pciide_softc *sc = arg;
3414 struct pciide_channel *cp;
3415 struct channel_softc *wdc_cp;
3416 int rv = 0;
3417 int dmastat, i, crv;
3418
3419 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3420 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3421 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3422 		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3423 IDEDMA_CTL_INTR)
3424 continue;
3425 cp = &sc->pciide_channels[i];
3426 wdc_cp = &cp->wdc_channel;
3427 crv = wdcintr(wdc_cp);
3428 if (crv == 0) {
3429 printf("%s:%d: bogus intr\n",
3430 sc->sc_wdcdev.sc_dev.dv_xname, i);
3431 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3432 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3433 } else
3434 rv = 1;
3435 }
3436 return rv;
3437 }
3438
3439
3440 /* Macros to test product */
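/*
 * PDC_IS_262: Ultra/66 or newer (UDMA4-capable, see UDMA_cap below)
 * PDC_IS_265: Ultra/100 or newer (UDMA5-capable)
 * PDC_IS_268: Ultra/100TX2 or newer (different register interface,
 *	       uses pdc20268_setup_channel())
 */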
3441 #define PDC_IS_262(sc) \
3442 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3443 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3444 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3445 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3446 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3447 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3448 #define PDC_IS_265(sc) \
3449 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3450 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3451 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3452 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3453 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3454 #define PDC_IS_268(sc) \
3455 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3456 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3457 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133)
3458
3459 void
3460 pdc202xx_chip_map(sc, pa)
3461 struct pciide_softc *sc;
3462 struct pci_attach_args *pa;
3463 {
3464 struct pciide_channel *cp;
3465 int channel;
3466 pcireg_t interface, st, mode;
3467 bus_size_t cmdsize, ctlsize;
3468
3469 if (!PDC_IS_268(sc)) {
3470 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3471 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3472 st), DEBUG_PROBE);
3473 }
3474 if (pciide_chipen(sc, pa) == 0)
3475 return;
3476
3477 /* turn off RAID mode */
3478 if (!PDC_IS_268(sc))
3479 st &= ~PDC2xx_STATE_IDERAID;
3480
3481 /*
3482 	 * We can't rely on the PCI_CLASS_REG content if the chip was in RAID
3483 	 * mode. We have to fake the interface.
3484 */
3485 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3486 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3487 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3488
3489 printf("%s: bus-master DMA support present",
3490 sc->sc_wdcdev.sc_dev.dv_xname);
3491 pciide_mapreg_dma(sc, pa);
3492 printf("\n");
3493 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3494 WDC_CAPABILITY_MODE;
3495 if (sc->sc_dma_ok) {
3496 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3497 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3498 sc->sc_wdcdev.irqack = pciide_irqack;
3499 }
3500 sc->sc_wdcdev.PIO_cap = 4;
3501 sc->sc_wdcdev.DMA_cap = 2;
3502 if (PDC_IS_265(sc))
3503 sc->sc_wdcdev.UDMA_cap = 5;
3504 else if (PDC_IS_262(sc))
3505 sc->sc_wdcdev.UDMA_cap = 4;
3506 else
3507 sc->sc_wdcdev.UDMA_cap = 2;
3508 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3509 pdc20268_setup_channel : pdc202xx_setup_channel;
3510 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3511 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3512
3513 if (!PDC_IS_268(sc)) {
3514 /* setup failsafe defaults */
3515 mode = 0;
3516 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3517 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3518 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3519 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
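/*
 * Program drive 0 and drive 1 of each channel with the mode 0
 * (slowest) PIO and multiword DMA timings as a failsafe; the real
 * timings are programmed per drive once modes have been negotiated.
 */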
3520 for (channel = 0;
3521 channel < sc->sc_wdcdev.nchannels;
3522 channel++) {
3523 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3524 "drive 0 initial timings 0x%x, now 0x%x\n",
3525 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3526 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3527 DEBUG_PROBE);
3528 pci_conf_write(sc->sc_pc, sc->sc_tag,
3529 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3530 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3531 "drive 1 initial timings 0x%x, now 0x%x\n",
3532 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3533 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3534 pci_conf_write(sc->sc_pc, sc->sc_tag,
3535 PDC2xx_TIM(channel, 1), mode);
3536 }
3537
3538 mode = PDC2xx_SCR_DMA;
3539 if (PDC_IS_262(sc)) {
3540 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3541 } else {
3542 /* the BIOS set it up this way */
3543 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3544 }
3545 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3546 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3547 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3548 "now 0x%x\n",
3549 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3550 PDC2xx_SCR),
3551 mode), DEBUG_PROBE);
3552 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3553 PDC2xx_SCR, mode);
3554
3555 /* controller initial state register is OK even without BIOS */
3556 /* Set DMA mode to IDE DMA compatibility */
3557 mode =
3558 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3559 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3560 DEBUG_PROBE);
3561 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3562 mode | 0x1);
3563 mode =
3564 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3565 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3566 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3567 mode | 0x1);
3568 }
3569
3570 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3571 cp = &sc->pciide_channels[channel];
3572 if (pciide_chansetup(sc, channel, interface) == 0)
3573 continue;
3574 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3575 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3576 printf("%s: %s channel ignored (disabled)\n",
3577 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3578 continue;
3579 }
3580 if (PDC_IS_265(sc))
3581 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3582 pdc20265_pci_intr);
3583 else
3584 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3585 pdc202xx_pci_intr);
3586 if (cp->hw_ok == 0)
3587 continue;
3588 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3589 st &= ~(PDC_IS_262(sc) ?
3590 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3591 pciide_map_compat_intr(pa, cp, channel, interface);
3592 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3593 }
3594 if (!PDC_IS_268(sc)) {
3595 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3596 "0x%x\n", st), DEBUG_PROBE);
3597 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3598 }
3599 return;
3600 }
3601
3602 void
3603 pdc202xx_setup_channel(chp)
3604 struct channel_softc *chp;
3605 {
3606 struct ata_drive_datas *drvp;
3607 int drive;
3608 pcireg_t mode, st;
3609 u_int32_t idedma_ctl, scr, atapi;
3610 struct pciide_channel *cp = (struct pciide_channel*)chp;
3611 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3612 int channel = chp->channel;
3613
3614 /* setup DMA if needed */
3615 pciide_channel_dma_setup(cp);
3616
3617 idedma_ctl = 0;
3618 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3619 sc->sc_wdcdev.sc_dev.dv_xname,
3620 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3621 DEBUG_PROBE);
3622
3623 /* Per channel settings */
3624 if (PDC_IS_262(sc)) {
3625 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3626 PDC262_U66);
3627 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3628 /* Trim UDMA mode */
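/*
 * If the 80P status bit is set for this channel (which seems to
 * indicate that no 80-conductor cable is present), or if either
 * drive is in any case limited to UDMA2 or below, cap both drives
 * at UDMA2 so the channel runs at a single, cable-safe speed.
 */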
3629 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3630 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3631 chp->ch_drive[0].UDMA_mode <= 2) ||
3632 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3633 chp->ch_drive[1].UDMA_mode <= 2)) {
3634 if (chp->ch_drive[0].UDMA_mode > 2)
3635 chp->ch_drive[0].UDMA_mode = 2;
3636 if (chp->ch_drive[1].UDMA_mode > 2)
3637 chp->ch_drive[1].UDMA_mode = 2;
3638 }
3639 /* Set U66 if needed */
3640 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3641 chp->ch_drive[0].UDMA_mode > 2) ||
3642 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3643 chp->ch_drive[1].UDMA_mode > 2))
3644 scr |= PDC262_U66_EN(channel);
3645 else
3646 scr &= ~PDC262_U66_EN(channel);
3647 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3648 PDC262_U66, scr);
3649 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3650 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3651 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3652 PDC262_ATAPI(channel))), DEBUG_PROBE);
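/*
 * If one drive runs Ultra/DMA while the other only does multiword
 * DMA, leave the channel's ATAPI UDMA bit clear; otherwise enable
 * UDMA for ATAPI transfers on this channel.
 */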
3653 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3654 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3655 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3656 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3657 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3658 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3659 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3660 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3661 atapi = 0;
3662 else
3663 atapi = PDC262_ATAPI_UDMA;
3664 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3665 PDC262_ATAPI(channel), atapi);
3666 }
3667 }
3668 for (drive = 0; drive < 2; drive++) {
3669 drvp = &chp->ch_drive[drive];
3670 /* If no drive, skip */
3671 if ((drvp->drive_flags & DRIVE) == 0)
3672 continue;
3673 mode = 0;
3674 if (drvp->drive_flags & DRIVE_UDMA) {
3675 /* use Ultra/DMA */
3676 drvp->drive_flags &= ~DRIVE_DMA;
3677 mode = PDC2xx_TIM_SET_MB(mode,
3678 pdc2xx_udma_mb[drvp->UDMA_mode]);
3679 mode = PDC2xx_TIM_SET_MC(mode,
3680 pdc2xx_udma_mc[drvp->UDMA_mode]);
3681 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3682 } else if (drvp->drive_flags & DRIVE_DMA) {
3683 mode = PDC2xx_TIM_SET_MB(mode,
3684 pdc2xx_dma_mb[drvp->DMA_mode]);
3685 mode = PDC2xx_TIM_SET_MC(mode,
3686 pdc2xx_dma_mc[drvp->DMA_mode]);
3687 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3688 } else {
3689 mode = PDC2xx_TIM_SET_MB(mode,
3690 pdc2xx_dma_mb[0]);
3691 mode = PDC2xx_TIM_SET_MC(mode,
3692 pdc2xx_dma_mc[0]);
3693 }
3694 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3695 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3696 if (drvp->drive_flags & DRIVE_ATA)
3697 mode |= PDC2xx_TIM_PRE;
3698 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3699 if (drvp->PIO_mode >= 3) {
3700 mode |= PDC2xx_TIM_IORDY;
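/*
 * IORDYp is only ever set for drive 0, here and in the failsafe
 * defaults above; presumably it is the drive 0 flavour of the
 * IORDY enable.
 */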
3701 if (drive == 0)
3702 mode |= PDC2xx_TIM_IORDYp;
3703 }
3704 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3705 "timings 0x%x\n",
3706 sc->sc_wdcdev.sc_dev.dv_xname,
3707 chp->channel, drive, mode), DEBUG_PROBE);
3708 pci_conf_write(sc->sc_pc, sc->sc_tag,
3709 PDC2xx_TIM(chp->channel, drive), mode);
3710 }
3711 if (idedma_ctl != 0) {
3712 /* Add software bits in status register */
3713 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3714 IDEDMA_CTL, idedma_ctl);
3715 }
3716 pciide_print_modes(cp);
3717 }
3718
3719 void
3720 pdc20268_setup_channel(chp)
3721 struct channel_softc *chp;
3722 {
3723 struct ata_drive_datas *drvp;
3724 int drive;
3725 u_int32_t idedma_ctl;
3726 struct pciide_channel *cp = (struct pciide_channel*)chp;
3727 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3728 int u100;
3729
3730 /* setup DMA if needed */
3731 pciide_channel_dma_setup(cp);
3732
3733 idedma_ctl = 0;
3734
3735 /* I don't know what this is for; FreeBSD does it ... */
3736 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3737 IDEDMA_CMD + 0x1, 0x0b);
3738
3739 /*
3740 * I don't know what this is for; FreeBSD checks this ... this is not
3741 * cable type detect.
3742 */
3743 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3744 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
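/*
 * When the 0x04 bit is set, u100 is 0 and UDMA modes above 2 are
 * trimmed below; otherwise modes up to the chip's maximum are
 * allowed.
 */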
3745
3746 for (drive = 0; drive < 2; drive++) {
3747 drvp = &chp->ch_drive[drive];
3748 /* If no drive, skip */
3749 if ((drvp->drive_flags & DRIVE) == 0)
3750 continue;
3751 if (drvp->drive_flags & DRIVE_UDMA) {
3752 /* use Ultra/DMA */
3753 drvp->drive_flags &= ~DRIVE_DMA;
3754 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3755 if (drvp->UDMA_mode > 2 && u100 == 0)
3756 drvp->UDMA_mode = 2;
3757 } else if (drvp->drive_flags & DRIVE_DMA) {
3758 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3759 }
3760 }
3761 /* Nothing to do to set up modes; the controller snoops the SET_FEATURES command. */
3762 if (idedma_ctl != 0) {
3763 /* Add software bits in status register */
3764 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3765 IDEDMA_CTL, idedma_ctl);
3766 }
3767 pciide_print_modes(cp);
3768 }
3769
3770 int
3771 pdc202xx_pci_intr(arg)
3772 void *arg;
3773 {
3774 struct pciide_softc *sc = arg;
3775 struct pciide_channel *cp;
3776 struct channel_softc *wdc_cp;
3777 int i, rv, crv;
3778 u_int32_t scr;
3779
3780 rv = 0;
3781 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3782 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3783 cp = &sc->pciide_channels[i];
3784 wdc_cp = &cp->wdc_channel;
3785 /* If a compat channel, skip it. */
3786 if (cp->compat)
3787 continue;
3788 if (scr & PDC2xx_SCR_INT(i)) {
3789 crv = wdcintr(wdc_cp);
3790 if (crv == 0)
3791 printf("%s:%d: bogus intr (reg 0x%x)\n",
3792 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3793 else
3794 rv = 1;
3795 }
3796 }
3797 return rv;
3798 }
3799
3800 int
3801 pdc20265_pci_intr(arg)
3802 void *arg;
3803 {
3804 struct pciide_softc *sc = arg;
3805 struct pciide_channel *cp;
3806 struct channel_softc *wdc_cp;
3807 int i, rv, crv;
3808 u_int32_t dmastat;
3809
3810 rv = 0;
3811 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3812 cp = &sc->pciide_channels[i];
3813 wdc_cp = &cp->wdc_channel;
3814 /* If a compat channel, skip it. */
3815 if (cp->compat)
3816 continue;
3817 /*
3818 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
3819 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
3820 * so use that instead (it requires 2 register reads instead of 1,
3821 * but we can't do it any other way).
3822 */
3823 dmastat = bus_space_read_1(sc->sc_dma_iot,
3824 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3825 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3826 continue;
3827 crv = wdcintr(wdc_cp);
3828 if (crv == 0)
3829 printf("%s:%d: bogus intr\n",
3830 sc->sc_wdcdev.sc_dev.dv_xname, i);
3831 else
3832 rv = 1;
3833 }
3834 return rv;
3835 }
3836
3837 void
3838 opti_chip_map(sc, pa)
3839 struct pciide_softc *sc;
3840 struct pci_attach_args *pa;
3841 {
3842 struct pciide_channel *cp;
3843 bus_size_t cmdsize, ctlsize;
3844 pcireg_t interface;
3845 u_int8_t init_ctrl;
3846 int channel;
3847
3848 if (pciide_chipen(sc, pa) == 0)
3849 return;
3850 printf("%s: bus-master DMA support present",
3851 sc->sc_wdcdev.sc_dev.dv_xname);
3852
3853 /*
3854 * XXXSCW:
3855 * There seem to be a couple of buggy revisions/implementations
3856 * of the OPTi pciide chipset. This kludge seems to fix one of
3857 * the reported problems (PR/11644) but still fails for the
3858 * other (PR/13151), although the latter may be due to other
3859 * issues too...
3860 */
3861 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3862 printf(" but disabled due to chip rev. <= 0x12");
3863 sc->sc_dma_ok = 0;
3864 sc->sc_wdcdev.cap = 0;
3865 } else {
3866 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3867 pciide_mapreg_dma(sc, pa);
3868 }
3869 printf("\n");
3870
3871 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3872 sc->sc_wdcdev.PIO_cap = 4;
3873 if (sc->sc_dma_ok) {
3874 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3875 sc->sc_wdcdev.irqack = pciide_irqack;
3876 sc->sc_wdcdev.DMA_cap = 2;
3877 }
3878 sc->sc_wdcdev.set_modes = opti_setup_channel;
3879
3880 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3881 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3882
3883 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3884 OPTI_REG_INIT_CONTROL);
3885
3886 interface = PCI_INTERFACE(pa->pa_class);
3887
3888 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3889 cp = &sc->pciide_channels[channel];
3890 if (pciide_chansetup(sc, channel, interface) == 0)
3891 continue;
3892 if (channel == 1 &&
3893 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3894 printf("%s: %s channel ignored (disabled)\n",
3895 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3896 continue;
3897 }
3898 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3899 pciide_pci_intr);
3900 if (cp->hw_ok == 0)
3901 continue;
3902 pciide_map_compat_intr(pa, cp, channel, interface);
3903 if (cp->hw_ok == 0)
3904 continue;
3905 opti_setup_channel(&cp->wdc_channel);
3906 }
3907 }
3908
3909 void
3910 opti_setup_channel(chp)
3911 struct channel_softc *chp;
3912 {
3913 struct ata_drive_datas *drvp;
3914 struct pciide_channel *cp = (struct pciide_channel*)chp;
3915 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3916 int drive, spd;
3917 int mode[2];
3918 u_int8_t rv, mr;
3919
3920 /*
3921 * Clear the `Delay', `Address Setup Time' and index fields of the
3922 * Miscellaneous Register (they are always zero initially anyway).
3923 */
3924 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3925 mr &= ~(OPTI_MISC_DELAY_MASK |
3926 OPTI_MISC_ADDR_SETUP_MASK |
3927 OPTI_MISC_INDEX_MASK);
3928
3929 /* Prime the control register before setting timing values */
3930 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3931
3932 /* Determine the clock rate of the PCI bus the chip is attached to */
3933 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3934 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3935
3936 /* setup DMA if needed */
3937 pciide_channel_dma_setup(cp);
3938
3939 for (drive = 0; drive < 2; drive++) {
3940 drvp = &chp->ch_drive[drive];
3941 /* If no drive, skip */
3942 if ((drvp->drive_flags & DRIVE) == 0) {
3943 mode[drive] = -1;
3944 continue;
3945 }
3946
3947 if ((drvp->drive_flags & DRIVE_DMA)) {
3948 /*
3949 * Timings will be used for both PIO and DMA,
3950 * so adjust DMA mode if needed
3951 */
3952 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3953 drvp->PIO_mode = drvp->DMA_mode + 2;
3954 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3955 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3956 drvp->PIO_mode - 2 : 0;
3957 if (drvp->DMA_mode == 0)
3958 drvp->PIO_mode = 0;
3959
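/*
 * DMA-capable drives index the timing tables at DMA_mode + 5,
 * presumably past the five PIO entries; e.g. a drive clamped
 * above to PIO 3 / multiword DMA 1 ends up with index 6.
 */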
3960 mode[drive] = drvp->DMA_mode + 5;
3961 } else
3962 mode[drive] = drvp->PIO_mode;
3963
3964 if (drive && mode[0] >= 0 &&
3965 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3966 /*
3967 * Can't have two drives using different values
3968 * for `Address Setup Time'.
3969 * Slow down the faster drive to compensate.
3970 */
3971 int d = (opti_tim_as[spd][mode[0]] >
3972 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3973
3974 mode[d] = mode[1-d];
3975 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3976 chp->ch_drive[d].DMA_mode = 0;
3977 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3978 }
3979 }
3980
3981 for (drive = 0; drive < 2; drive++) {
3982 int m;
3983 if ((m = mode[drive]) < 0)
3984 continue;
3985
3986 /* Set the Address Setup Time and select appropriate index */
3987 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3988 rv |= OPTI_MISC_INDEX(drive);
3989 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3990
3991 /* Set the pulse width and recovery timing parameters */
3992 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3993 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3994 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3995 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3996
3997 /* Set the Enhanced Mode register appropriately */
3998 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3999 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4000 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4001 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4002 }
4003
4004 /* Finally, enable the timings */
4005 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4006
4007 pciide_print_modes(cp);
4008 }
4009
4010 #define ACARD_IS_850(sc) \
4011 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4012
4013 void
4014 acard_chip_map(sc, pa)
4015 struct pciide_softc *sc;
4016 struct pci_attach_args *pa;
4017 {
4018 struct pciide_channel *cp;
4019 int i;
4020 pcireg_t interface;
4021 bus_size_t cmdsize, ctlsize;
4022
4023 if (pciide_chipen(sc, pa) == 0)
4024 return;
4025
4026 /*
4027 * When the chip is in native mode it identifies itself as a
4028 * 'misc mass storage' device, so fake the interface in that case.
4029 */
4030 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4031 interface = PCI_INTERFACE(pa->pa_class);
4032 } else {
4033 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4034 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4035 }
4036
4037 printf("%s: bus-master DMA support present",
4038 sc->sc_wdcdev.sc_dev.dv_xname);
4039 pciide_mapreg_dma(sc, pa);
4040 printf("\n");
4041 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4042 WDC_CAPABILITY_MODE;
4043
4044 if (sc->sc_dma_ok) {
4045 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4046 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4047 sc->sc_wdcdev.irqack = pciide_irqack;
4048 }
4049 sc->sc_wdcdev.PIO_cap = 4;
4050 sc->sc_wdcdev.DMA_cap = 2;
4051 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4052
4053 sc->sc_wdcdev.set_modes = acard_setup_channel;
4054 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4055 sc->sc_wdcdev.nchannels = 2;
4056
4057 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4058 cp = &sc->pciide_channels[i];
4059 if (pciide_chansetup(sc, i, interface) == 0)
4060 continue;
4061 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4062 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4063 &ctlsize, pciide_pci_intr);
4064 } else {
4065 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4066 &cmdsize, &ctlsize);
4067 }
4068 if (cp->hw_ok == 0)
4069 return;
4070 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4071 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4072 wdcattach(&cp->wdc_channel);
4073 acard_setup_channel(&cp->wdc_channel);
4074 }
4075 if (!ACARD_IS_850(sc)) {
4076 u_int32_t reg;
4077 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4078 reg &= ~ATP860_CTRL_INT;
4079 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4080 }
4081 }
4082
4083 void
4084 acard_setup_channel(chp)
4085 struct channel_softc *chp;
4086 {
4087 struct ata_drive_datas *drvp;
4088 struct pciide_channel *cp = (struct pciide_channel*)chp;
4089 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4090 int channel = chp->channel;
4091 int drive;
4092 u_int32_t idetime, udma_mode;
4093 u_int32_t idedma_ctl;
4094
4095 /* setup DMA if needed */
4096 pciide_channel_dma_setup(cp);
4097
4098 if (ACARD_IS_850(sc)) {
4099 idetime = 0;
4100 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4101 udma_mode &= ~ATP850_UDMA_MASK(channel);
4102 } else {
4103 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4104 idetime &= ~ATP860_SETTIME_MASK(channel);
4105 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4106 udma_mode &= ~ATP860_UDMA_MASK(channel);
4107
4108 /* check 80 pins cable */
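/*
 * The 80P bit appears to be set when an 80-conductor cable is not
 * present, so cap both drives at UDMA2, the fastest mode that is
 * safe on a 40-wire cable.
 */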
4109 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4110 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4111 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4112 & ATP860_CTRL_80P(chp->channel)) {
4113 if (chp->ch_drive[0].UDMA_mode > 2)
4114 chp->ch_drive[0].UDMA_mode = 2;
4115 if (chp->ch_drive[1].UDMA_mode > 2)
4116 chp->ch_drive[1].UDMA_mode = 2;
4117 }
4118 }
4119 }
4120
4121 idedma_ctl = 0;
4122
4123 /* Per drive settings */
4124 for (drive = 0; drive < 2; drive++) {
4125 drvp = &chp->ch_drive[drive];
4126 /* If no drive, skip */
4127 if ((drvp->drive_flags & DRIVE) == 0)
4128 continue;
4129 /* add timing values, setup DMA if needed */
4130 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4131 (drvp->drive_flags & DRIVE_UDMA)) {
4132 /* use Ultra/DMA */
4133 if (ACARD_IS_850(sc)) {
4134 idetime |= ATP850_SETTIME(drive,
4135 acard_act_udma[drvp->UDMA_mode],
4136 acard_rec_udma[drvp->UDMA_mode]);
4137 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4138 acard_udma_conf[drvp->UDMA_mode]);
4139 } else {
4140 idetime |= ATP860_SETTIME(channel, drive,
4141 acard_act_udma[drvp->UDMA_mode],
4142 acard_rec_udma[drvp->UDMA_mode]);
4143 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4144 acard_udma_conf[drvp->UDMA_mode]);
4145 }
4146 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4147 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4148 (drvp->drive_flags & DRIVE_DMA)) {
4149 /* use Multiword DMA */
4150 drvp->drive_flags &= ~DRIVE_UDMA;
4151 if (ACARD_IS_850(sc)) {
4152 idetime |= ATP850_SETTIME(drive,
4153 acard_act_dma[drvp->DMA_mode],
4154 acard_rec_dma[drvp->DMA_mode]);
4155 } else {
4156 idetime |= ATP860_SETTIME(channel, drive,
4157 acard_act_dma[drvp->DMA_mode],
4158 acard_rec_dma[drvp->DMA_mode]);
4159 }
4160 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4161 } else {
4162 /* PIO only */
4163 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4164 if (ACARD_IS_850(sc)) {
4165 idetime |= ATP850_SETTIME(drive,
4166 acard_act_pio[drvp->PIO_mode],
4167 acard_rec_pio[drvp->PIO_mode]);
4168 } else {
4169 idetime |= ATP860_SETTIME(channel, drive,
4170 acard_act_pio[drvp->PIO_mode],
4171 acard_rec_pio[drvp->PIO_mode]);
4172 }
4173 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4174 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4175 | ATP8x0_CTRL_EN(channel));
4176 }
4177 }
4178
4179 if (idedma_ctl != 0) {
4180 /* Add software bits in status register */
4181 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4182 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4183 }
4184 pciide_print_modes(cp);
4185
4186 if (ACARD_IS_850(sc)) {
4187 pci_conf_write(sc->sc_pc, sc->sc_tag,
4188 ATP850_IDETIME(channel), idetime);
4189 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4190 } else {
4191 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4192 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4193 }
4194 }
4195
4196 int
4197 acard_pci_intr(arg)
4198 void *arg;
4199 {
4200 struct pciide_softc *sc = arg;
4201 struct pciide_channel *cp;
4202 struct channel_softc *wdc_cp;
4203 int rv = 0;
4204 int dmastat, i, crv;
4205
4206 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4207 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4208 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4209 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4210 continue;
4211 cp = &sc->pciide_channels[i];
4212 wdc_cp = &cp->wdc_channel;
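/*
 * If no command is outstanding on this channel, let wdcintr() see
 * the (presumably spurious or shared) interrupt, ack it in
 * IDEDMA_CTL and don't claim it.
 */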
4213 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4214 (void)wdcintr(wdc_cp);
4215 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4216 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4217 continue;
4218 }
4219 crv = wdcintr(wdc_cp);
4220 if (crv == 0)
4221 printf("%s:%d: bogus intr\n",
4222 sc->sc_wdcdev.sc_dev.dv_xname, i);
4223 else if (crv == 1)
4224 rv = 1;
4225 else if (rv == 0)
4226 rv = crv;
4227 }
4228 return rv;
4229 }
4230