/* $NetBSD: pciide.c,v 1.68.2.32 2002/03/25 17:57:01 he Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/cy82c693var.h>
122
123 #include "opt_pciide.h"
124
125 /* inlines for reading/writing 8-bit PCI registers */
126 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
127 int));
128 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
129 int, u_int8_t));
130
131 static __inline u_int8_t
132 pciide_pci_read(pc, pa, reg)
133 pci_chipset_tag_t pc;
134 pcitag_t pa;
135 int reg;
136 {
137
138 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
139 ((reg & 0x03) * 8) & 0xff);
140 }
141
142 static __inline void
143 pciide_pci_write(pc, pa, reg, val)
144 pci_chipset_tag_t pc;
145 pcitag_t pa;
146 int reg;
147 u_int8_t val;
148 {
149 pcireg_t pcival;
150
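/* read-modify-write the 32-bit configuration word that contains the byte */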
151 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
152 pcival &= ~(0xff << ((reg & 0x03) * 8));
153 pcival |= (val << ((reg & 0x03) * 8));
154 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
155 }
156
157 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
158
159 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
160 void piix_setup_channel __P((struct channel_softc*));
161 void piix3_4_setup_channel __P((struct channel_softc*));
162 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
163 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
164 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
165
166 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167 void amd7x6_setup_channel __P((struct channel_softc*));
168
169 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
170 void apollo_setup_channel __P((struct channel_softc*));
171
172 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
173 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_setup_channel __P((struct channel_softc*));
175 void cmd_channel_map __P((struct pci_attach_args *,
176 struct pciide_softc *, int));
177 int cmd_pci_intr __P((void *));
178 void cmd646_9_irqack __P((struct channel_softc *));
179
180 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cy693_setup_channel __P((struct channel_softc*));
182
183 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
184 void sis_setup_channel __P((struct channel_softc*));
185
186 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
187 void acer_setup_channel __P((struct channel_softc*));
188 int acer_pci_intr __P((void *));
189
190 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
191 void pdc202xx_setup_channel __P((struct channel_softc*));
192 int pdc202xx_pci_intr __P((void *));
193 int pdc20265_pci_intr __P((void *));
194
195 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void opti_setup_channel __P((struct channel_softc*));
197
198 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void hpt_setup_channel __P((struct channel_softc*));
200 int hpt_pci_intr __P((void *));
201
202 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void acard_setup_channel __P((struct channel_softc*));
204 int acard_pci_intr __P((void *));
205
206 void pciide_channel_dma_setup __P((struct pciide_channel *));
207 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
208 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
209 void pciide_dma_start __P((void*, int, int));
210 int pciide_dma_finish __P((void*, int, int, int));
211 void pciide_irqack __P((struct channel_softc *));
212 void pciide_print_modes __P((struct pciide_channel *));
213
214 struct pciide_product_desc {
215 u_int32_t ide_product;
216 int ide_flags;
217 const char *ide_name;
218 /* map and setup chip, probe drives */
219 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
220 };
221
222 /* Flags for ide_flags */
223 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
224
/* Default product description for devices not known to this driver */
226 const struct pciide_product_desc default_product_desc = {
227 0,
228 0,
229 "Generic PCI IDE controller",
230 default_chip_map,
231 };
232
233 const struct pciide_product_desc pciide_intel_products[] = {
234 { PCI_PRODUCT_INTEL_82092AA,
235 0,
236 "Intel 82092AA IDE controller",
237 default_chip_map,
238 },
239 { PCI_PRODUCT_INTEL_82371FB_IDE,
240 0,
241 "Intel 82371FB IDE controller (PIIX)",
242 piix_chip_map,
243 },
244 { PCI_PRODUCT_INTEL_82371SB_IDE,
245 0,
246 "Intel 82371SB IDE Interface (PIIX3)",
247 piix_chip_map,
248 },
249 { PCI_PRODUCT_INTEL_82371AB_IDE,
250 0,
251 "Intel 82371AB IDE controller (PIIX4)",
252 piix_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82801AA_IDE,
255 0,
256 "Intel 82801AA IDE Controller (ICH)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82801AB_IDE,
260 0,
261 "Intel 82801AB IDE Controller (ICH0)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82801BA_IDE,
265 0,
266 "Intel 82801BA IDE Controller (ICH2)",
267 piix_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82801BAM_IDE,
270 0,
271 "Intel 82801BAM IDE Controller (ICH2)",
272 piix_chip_map,
273 },
274 { 0,
275 0,
276 NULL,
277 NULL
278 }
279 };
280
281 const struct pciide_product_desc pciide_amd_products[] = {
282 { PCI_PRODUCT_AMD_PBC756_IDE,
283 0,
284 "Advanced Micro Devices AMD756 IDE Controller",
285 amd7x6_chip_map
286 },
287 { PCI_PRODUCT_AMD_PBC766_IDE,
288 0,
289 "Advanced Micro Devices AMD766 IDE Controller",
290 amd7x6_chip_map
291 },
292 { PCI_PRODUCT_AMD_PBC768_IDE,
293 0,
294 "Advanced Micro Devices AMD768 IDE Controller",
295 amd7x6_chip_map
296 },
297 { 0,
298 0,
299 NULL,
300 NULL
301 }
302 };
303
304 const struct pciide_product_desc pciide_cmd_products[] = {
305 { PCI_PRODUCT_CMDTECH_640,
306 0,
307 "CMD Technology PCI0640",
308 cmd_chip_map
309 },
310 { PCI_PRODUCT_CMDTECH_643,
311 0,
312 "CMD Technology PCI0643",
313 cmd0643_9_chip_map,
314 },
315 { PCI_PRODUCT_CMDTECH_646,
316 0,
317 "CMD Technology PCI0646",
318 cmd0643_9_chip_map,
319 },
320 { PCI_PRODUCT_CMDTECH_648,
321 IDE_PCI_CLASS_OVERRIDE,
322 "CMD Technology PCI0648",
323 cmd0643_9_chip_map,
324 },
325 { PCI_PRODUCT_CMDTECH_649,
326 IDE_PCI_CLASS_OVERRIDE,
327 "CMD Technology PCI0649",
328 cmd0643_9_chip_map,
329 },
330 { 0,
331 0,
332 NULL,
333 NULL
334 }
335 };
336
337 const struct pciide_product_desc pciide_via_products[] = {
338 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
339 0,
340 NULL,
341 apollo_chip_map,
342 },
343 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
344 0,
345 NULL,
346 apollo_chip_map,
347 },
348 { 0,
349 0,
350 NULL,
351 NULL
352 }
353 };
354
355 const struct pciide_product_desc pciide_cypress_products[] = {
356 { PCI_PRODUCT_CONTAQ_82C693,
357 0,
358 "Cypress 82C693 IDE Controller",
359 cy693_chip_map,
360 },
361 { 0,
362 0,
363 NULL,
364 NULL
365 }
366 };
367
368 const struct pciide_product_desc pciide_sis_products[] = {
369 { PCI_PRODUCT_SIS_5597_IDE,
370 0,
371 "Silicon Integrated System 5597/5598 IDE controller",
372 sis_chip_map,
373 },
374 { 0,
375 0,
376 NULL,
377 NULL
378 }
379 };
380
381 const struct pciide_product_desc pciide_acer_products[] = {
382 { PCI_PRODUCT_ALI_M5229,
383 0,
384 "Acer Labs M5229 UDMA IDE Controller",
385 acer_chip_map,
386 },
387 { 0,
388 0,
389 NULL,
390 NULL
391 }
392 };
393
394 const struct pciide_product_desc pciide_promise_products[] = {
395 { PCI_PRODUCT_PROMISE_ULTRA33,
396 IDE_PCI_CLASS_OVERRIDE,
397 "Promise Ultra33/ATA Bus Master IDE Accelerator",
398 pdc202xx_chip_map,
399 },
400 { PCI_PRODUCT_PROMISE_ULTRA66,
401 IDE_PCI_CLASS_OVERRIDE,
402 "Promise Ultra66/ATA Bus Master IDE Accelerator",
403 pdc202xx_chip_map,
404 },
405 { PCI_PRODUCT_PROMISE_ULTRA100,
406 IDE_PCI_CLASS_OVERRIDE,
407 "Promise Ultra100/ATA Bus Master IDE Accelerator",
408 pdc202xx_chip_map,
409 },
410 { PCI_PRODUCT_PROMISE_ULTRA100X,
411 IDE_PCI_CLASS_OVERRIDE,
412 "Promise Ultra100/ATA Bus Master IDE Accelerator",
413 pdc202xx_chip_map,
414 },
415 { 0,
416 0,
417 NULL,
418 NULL
419 }
420 };
421
422 const struct pciide_product_desc pciide_opti_products[] = {
423 { PCI_PRODUCT_OPTI_82C621,
424 0,
425 "OPTi 82c621 PCI IDE controller",
426 opti_chip_map,
427 },
428 { PCI_PRODUCT_OPTI_82C568,
429 0,
430 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
431 opti_chip_map,
432 },
433 { PCI_PRODUCT_OPTI_82D568,
434 0,
435 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
436 opti_chip_map,
437 },
438 { 0,
439 0,
440 NULL,
441 NULL
442 }
443 };
444
445 const struct pciide_product_desc pciide_triones_products[] = {
446 { PCI_PRODUCT_TRIONES_HPT366,
447 IDE_PCI_CLASS_OVERRIDE,
448 NULL,
449 hpt_chip_map,
450 },
451 { 0,
452 0,
453 NULL,
454 NULL
455 }
456 };
457
458 const struct pciide_product_desc pciide_acard_products[] = {
459 { PCI_PRODUCT_ACARD_ATP850U,
460 IDE_PCI_CLASS_OVERRIDE,
461 "Acard ATP850U Ultra33 IDE Controller",
462 acard_chip_map,
463 },
464 { PCI_PRODUCT_ACARD_ATP860,
465 IDE_PCI_CLASS_OVERRIDE,
466 "Acard ATP860 Ultra66 IDE Controller",
467 acard_chip_map,
468 },
469 { PCI_PRODUCT_ACARD_ATP860A,
470 IDE_PCI_CLASS_OVERRIDE,
471 "Acard ATP860-A Ultra66 IDE Controller",
472 acard_chip_map,
473 },
474 { 0,
475 0,
476 NULL,
477 }
478 };
479
480 struct pciide_vendor_desc {
481 u_int32_t ide_vendor;
482 const struct pciide_product_desc *ide_products;
483 };
484
485 const struct pciide_vendor_desc pciide_vendors[] = {
486 { PCI_VENDOR_INTEL, pciide_intel_products },
487 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
488 { PCI_VENDOR_VIATECH, pciide_via_products },
489 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
490 { PCI_VENDOR_SIS, pciide_sis_products },
491 { PCI_VENDOR_ALI, pciide_acer_products },
492 { PCI_VENDOR_PROMISE, pciide_promise_products },
493 { PCI_VENDOR_AMD, pciide_amd_products },
494 { PCI_VENDOR_OPTI, pciide_opti_products },
495 { PCI_VENDOR_TRIONES, pciide_triones_products },
496 { PCI_VENDOR_ACARD, pciide_acard_products },
497 { 0, NULL }
498 };
499
500 /* options passed via the 'flags' config keyword */
501 #define PCIIDE_OPTIONS_DMA 0x01
502
503 int pciide_match __P((struct device *, struct cfdata *, void *));
504 void pciide_attach __P((struct device *, struct device *, void *));
505
506 struct cfattach pciide_ca = {
507 sizeof(struct pciide_softc), pciide_match, pciide_attach
508 };
509 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
510 int pciide_mapregs_compat __P(( struct pci_attach_args *,
511 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
512 int pciide_mapregs_native __P((struct pci_attach_args *,
513 struct pciide_channel *, bus_size_t *, bus_size_t *,
514 int (*pci_intr) __P((void *))));
515 void pciide_mapreg_dma __P((struct pciide_softc *,
516 struct pci_attach_args *));
517 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
518 void pciide_mapchan __P((struct pci_attach_args *,
519 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
520 int (*pci_intr) __P((void *))));
521 int pciide_chan_candisable __P((struct pciide_channel *));
522 void pciide_map_compat_intr __P(( struct pci_attach_args *,
523 struct pciide_channel *, int, int));
524 int pciide_print __P((void *, const char *pnp));
525 int pciide_compat_intr __P((void *));
526 int pciide_pci_intr __P((void *));
527 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
528
529 const struct pciide_product_desc *
530 pciide_lookup_product(id)
531 u_int32_t id;
532 {
533 const struct pciide_product_desc *pp;
534 const struct pciide_vendor_desc *vp;
535
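/* find this vendor's product table; both tables end with a sentinel entry */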
536 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
537 if (PCI_VENDOR(id) == vp->ide_vendor)
538 break;
539
540 if ((pp = vp->ide_products) == NULL)
541 return NULL;
542
543 for (; pp->chip_map != NULL; pp++)
544 if (PCI_PRODUCT(id) == pp->ide_product)
545 break;
546
547 if (pp->chip_map == NULL)
548 return NULL;
549 return pp;
550 }
551
552 int
553 pciide_match(parent, match, aux)
554 struct device *parent;
555 struct cfdata *match;
556 void *aux;
557 {
558 struct pci_attach_args *pa = aux;
559 const struct pciide_product_desc *pp;
560
/*
* Check the class code to see that it's a PCI IDE controller.
* If it is, we assume that we can deal with it; it _should_
* work in a standardized way...
*/
566 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
567 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
568 return (1);
569 }
570
/*
* Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
* controllers. Let's see if we can deal with them anyway.
*/
575 pp = pciide_lookup_product(pa->pa_id);
576 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
577 return (1);
578 }
579
580 return (0);
581 }
582
583 void
584 pciide_attach(parent, self, aux)
585 struct device *parent, *self;
586 void *aux;
587 {
588 struct pci_attach_args *pa = aux;
589 pci_chipset_tag_t pc = pa->pa_pc;
590 pcitag_t tag = pa->pa_tag;
591 struct pciide_softc *sc = (struct pciide_softc *)self;
592 pcireg_t csr;
593 char devinfo[256];
594 const char *displaydev;
595
596 sc->sc_pp = pciide_lookup_product(pa->pa_id);
597 if (sc->sc_pp == NULL) {
598 sc->sc_pp = &default_product_desc;
599 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
600 displaydev = devinfo;
601 } else
602 displaydev = sc->sc_pp->ide_name;
603
604 /* if displaydev == NULL, printf is done in chip-specific map */
605 if (displaydev)
606 printf(": %s (rev. 0x%02x)\n", displaydev,
607 PCI_REVISION(pa->pa_class));
608
609 sc->sc_pc = pa->pa_pc;
610 sc->sc_tag = pa->pa_tag;
611 #ifdef WDCDEBUG
612 if (wdcdebug_pciide_mask & DEBUG_PROBE)
613 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
614 #endif
615 sc->sc_pp->chip_map(sc, pa);
616
617 if (sc->sc_dma_ok) {
618 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
619 csr |= PCI_COMMAND_MASTER_ENABLE;
620 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
621 }
622 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
623 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
624 }
625
/* tell whether the chip is enabled or not */
627 int
628 pciide_chipen(sc, pa)
629 struct pciide_softc *sc;
630 struct pci_attach_args *pa;
631 {
632 pcireg_t csr;
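/* I/O access may be disabled at the device itself or by a bridge above it */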
633 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
634 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
635 PCI_COMMAND_STATUS_REG);
636 printf("%s: device disabled (at %s)\n",
637 sc->sc_wdcdev.sc_dev.dv_xname,
638 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
639 "device" : "bridge");
640 return 0;
641 }
642 return 1;
643 }
644
645 int
646 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
647 struct pci_attach_args *pa;
648 struct pciide_channel *cp;
649 int compatchan;
650 bus_size_t *cmdsizep, *ctlsizep;
651 {
652 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
653 struct channel_softc *wdc_cp = &cp->wdc_channel;
654
655 cp->compat = 1;
656 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
657 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
658
659 wdc_cp->cmd_iot = pa->pa_iot;
660 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
661 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
662 printf("%s: couldn't map %s channel cmd regs\n",
663 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
664 return (0);
665 }
666
667 wdc_cp->ctl_iot = pa->pa_iot;
668 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
669 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
670 printf("%s: couldn't map %s channel ctl regs\n",
671 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
672 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
673 PCIIDE_COMPAT_CMD_SIZE);
674 return (0);
675 }
676
677 return (1);
678 }
679
680 int
681 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
682 struct pci_attach_args * pa;
683 struct pciide_channel *cp;
684 bus_size_t *cmdsizep, *ctlsizep;
685 int (*pci_intr) __P((void *));
686 {
687 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
688 struct channel_softc *wdc_cp = &cp->wdc_channel;
689 const char *intrstr;
690 pci_intr_handle_t intrhandle;
691
692 cp->compat = 0;
693
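/* both channels share a single native-PCI interrupt; establish it only once */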
694 if (sc->sc_pci_ih == NULL) {
695 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
696 pa->pa_intrline, &intrhandle) != 0) {
697 printf("%s: couldn't map native-PCI interrupt\n",
698 sc->sc_wdcdev.sc_dev.dv_xname);
699 return 0;
700 }
701 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
702 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
703 intrhandle, IPL_BIO, pci_intr, sc);
704 if (sc->sc_pci_ih != NULL) {
705 printf("%s: using %s for native-PCI interrupt\n",
706 sc->sc_wdcdev.sc_dev.dv_xname,
707 intrstr ? intrstr : "unknown interrupt");
708 } else {
709 printf("%s: couldn't establish native-PCI interrupt",
710 sc->sc_wdcdev.sc_dev.dv_xname);
711 if (intrstr != NULL)
712 printf(" at %s", intrstr);
713 printf("\n");
714 return 0;
715 }
716 }
717 cp->ih = sc->sc_pci_ih;
718 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
719 PCI_MAPREG_TYPE_IO, 0,
720 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
721 printf("%s: couldn't map %s channel cmd regs\n",
722 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
723 return 0;
724 }
725
726 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
727 PCI_MAPREG_TYPE_IO, 0,
728 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
729 printf("%s: couldn't map %s channel ctl regs\n",
730 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
731 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
732 return 0;
733 }
/*
* In native mode, 4 bytes of I/O space are mapped for the control
* register; the control register itself is at offset 2. Pass the
* generic code a handle for only one byte at the right offset.
*/
739 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
740 &wdc_cp->ctl_ioh) != 0) {
741 printf("%s: unable to subregion %s channel ctl regs\n",
742 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
743 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
745 return 0;
746 }
747 return (1);
748 }
749
750 void
751 pciide_mapreg_dma(sc, pa)
752 struct pciide_softc *sc;
753 struct pci_attach_args *pa;
754 {
755 pcireg_t maptype;
756
757 /*
758 * Map DMA registers
759 *
760 * Note that sc_dma_ok is the right variable to test to see if
761 * DMA can be done. If the interface doesn't support DMA,
762 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
763 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
764 * non-zero if the interface supports DMA and the registers
765 * could be mapped.
766 *
767 * XXX Note that despite the fact that the Bus Master IDE specs
768 * XXX say that "The bus master IDE function uses 16 bytes of IO
769 * XXX space," some controllers (at least the United
770 * XXX Microelectronics UM8886BF) place it in memory space.
771 */
772 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
773 PCIIDE_REG_BUS_MASTER_DMA);
774
775 switch (maptype) {
776 case PCI_MAPREG_TYPE_IO:
777 case PCI_MAPREG_MEM_TYPE_32BIT:
778 sc->sc_dma_ok = (pci_mapreg_map(pa,
779 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
780 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
781 sc->sc_dmat = pa->pa_dmat;
782 if (sc->sc_dma_ok == 0) {
783 printf(", but unused (couldn't map registers)");
784 } else {
785 sc->sc_wdcdev.dma_arg = sc;
786 sc->sc_wdcdev.dma_init = pciide_dma_init;
787 sc->sc_wdcdev.dma_start = pciide_dma_start;
788 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
789 }
790 break;
791
792 default:
793 sc->sc_dma_ok = 0;
794 printf(", but unsupported register maptype (0x%x)", maptype);
795 }
796 }
797
798 int
799 pciide_compat_intr(arg)
800 void *arg;
801 {
802 struct pciide_channel *cp = arg;
803
804 #ifdef DIAGNOSTIC
805 /* should only be called for a compat channel */
806 if (cp->compat == 0)
807 panic("pciide compat intr called for non-compat chan %p\n", cp);
808 #endif
809 return (wdcintr(&cp->wdc_channel));
810 }
811
812 int
813 pciide_pci_intr(arg)
814 void *arg;
815 {
816 struct pciide_softc *sc = arg;
817 struct pciide_channel *cp;
818 struct channel_softc *wdc_cp;
819 int i, rv, crv;
820
821 rv = 0;
822 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
823 cp = &sc->pciide_channels[i];
824 wdc_cp = &cp->wdc_channel;
825
/* If it's a compat channel, skip. */
827 if (cp->compat)
828 continue;
/* if this channel isn't waiting for an interrupt, skip */
830 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
831 continue;
832
833 crv = wdcintr(wdc_cp);
834 if (crv == 0)
835 ; /* leave rv alone */
836 else if (crv == 1)
837 rv = 1; /* claim the intr */
838 else if (rv == 0) /* crv should be -1 in this case */
839 rv = crv; /* if we've done no better, take it */
840 }
841 return (rv);
842 }
843
844 void
845 pciide_channel_dma_setup(cp)
846 struct pciide_channel *cp;
847 {
848 int drive;
849 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
850 struct ata_drive_datas *drvp;
851
852 for (drive = 0; drive < 2; drive++) {
853 drvp = &cp->wdc_channel.ch_drive[drive];
854 /* If no drive, skip */
855 if ((drvp->drive_flags & DRIVE) == 0)
856 continue;
857 /* setup DMA if needed */
858 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
859 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
860 sc->sc_dma_ok == 0) {
861 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
862 continue;
863 }
864 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
865 != 0) {
866 /* Abort DMA setup */
867 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
868 continue;
869 }
870 }
871 }
872
873 int
874 pciide_dma_table_setup(sc, channel, drive)
875 struct pciide_softc *sc;
876 int channel, drive;
877 {
878 bus_dma_segment_t seg;
879 int error, rseg;
880 const bus_size_t dma_table_size =
881 sizeof(struct idedma_table) * NIDEDMA_TABLES;
882 struct pciide_dma_maps *dma_maps =
883 &sc->pciide_channels[channel].dma_maps[drive];
884
885 /* If table was already allocated, just return */
886 if (dma_maps->dma_table)
887 return 0;
888
889 /* Allocate memory for the DMA tables and map it */
890 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
891 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
892 BUS_DMA_NOWAIT)) != 0) {
893 printf("%s:%d: unable to allocate table DMA for "
894 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
895 channel, drive, error);
896 return error;
897 }
898 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
899 dma_table_size,
900 (caddr_t *)&dma_maps->dma_table,
901 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
902 printf("%s:%d: unable to map table DMA for"
903 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
904 channel, drive, error);
905 return error;
906 }
907 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
908 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
909 seg.ds_addr), DEBUG_PROBE);
910
911 /* Create and load table DMA map for this disk */
912 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
913 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
914 &dma_maps->dmamap_table)) != 0) {
915 printf("%s:%d: unable to create table DMA map for "
916 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
917 channel, drive, error);
918 return error;
919 }
920 if ((error = bus_dmamap_load(sc->sc_dmat,
921 dma_maps->dmamap_table,
922 dma_maps->dma_table,
923 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
924 printf("%s:%d: unable to load table DMA map for "
925 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
926 channel, drive, error);
927 return error;
928 }
929 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
930 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
931 /* Create a xfer DMA map for this drive */
932 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
933 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
934 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
935 &dma_maps->dmamap_xfer)) != 0) {
936 printf("%s:%d: unable to create xfer DMA map for "
937 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
938 channel, drive, error);
939 return error;
940 }
941 return 0;
942 }
943
944 int
945 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
946 void *v;
947 int channel, drive;
948 void *databuf;
949 size_t datalen;
950 int flags;
951 {
952 struct pciide_softc *sc = v;
953 int error, seg;
954 struct pciide_dma_maps *dma_maps =
955 &sc->pciide_channels[channel].dma_maps[drive];
956
957 error = bus_dmamap_load(sc->sc_dmat,
958 dma_maps->dmamap_xfer,
959 databuf, datalen, NULL, BUS_DMA_NOWAIT);
960 if (error) {
961 printf("%s:%d: unable to load xfer DMA map for"
962 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
963 channel, drive, error);
964 return error;
965 }
966
967 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
968 dma_maps->dmamap_xfer->dm_mapsize,
969 (flags & WDC_DMA_READ) ?
970 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
971
972 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
973 #ifdef DIAGNOSTIC
974 /* A segment must not cross a 64k boundary */
975 {
976 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
977 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
978 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
979 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
980 printf("pciide_dma: segment %d physical addr 0x%lx"
981 " len 0x%lx not properly aligned\n",
982 seg, phys, len);
983 panic("pciide_dma: buf align");
984 }
985 }
986 #endif
987 dma_maps->dma_table[seg].base_addr =
988 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
989 dma_maps->dma_table[seg].byte_count =
990 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
991 IDEDMA_BYTE_COUNT_MASK);
992 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
993 seg, le32toh(dma_maps->dma_table[seg].byte_count),
994 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
995
996 }
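/* flag the last entry so the controller stops at the end of the table */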
997 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
998 htole32(IDEDMA_BYTE_COUNT_EOT);
999
1000 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1001 dma_maps->dmamap_table->dm_mapsize,
1002 BUS_DMASYNC_PREWRITE);
1003
/* Maps are ready; program the DMA engine. */
1005 #ifdef DIAGNOSTIC
1006 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1007 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1008 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1009 panic("pciide_dma_init: table align");
1010 }
1011 #endif
1012
1013 /* Clear status bits */
1014 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1015 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1016 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1017 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1018 /* Write table addr */
1019 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1020 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1021 dma_maps->dmamap_table->dm_segs[0].ds_addr);
/* set read/write: a drive read is a bus-master write to memory */
1023 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1024 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1025 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1026 /* remember flags */
1027 dma_maps->dma_flags = flags;
1028 return 0;
1029 }
1030
1031 void
1032 pciide_dma_start(v, channel, drive)
1033 void *v;
1034 int channel, drive;
1035 {
1036 struct pciide_softc *sc = v;
1037
1038 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1039 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1040 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1041 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1042 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1043 }
1044
1045 int
1046 pciide_dma_finish(v, channel, drive, force)
1047 void *v;
1048 int channel, drive;
1049 int force;
1050 {
1051 struct pciide_softc *sc = v;
1052 u_int8_t status;
1053 int error = 0;
1054 struct pciide_dma_maps *dma_maps =
1055 &sc->pciide_channels[channel].dma_maps[drive];
1056
1057 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1058 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1059 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1060 DEBUG_XFERS);
1061
1062 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1063 return WDC_DMAST_NOIRQ;
1064
1065 /* stop DMA channel */
1066 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1067 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1068 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1069 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1070
1071 /* Unload the map of the data buffer */
1072 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1073 dma_maps->dmamap_xfer->dm_mapsize,
1074 (dma_maps->dma_flags & WDC_DMA_READ) ?
1075 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1076 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1077
1078 if ((status & IDEDMA_CTL_ERR) != 0) {
1079 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1080 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1081 error |= WDC_DMAST_ERR;
1082 }
1083
1084 if ((status & IDEDMA_CTL_INTR) == 0) {
1085 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1086 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1087 drive, status);
1088 error |= WDC_DMAST_NOIRQ;
1089 }
1090
1091 if ((status & IDEDMA_CTL_ACT) != 0) {
1092 /* data underrun, may be a valid condition for ATAPI */
1093 error |= WDC_DMAST_UNDER;
1094 }
1095 return error;
1096 }
1097
1098 void
1099 pciide_irqack(chp)
1100 struct channel_softc *chp;
1101 {
1102 struct pciide_channel *cp = (struct pciide_channel*)chp;
1103 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1104
1105 /* clear status bits in IDE DMA registers */
1106 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1107 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1108 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1109 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1110 }
1111
1112 /* some common code used by several chip_map */
1113 int
1114 pciide_chansetup(sc, channel, interface)
1115 struct pciide_softc *sc;
1116 int channel;
1117 pcireg_t interface;
1118 {
1119 struct pciide_channel *cp = &sc->pciide_channels[channel];
1120 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1121 cp->name = PCIIDE_CHANNEL_NAME(channel);
1122 cp->wdc_channel.channel = channel;
1123 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1124 cp->wdc_channel.ch_queue =
1125 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1126 if (cp->wdc_channel.ch_queue == NULL) {
1127 printf("%s %s channel: "
1128 "can't allocate memory for command queue",
1129 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1130 return 0;
1131 }
1132 printf("%s: %s channel %s to %s mode\n",
1133 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1134 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1135 "configured" : "wired",
1136 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1137 "native-PCI" : "compatibility");
1138 return 1;
1139 }
1140
1141 /* some common code used by several chip channel_map */
1142 void
1143 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1144 struct pci_attach_args *pa;
1145 struct pciide_channel *cp;
1146 pcireg_t interface;
1147 bus_size_t *cmdsizep, *ctlsizep;
1148 int (*pci_intr) __P((void *));
1149 {
1150 struct channel_softc *wdc_cp = &cp->wdc_channel;
1151
1152 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1153 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1154 pci_intr);
1155 else
1156 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1157 wdc_cp->channel, cmdsizep, ctlsizep);
1158
1159 if (cp->hw_ok == 0)
1160 return;
1161 wdc_cp->data32iot = wdc_cp->cmd_iot;
1162 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1163 wdcattach(wdc_cp);
1164 }
1165
/*
* Generic code to check whether a channel can be disabled. Returns 1
* if the channel can be disabled, 0 if not.
*/
1170 int
1171 pciide_chan_candisable(cp)
1172 struct pciide_channel *cp;
1173 {
1174 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1175 struct channel_softc *wdc_cp = &cp->wdc_channel;
1176
1177 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1178 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1179 printf("%s: disabling %s channel (no drives)\n",
1180 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1181 cp->hw_ok = 0;
1182 return 1;
1183 }
1184 return 0;
1185 }
1186
1187 /*
1188 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1189 * Set hw_ok=0 on failure
1190 */
1191 void
1192 pciide_map_compat_intr(pa, cp, compatchan, interface)
1193 struct pci_attach_args *pa;
1194 struct pciide_channel *cp;
1195 int compatchan, interface;
1196 {
1197 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1198 struct channel_softc *wdc_cp = &cp->wdc_channel;
1199
1200 if (cp->hw_ok == 0)
1201 return;
1202 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1203 return;
1204
1205 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1206 pa, compatchan, pciide_compat_intr, cp);
1207 if (cp->ih == NULL) {
1208 printf("%s: no compatibility interrupt for use by %s "
1209 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1210 cp->hw_ok = 0;
1211 }
1212 }
1213
1214 void
1215 pciide_print_modes(cp)
1216 struct pciide_channel *cp;
1217 {
1218 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1219 int drive;
1220 struct channel_softc *chp;
1221 struct ata_drive_datas *drvp;
1222
1223 chp = &cp->wdc_channel;
1224 for (drive = 0; drive < 2; drive++) {
1225 drvp = &chp->ch_drive[drive];
1226 if ((drvp->drive_flags & DRIVE) == 0)
1227 continue;
1228 printf("%s(%s:%d:%d): using PIO mode %d",
1229 drvp->drv_softc->dv_xname,
1230 sc->sc_wdcdev.sc_dev.dv_xname,
1231 chp->channel, drive, drvp->PIO_mode);
1232 if (drvp->drive_flags & DRIVE_DMA)
1233 printf(", DMA mode %d", drvp->DMA_mode);
1234 if (drvp->drive_flags & DRIVE_UDMA)
1235 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1236 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1237 printf(" (using DMA data transfers)");
1238 printf("\n");
1239 }
1240 }
1241
1242 void
1243 default_chip_map(sc, pa)
1244 struct pciide_softc *sc;
1245 struct pci_attach_args *pa;
1246 {
1247 struct pciide_channel *cp;
1248 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1249 pcireg_t csr;
1250 int channel, drive;
1251 struct ata_drive_datas *drvp;
1252 u_int8_t idedma_ctl;
1253 bus_size_t cmdsize, ctlsize;
1254 char *failreason;
1255
1256 if (pciide_chipen(sc, pa) == 0)
1257 return;
1258
1259 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1260 printf("%s: bus-master DMA support present",
1261 sc->sc_wdcdev.sc_dev.dv_xname);
1262 if (sc->sc_pp == &default_product_desc &&
1263 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1264 PCIIDE_OPTIONS_DMA) == 0) {
1265 printf(", but unused (no driver support)");
1266 sc->sc_dma_ok = 0;
1267 } else {
1268 pciide_mapreg_dma(sc, pa);
1269 if (sc->sc_dma_ok != 0)
1270 printf(", used without full driver "
1271 "support");
1272 }
1273 } else {
1274 printf("%s: hardware does not support DMA",
1275 sc->sc_wdcdev.sc_dev.dv_xname);
1276 sc->sc_dma_ok = 0;
1277 }
1278 printf("\n");
1279 if (sc->sc_dma_ok) {
1280 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1281 sc->sc_wdcdev.irqack = pciide_irqack;
1282 }
1283 sc->sc_wdcdev.PIO_cap = 0;
1284 sc->sc_wdcdev.DMA_cap = 0;
1285
1286 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1287 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1288 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1289
1290 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1291 cp = &sc->pciide_channels[channel];
1292 if (pciide_chansetup(sc, channel, interface) == 0)
1293 continue;
1294 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1295 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1296 &ctlsize, pciide_pci_intr);
1297 } else {
1298 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1299 channel, &cmdsize, &ctlsize);
1300 }
1301 if (cp->hw_ok == 0)
1302 continue;
1303 /*
1304 * Check to see if something appears to be there.
1305 */
1306 failreason = NULL;
1307 if (!wdcprobe(&cp->wdc_channel)) {
1308 failreason = "not responding; disabled or no drives?";
1309 goto next;
1310 }
1311 /*
1312 * Now, make sure it's actually attributable to this PCI IDE
1313 * channel by trying to access the channel again while the
1314 * PCI IDE controller's I/O space is disabled. (If the
1315 * channel no longer appears to be there, it belongs to
1316 * this controller.) YUCK!
1317 */
1318 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1319 PCI_COMMAND_STATUS_REG);
1320 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1321 csr & ~PCI_COMMAND_IO_ENABLE);
1322 if (wdcprobe(&cp->wdc_channel))
1323 failreason = "other hardware responding at addresses";
1324 pci_conf_write(sc->sc_pc, sc->sc_tag,
1325 PCI_COMMAND_STATUS_REG, csr);
1326 next:
1327 if (failreason) {
1328 printf("%s: %s channel ignored (%s)\n",
1329 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1330 failreason);
1331 cp->hw_ok = 0;
1332 bus_space_unmap(cp->wdc_channel.cmd_iot,
1333 cp->wdc_channel.cmd_ioh, cmdsize);
1334 bus_space_unmap(cp->wdc_channel.ctl_iot,
1335 cp->wdc_channel.ctl_ioh, ctlsize);
1336 } else {
1337 pciide_map_compat_intr(pa, cp, channel, interface);
1338 }
1339 if (cp->hw_ok) {
1340 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1341 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1342 wdcattach(&cp->wdc_channel);
1343 }
1344 }
1345
1346 if (sc->sc_dma_ok == 0)
1347 return;
1348
1349 /* Allocate DMA maps */
1350 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1351 idedma_ctl = 0;
1352 cp = &sc->pciide_channels[channel];
1353 for (drive = 0; drive < 2; drive++) {
1354 drvp = &cp->wdc_channel.ch_drive[drive];
1355 /* If no drive, skip */
1356 if ((drvp->drive_flags & DRIVE) == 0)
1357 continue;
1358 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1359 continue;
1360 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1361 /* Abort DMA setup */
1362 printf("%s:%d:%d: can't allocate DMA maps, "
1363 "using PIO transfers\n",
1364 sc->sc_wdcdev.sc_dev.dv_xname,
1365 channel, drive);
drvp->drive_flags &= ~DRIVE_DMA;
continue;
}
1368 printf("%s:%d:%d: using DMA data transfers\n",
1369 sc->sc_wdcdev.sc_dev.dv_xname,
1370 channel, drive);
1371 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1372 }
1373 if (idedma_ctl != 0) {
1374 /* Add software bits in status register */
1375 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1376 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1377 idedma_ctl);
1378 }
1379 }
1380 }
1381
1382 void
1383 piix_chip_map(sc, pa)
1384 struct pciide_softc *sc;
1385 struct pci_attach_args *pa;
1386 {
1387 struct pciide_channel *cp;
1388 int channel;
1389 u_int32_t idetim;
1390 bus_size_t cmdsize, ctlsize;
1391
1392 if (pciide_chipen(sc, pa) == 0)
1393 return;
1394
1395 printf("%s: bus-master DMA support present",
1396 sc->sc_wdcdev.sc_dev.dv_xname);
1397 pciide_mapreg_dma(sc, pa);
1398 printf("\n");
1399 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1400 WDC_CAPABILITY_MODE;
1401 if (sc->sc_dma_ok) {
1402 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1403 sc->sc_wdcdev.irqack = pciide_irqack;
1404 switch(sc->sc_pp->ide_product) {
1405 case PCI_PRODUCT_INTEL_82371AB_IDE:
1406 case PCI_PRODUCT_INTEL_82801AA_IDE:
1407 case PCI_PRODUCT_INTEL_82801AB_IDE:
1408 case PCI_PRODUCT_INTEL_82801BA_IDE:
1409 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1410 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1411 }
1412 }
1413 sc->sc_wdcdev.PIO_cap = 4;
1414 sc->sc_wdcdev.DMA_cap = 2;
1415 switch(sc->sc_pp->ide_product) {
1416 case PCI_PRODUCT_INTEL_82801AA_IDE:
1417 sc->sc_wdcdev.UDMA_cap = 4;
1418 break;
1419 case PCI_PRODUCT_INTEL_82801BA_IDE:
1420 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1421 sc->sc_wdcdev.UDMA_cap = 5;
1422 break;
1423 default:
1424 sc->sc_wdcdev.UDMA_cap = 2;
1425 }
1426 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1427 sc->sc_wdcdev.set_modes = piix_setup_channel;
1428 else
1429 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1430 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1431 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1432
1433 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1434 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1435 DEBUG_PROBE);
1436 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1437 WDCDEBUG_PRINT((", sidetim=0x%x",
1438 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1439 DEBUG_PROBE);
1440 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1441 WDCDEBUG_PRINT((", udamreg 0x%x",
1442 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1443 DEBUG_PROBE);
1444 }
1445 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1446 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1447 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1448 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1449 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1450 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1451 DEBUG_PROBE);
1452 }
1453
1454 }
1455 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1456
1457 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1458 cp = &sc->pciide_channels[channel];
1459 /* PIIX is compat-only */
1460 if (pciide_chansetup(sc, channel, 0) == 0)
1461 continue;
1462 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1463 if ((PIIX_IDETIM_READ(idetim, channel) &
1464 PIIX_IDETIM_IDE) == 0) {
1465 printf("%s: %s channel ignored (disabled)\n",
1466 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1467 continue;
1468 }
1469 /* PIIX are compat-only pciide devices */
1470 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1471 if (cp->hw_ok == 0)
1472 continue;
1473 if (pciide_chan_candisable(cp)) {
1474 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1475 channel);
1476 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1477 idetim);
1478 }
1479 pciide_map_compat_intr(pa, cp, channel, 0);
1480 if (cp->hw_ok == 0)
1481 continue;
1482 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1483 }
1484
1485 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1486 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1487 DEBUG_PROBE);
1488 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1489 WDCDEBUG_PRINT((", sidetim=0x%x",
1490 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1491 DEBUG_PROBE);
1492 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1493 WDCDEBUG_PRINT((", udamreg 0x%x",
1494 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1495 DEBUG_PROBE);
1496 }
1497 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1498 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1499 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1500 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1501 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1502 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1503 DEBUG_PROBE);
1504 }
1505 }
1506 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1507 }
1508
1509 void
1510 piix_setup_channel(chp)
1511 struct channel_softc *chp;
1512 {
1513 u_int8_t mode[2], drive;
1514 u_int32_t oidetim, idetim, idedma_ctl;
1515 struct pciide_channel *cp = (struct pciide_channel*)chp;
1516 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1517 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1518
1519 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1520 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1521 idedma_ctl = 0;
1522
1523 /* set up new idetim: Enable IDE registers decode */
1524 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1525 chp->channel);
1526
1527 /* setup DMA */
1528 pciide_channel_dma_setup(cp);
1529
/*
* Here we have to juggle the drive modes: the PIIX can't have
* different timings for the master and slave drives, so we need
* to find the best combination.
*/
1535
/* If both drives support DMA, take the lower mode */
1537 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1538 (drvp[1].drive_flags & DRIVE_DMA)) {
1539 mode[0] = mode[1] =
1540 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1541 drvp[0].DMA_mode = mode[0];
1542 drvp[1].DMA_mode = mode[1];
1543 goto ok;
1544 }
/*
* If only one drive supports DMA, use its mode, and
* put the other one in PIO mode 0 if its mode is not compatible
*/
1549 if (drvp[0].drive_flags & DRIVE_DMA) {
1550 mode[0] = drvp[0].DMA_mode;
1551 mode[1] = drvp[1].PIO_mode;
1552 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1553 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1554 mode[1] = drvp[1].PIO_mode = 0;
1555 goto ok;
1556 }
1557 if (drvp[1].drive_flags & DRIVE_DMA) {
1558 mode[1] = drvp[1].DMA_mode;
1559 mode[0] = drvp[0].PIO_mode;
1560 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1561 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1562 mode[0] = drvp[0].PIO_mode = 0;
1563 goto ok;
1564 }
/*
* If neither drive uses DMA, take the lower mode, unless
* one of them is below PIO mode 2
*/
1569 if (drvp[0].PIO_mode < 2) {
1570 mode[0] = drvp[0].PIO_mode = 0;
1571 mode[1] = drvp[1].PIO_mode;
1572 } else if (drvp[1].PIO_mode < 2) {
1573 mode[1] = drvp[1].PIO_mode = 0;
1574 mode[0] = drvp[0].PIO_mode;
1575 } else {
1576 mode[0] = mode[1] =
1577 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1578 drvp[0].PIO_mode = mode[0];
1579 drvp[1].PIO_mode = mode[1];
1580 }
1581 ok: /* The modes are setup */
1582 for (drive = 0; drive < 2; drive++) {
1583 if (drvp[drive].drive_flags & DRIVE_DMA) {
1584 idetim |= piix_setup_idetim_timings(
1585 mode[drive], 1, chp->channel);
1586 goto end;
1587 }
1588 }
/* If we get here, neither drive is using DMA */
1590 if (mode[0] >= 2)
1591 idetim |= piix_setup_idetim_timings(
1592 mode[0], 0, chp->channel);
1593 else
1594 idetim |= piix_setup_idetim_timings(
1595 mode[1], 0, chp->channel);
1596 end: /*
1597 * timing mode is now set up in the controller. Enable
1598 * it per-drive
1599 */
1600 for (drive = 0; drive < 2; drive++) {
1601 /* If no drive, skip */
1602 if ((drvp[drive].drive_flags & DRIVE) == 0)
1603 continue;
1604 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1605 if (drvp[drive].drive_flags & DRIVE_DMA)
1606 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1607 }
1608 if (idedma_ctl != 0) {
1609 /* Add software bits in status register */
1610 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1611 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1612 idedma_ctl);
1613 }
1614 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1615 pciide_print_modes(cp);
1616 }
1617
1618 void
1619 piix3_4_setup_channel(chp)
1620 struct channel_softc *chp;
1621 {
1622 struct ata_drive_datas *drvp;
1623 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1624 struct pciide_channel *cp = (struct pciide_channel*)chp;
1625 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1626 int drive;
1627 int channel = chp->channel;
1628
1629 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1630 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1631 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1632 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1633 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1634 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1635 PIIX_SIDETIM_RTC_MASK(channel));
1636
1637 idedma_ctl = 0;
1638 /* If channel disabled, no need to go further */
1639 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1640 return;
1641 /* set up new idetim: Enable IDE registers decode */
1642 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1643
1644 /* setup DMA if needed */
1645 pciide_channel_dma_setup(cp);
1646
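/* for each drive, program Ultra-DMA, multiword DMA or PIO timings as appropriate */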
1647 for (drive = 0; drive < 2; drive++) {
1648 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1649 PIIX_UDMATIM_SET(0x3, channel, drive));
1650 drvp = &chp->ch_drive[drive];
1651 /* If no drive, skip */
1652 if ((drvp->drive_flags & DRIVE) == 0)
1653 continue;
1654 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1655 (drvp->drive_flags & DRIVE_UDMA) == 0))
1656 goto pio;
1657
1658 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1659 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1660 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1661 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1662 ideconf |= PIIX_CONFIG_PINGPONG;
1663 }
1664 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1665 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1666 /* setup Ultra/100 */
1667 if (drvp->UDMA_mode > 2 &&
1668 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1669 drvp->UDMA_mode = 2;
1670 if (drvp->UDMA_mode > 4) {
1671 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1672 } else {
1673 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1674 if (drvp->UDMA_mode > 2) {
1675 ideconf |= PIIX_CONFIG_UDMA66(channel,
1676 drive);
1677 } else {
1678 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1679 drive);
1680 }
1681 }
1682 }
1683 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1684 /* setup Ultra/66 */
1685 if (drvp->UDMA_mode > 2 &&
1686 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1687 drvp->UDMA_mode = 2;
1688 if (drvp->UDMA_mode > 2)
1689 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1690 else
1691 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1692 }
1693 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1694 (drvp->drive_flags & DRIVE_UDMA)) {
1695 /* use Ultra/DMA */
1696 drvp->drive_flags &= ~DRIVE_DMA;
udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1698 udmareg |= PIIX_UDMATIM_SET(
1699 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1700 } else {
1701 /* use Multiword DMA */
1702 drvp->drive_flags &= ~DRIVE_UDMA;
1703 if (drive == 0) {
1704 idetim |= piix_setup_idetim_timings(
1705 drvp->DMA_mode, 1, channel);
1706 } else {
1707 sidetim |= piix_setup_sidetim_timings(
1708 drvp->DMA_mode, 1, channel);
idetim = PIIX_IDETIM_SET(idetim,
1710 PIIX_IDETIM_SITRE, channel);
1711 }
1712 }
1713 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1714
1715 pio: /* use PIO mode */
1716 idetim |= piix_setup_idetim_drvs(drvp);
1717 if (drive == 0) {
1718 idetim |= piix_setup_idetim_timings(
1719 drvp->PIO_mode, 0, channel);
1720 } else {
1721 sidetim |= piix_setup_sidetim_timings(
1722 drvp->PIO_mode, 0, channel);
idetim = PIIX_IDETIM_SET(idetim,
1724 PIIX_IDETIM_SITRE, channel);
1725 }
1726 }
1727 if (idedma_ctl != 0) {
1728 /* Add software bits in status register */
1729 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1730 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1731 idedma_ctl);
1732 }
1733 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1734 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1735 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1736 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1737 pciide_print_modes(cp);
1738 }
1739
1740
1741 /* setup ISP and RTC fields, based on mode */
1742 static u_int32_t
1743 piix_setup_idetim_timings(mode, dma, channel)
1744 u_int8_t mode;
1745 u_int8_t dma;
1746 u_int8_t channel;
1747 {
1748
1749 if (dma)
1750 return PIIX_IDETIM_SET(0,
1751 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1752 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1753 channel);
1754 else
1755 return PIIX_IDETIM_SET(0,
1756 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1757 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1758 channel);
1759 }
1760
1761 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1762 static u_int32_t
1763 piix_setup_idetim_drvs(drvp)
1764 struct ata_drive_datas *drvp;
1765 {
1766 u_int32_t ret = 0;
1767 struct channel_softc *chp = drvp->chnl_softc;
1768 u_int8_t channel = chp->channel;
1769 u_int8_t drive = drvp->drive;
1770
1771 /*
1772 	 * If the drive is using UDMA, timing setups are independent,
1773 	 * so just check DMA and PIO here.
1774 */
1775 if (drvp->drive_flags & DRIVE_DMA) {
1776 /* if mode = DMA mode 0, use compatible timings */
1777 if ((drvp->drive_flags & DRIVE_DMA) &&
1778 drvp->DMA_mode == 0) {
1779 drvp->PIO_mode = 0;
1780 return ret;
1781 }
1782 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1783 /*
1784 		 * If the PIO and DMA timings are the same, use the fast
1785 		 * timings for PIO too; otherwise use compatible timings.
1786 */
1787 if ((piix_isp_pio[drvp->PIO_mode] !=
1788 piix_isp_dma[drvp->DMA_mode]) ||
1789 (piix_rtc_pio[drvp->PIO_mode] !=
1790 piix_rtc_dma[drvp->DMA_mode]))
1791 drvp->PIO_mode = 0;
1792 /* if PIO mode <= 2, use compat timings for PIO */
1793 if (drvp->PIO_mode <= 2) {
1794 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1795 channel);
1796 return ret;
1797 }
1798 }
1799
1800 /*
1801 * Now setup PIO modes. If mode < 2, use compat timings.
1802 * Else enable fast timings. Enable IORDY and prefetch/post
1803 * if PIO mode >= 3.
1804 */
1805
1806 if (drvp->PIO_mode < 2)
1807 return ret;
1808
1809 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1810 if (drvp->PIO_mode >= 3) {
1811 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1812 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1813 }
1814 return ret;
1815 }
1816
1817 /* setup values in SIDETIM registers, based on mode */
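/*
 * SIDETIM only holds timings for the slave drive; the master's
 * timings are kept in IDETIM (see piix_setup_idetim_timings).
 */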
1818 static u_int32_t
1819 piix_setup_sidetim_timings(mode, dma, channel)
1820 u_int8_t mode;
1821 u_int8_t dma;
1822 u_int8_t channel;
1823 {
1824 if (dma)
1825 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1826 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1827 else
1828 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1829 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1830 }
1831
1832 void
1833 amd7x6_chip_map(sc, pa)
1834 struct pciide_softc *sc;
1835 struct pci_attach_args *pa;
1836 {
1837 struct pciide_channel *cp;
1838 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1839 int channel;
1840 pcireg_t chanenable;
1841 bus_size_t cmdsize, ctlsize;
1842
1843 if (pciide_chipen(sc, pa) == 0)
1844 return;
1845 printf("%s: bus-master DMA support present",
1846 sc->sc_wdcdev.sc_dev.dv_xname);
1847 pciide_mapreg_dma(sc, pa);
1848 printf("\n");
1849 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1850 WDC_CAPABILITY_MODE;
1851 if (sc->sc_dma_ok) {
1852 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1853 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1854 sc->sc_wdcdev.irqack = pciide_irqack;
1855 }
1856 sc->sc_wdcdev.PIO_cap = 4;
1857 sc->sc_wdcdev.DMA_cap = 2;
1858
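/*
 * The 766 and 768 support Ultra/100; the older 756 tops out at
 * Ultra/66.
 */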
1859 switch (sc->sc_pp->ide_product) {
1860 case PCI_PRODUCT_AMD_PBC766_IDE:
1861 case PCI_PRODUCT_AMD_PBC768_IDE:
1862 sc->sc_wdcdev.UDMA_cap = 5;
1863 break;
1864 default:
1865 sc->sc_wdcdev.UDMA_cap = 4;
1866 }
1867 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1868 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1869 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1870 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1871
1872 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1873 DEBUG_PROBE);
1874 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1875 cp = &sc->pciide_channels[channel];
1876 if (pciide_chansetup(sc, channel, interface) == 0)
1877 continue;
1878
1879 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1880 printf("%s: %s channel ignored (disabled)\n",
1881 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1882 continue;
1883 }
1884 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1885 pciide_pci_intr);
1886
1887 if (pciide_chan_candisable(cp))
1888 chanenable &= ~AMD7X6_CHAN_EN(channel);
1889 pciide_map_compat_intr(pa, cp, channel, interface);
1890 if (cp->hw_ok == 0)
1891 continue;
1892
1893 amd7x6_setup_channel(&cp->wdc_channel);
1894 }
1895 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1896 chanenable);
1897 return;
1898 }
1899
1900 void
1901 amd7x6_setup_channel(chp)
1902 struct channel_softc *chp;
1903 {
1904 u_int32_t udmatim_reg, datatim_reg;
1905 u_int8_t idedma_ctl;
1906 int mode, drive;
1907 struct ata_drive_datas *drvp;
1908 struct pciide_channel *cp = (struct pciide_channel*)chp;
1909 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1910 #ifndef PCIIDE_AMD756_ENABLEDMA
1911 int rev = PCI_REVISION(
1912 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1913 #endif
1914
1915 idedma_ctl = 0;
1916 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1917 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1918 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1919 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1920
1921 /* setup DMA if needed */
1922 pciide_channel_dma_setup(cp);
1923
1924 for (drive = 0; drive < 2; drive++) {
1925 drvp = &chp->ch_drive[drive];
1926 /* If no drive, skip */
1927 if ((drvp->drive_flags & DRIVE) == 0)
1928 continue;
1929 /* add timing values, setup DMA if needed */
1930 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1931 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1932 mode = drvp->PIO_mode;
1933 goto pio;
1934 }
1935 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1936 (drvp->drive_flags & DRIVE_UDMA)) {
1937 /* use Ultra/DMA */
1938 drvp->drive_flags &= ~DRIVE_DMA;
1939 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
1940 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
1941 AMD7X6_UDMA_TIME(chp->channel, drive,
1942 amd7x6_udma_tim[drvp->UDMA_mode]);
1943 /* can use PIO timings, MW DMA unused */
1944 mode = drvp->PIO_mode;
1945 } else {
1946 /* use Multiword DMA, but only if revision is OK */
1947 drvp->drive_flags &= ~DRIVE_UDMA;
1948 #ifndef PCIIDE_AMD756_ENABLEDMA
1949 /*
1950 * The workaround doesn't seem to be necessary
1951 			 * with all drives, so it can be disabled by defining
1952 			 * PCIIDE_AMD756_ENABLEDMA. The underlying bug causes
1953 			 * a hard hang if triggered.
1954 */
1955 if (sc->sc_pp->ide_product ==
1956 PCI_PRODUCT_AMD_PBC756_IDE &&
1957 AMD756_CHIPREV_DISABLEDMA(rev)) {
1958 printf("%s:%d:%d: multi-word DMA disabled due "
1959 "to chip revision\n",
1960 sc->sc_wdcdev.sc_dev.dv_xname,
1961 chp->channel, drive);
1962 mode = drvp->PIO_mode;
1963 drvp->drive_flags &= ~DRIVE_DMA;
1964 goto pio;
1965 }
1966 #endif
1967 /* mode = min(pio, dma+2) */
1968 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1969 mode = drvp->PIO_mode;
1970 else
1971 mode = drvp->DMA_mode + 2;
1972 }
1973 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1974
1975 pio: /* setup PIO mode */
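/*
 * Modes 0-2 all use the slowest (compatible) timing entry; above
 * that, the DMA mode tracks PIO mode - 2 so a single DATATIM
 * entry can serve both.
 */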
1976 if (mode <= 2) {
1977 drvp->DMA_mode = 0;
1978 drvp->PIO_mode = 0;
1979 mode = 0;
1980 } else {
1981 drvp->PIO_mode = mode;
1982 drvp->DMA_mode = mode - 2;
1983 }
1984 datatim_reg |=
1985 AMD7X6_DATATIM_PULSE(chp->channel, drive,
1986 amd7x6_pio_set[mode]) |
1987 AMD7X6_DATATIM_RECOV(chp->channel, drive,
1988 amd7x6_pio_rec[mode]);
1989 }
1990 if (idedma_ctl != 0) {
1991 /* Add software bits in status register */
1992 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1993 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1994 idedma_ctl);
1995 }
1996 pciide_print_modes(cp);
1997 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
1998 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
1999 }
2000
2001 void
2002 apollo_chip_map(sc, pa)
2003 struct pciide_softc *sc;
2004 struct pci_attach_args *pa;
2005 {
2006 struct pciide_channel *cp;
2007 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2008 int channel;
2009 u_int32_t ideconf;
2010 bus_size_t cmdsize, ctlsize;
2011 pcitag_t pcib_tag;
2012 pcireg_t pcib_id, pcib_class;
2013
2014 if (pciide_chipen(sc, pa) == 0)
2015 return;
2016 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2017 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2018 /* and read ID and rev of the ISA bridge */
2019 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2020 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2021 printf(": VIA Technologies ");
2022 switch (PCI_PRODUCT(pcib_id)) {
2023 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2024 printf("VT82C586 (Apollo VP) ");
2025 		if (PCI_REVISION(pcib_class) >= 0x02) {
2026 printf("ATA33 controller\n");
2027 sc->sc_wdcdev.UDMA_cap = 2;
2028 } else {
2029 printf("controller\n");
2030 sc->sc_wdcdev.UDMA_cap = 0;
2031 }
2032 break;
2033 case PCI_PRODUCT_VIATECH_VT82C596A:
2034 printf("VT82C596A (Apollo Pro) ");
2035 if (PCI_REVISION(pcib_class) >= 0x12) {
2036 printf("ATA66 controller\n");
2037 sc->sc_wdcdev.UDMA_cap = 4;
2038 } else {
2039 printf("ATA33 controller\n");
2040 sc->sc_wdcdev.UDMA_cap = 2;
2041 }
2042 break;
2043 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2044 printf("VT82C686A (Apollo KX133) ");
2045 if (PCI_REVISION(pcib_class) >= 0x40) {
2046 printf("ATA100 controller\n");
2047 sc->sc_wdcdev.UDMA_cap = 5;
2048 } else {
2049 printf("ATA66 controller\n");
2050 sc->sc_wdcdev.UDMA_cap = 4;
2051 }
2052 break;
2053 default:
2054 printf("unknown ATA controller\n");
2055 sc->sc_wdcdev.UDMA_cap = 0;
2056 }
2057
2058 printf("%s: bus-master DMA support present",
2059 sc->sc_wdcdev.sc_dev.dv_xname);
2060 pciide_mapreg_dma(sc, pa);
2061 printf("\n");
2062 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2063 WDC_CAPABILITY_MODE;
2064 if (sc->sc_dma_ok) {
2065 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2066 sc->sc_wdcdev.irqack = pciide_irqack;
2067 if (sc->sc_wdcdev.UDMA_cap > 0)
2068 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2069 }
2070 sc->sc_wdcdev.PIO_cap = 4;
2071 sc->sc_wdcdev.DMA_cap = 2;
2072 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2073 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2074 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2075
2076 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2077 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2078 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2079 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2080 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2081 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2082 DEBUG_PROBE);
2083
2084 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2085 cp = &sc->pciide_channels[channel];
2086 if (pciide_chansetup(sc, channel, interface) == 0)
2087 continue;
2088
2089 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2090 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2091 printf("%s: %s channel ignored (disabled)\n",
2092 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2093 continue;
2094 }
2095 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2096 pciide_pci_intr);
2097 if (cp->hw_ok == 0)
2098 continue;
2099 if (pciide_chan_candisable(cp)) {
2100 ideconf &= ~APO_IDECONF_EN(channel);
2101 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2102 ideconf);
2103 }
2104 pciide_map_compat_intr(pa, cp, channel, interface);
2105
2106 if (cp->hw_ok == 0)
2107 continue;
2108 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2109 }
2110 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2111 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2112 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2113 }
2114
2115 void
2116 apollo_setup_channel(chp)
2117 struct channel_softc *chp;
2118 {
2119 u_int32_t udmatim_reg, datatim_reg;
2120 u_int8_t idedma_ctl;
2121 int mode, drive;
2122 struct ata_drive_datas *drvp;
2123 struct pciide_channel *cp = (struct pciide_channel*)chp;
2124 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2125
2126 idedma_ctl = 0;
2127 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2128 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2129 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2130 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2131
2132 /* setup DMA if needed */
2133 pciide_channel_dma_setup(cp);
2134
2135 for (drive = 0; drive < 2; drive++) {
2136 drvp = &chp->ch_drive[drive];
2137 /* If no drive, skip */
2138 if ((drvp->drive_flags & DRIVE) == 0)
2139 continue;
2140 /* add timing values, setup DMA if needed */
2141 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2142 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2143 mode = drvp->PIO_mode;
2144 goto pio;
2145 }
2146 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2147 (drvp->drive_flags & DRIVE_UDMA)) {
2148 /* use Ultra/DMA */
2149 drvp->drive_flags &= ~DRIVE_DMA;
2150 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2151 APO_UDMA_EN_MTH(chp->channel, drive);
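/*
 * UDMA_cap, set at attach time from the ISA bridge revision,
 * selects the timing table and whether the 66MHz UDMA clock
 * is enabled.
 */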
2152 if (sc->sc_wdcdev.UDMA_cap == 5) {
2153 /* 686b */
2154 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2155 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2156 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2157 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2158 /* 596b or 686a */
2159 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2160 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2161 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2162 } else {
2163 /* 596a or 586b */
2164 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2165 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2166 }
2167 /* can use PIO timings, MW DMA unused */
2168 mode = drvp->PIO_mode;
2169 } else {
2170 /* use Multiword DMA */
2171 drvp->drive_flags &= ~DRIVE_UDMA;
2172 /* mode = min(pio, dma+2) */
2173 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2174 mode = drvp->PIO_mode;
2175 else
2176 mode = drvp->DMA_mode + 2;
2177 }
2178 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2179
2180 pio: /* setup PIO mode */
2181 if (mode <= 2) {
2182 drvp->DMA_mode = 0;
2183 drvp->PIO_mode = 0;
2184 mode = 0;
2185 } else {
2186 drvp->PIO_mode = mode;
2187 drvp->DMA_mode = mode - 2;
2188 }
2189 datatim_reg |=
2190 APO_DATATIM_PULSE(chp->channel, drive,
2191 apollo_pio_set[mode]) |
2192 APO_DATATIM_RECOV(chp->channel, drive,
2193 apollo_pio_rec[mode]);
2194 }
2195 if (idedma_ctl != 0) {
2196 /* Add software bits in status register */
2197 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2198 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2199 idedma_ctl);
2200 }
2201 pciide_print_modes(cp);
2202 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2203 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2204 }
2205
2206 void
2207 cmd_channel_map(pa, sc, channel)
2208 struct pci_attach_args *pa;
2209 struct pciide_softc *sc;
2210 int channel;
2211 {
2212 struct pciide_channel *cp = &sc->pciide_channels[channel];
2213 bus_size_t cmdsize, ctlsize;
2214 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2215 int interface;
2216
2217 /*
2218 * The 0648/0649 can be told to identify as a RAID controller.
2219 	 * In this case, we have to fake the interface value.
2220 */
2221 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2222 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2223 PCIIDE_INTERFACE_SETTABLE(1);
2224 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2225 CMD_CONF_DSA1)
2226 interface |= PCIIDE_INTERFACE_PCI(0) |
2227 PCIIDE_INTERFACE_PCI(1);
2228 } else {
2229 interface = PCI_INTERFACE(pa->pa_class);
2230 }
2231
2232 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2233 cp->name = PCIIDE_CHANNEL_NAME(channel);
2234 cp->wdc_channel.channel = channel;
2235 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2236
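/*
 * Channel 1 reuses channel 0's command queue, so requests for
 * both channels are serialized through a single queue.
 */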
2237 if (channel > 0) {
2238 cp->wdc_channel.ch_queue =
2239 sc->pciide_channels[0].wdc_channel.ch_queue;
2240 } else {
2241 cp->wdc_channel.ch_queue =
2242 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2243 }
2244 if (cp->wdc_channel.ch_queue == NULL) {
2245 		printf("%s %s channel: "
2246 		    "can't allocate memory for command queue\n",
2247 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2248 return;
2249 }
2250
2251 printf("%s: %s channel %s to %s mode\n",
2252 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2253 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2254 "configured" : "wired",
2255 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2256 "native-PCI" : "compatibility");
2257
2258 /*
2259 * with a CMD PCI64x, if we get here, the first channel is enabled:
2260 * there's no way to disable the first channel without disabling
2261 * the whole device
2262 */
2263 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2264 printf("%s: %s channel ignored (disabled)\n",
2265 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2266 return;
2267 }
2268
2269 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2270 if (cp->hw_ok == 0)
2271 return;
2272 if (channel == 1) {
2273 if (pciide_chan_candisable(cp)) {
2274 ctrl &= ~CMD_CTRL_2PORT;
2275 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2276 CMD_CTRL, ctrl);
2277 }
2278 }
2279 pciide_map_compat_intr(pa, cp, channel, interface);
2280 }
2281
2282 int
2283 cmd_pci_intr(arg)
2284 void *arg;
2285 {
2286 struct pciide_softc *sc = arg;
2287 struct pciide_channel *cp;
2288 struct channel_softc *wdc_cp;
2289 int i, rv, crv;
2290 u_int32_t priirq, secirq;
2291
2292 rv = 0;
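/*
 * The primary channel's interrupt status lives in CMD_CONF and the
 * secondary's in CMD_ARTTIM23; check the bit matching each channel
 * below.
 */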
2293 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2294 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2295 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2296 cp = &sc->pciide_channels[i];
2297 wdc_cp = &cp->wdc_channel;
2298 		/* If a compat channel, skip. */
2299 if (cp->compat)
2300 continue;
2301 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2302 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2303 crv = wdcintr(wdc_cp);
2304 if (crv == 0)
2305 printf("%s:%d: bogus intr\n",
2306 sc->sc_wdcdev.sc_dev.dv_xname, i);
2307 else
2308 rv = 1;
2309 }
2310 }
2311 return rv;
2312 }
2313
2314 void
2315 cmd_chip_map(sc, pa)
2316 struct pciide_softc *sc;
2317 struct pci_attach_args *pa;
2318 {
2319 int channel;
2320
2321 /*
2322 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2323 	 * and the base address registers can be disabled at the
2324 	 * hardware level. In this case, the device is wired
2325 * in compat mode and its first channel is always enabled,
2326 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2327 * In fact, it seems that the first channel of the CMD PCI0640
2328 * can't be disabled.
2329 */
2330
2331 #ifdef PCIIDE_CMD064x_DISABLE
2332 if (pciide_chipen(sc, pa) == 0)
2333 return;
2334 #endif
2335
2336 printf("%s: hardware does not support DMA\n",
2337 sc->sc_wdcdev.sc_dev.dv_xname);
2338 sc->sc_dma_ok = 0;
2339
2340 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2341 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2342 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2343
2344 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2345 cmd_channel_map(pa, sc, channel);
2346 }
2347 }
2348
2349 void
2350 cmd0643_9_chip_map(sc, pa)
2351 struct pciide_softc *sc;
2352 struct pci_attach_args *pa;
2353 {
2354 struct pciide_channel *cp;
2355 int channel;
2356 int rev = PCI_REVISION(
2357 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2358
2359 /*
2360 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2361 	 * and the base address registers can be disabled at the
2362 	 * hardware level. In this case, the device is wired
2363 * in compat mode and its first channel is always enabled,
2364 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2365 * In fact, it seems that the first channel of the CMD PCI0640
2366 * can't be disabled.
2367 */
2368
2369 #ifdef PCIIDE_CMD064x_DISABLE
2370 if (pciide_chipen(sc, pa) == 0)
2371 return;
2372 #endif
2373 printf("%s: bus-master DMA support present",
2374 sc->sc_wdcdev.sc_dev.dv_xname);
2375 pciide_mapreg_dma(sc, pa);
2376 printf("\n");
2377 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2378 WDC_CAPABILITY_MODE;
2379 if (sc->sc_dma_ok) {
2380 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2381 switch (sc->sc_pp->ide_product) {
2382 case PCI_PRODUCT_CMDTECH_649:
2383 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2384 sc->sc_wdcdev.UDMA_cap = 5;
2385 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2386 break;
2387 case PCI_PRODUCT_CMDTECH_648:
2388 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2389 sc->sc_wdcdev.UDMA_cap = 4;
2390 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2391 break;
2392 case PCI_PRODUCT_CMDTECH_646:
2393 if (rev >= CMD0646U2_REV) {
2394 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2395 sc->sc_wdcdev.UDMA_cap = 2;
2396 } else if (rev >= CMD0646U_REV) {
2397 /*
2398 * Linux's driver claims that the 646U is broken
2399 * with UDMA. Only enable it if we know what we're
2400 			 * doing.
2401 */
2402 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2403 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2404 sc->sc_wdcdev.UDMA_cap = 2;
2405 #endif
2406 			/* explicitly disable UDMA */
2407 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2408 CMD_UDMATIM(0), 0);
2409 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2410 CMD_UDMATIM(1), 0);
2411 }
2412 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2413 break;
2414 default:
2415 sc->sc_wdcdev.irqack = pciide_irqack;
2416 }
2417 }
2418
2419 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2420 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2421 sc->sc_wdcdev.PIO_cap = 4;
2422 sc->sc_wdcdev.DMA_cap = 2;
2423 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2424
2425 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2426 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2427 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2428 DEBUG_PROBE);
2429
2430 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2431 cp = &sc->pciide_channels[channel];
2432 cmd_channel_map(pa, sc, channel);
2433 if (cp->hw_ok == 0)
2434 continue;
2435 cmd0643_9_setup_channel(&cp->wdc_channel);
2436 }
2437 /*
2438 * note - this also makes sure we clear the irq disable and reset
2439 * bits
2440 */
2441 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2442 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2443 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2444 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2445 DEBUG_PROBE);
2446 }
2447
2448 void
2449 cmd0643_9_setup_channel(chp)
2450 struct channel_softc *chp;
2451 {
2452 struct ata_drive_datas *drvp;
2453 u_int8_t tim;
2454 u_int32_t idedma_ctl, udma_reg;
2455 int drive;
2456 struct pciide_channel *cp = (struct pciide_channel*)chp;
2457 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2458
2459 idedma_ctl = 0;
2460 /* setup DMA if needed */
2461 pciide_channel_dma_setup(cp);
2462
2463 for (drive = 0; drive < 2; drive++) {
2464 drvp = &chp->ch_drive[drive];
2465 /* If no drive, skip */
2466 if ((drvp->drive_flags & DRIVE) == 0)
2467 continue;
2468 /* add timing values, setup DMA if needed */
2469 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2470 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2471 if (drvp->drive_flags & DRIVE_UDMA) {
2472 /* UltraDMA on a 646U2, 0648 or 0649 */
2473 drvp->drive_flags &= ~DRIVE_DMA;
2474 udma_reg = pciide_pci_read(sc->sc_pc,
2475 sc->sc_tag, CMD_UDMATIM(chp->channel));
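/*
 * Modes above UDMA2 require the 80-wire bit in CMD_BICSR for
 * this channel; otherwise clamp to Ultra/33.
 */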
2476 if (drvp->UDMA_mode > 2 &&
2477 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2478 CMD_BICSR) &
2479 CMD_BICSR_80(chp->channel)) == 0)
2480 drvp->UDMA_mode = 2;
2481 if (drvp->UDMA_mode > 2)
2482 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2483 else if (sc->sc_wdcdev.UDMA_cap > 2)
2484 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2485 udma_reg |= CMD_UDMATIM_UDMA(drive);
2486 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2487 CMD_UDMATIM_TIM_OFF(drive));
2488 udma_reg |=
2489 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2490 CMD_UDMATIM_TIM_OFF(drive));
2491 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2492 CMD_UDMATIM(chp->channel), udma_reg);
2493 } else {
2494 /*
2495 * use Multiword DMA.
2496 * Timings will be used for both PIO and DMA,
2497 				 * so adjust DMA mode if needed.
2498 				 * If we have a 0646U2/8/9, turn off UDMA.
2499 */
2500 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2501 udma_reg = pciide_pci_read(sc->sc_pc,
2502 sc->sc_tag,
2503 CMD_UDMATIM(chp->channel));
2504 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2505 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2506 CMD_UDMATIM(chp->channel),
2507 udma_reg);
2508 }
2509 if (drvp->PIO_mode >= 3 &&
2510 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2511 drvp->DMA_mode = drvp->PIO_mode - 2;
2512 }
2513 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2514 }
2515 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2516 }
2517 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2518 CMD_DATA_TIM(chp->channel, drive), tim);
2519 }
2520 if (idedma_ctl != 0) {
2521 /* Add software bits in status register */
2522 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2523 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2524 idedma_ctl);
2525 }
2526 pciide_print_modes(cp);
2527 }
2528
2529 void
2530 cmd646_9_irqack(chp)
2531 struct channel_softc *chp;
2532 {
2533 u_int32_t priirq, secirq;
2534 struct pciide_channel *cp = (struct pciide_channel*)chp;
2535 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2536
2537 if (chp->channel == 0) {
2538 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2539 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2540 } else {
2541 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2542 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2543 }
2544 pciide_irqack(chp);
2545 }
2546
2547 void
2548 cy693_chip_map(sc, pa)
2549 struct pciide_softc *sc;
2550 struct pci_attach_args *pa;
2551 {
2552 struct pciide_channel *cp;
2553 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2554 bus_size_t cmdsize, ctlsize;
2555
2556 if (pciide_chipen(sc, pa) == 0)
2557 return;
2558 /*
2559 * this chip has 2 PCI IDE functions, one for primary and one for
2560 * secondary. So we need to call pciide_mapregs_compat() with
2561 * the real channel
2562 */
2563 if (pa->pa_function == 1) {
2564 sc->sc_cy_compatchan = 0;
2565 } else if (pa->pa_function == 2) {
2566 sc->sc_cy_compatchan = 1;
2567 } else {
2568 printf("%s: unexpected PCI function %d\n",
2569 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2570 return;
2571 }
2572 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2573 printf("%s: bus-master DMA support present",
2574 sc->sc_wdcdev.sc_dev.dv_xname);
2575 pciide_mapreg_dma(sc, pa);
2576 } else {
2577 printf("%s: hardware does not support DMA",
2578 sc->sc_wdcdev.sc_dev.dv_xname);
2579 sc->sc_dma_ok = 0;
2580 }
2581 printf("\n");
2582
2583 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2584 if (sc->sc_cy_handle == NULL) {
2585 printf("%s: unable to map hyperCache control registers\n",
2586 sc->sc_wdcdev.sc_dev.dv_xname);
2587 sc->sc_dma_ok = 0;
2588 }
2589
2590 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2591 WDC_CAPABILITY_MODE;
2592 if (sc->sc_dma_ok) {
2593 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2594 sc->sc_wdcdev.irqack = pciide_irqack;
2595 }
2596 sc->sc_wdcdev.PIO_cap = 4;
2597 sc->sc_wdcdev.DMA_cap = 2;
2598 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2599
2600 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2601 sc->sc_wdcdev.nchannels = 1;
2602
2603 /* Only one channel for this chip; if we are here it's enabled */
2604 cp = &sc->pciide_channels[0];
2605 sc->wdc_chanarray[0] = &cp->wdc_channel;
2606 cp->name = PCIIDE_CHANNEL_NAME(0);
2607 cp->wdc_channel.channel = 0;
2608 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2609 cp->wdc_channel.ch_queue =
2610 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2611 if (cp->wdc_channel.ch_queue == NULL) {
2612 		printf("%s primary channel: "
2613 		    "can't allocate memory for command queue\n",
2614 sc->sc_wdcdev.sc_dev.dv_xname);
2615 return;
2616 }
2617 printf("%s: primary channel %s to ",
2618 sc->sc_wdcdev.sc_dev.dv_xname,
2619 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2620 "configured" : "wired");
2621 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2622 printf("native-PCI");
2623 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2624 pciide_pci_intr);
2625 } else {
2626 printf("compatibility");
2627 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2628 &cmdsize, &ctlsize);
2629 }
2630 printf(" mode\n");
2631 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2632 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2633 wdcattach(&cp->wdc_channel);
2634 if (pciide_chan_candisable(cp)) {
2635 pci_conf_write(sc->sc_pc, sc->sc_tag,
2636 PCI_COMMAND_STATUS_REG, 0);
2637 }
2638 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2639 if (cp->hw_ok == 0)
2640 return;
2641 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2642 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2643 cy693_setup_channel(&cp->wdc_channel);
2644 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2645 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2646 }
2647
2648 void
2649 cy693_setup_channel(chp)
2650 struct channel_softc *chp;
2651 {
2652 struct ata_drive_datas *drvp;
2653 int drive;
2654 u_int32_t cy_cmd_ctrl;
2655 u_int32_t idedma_ctl;
2656 struct pciide_channel *cp = (struct pciide_channel*)chp;
2657 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2658 int dma_mode = -1;
2659
2660 cy_cmd_ctrl = idedma_ctl = 0;
2661
2662 /* setup DMA if needed */
2663 pciide_channel_dma_setup(cp);
2664
2665 for (drive = 0; drive < 2; drive++) {
2666 drvp = &chp->ch_drive[drive];
2667 /* If no drive, skip */
2668 if ((drvp->drive_flags & DRIVE) == 0)
2669 continue;
2670 /* add timing values, setup DMA if needed */
2671 if (drvp->drive_flags & DRIVE_DMA) {
2672 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2673 /* use Multiword DMA */
2674 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2675 dma_mode = drvp->DMA_mode;
2676 }
2677 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2678 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2679 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2680 CY_CMD_CTRL_IOW_REC_OFF(drive));
2681 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2682 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2683 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2684 CY_CMD_CTRL_IOR_REC_OFF(drive));
2685 }
2686 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
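/*
 * The DMA mode setting is per-channel, so both drives are given
 * the slowest multi-word DMA mode found above.
 */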
2687 chp->ch_drive[0].DMA_mode = dma_mode;
2688 chp->ch_drive[1].DMA_mode = dma_mode;
2689
2690 if (dma_mode == -1)
2691 dma_mode = 0;
2692
2693 if (sc->sc_cy_handle != NULL) {
2694 /* Note: `multiple' is implied. */
2695 cy82c693_write(sc->sc_cy_handle,
2696 (sc->sc_cy_compatchan == 0) ?
2697 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2698 }
2699
2700 pciide_print_modes(cp);
2701
2702 if (idedma_ctl != 0) {
2703 /* Add software bits in status register */
2704 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2705 IDEDMA_CTL, idedma_ctl);
2706 }
2707 }
2708
2709 void
2710 sis_chip_map(sc, pa)
2711 struct pciide_softc *sc;
2712 struct pci_attach_args *pa;
2713 {
2714 struct pciide_channel *cp;
2715 int channel;
2716 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2717 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2718 pcireg_t rev = PCI_REVISION(pa->pa_class);
2719 bus_size_t cmdsize, ctlsize;
2720 pcitag_t pchb_tag;
2721 pcireg_t pchb_id, pchb_class;
2722
2723 if (pciide_chipen(sc, pa) == 0)
2724 return;
2725 printf("%s: bus-master DMA support present",
2726 sc->sc_wdcdev.sc_dev.dv_xname);
2727 pciide_mapreg_dma(sc, pa);
2728 printf("\n");
2729
2730 /* get a PCI tag for the host bridge (function 0 of the same device) */
2731 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2732 	/* and read ID and rev of the host bridge */
2733 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2734 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2735
2736 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2737 WDC_CAPABILITY_MODE;
2738 if (sc->sc_dma_ok) {
2739 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2740 sc->sc_wdcdev.irqack = pciide_irqack;
2741 /*
2742 		 * controllers associated with a rev 0x2 SiS 530 host-to-PCI bridge
2743 * have problems with UDMA (info provided by Christos)
2744 */
2745 if (rev >= 0xd0 &&
2746 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2747 PCI_REVISION(pchb_class) >= 0x03))
2748 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2749 }
2750
2751 sc->sc_wdcdev.PIO_cap = 4;
2752 sc->sc_wdcdev.DMA_cap = 2;
2753 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2754 sc->sc_wdcdev.UDMA_cap = 2;
2755 sc->sc_wdcdev.set_modes = sis_setup_channel;
2756
2757 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2758 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2759
2760 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2761 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2762 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2763
2764 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2765 cp = &sc->pciide_channels[channel];
2766 if (pciide_chansetup(sc, channel, interface) == 0)
2767 continue;
2768 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2769 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2770 printf("%s: %s channel ignored (disabled)\n",
2771 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2772 continue;
2773 }
2774 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2775 pciide_pci_intr);
2776 if (cp->hw_ok == 0)
2777 continue;
2778 if (pciide_chan_candisable(cp)) {
2779 if (channel == 0)
2780 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2781 else
2782 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2783 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2784 sis_ctr0);
2785 }
2786 pciide_map_compat_intr(pa, cp, channel, interface);
2787 if (cp->hw_ok == 0)
2788 continue;
2789 sis_setup_channel(&cp->wdc_channel);
2790 }
2791 }
2792
2793 void
2794 sis_setup_channel(chp)
2795 struct channel_softc *chp;
2796 {
2797 struct ata_drive_datas *drvp;
2798 int drive;
2799 u_int32_t sis_tim;
2800 u_int32_t idedma_ctl;
2801 struct pciide_channel *cp = (struct pciide_channel*)chp;
2802 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2803
2804 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2805 "channel %d 0x%x\n", chp->channel,
2806 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2807 DEBUG_PROBE);
2808 sis_tim = 0;
2809 idedma_ctl = 0;
2810 /* setup DMA if needed */
2811 pciide_channel_dma_setup(cp);
2812
2813 for (drive = 0; drive < 2; drive++) {
2814 drvp = &chp->ch_drive[drive];
2815 /* If no drive, skip */
2816 if ((drvp->drive_flags & DRIVE) == 0)
2817 continue;
2818 /* add timing values, setup DMA if needed */
2819 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2820 (drvp->drive_flags & DRIVE_UDMA) == 0)
2821 goto pio;
2822
2823 if (drvp->drive_flags & DRIVE_UDMA) {
2824 /* use Ultra/DMA */
2825 drvp->drive_flags &= ~DRIVE_DMA;
2826 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2827 SIS_TIM_UDMA_TIME_OFF(drive);
2828 sis_tim |= SIS_TIM_UDMA_EN(drive);
2829 } else {
2830 /*
2831 * use Multiword DMA
2832 * Timings will be used for both PIO and DMA,
2833 * so adjust DMA mode if needed
2834 */
2835 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2836 drvp->PIO_mode = drvp->DMA_mode + 2;
2837 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2838 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2839 drvp->PIO_mode - 2 : 0;
2840 if (drvp->DMA_mode == 0)
2841 drvp->PIO_mode = 0;
2842 }
2843 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2844 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2845 SIS_TIM_ACT_OFF(drive);
2846 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2847 SIS_TIM_REC_OFF(drive);
2848 }
2849 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2850 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2851 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2852 if (idedma_ctl != 0) {
2853 /* Add software bits in status register */
2854 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2855 IDEDMA_CTL, idedma_ctl);
2856 }
2857 pciide_print_modes(cp);
2858 }
2859
2860 void
2861 acer_chip_map(sc, pa)
2862 struct pciide_softc *sc;
2863 struct pci_attach_args *pa;
2864 {
2865 struct pciide_channel *cp;
2866 int channel;
2867 pcireg_t cr, interface;
2868 bus_size_t cmdsize, ctlsize;
2869 pcireg_t rev = PCI_REVISION(pa->pa_class);
2870
2871 if (pciide_chipen(sc, pa) == 0)
2872 return;
2873 printf("%s: bus-master DMA support present",
2874 sc->sc_wdcdev.sc_dev.dv_xname);
2875 pciide_mapreg_dma(sc, pa);
2876 printf("\n");
2877 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2878 WDC_CAPABILITY_MODE;
2879 if (sc->sc_dma_ok) {
2880 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2881 if (rev >= 0x20)
2882 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2883 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2884 sc->sc_wdcdev.irqack = pciide_irqack;
2885 }
2886
2887 sc->sc_wdcdev.PIO_cap = 4;
2888 sc->sc_wdcdev.DMA_cap = 2;
2889 sc->sc_wdcdev.UDMA_cap = 2;
2890 sc->sc_wdcdev.set_modes = acer_setup_channel;
2891 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2892 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2893
2894 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2895 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2896 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2897
2898 /* Enable "microsoft register bits" R/W. */
2899 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2900 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2901 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2902 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2903 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2904 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2905 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2906 ~ACER_CHANSTATUSREGS_RO);
2907 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2908 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2909 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2910 /* Don't use cr, re-read the real register content instead */
2911 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2912 PCI_CLASS_REG));
2913
2914 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2915 cp = &sc->pciide_channels[channel];
2916 if (pciide_chansetup(sc, channel, interface) == 0)
2917 continue;
2918 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2919 printf("%s: %s channel ignored (disabled)\n",
2920 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2921 continue;
2922 }
2923 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2924 acer_pci_intr);
2925 if (cp->hw_ok == 0)
2926 continue;
2927 if (pciide_chan_candisable(cp)) {
2928 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2929 pci_conf_write(sc->sc_pc, sc->sc_tag,
2930 PCI_CLASS_REG, cr);
2931 }
2932 pciide_map_compat_intr(pa, cp, channel, interface);
2933 acer_setup_channel(&cp->wdc_channel);
2934 }
2935 }
2936
2937 void
2938 acer_setup_channel(chp)
2939 struct channel_softc *chp;
2940 {
2941 struct ata_drive_datas *drvp;
2942 int drive;
2943 u_int32_t acer_fifo_udma;
2944 u_int32_t idedma_ctl;
2945 struct pciide_channel *cp = (struct pciide_channel*)chp;
2946 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2947
2948 idedma_ctl = 0;
2949 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2950 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2951 acer_fifo_udma), DEBUG_PROBE);
2952 /* setup DMA if needed */
2953 pciide_channel_dma_setup(cp);
2954
2955 for (drive = 0; drive < 2; drive++) {
2956 drvp = &chp->ch_drive[drive];
2957 /* If no drive, skip */
2958 if ((drvp->drive_flags & DRIVE) == 0)
2959 continue;
2960 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2961 "channel %d drive %d 0x%x\n", chp->channel, drive,
2962 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2963 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2964 /* clear FIFO/DMA mode */
2965 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2966 ACER_UDMA_EN(chp->channel, drive) |
2967 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2968
2969 /* add timing values, setup DMA if needed */
2970 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2971 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2972 acer_fifo_udma |=
2973 ACER_FTH_OPL(chp->channel, drive, 0x1);
2974 goto pio;
2975 }
2976
2977 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2978 if (drvp->drive_flags & DRIVE_UDMA) {
2979 /* use Ultra/DMA */
2980 drvp->drive_flags &= ~DRIVE_DMA;
2981 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2982 acer_fifo_udma |=
2983 ACER_UDMA_TIM(chp->channel, drive,
2984 acer_udma[drvp->UDMA_mode]);
2985 } else {
2986 /*
2987 * use Multiword DMA
2988 * Timings will be used for both PIO and DMA,
2989 * so adjust DMA mode if needed
2990 */
2991 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2992 drvp->PIO_mode = drvp->DMA_mode + 2;
2993 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2994 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2995 drvp->PIO_mode - 2 : 0;
2996 if (drvp->DMA_mode == 0)
2997 drvp->PIO_mode = 0;
2998 }
2999 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3000 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3001 ACER_IDETIM(chp->channel, drive),
3002 acer_pio[drvp->PIO_mode]);
3003 }
3004 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3005 acer_fifo_udma), DEBUG_PROBE);
3006 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3007 if (idedma_ctl != 0) {
3008 /* Add software bits in status register */
3009 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3010 IDEDMA_CTL, idedma_ctl);
3011 }
3012 pciide_print_modes(cp);
3013 }
3014
3015 int
3016 acer_pci_intr(arg)
3017 void *arg;
3018 {
3019 struct pciide_softc *sc = arg;
3020 struct pciide_channel *cp;
3021 struct channel_softc *wdc_cp;
3022 int i, rv, crv;
3023 u_int32_t chids;
3024
3025 rv = 0;
3026 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3027 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3028 cp = &sc->pciide_channels[i];
3029 wdc_cp = &cp->wdc_channel;
3030 		/* If a compat channel, skip. */
3031 if (cp->compat)
3032 continue;
3033 if (chids & ACER_CHIDS_INT(i)) {
3034 crv = wdcintr(wdc_cp);
3035 if (crv == 0)
3036 printf("%s:%d: bogus intr\n",
3037 sc->sc_wdcdev.sc_dev.dv_xname, i);
3038 else
3039 rv = 1;
3040 }
3041 }
3042 return rv;
3043 }
3044
3045 void
3046 hpt_chip_map(sc, pa)
3047 struct pciide_softc *sc;
3048 struct pci_attach_args *pa;
3049 {
3050 struct pciide_channel *cp;
3051 int i, compatchan, revision;
3052 pcireg_t interface;
3053 bus_size_t cmdsize, ctlsize;
3054
3055 if (pciide_chipen(sc, pa) == 0)
3056 return;
3057 revision = PCI_REVISION(pa->pa_class);
3058 printf(": Triones/Highpoint ");
3059 if (revision == HPT370_REV)
3060 printf("HPT370 IDE Controller\n");
3061 else if (revision == HPT370A_REV)
3062 printf("HPT370A IDE Controller\n");
3063 else if (revision == HPT366_REV)
3064 printf("HPT366 IDE Controller\n");
3065 else
3066 printf("unknown HPT IDE controller rev %d\n", revision);
3067
3068 /*
3069 	 * When the chip is in native mode it identifies itself as a
3070 	 * 'misc mass storage' device. Fake the interface value in this case.
3071 */
3072 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3073 interface = PCI_INTERFACE(pa->pa_class);
3074 } else {
3075 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3076 PCIIDE_INTERFACE_PCI(0);
3077 if (revision == HPT370_REV || revision == HPT370A_REV)
3078 interface |= PCIIDE_INTERFACE_PCI(1);
3079 }
3080
3081 printf("%s: bus-master DMA support present",
3082 sc->sc_wdcdev.sc_dev.dv_xname);
3083 pciide_mapreg_dma(sc, pa);
3084 printf("\n");
3085 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3086 WDC_CAPABILITY_MODE;
3087 if (sc->sc_dma_ok) {
3088 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3089 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3090 sc->sc_wdcdev.irqack = pciide_irqack;
3091 }
3092 sc->sc_wdcdev.PIO_cap = 4;
3093 sc->sc_wdcdev.DMA_cap = 2;
3094
3095 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3096 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3097 if (revision == HPT366_REV) {
3098 sc->sc_wdcdev.UDMA_cap = 4;
3099 /*
3100 * The 366 has 2 PCI IDE functions, one for primary and one
3101 * for secondary. So we need to call pciide_mapregs_compat()
3102 * with the real channel
3103 */
3104 if (pa->pa_function == 0) {
3105 compatchan = 0;
3106 } else if (pa->pa_function == 1) {
3107 compatchan = 1;
3108 } else {
3109 printf("%s: unexpected PCI function %d\n",
3110 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3111 return;
3112 }
3113 sc->sc_wdcdev.nchannels = 1;
3114 } else {
3115 sc->sc_wdcdev.nchannels = 2;
3116 sc->sc_wdcdev.UDMA_cap = 5;
3117 }
3118 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3119 cp = &sc->pciide_channels[i];
3120 if (sc->sc_wdcdev.nchannels > 1) {
3121 compatchan = i;
3122 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3123 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3124 printf("%s: %s channel ignored (disabled)\n",
3125 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3126 continue;
3127 }
3128 }
3129 if (pciide_chansetup(sc, i, interface) == 0)
3130 continue;
3131 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3132 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3133 &ctlsize, hpt_pci_intr);
3134 } else {
3135 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3136 &cmdsize, &ctlsize);
3137 }
3138 if (cp->hw_ok == 0)
3139 return;
3140 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3141 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3142 wdcattach(&cp->wdc_channel);
3143 hpt_setup_channel(&cp->wdc_channel);
3144 }
3145 if (revision == HPT370_REV || revision == HPT370A_REV) {
3146 /*
3147 		 * The HPT370 and HPT370A have a bit to disable interrupts;
3148 		 * make sure to clear it.
3149 */
3150 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3151 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3152 ~HPT_CSEL_IRQDIS);
3153 }
3154 return;
3155 }
3156
3157 void
3158 hpt_setup_channel(chp)
3159 struct channel_softc *chp;
3160 {
3161 struct ata_drive_datas *drvp;
3162 int drive;
3163 int cable;
3164 u_int32_t before, after;
3165 u_int32_t idedma_ctl;
3166 struct pciide_channel *cp = (struct pciide_channel*)chp;
3167 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3168
3169 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3170
3171 /* setup DMA if needed */
3172 pciide_channel_dma_setup(cp);
3173
3174 idedma_ctl = 0;
3175
3176 /* Per drive settings */
3177 for (drive = 0; drive < 2; drive++) {
3178 drvp = &chp->ch_drive[drive];
3179 /* If no drive, skip */
3180 if ((drvp->drive_flags & DRIVE) == 0)
3181 continue;
3182 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3183 HPT_IDETIM(chp->channel, drive));
3184
3185 /* add timing values, setup DMA if needed */
3186 if (drvp->drive_flags & DRIVE_UDMA) {
3187 /* use Ultra/DMA */
3188 drvp->drive_flags &= ~DRIVE_DMA;
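/*
 * A set HPT_CSEL_CBLID bit for this channel means modes above
 * Ultra/33 can't be used (no 80-wire cable), so clamp to UDMA2.
 */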
3189 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3190 drvp->UDMA_mode > 2)
3191 drvp->UDMA_mode = 2;
3192 after = (sc->sc_wdcdev.nchannels == 2) ?
3193 hpt370_udma[drvp->UDMA_mode] :
3194 hpt366_udma[drvp->UDMA_mode];
3195 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3196 } else if (drvp->drive_flags & DRIVE_DMA) {
3197 /*
3198 * use Multiword DMA.
3199 * Timings will be used for both PIO and DMA, so adjust
3200 * DMA mode if needed
3201 */
3202 if (drvp->PIO_mode >= 3 &&
3203 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3204 drvp->DMA_mode = drvp->PIO_mode - 2;
3205 }
3206 after = (sc->sc_wdcdev.nchannels == 2) ?
3207 hpt370_dma[drvp->DMA_mode] :
3208 hpt366_dma[drvp->DMA_mode];
3209 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3210 } else {
3211 /* PIO only */
3212 after = (sc->sc_wdcdev.nchannels == 2) ?
3213 hpt370_pio[drvp->PIO_mode] :
3214 hpt366_pio[drvp->PIO_mode];
3215 }
3216 pci_conf_write(sc->sc_pc, sc->sc_tag,
3217 HPT_IDETIM(chp->channel, drive), after);
3218 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3219 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3220 after, before), DEBUG_PROBE);
3221 }
3222 if (idedma_ctl != 0) {
3223 /* Add software bits in status register */
3224 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3225 IDEDMA_CTL, idedma_ctl);
3226 }
3227 pciide_print_modes(cp);
3228 }
3229
3230 int
3231 hpt_pci_intr(arg)
3232 void *arg;
3233 {
3234 struct pciide_softc *sc = arg;
3235 struct pciide_channel *cp;
3236 struct channel_softc *wdc_cp;
3237 int rv = 0;
3238 int dmastat, i, crv;
3239
3240 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3241 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3242 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
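/*
 * Only service this channel if the bus-master status shows an
 * interrupt pending (INTR set) with no transfer still active
 * (ACT clear).
 */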
3243 		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3244 IDEDMA_CTL_INTR)
3245 continue;
3246 cp = &sc->pciide_channels[i];
3247 wdc_cp = &cp->wdc_channel;
3248 crv = wdcintr(wdc_cp);
3249 if (crv == 0) {
3250 printf("%s:%d: bogus intr\n",
3251 sc->sc_wdcdev.sc_dev.dv_xname, i);
3252 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3253 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3254 } else
3255 rv = 1;
3256 }
3257 return rv;
3258 }
3259
3260
3261 /* Macros to test product */
3262 #define PDC_IS_262(sc) \
3263 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3264 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3265 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3266 #define PDC_IS_265(sc) \
3267 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3268 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3269
3270 void
3271 pdc202xx_chip_map(sc, pa)
3272 struct pciide_softc *sc;
3273 struct pci_attach_args *pa;
3274 {
3275 struct pciide_channel *cp;
3276 int channel;
3277 pcireg_t interface, st, mode;
3278 bus_size_t cmdsize, ctlsize;
3279
3280 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3281 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3282 DEBUG_PROBE);
3283 if (pciide_chipen(sc, pa) == 0)
3284 return;
3285
3286 /* turn off RAID mode */
3287 st &= ~PDC2xx_STATE_IDERAID;
3288
3289 /*
3290 	 * can't rely on the PCI_CLASS_REG content if the chip was in RAID
3291 	 * mode. We have to fake the interface value.
3292 */
3293 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3294 if (st & PDC2xx_STATE_NATIVE)
3295 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3296
3297 printf("%s: bus-master DMA support present",
3298 sc->sc_wdcdev.sc_dev.dv_xname);
3299 pciide_mapreg_dma(sc, pa);
3300 printf("\n");
3301 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3302 WDC_CAPABILITY_MODE;
3303 if (sc->sc_dma_ok) {
3304 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3305 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3306 sc->sc_wdcdev.irqack = pciide_irqack;
3307 }
3308 sc->sc_wdcdev.PIO_cap = 4;
3309 sc->sc_wdcdev.DMA_cap = 2;
3310 if (PDC_IS_265(sc))
3311 sc->sc_wdcdev.UDMA_cap = 5;
3312 else if (PDC_IS_262(sc))
3313 sc->sc_wdcdev.UDMA_cap = 4;
3314 else
3315 sc->sc_wdcdev.UDMA_cap = 2;
3316 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3317 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3318 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3319
3320 /* setup failsafe defaults */
3321 mode = 0;
3322 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3323 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3324 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3325 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3326 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3327 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3328 "initial timings 0x%x, now 0x%x\n", channel,
3329 pci_conf_read(sc->sc_pc, sc->sc_tag,
3330 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3331 DEBUG_PROBE);
3332 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3333 mode | PDC2xx_TIM_IORDYp);
3334 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3335 "initial timings 0x%x, now 0x%x\n", channel,
3336 pci_conf_read(sc->sc_pc, sc->sc_tag,
3337 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3338 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3339 mode);
3340 }
3341
3342 mode = PDC2xx_SCR_DMA;
3343 if (PDC_IS_262(sc)) {
3344 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3345 } else {
3346 /* the BIOS set it up this way */
3347 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3348 }
3349 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3350 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3351 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3352 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3353 DEBUG_PROBE);
3354 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3355
3356 /* controller initial state register is OK even without BIOS */
3357 /* Set DMA mode to IDE DMA compatibility */
3358 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3359 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3360 DEBUG_PROBE);
3361 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3362 mode | 0x1);
3363 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3364 	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode), DEBUG_PROBE);
3365 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3366 mode | 0x1);
3367
3368 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3369 cp = &sc->pciide_channels[channel];
3370 if (pciide_chansetup(sc, channel, interface) == 0)
3371 continue;
3372 if ((st & (PDC_IS_262(sc) ?
3373 		    PDC262_STATE_EN(channel) : PDC246_STATE_EN(channel))) == 0) {
3374 printf("%s: %s channel ignored (disabled)\n",
3375 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3376 continue;
3377 }
3378 if (PDC_IS_265(sc))
3379 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3380 pdc20265_pci_intr);
3381 else
3382 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3383 pdc202xx_pci_intr);
3384 if (cp->hw_ok == 0)
3385 continue;
3386 if (pciide_chan_candisable(cp))
3387 st &= ~(PDC_IS_262(sc) ?
3388 			    PDC262_STATE_EN(channel) : PDC246_STATE_EN(channel));
3389 pciide_map_compat_intr(pa, cp, channel, interface);
3390 pdc202xx_setup_channel(&cp->wdc_channel);
3391 }
3392 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3393 DEBUG_PROBE);
3394 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3395 return;
3396 }
3397
3398 void
3399 pdc202xx_setup_channel(chp)
3400 struct channel_softc *chp;
3401 {
3402 struct ata_drive_datas *drvp;
3403 int drive;
3404 pcireg_t mode, st;
3405 u_int32_t idedma_ctl, scr, atapi;
3406 struct pciide_channel *cp = (struct pciide_channel*)chp;
3407 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3408 int channel = chp->channel;
3409
3410 /* setup DMA if needed */
3411 pciide_channel_dma_setup(cp);
3412
3413 idedma_ctl = 0;
3414 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3415 sc->sc_wdcdev.sc_dev.dv_xname,
3416 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3417 DEBUG_PROBE);
3418
3419 /* Per channel settings */
3420 if (PDC_IS_262(sc)) {
3421 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3422 PDC262_U66);
3423 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3424 		/* Trim UDMA mode */
3425 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3426 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3427 chp->ch_drive[0].UDMA_mode <= 2) ||
3428 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3429 chp->ch_drive[1].UDMA_mode <= 2)) {
3430 if (chp->ch_drive[0].UDMA_mode > 2)
3431 chp->ch_drive[0].UDMA_mode = 2;
3432 if (chp->ch_drive[1].UDMA_mode > 2)
3433 chp->ch_drive[1].UDMA_mode = 2;
3434 }
3435 /* Set U66 if needed */
3436 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3437 chp->ch_drive[0].UDMA_mode > 2) ||
3438 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3439 chp->ch_drive[1].UDMA_mode > 2))
3440 scr |= PDC262_U66_EN(channel);
3441 else
3442 scr &= ~PDC262_U66_EN(channel);
3443 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3444 PDC262_U66, scr);
3445 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3446 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3447 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3448 PDC262_ATAPI(channel))), DEBUG_PROBE);
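/*
 * With an ATAPI device on the channel, enable the per-channel
 * ATAPI UDMA bit unless one drive is doing UDMA while the other
 * does only multi-word DMA.
 */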
3449 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3450 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3451 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3452 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3453 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3454 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3455 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3456 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3457 atapi = 0;
3458 else
3459 atapi = PDC262_ATAPI_UDMA;
3460 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3461 PDC262_ATAPI(channel), atapi);
3462 }
3463 }
3464 for (drive = 0; drive < 2; drive++) {
3465 drvp = &chp->ch_drive[drive];
3466 /* If no drive, skip */
3467 if ((drvp->drive_flags & DRIVE) == 0)
3468 continue;
3469 mode = 0;
3470 if (drvp->drive_flags & DRIVE_UDMA) {
3471 /* use Ultra/DMA */
3472 drvp->drive_flags &= ~DRIVE_DMA;
3473 mode = PDC2xx_TIM_SET_MB(mode,
3474 pdc2xx_udma_mb[drvp->UDMA_mode]);
3475 mode = PDC2xx_TIM_SET_MC(mode,
3476 pdc2xx_udma_mc[drvp->UDMA_mode]);
3478 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3479 } else if (drvp->drive_flags & DRIVE_DMA) {
3480 mode = PDC2xx_TIM_SET_MB(mode,
3481 pdc2xx_dma_mb[drvp->DMA_mode]);
3482 mode = PDC2xx_TIM_SET_MC(mode,
3483 pdc2xx_dma_mc[drvp->DMA_mode]);
3484 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3485 } else {
3486 mode = PDC2xx_TIM_SET_MB(mode,
3487 pdc2xx_dma_mb[0]);
3488 mode = PDC2xx_TIM_SET_MC(mode,
3489 pdc2xx_dma_mc[0]);
3490 }
3491 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3492 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3493 if (drvp->drive_flags & DRIVE_ATA)
3494 mode |= PDC2xx_TIM_PRE;
3495 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3496 if (drvp->PIO_mode >= 3) {
3497 mode |= PDC2xx_TIM_IORDY;
3498 if (drive == 0)
3499 mode |= PDC2xx_TIM_IORDYp;
3500 }
3501 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3502 "timings 0x%x\n",
3503 sc->sc_wdcdev.sc_dev.dv_xname,
3504 chp->channel, drive, mode), DEBUG_PROBE);
3505 pci_conf_write(sc->sc_pc, sc->sc_tag,
3506 PDC2xx_TIM(chp->channel, drive), mode);
3507 }
3508 if (idedma_ctl != 0) {
3509 /* Add software bits in status register */
3510 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3511 IDEDMA_CTL, idedma_ctl);
3512 }
3513 pciide_print_modes(cp);
3514 }
3515
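/*
 * PDC202xx interrupt handler.  PDC2xx_SCR carries one interrupt bit per
 * channel (PDC2xx_SCR_INT(i)), so the register is read once and wdcintr()
 * is only called for channels that are flagged; compat channels get their
 * interrupts through the compat mapping and are skipped here.
 */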
3516 int
3517 pdc202xx_pci_intr(arg)
3518 void *arg;
3519 {
3520 struct pciide_softc *sc = arg;
3521 struct pciide_channel *cp;
3522 struct channel_softc *wdc_cp;
3523 int i, rv, crv;
3524 u_int32_t scr;
3525
3526 rv = 0;
3527 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3528 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3529 cp = &sc->pciide_channels[i];
3530 wdc_cp = &cp->wdc_channel;
3531 /* If a compat channel, skip. */
3532 if (cp->compat)
3533 continue;
3534 if (scr & PDC2xx_SCR_INT(i)) {
3535 crv = wdcintr(wdc_cp);
3536 if (crv == 0)
3537 printf("%s:%d: bogus intr (reg 0x%x)\n",
3538 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3539 else
3540 rv = 1;
3541 }
3542 }
3543 return rv;
3544 }
3545
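/*
 * Interrupt handler for the Ultra/100 family (pdc20265 and relatives):
 * instead of trusting PDC2xx_SCR (see the comment in the loop below), poll
 * IDEDMA_CTL_INTR in each channel's bus-master status register before
 * dispatching to wdcintr().
 */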
3546 int
3547 pdc20265_pci_intr(arg)
3548 void *arg;
3549 {
3550 struct pciide_softc *sc = arg;
3551 struct pciide_channel *cp;
3552 struct channel_softc *wdc_cp;
3553 int i, rv, crv;
3554 u_int32_t dmastat;
3555
3556 rv = 0;
3557 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3558 cp = &sc->pciide_channels[i];
3559 wdc_cp = &cp->wdc_channel;
3560 /* If a compat channel, skip. */
3561 if (cp->compat)
3562 continue;
3563 /*
3564 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
3565 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
3566 * So use it instead (requires 2 reg reads instead of 1,
3567 * but we can't do it another way).
3568 */
3569 dmastat = bus_space_read_1(sc->sc_dma_iot,
3570 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3571 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3572 continue;
3573 crv = wdcintr(wdc_cp);
3574 if (crv == 0)
3575 printf("%s:%d: bogus intr\n",
3576 sc->sc_wdcdev.sc_dev.dv_xname, i);
3577 else
3578 rv = 1;
3579 }
3580 return rv;
3581 }
3582
3583 void
3584 opti_chip_map(sc, pa)
3585 struct pciide_softc *sc;
3586 struct pci_attach_args *pa;
3587 {
3588 struct pciide_channel *cp;
3589 bus_size_t cmdsize, ctlsize;
3590 pcireg_t interface;
3591 u_int8_t init_ctrl;
3592 int channel;
3593
3594 if (pciide_chipen(sc, pa) == 0)
3595 return;
3596 printf("%s: bus-master DMA support present",
3597 sc->sc_wdcdev.sc_dev.dv_xname);
3598
3599 /*
3600 * XXXSCW:
3601 * There seem to be a couple of buggy revisions/implementations
3602 * of the OPTi pciide chipset. This kludge seems to fix one of
3603 * the reported problems (PR/11644) but still fails for the
3604 * other (PR/13151), although the latter may be due to other
3605 * issues too...
3606 */
3607 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3608 printf(" but disabled due to chip rev. <= 0x12");
3609 sc->sc_dma_ok = 0;
3610 sc->sc_wdcdev.cap = 0;
3611 } else {
3612 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3613 pciide_mapreg_dma(sc, pa);
3614 }
3615 printf("\n");
3616
3617 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3618 sc->sc_wdcdev.PIO_cap = 4;
3619 if (sc->sc_dma_ok) {
3620 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3621 sc->sc_wdcdev.irqack = pciide_irqack;
3622 sc->sc_wdcdev.DMA_cap = 2;
3623 }
3624 sc->sc_wdcdev.set_modes = opti_setup_channel;
3625
3626 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3627 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3628
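	/*
	 * Fetch the OPTi INIT_CONTROL register once; its CH2_DISABLE bit
	 * is checked in the loop below so that a disabled secondary
	 * channel is ignored rather than mapped.
	 */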
3629 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3630 OPTI_REG_INIT_CONTROL);
3631
3632 interface = PCI_INTERFACE(pa->pa_class);
3633
3634 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3635 cp = &sc->pciide_channels[channel];
3636 if (pciide_chansetup(sc, channel, interface) == 0)
3637 continue;
3638 if (channel == 1 &&
3639 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3640 printf("%s: %s channel ignored (disabled)\n",
3641 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3642 continue;
3643 }
3644 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3645 pciide_pci_intr);
3646 if (cp->hw_ok == 0)
3647 continue;
3648 pciide_map_compat_intr(pa, cp, channel, interface);
3649 if (cp->hw_ok == 0)
3650 continue;
3651 opti_setup_channel(&cp->wdc_channel);
3652 }
3653 }
3654
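/*
 * Set up the timings for one OPTi channel.  The opti_tim_* tables are
 * indexed by a per-drive mode index (and, except for opti_tim_em, by the
 * PCI bus speed strap): the first loop below picks that index, the second
 * writes the resulting address setup, pulse width/recovery and
 * enhanced-mode values.  The controller is kept in OPTI_CONTROL_DISABLE
 * while the registers are rewritten and re-enabled once both drives are
 * programmed.
 */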
3655 void
3656 opti_setup_channel(chp)
3657 struct channel_softc *chp;
3658 {
3659 struct ata_drive_datas *drvp;
3660 struct pciide_channel *cp = (struct pciide_channel*)chp;
3661 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3662 int drive, spd;
3663 int mode[2];
3664 u_int8_t rv, mr;
3665
3666 /*
3667 * The `Delay' and `Address Setup Time' fields of the
3668 * Miscellaneous Register are always zero initially.
3669 */
3670 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3671 mr &= ~(OPTI_MISC_DELAY_MASK |
3672 OPTI_MISC_ADDR_SETUP_MASK |
3673 OPTI_MISC_INDEX_MASK);
3674
3675 /* Prime the control register before setting timing values */
3676 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3677
3678 /* Determine the clock rate of the PCI bus the chip is attached to */
3679 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3680 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3681
3682 /* setup DMA if needed */
3683 pciide_channel_dma_setup(cp);
3684
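	/*
	 * mode[drive] indexes the opti_tim_* tables: the PIO mode number
	 * is used directly for PIO-only drives, DMA_mode + 5 for DMA
	 * drives (so, with DMA_cap == 2, indices 5..7 cover multiword
	 * DMA 0-2), and -1 marks an absent drive.
	 */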
3685 for (drive = 0; drive < 2; drive++) {
3686 drvp = &chp->ch_drive[drive];
3687 /* If no drive, skip */
3688 if ((drvp->drive_flags & DRIVE) == 0) {
3689 mode[drive] = -1;
3690 continue;
3691 }
3692
3693 if ((drvp->drive_flags & DRIVE_DMA)) {
3694 /*
3695 * Timings will be used for both PIO and DMA,
3696 * so adjust DMA mode if needed
3697 */
3698 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3699 drvp->PIO_mode = drvp->DMA_mode + 2;
3700 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3701 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3702 drvp->PIO_mode - 2 : 0;
3703 if (drvp->DMA_mode == 0)
3704 drvp->PIO_mode = 0;
3705
3706 mode[drive] = drvp->DMA_mode + 5;
3707 } else
3708 mode[drive] = drvp->PIO_mode;
3709
3710 if (drive && mode[0] >= 0 &&
3711 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3712 /*
3713 * Can't have two drives using different values
3714 * for `Address Setup Time'.
3715 * Slow down the faster drive to compensate.
3716 */
3717 int d = (opti_tim_as[spd][mode[0]] >
3718 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3719
3720 mode[d] = mode[1-d];
3721 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3722 chp->ch_drive[d].DMA_mode = 0;
3723 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3724 }
3725 }
3726
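	/*
	 * Now program the hardware: address setup goes into the MISC
	 * register (selected per drive through OPTI_MISC_INDEX), pulse
	 * width and recovery time into both the read and write cycle
	 * timing registers, and the enhanced-mode value (opti_tim_em)
	 * into this drive's field of the enhanced mode register.
	 */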
3727 for (drive = 0; drive < 2; drive++) {
3728 int m;
3729 if ((m = mode[drive]) < 0)
3730 continue;
3731
3732 /* Set the Address Setup Time and select appropriate index */
3733 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3734 rv |= OPTI_MISC_INDEX(drive);
3735 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3736
3737 /* Set the pulse width and recovery timing parameters */
3738 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3739 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3740 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3741 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3742
3743 /* Set the Enhanced Mode register appropriately */
3744 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3745 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3746 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3747 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3748 }
3749
3750 /* Finally, enable the timings */
3751 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3752
3753 pciide_print_modes(cp);
3754 }
3755
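/*
 * The ATP850 differs from the ATP860-class chips both in capabilities
 * (UDMA mode 2 vs. mode 4) and in register layout (per-channel
 * ATP850_IDETIME/ATP850_UDMA vs. the shared ATP860_IDETIME/ATP860_UDMA),
 * so the acard code below keys most decisions off this macro.
 */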
3756 #define ACARD_IS_850(sc) \
3757 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
3758
3759 void
3760 acard_chip_map(sc, pa)
3761 struct pciide_softc *sc;
3762 struct pci_attach_args *pa;
3763 {
3764 struct pciide_channel *cp;
3765 int i;
3766 pcireg_t interface;
3767 bus_size_t cmdsize, ctlsize;
3768
3769 if (pciide_chipen(sc, pa) == 0)
3770 return;
3771
3772 /*
3773 * When the chip is in native mode it identifies itself as
3774 * 'misc mass storage', so fake the interface in that case.
3775 */
3776 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3777 interface = PCI_INTERFACE(pa->pa_class);
3778 } else {
3779 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3780 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3781 }
3782
3783 printf("%s: bus-master DMA support present",
3784 sc->sc_wdcdev.sc_dev.dv_xname);
3785 pciide_mapreg_dma(sc, pa);
3786 printf("\n");
3787 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3788 WDC_CAPABILITY_MODE;
3789
3790 if (sc->sc_dma_ok) {
3791 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3792 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3793 sc->sc_wdcdev.irqack = pciide_irqack;
3794 }
3795 sc->sc_wdcdev.PIO_cap = 4;
3796 sc->sc_wdcdev.DMA_cap = 2;
3797 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
3798
3799 sc->sc_wdcdev.set_modes = acard_setup_channel;
3800 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3801 sc->sc_wdcdev.nchannels = 2;
3802
3803 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3804 cp = &sc->pciide_channels[i];
3805 if (pciide_chansetup(sc, i, interface) == 0)
3806 continue;
3807 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3808 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3809 &ctlsize, pciide_pci_intr);
3810 } else {
3811 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
3812 &cmdsize, &ctlsize);
3813 }
3814 if (cp->hw_ok == 0)
3815 return;
3816 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3817 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3818 wdcattach(&cp->wdc_channel);
3819 acard_setup_channel(&cp->wdc_channel);
3820 }
3821 if (!ACARD_IS_850(sc)) {
3822 u_int32_t reg;
3823 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
3824 reg &= ~ATP860_CTRL_INT;
3825 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
3826 }
3827 }
3828
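/*
 * Set up the timings for one ACARD channel: active/recovery timings are
 * accumulated into `idetime' and UDMA mode bits into `udma_mode' for both
 * drives, and the registers are written back in one go at the end (with
 * separate ATP850 and ATP860 paths because of the different layouts).  On
 * ATP860-class chips UDMA is first capped at mode 2 when ATP860_CTRL_80P
 * is set, which presumably indicates that no 80-wire cable was detected.
 */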
3829 void
3830 acard_setup_channel(chp)
3831 struct channel_softc *chp;
3832 {
3833 struct ata_drive_datas *drvp;
3834 struct pciide_channel *cp = (struct pciide_channel*)chp;
3835 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3836 int channel = chp->channel;
3837 int drive;
3838 u_int32_t idetime, udma_mode;
3839 u_int32_t idedma_ctl;
3840
3841 /* setup DMA if needed */
3842 pciide_channel_dma_setup(cp);
3843
3844 if (ACARD_IS_850(sc)) {
3845 idetime = 0;
3846 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
3847 udma_mode &= ~ATP850_UDMA_MASK(channel);
3848 } else {
3849 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
3850 idetime &= ~ATP860_SETTIME_MASK(channel);
3851 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
3852 udma_mode &= ~ATP860_UDMA_MASK(channel);
3853
3854 /* check for an 80-pin cable */
3855 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
3856 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
3857 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
3858 & ATP860_CTRL_80P(chp->channel)) {
3859 if (chp->ch_drive[0].UDMA_mode > 2)
3860 chp->ch_drive[0].UDMA_mode = 2;
3861 if (chp->ch_drive[1].UDMA_mode > 2)
3862 chp->ch_drive[1].UDMA_mode = 2;
3863 }
3864 }
3865 }
3866
3867 idedma_ctl = 0;
3868
3869 /* Per drive settings */
3870 for (drive = 0; drive < 2; drive++) {
3871 drvp = &chp->ch_drive[drive];
3872 /* If no drive, skip */
3873 if ((drvp->drive_flags & DRIVE) == 0)
3874 continue;
3875 /* add timing values, setup DMA if needed */
3876 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
3877 (drvp->drive_flags & DRIVE_UDMA)) {
3878 /* use Ultra/DMA */
3879 if (ACARD_IS_850(sc)) {
3880 idetime |= ATP850_SETTIME(drive,
3881 acard_act_udma[drvp->UDMA_mode],
3882 acard_rec_udma[drvp->UDMA_mode]);
3883 udma_mode |= ATP850_UDMA_MODE(channel, drive,
3884 acard_udma_conf[drvp->UDMA_mode]);
3885 } else {
3886 idetime |= ATP860_SETTIME(channel, drive,
3887 acard_act_udma[drvp->UDMA_mode],
3888 acard_rec_udma[drvp->UDMA_mode]);
3889 udma_mode |= ATP860_UDMA_MODE(channel, drive,
3890 acard_udma_conf[drvp->UDMA_mode]);
3891 }
3892 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3893 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
3894 (drvp->drive_flags & DRIVE_DMA)) {
3895 /* use Multiword DMA */
3896 drvp->drive_flags &= ~DRIVE_UDMA;
3897 if (ACARD_IS_850(sc)) {
3898 idetime |= ATP850_SETTIME(drive,
3899 acard_act_dma[drvp->DMA_mode],
3900 acard_rec_dma[drvp->DMA_mode]);
3901 } else {
3902 idetime |= ATP860_SETTIME(channel, drive,
3903 acard_act_dma[drvp->DMA_mode],
3904 acard_rec_dma[drvp->DMA_mode]);
3905 }
3906 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3907 } else {
3908 /* PIO only */
3909 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
3910 if (ACARD_IS_850(sc)) {
3911 idetime |= ATP850_SETTIME(drive,
3912 acard_act_pio[drvp->PIO_mode],
3913 acard_rec_pio[drvp->PIO_mode]);
3914 } else {
3915 idetime |= ATP860_SETTIME(channel, drive,
3916 acard_act_pio[drvp->PIO_mode],
3917 acard_rec_pio[drvp->PIO_mode]);
3918 }
3919 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
3920 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
3921 | ATP8x0_CTRL_EN(channel));
3922 }
3923 }
3924
3925 if (idedma_ctl != 0) {
3926 /* Add software bits in status register */
3927 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3928 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
3929 }
3930 pciide_print_modes(cp);
3931
3932 if (ACARD_IS_850(sc)) {
3933 pci_conf_write(sc->sc_pc, sc->sc_tag,
3934 ATP850_IDETIME(channel), idetime);
3935 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
3936 } else {
3937 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
3938 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
3939 }
3940 }
3941
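/*
 * ACARD interrupt handler.  IDEDMA_CTL_INTR in the per-channel bus-master
 * status register tells us which channel interrupted; when the channel is
 * not actually waiting for an interrupt (WDCF_IRQ_WAIT clear) wdcintr() is
 * still called and the status is written back explicitly, presumably so a
 * stray interrupt is acknowledged rather than left pending.
 */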
3942 int
3943 acard_pci_intr(arg)
3944 void *arg;
3945 {
3946 struct pciide_softc *sc = arg;
3947 struct pciide_channel *cp;
3948 struct channel_softc *wdc_cp;
3949 int rv = 0;
3950 int dmastat, i, crv;
3951
3952 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3953 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3954 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3955 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3956 continue;
3957 cp = &sc->pciide_channels[i];
3958 wdc_cp = &cp->wdc_channel;
3959 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
3960 (void)wdcintr(wdc_cp);
3961 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3962 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3963 continue;
3964 }
3965 crv = wdcintr(wdc_cp);
3966 if (crv == 0)
3967 printf("%s:%d: bogus intr\n",
3968 sc->sc_wdcdev.sc_dev.dv_xname, i);
3969 else if (crv == 1)
3970 rv = 1;
3971 else if (rv == 0)
3972 rv = crv;
3973 }
3974 return rv;
3975 }
3976