1 /* $NetBSD: pciide.c,v 1.116 2001/05/06 20:06:35 fvdl Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <uvm/uvm_extern.h>
100
101 #include <machine/endian.h>
102
103 #include <dev/pci/pcireg.h>
104 #include <dev/pci/pcivar.h>
105 #include <dev/pci/pcidevs.h>
106 #include <dev/pci/pciidereg.h>
107 #include <dev/pci/pciidevar.h>
108 #include <dev/pci/pciide_piix_reg.h>
109 #include <dev/pci/pciide_amd_reg.h>
110 #include <dev/pci/pciide_apollo_reg.h>
111 #include <dev/pci/pciide_cmd_reg.h>
112 #include <dev/pci/pciide_cy693_reg.h>
113 #include <dev/pci/pciide_sis_reg.h>
114 #include <dev/pci/pciide_acer_reg.h>
115 #include <dev/pci/pciide_pdc202xx_reg.h>
116 #include <dev/pci/pciide_opti_reg.h>
117 #include <dev/pci/pciide_hpt_reg.h>
118 #include <dev/pci/pciide_acard_reg.h>
119 #include <dev/pci/cy82c693var.h>
120
121 #include "opt_pciide.h"
122
123 /* inlines for reading/writing 8-bit PCI registers */
124 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
125 int));
126 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
127 int, u_int8_t));
128
129 static __inline u_int8_t
130 pciide_pci_read(pc, pa, reg)
131 pci_chipset_tag_t pc;
132 pcitag_t pa;
133 int reg;
134 {
135
136 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
137 ((reg & 0x03) * 8) & 0xff);
138 }
139
140 static __inline void
141 pciide_pci_write(pc, pa, reg, val)
142 pci_chipset_tag_t pc;
143 pcitag_t pa;
144 int reg;
145 u_int8_t val;
146 {
147 pcireg_t pcival;
148
149 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
150 pcival &= ~(0xff << ((reg & 0x03) * 8));
151 pcival |= (val << ((reg & 0x03) * 8));
152 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
153 }
154
155 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
156
157 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
158 void piix_setup_channel __P((struct channel_softc*));
159 void piix3_4_setup_channel __P((struct channel_softc*));
160 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
161 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
162 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
163
164 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
165 void amd7x6_setup_channel __P((struct channel_softc*));
166
167 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void apollo_setup_channel __P((struct channel_softc*));
169
170 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void cmd0643_9_setup_channel __P((struct channel_softc*));
173 void cmd_channel_map __P((struct pci_attach_args *,
174 struct pciide_softc *, int));
175 int cmd_pci_intr __P((void *));
176 void cmd646_9_irqack __P((struct channel_softc *));
177
178 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void cy693_setup_channel __P((struct channel_softc*));
180
181 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void sis_setup_channel __P((struct channel_softc*));
183
184 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void acer_setup_channel __P((struct channel_softc*));
186 int acer_pci_intr __P((void *));
187
188 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void pdc202xx_setup_channel __P((struct channel_softc*));
190 int pdc202xx_pci_intr __P((void *));
191 int pdc20265_pci_intr __P((void *));
192
193 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void opti_setup_channel __P((struct channel_softc*));
195
196 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void hpt_setup_channel __P((struct channel_softc*));
198 int hpt_pci_intr __P((void *));
199
200 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
201 void acard_setup_channel __P((struct channel_softc*));
202 int acard_pci_intr __P((void *));
203
204 void pciide_channel_dma_setup __P((struct pciide_channel *));
205 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
206 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
207 void pciide_dma_start __P((void*, int, int));
208 int pciide_dma_finish __P((void*, int, int, int));
209 void pciide_irqack __P((struct channel_softc *));
210 void pciide_print_modes __P((struct pciide_channel *));
211
212 struct pciide_product_desc {
213 u_int32_t ide_product;
214 int ide_flags;
215 const char *ide_name;
216 /* map and setup chip, probe drives */
217 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
218 };
219
220 /* Flags for ide_flags */
221 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
222 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
223
224 /* Default product description for devices not specifically known to this driver */
225 const struct pciide_product_desc default_product_desc = {
226 0,
227 0,
228 "Generic PCI IDE controller",
229 default_chip_map,
230 };
231
232 const struct pciide_product_desc pciide_intel_products[] = {
233 { PCI_PRODUCT_INTEL_82092AA,
234 0,
235 "Intel 82092AA IDE controller",
236 default_chip_map,
237 },
238 { PCI_PRODUCT_INTEL_82371FB_IDE,
239 0,
240 "Intel 82371FB IDE controller (PIIX)",
241 piix_chip_map,
242 },
243 { PCI_PRODUCT_INTEL_82371SB_IDE,
244 0,
245 "Intel 82371SB IDE Interface (PIIX3)",
246 piix_chip_map,
247 },
248 { PCI_PRODUCT_INTEL_82371AB_IDE,
249 0,
250 "Intel 82371AB IDE controller (PIIX4)",
251 piix_chip_map,
252 },
253 { PCI_PRODUCT_INTEL_82440MX_IDE,
254 0,
255 "Intel 82440MX IDE controller",
256 piix_chip_map
257 },
258 { PCI_PRODUCT_INTEL_82801AA_IDE,
259 0,
260 "Intel 82801AA IDE Controller (ICH)",
261 piix_chip_map,
262 },
263 { PCI_PRODUCT_INTEL_82801AB_IDE,
264 0,
265 "Intel 82801AB IDE Controller (ICH0)",
266 piix_chip_map,
267 },
268 { PCI_PRODUCT_INTEL_82801BA_IDE,
269 0,
270 "Intel 82801BA IDE Controller (ICH2)",
271 piix_chip_map,
272 },
273 { PCI_PRODUCT_INTEL_82801BAM_IDE,
274 0,
275 "Intel 82801BAM IDE Controller (ICH2)",
276 piix_chip_map,
277 },
278 { 0,
279 0,
280 NULL,
281 NULL
282 }
283 };
284
285 const struct pciide_product_desc pciide_amd_products[] = {
286 { PCI_PRODUCT_AMD_PBC756_IDE,
287 0,
288 "Advanced Micro Devices AMD756 IDE Controller",
289 amd7x6_chip_map
290 },
291 { PCI_PRODUCT_AMD_PBC766_IDE,
292 0,
293 "Advanced Micro Devices AMD766 IDE Controller",
294 amd7x6_chip_map
295 },
296 { 0,
297 0,
298 NULL,
299 NULL
300 }
301 };
302
303 const struct pciide_product_desc pciide_cmd_products[] = {
304 { PCI_PRODUCT_CMDTECH_640,
305 0,
306 "CMD Technology PCI0640",
307 cmd_chip_map
308 },
309 { PCI_PRODUCT_CMDTECH_643,
310 0,
311 "CMD Technology PCI0643",
312 cmd0643_9_chip_map,
313 },
314 { PCI_PRODUCT_CMDTECH_646,
315 0,
316 "CMD Technology PCI0646",
317 cmd0643_9_chip_map,
318 },
319 { PCI_PRODUCT_CMDTECH_648,
320 IDE_PCI_CLASS_OVERRIDE,
321 "CMD Technology PCI0648",
322 cmd0643_9_chip_map,
323 },
324 { PCI_PRODUCT_CMDTECH_649,
325 IDE_PCI_CLASS_OVERRIDE,
326 "CMD Technology PCI0649",
327 cmd0643_9_chip_map,
328 },
329 { 0,
330 0,
331 NULL,
332 NULL
333 }
334 };
335
336 const struct pciide_product_desc pciide_via_products[] = {
337 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
338 0,
339 NULL,
340 apollo_chip_map,
341 },
342 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
343 0,
344 NULL,
345 apollo_chip_map,
346 },
347 { 0,
348 0,
349 NULL,
350 NULL
351 }
352 };
353
354 const struct pciide_product_desc pciide_cypress_products[] = {
355 { PCI_PRODUCT_CONTAQ_82C693,
356 IDE_16BIT_IOSPACE,
357 "Cypress 82C693 IDE Controller",
358 cy693_chip_map,
359 },
360 { 0,
361 0,
362 NULL,
363 NULL
364 }
365 };
366
367 const struct pciide_product_desc pciide_sis_products[] = {
368 { PCI_PRODUCT_SIS_5597_IDE,
369 0,
370 "Silicon Integrated System 5597/5598 IDE controller",
371 sis_chip_map,
372 },
373 { 0,
374 0,
375 NULL,
376 NULL
377 }
378 };
379
380 const struct pciide_product_desc pciide_acer_products[] = {
381 { PCI_PRODUCT_ALI_M5229,
382 0,
383 "Acer Labs M5229 UDMA IDE Controller",
384 acer_chip_map,
385 },
386 { 0,
387 0,
388 NULL,
389 NULL
390 }
391 };
392
393 const struct pciide_product_desc pciide_promise_products[] = {
394 { PCI_PRODUCT_PROMISE_ULTRA33,
395 IDE_PCI_CLASS_OVERRIDE,
396 "Promise Ultra33/ATA Bus Master IDE Accelerator",
397 pdc202xx_chip_map,
398 },
399 { PCI_PRODUCT_PROMISE_ULTRA66,
400 IDE_PCI_CLASS_OVERRIDE,
401 "Promise Ultra66/ATA Bus Master IDE Accelerator",
402 pdc202xx_chip_map,
403 },
404 { PCI_PRODUCT_PROMISE_ULTRA100,
405 IDE_PCI_CLASS_OVERRIDE,
406 "Promise Ultra100/ATA Bus Master IDE Accelerator",
407 pdc202xx_chip_map,
408 },
409 { PCI_PRODUCT_PROMISE_ULTRA100X,
410 IDE_PCI_CLASS_OVERRIDE,
411 "Promise Ultra100/ATA Bus Master IDE Accelerator",
412 pdc202xx_chip_map,
413 },
414 { 0,
415 0,
416 NULL,
417 NULL
418 }
419 };
420
421 const struct pciide_product_desc pciide_opti_products[] = {
422 { PCI_PRODUCT_OPTI_82C621,
423 0,
424 "OPTi 82c621 PCI IDE controller",
425 opti_chip_map,
426 },
427 { PCI_PRODUCT_OPTI_82C568,
428 0,
429 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
430 opti_chip_map,
431 },
432 { PCI_PRODUCT_OPTI_82D568,
433 0,
434 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
435 opti_chip_map,
436 },
437 { 0,
438 0,
439 NULL,
440 NULL
441 }
442 };
443
444 const struct pciide_product_desc pciide_triones_products[] = {
445 { PCI_PRODUCT_TRIONES_HPT366,
446 IDE_PCI_CLASS_OVERRIDE,
447 NULL,
448 hpt_chip_map,
449 },
450 { 0,
451 0,
452 NULL,
453 NULL
454 }
455 };
456
457 const struct pciide_product_desc pciide_acard_products[] = {
458 { PCI_PRODUCT_ACARD_ATP850U,
459 IDE_PCI_CLASS_OVERRIDE,
460 "Acard ATP850U Ultra33 IDE Controller",
461 acard_chip_map,
462 },
463 { PCI_PRODUCT_ACARD_ATP860,
464 IDE_PCI_CLASS_OVERRIDE,
465 "Acard ATP860 Ultra66 IDE Controller",
466 acard_chip_map,
467 },
468 { PCI_PRODUCT_ACARD_ATP860A,
469 IDE_PCI_CLASS_OVERRIDE,
470 "Acard ATP860-A Ultra66 IDE Controller",
471 acard_chip_map,
472 },
473 { 0,
474 0,
475 NULL,
476 NULL
477 }
478 };
479
480 struct pciide_vendor_desc {
481 u_int32_t ide_vendor;
482 const struct pciide_product_desc *ide_products;
483 };
484
485 const struct pciide_vendor_desc pciide_vendors[] = {
486 { PCI_VENDOR_INTEL, pciide_intel_products },
487 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
488 { PCI_VENDOR_VIATECH, pciide_via_products },
489 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
490 { PCI_VENDOR_SIS, pciide_sis_products },
491 { PCI_VENDOR_ALI, pciide_acer_products },
492 { PCI_VENDOR_PROMISE, pciide_promise_products },
493 { PCI_VENDOR_AMD, pciide_amd_products },
494 { PCI_VENDOR_OPTI, pciide_opti_products },
495 { PCI_VENDOR_TRIONES, pciide_triones_products },
496 #ifdef PCIIDE_ACARD_ENABLE
497 { PCI_VENDOR_ACARD, pciide_acard_products },
498 #endif
499 { 0, NULL }
500 };
501
502 /* options passed via the 'flags' config keyword */
503 #define PCIIDE_OPTIONS_DMA 0x01
504
505 int pciide_match __P((struct device *, struct cfdata *, void *));
506 void pciide_attach __P((struct device *, struct device *, void *));
507
508 struct cfattach pciide_ca = {
509 sizeof(struct pciide_softc), pciide_match, pciide_attach
510 };
511 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
512 int pciide_mapregs_compat __P(( struct pci_attach_args *,
513 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
514 int pciide_mapregs_native __P((struct pci_attach_args *,
515 struct pciide_channel *, bus_size_t *, bus_size_t *,
516 int (*pci_intr) __P((void *))));
517 void pciide_mapreg_dma __P((struct pciide_softc *,
518 struct pci_attach_args *));
519 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
520 void pciide_mapchan __P((struct pci_attach_args *,
521 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
522 int (*pci_intr) __P((void *))));
523 int pciide_chan_candisable __P((struct pciide_channel *));
524 void pciide_map_compat_intr __P(( struct pci_attach_args *,
525 struct pciide_channel *, int, int));
526 int pciide_print __P((void *, const char *pnp));
527 int pciide_compat_intr __P((void *));
528 int pciide_pci_intr __P((void *));
529 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
530
531 const struct pciide_product_desc *
532 pciide_lookup_product(id)
533 u_int32_t id;
534 {
535 const struct pciide_product_desc *pp;
536 const struct pciide_vendor_desc *vp;
537
538 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
539 if (PCI_VENDOR(id) == vp->ide_vendor)
540 break;
541
542 if ((pp = vp->ide_products) == NULL)
543 return NULL;
544
545 for (; pp->chip_map != NULL; pp++)
546 if (PCI_PRODUCT(id) == pp->ide_product)
547 break;
548
549 if (pp->chip_map == NULL)
550 return NULL;
551 return pp;
552 }
553
554 int
555 pciide_match(parent, match, aux)
556 struct device *parent;
557 struct cfdata *match;
558 void *aux;
559 {
560 struct pci_attach_args *pa = aux;
561 const struct pciide_product_desc *pp;
562
563 /*
564 * Check the class register to see that it's a PCI IDE controller.
565 * If it is, we assume that we can deal with it; it _should_
566 * work in a standardized way...
567 */
568 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
569 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
570 return (1);
571 }
572
573 /*
574 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
575 * controllers. Let's see if we can deal with them anyway.
576 */
577 pp = pciide_lookup_product(pa->pa_id);
578 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
579 return (1);
580 }
581
582 return (0);
583 }
584
585 void
586 pciide_attach(parent, self, aux)
587 struct device *parent, *self;
588 void *aux;
589 {
590 struct pci_attach_args *pa = aux;
591 pci_chipset_tag_t pc = pa->pa_pc;
592 pcitag_t tag = pa->pa_tag;
593 struct pciide_softc *sc = (struct pciide_softc *)self;
594 pcireg_t csr;
595 char devinfo[256];
596 const char *displaydev;
597
598 sc->sc_pp = pciide_lookup_product(pa->pa_id);
599 if (sc->sc_pp == NULL) {
600 sc->sc_pp = &default_product_desc;
601 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
602 displaydev = devinfo;
603 } else
604 displaydev = sc->sc_pp->ide_name;
605
606 /* if displaydev == NULL, printf is done in chip-specific map */
607 if (displaydev)
608 printf(": %s (rev. 0x%02x)\n", displaydev,
609 PCI_REVISION(pa->pa_class));
610
611 sc->sc_pc = pa->pa_pc;
612 sc->sc_tag = pa->pa_tag;
613 #ifdef WDCDEBUG
614 if (wdcdebug_pciide_mask & DEBUG_PROBE)
615 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
616 #endif
617 sc->sc_pp->chip_map(sc, pa);
618
619 if (sc->sc_dma_ok) {
620 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
621 csr |= PCI_COMMAND_MASTER_ENABLE;
622 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
623 }
624 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
625 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
626 }
627
628 /* tell whether the chip is enabled or not */
629 int
630 pciide_chipen(sc, pa)
631 struct pciide_softc *sc;
632 struct pci_attach_args *pa;
633 {
634 pcireg_t csr;
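/*
 * pa_flags says I/O access is off; the command register tells us whether
 * the device itself is disabled or an upstream bridge is not forwarding
 * I/O cycles.
 */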
635 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
636 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
637 PCI_COMMAND_STATUS_REG);
638 printf("%s: device disabled (at %s)\n",
639 sc->sc_wdcdev.sc_dev.dv_xname,
640 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
641 "device" : "bridge");
642 return 0;
643 }
644 return 1;
645 }
646
647 int
648 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
649 struct pci_attach_args *pa;
650 struct pciide_channel *cp;
651 int compatchan;
652 bus_size_t *cmdsizep, *ctlsizep;
653 {
654 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
655 struct channel_softc *wdc_cp = &cp->wdc_channel;
656
657 cp->compat = 1;
658 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
659 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
660
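/*
 * Compatibility channels live at the fixed legacy addresses
 * (0x1f0/0x3f6 and 0x170/0x376) provided by the PCIIDE_COMPAT_CMD_BASE()
 * and PCIIDE_COMPAT_CTL_BASE() macros.
 */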
661 wdc_cp->cmd_iot = pa->pa_iot;
662 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
663 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
664 printf("%s: couldn't map %s channel cmd regs\n",
665 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
666 return (0);
667 }
668
669 wdc_cp->ctl_iot = pa->pa_iot;
670 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
671 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
672 printf("%s: couldn't map %s channel ctl regs\n",
673 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
674 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
675 PCIIDE_COMPAT_CMD_SIZE);
676 return (0);
677 }
678
679 return (1);
680 }
681
682 int
683 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
684 struct pci_attach_args * pa;
685 struct pciide_channel *cp;
686 bus_size_t *cmdsizep, *ctlsizep;
687 int (*pci_intr) __P((void *));
688 {
689 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
690 struct channel_softc *wdc_cp = &cp->wdc_channel;
691 const char *intrstr;
692 pci_intr_handle_t intrhandle;
693
694 cp->compat = 0;
695
696 if (sc->sc_pci_ih == NULL) {
697 if (pci_intr_map(pa, &intrhandle) != 0) {
698 printf("%s: couldn't map native-PCI interrupt\n",
699 sc->sc_wdcdev.sc_dev.dv_xname);
700 return 0;
701 }
702 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
703 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
704 intrhandle, IPL_BIO, pci_intr, sc);
705 if (sc->sc_pci_ih != NULL) {
706 printf("%s: using %s for native-PCI interrupt\n",
707 sc->sc_wdcdev.sc_dev.dv_xname,
708 intrstr ? intrstr : "unknown interrupt");
709 } else {
710 printf("%s: couldn't establish native-PCI interrupt",
711 sc->sc_wdcdev.sc_dev.dv_xname);
712 if (intrstr != NULL)
713 printf(" at %s", intrstr);
714 printf("\n");
715 return 0;
716 }
717 }
718 cp->ih = sc->sc_pci_ih;
719 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
720 PCI_MAPREG_TYPE_IO, 0,
721 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
722 printf("%s: couldn't map %s channel cmd regs\n",
723 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
724 return 0;
725 }
726
727 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
728 PCI_MAPREG_TYPE_IO, 0,
729 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
730 printf("%s: couldn't map %s channel ctl regs\n",
731 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
732 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
733 return 0;
734 }
735 /*
736 * In native mode, 4 bytes of I/O space are mapped for the control
737 * register; the control register itself is at offset 2. Pass the
738 * generic code a handle for only one byte at the right offset.
739 */
740 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
741 &wdc_cp->ctl_ioh) != 0) {
742 printf("%s: unable to subregion %s channel ctl regs\n",
743 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
744 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
745 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
746 return 0;
747 }
748 return (1);
749 }
750
751 void
752 pciide_mapreg_dma(sc, pa)
753 struct pciide_softc *sc;
754 struct pci_attach_args *pa;
755 {
756 pcireg_t maptype;
757 bus_addr_t addr;
758
759 /*
760 * Map DMA registers
761 *
762 * Note that sc_dma_ok is the right variable to test to see if
763 * DMA can be done. If the interface doesn't support DMA,
764 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
765 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
766 * non-zero if the interface supports DMA and the registers
767 * could be mapped.
768 *
769 * XXX Note that despite the fact that the Bus Master IDE specs
770 * XXX say that "The bus master IDE function uses 16 bytes of IO
771 * XXX space," some controllers (at least the United
772 * XXX Microelectronics UM8886BF) place it in memory space.
773 */
774 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
775 PCIIDE_REG_BUS_MASTER_DMA);
776
777 switch (maptype) {
778 case PCI_MAPREG_TYPE_IO:
779 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
780 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
781 &addr, NULL, NULL) == 0);
782 if (sc->sc_dma_ok == 0) {
783 printf(", but unused (couldn't query registers)");
784 break;
785 }
786 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
787 && addr >= 0x10000) {
788 sc->sc_dma_ok = 0;
789 printf(", but unused (registers at unsafe address %#lx)", (unsigned long)addr);
790 break;
791 }
792 /* FALLTHROUGH */
793
794 case PCI_MAPREG_MEM_TYPE_32BIT:
795 sc->sc_dma_ok = (pci_mapreg_map(pa,
796 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
797 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
798 sc->sc_dmat = pa->pa_dmat;
799 if (sc->sc_dma_ok == 0) {
800 printf(", but unused (couldn't map registers)");
801 } else {
802 sc->sc_wdcdev.dma_arg = sc;
803 sc->sc_wdcdev.dma_init = pciide_dma_init;
804 sc->sc_wdcdev.dma_start = pciide_dma_start;
805 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
806 }
807 break;
808
809 default:
810 sc->sc_dma_ok = 0;
811 printf(", but unsupported register maptype (0x%x)", maptype);
812 }
813 }
814
815 int
816 pciide_compat_intr(arg)
817 void *arg;
818 {
819 struct pciide_channel *cp = arg;
820
821 #ifdef DIAGNOSTIC
822 /* should only be called for a compat channel */
823 if (cp->compat == 0)
824 panic("pciide compat intr called for non-compat chan %p\n", cp);
825 #endif
826 return (wdcintr(&cp->wdc_channel));
827 }
828
829 int
830 pciide_pci_intr(arg)
831 void *arg;
832 {
833 struct pciide_softc *sc = arg;
834 struct pciide_channel *cp;
835 struct channel_softc *wdc_cp;
836 int i, rv, crv;
837
838 rv = 0;
839 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
840 cp = &sc->pciide_channels[i];
841 wdc_cp = &cp->wdc_channel;
842
843 /* If a compat channel, skip. */
844 if (cp->compat)
845 continue;
846 /* if this channel isn't waiting for an intr, skip */
847 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
848 continue;
849
850 crv = wdcintr(wdc_cp);
851 if (crv == 0)
852 ; /* leave rv alone */
853 else if (crv == 1)
854 rv = 1; /* claim the intr */
855 else if (rv == 0) /* crv should be -1 in this case */
856 rv = crv; /* if we've done no better, take it */
857 }
858 return (rv);
859 }
860
861 void
862 pciide_channel_dma_setup(cp)
863 struct pciide_channel *cp;
864 {
865 int drive;
866 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
867 struct ata_drive_datas *drvp;
868
869 for (drive = 0; drive < 2; drive++) {
870 drvp = &cp->wdc_channel.ch_drive[drive];
871 /* If no drive, skip */
872 if ((drvp->drive_flags & DRIVE) == 0)
873 continue;
874 /* setup DMA if needed */
875 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
876 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
877 sc->sc_dma_ok == 0) {
878 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
879 continue;
880 }
881 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
882 != 0) {
883 /* Abort DMA setup */
884 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
885 continue;
886 }
887 }
888 }
889
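/*
 * Allocate, map and load the physical region descriptor (PRD) table for
 * one drive, and create the DMA map later used for the data transfers
 * themselves.
 */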
890 int
891 pciide_dma_table_setup(sc, channel, drive)
892 struct pciide_softc *sc;
893 int channel, drive;
894 {
895 bus_dma_segment_t seg;
896 int error, rseg;
897 const bus_size_t dma_table_size =
898 sizeof(struct idedma_table) * NIDEDMA_TABLES;
899 struct pciide_dma_maps *dma_maps =
900 &sc->pciide_channels[channel].dma_maps[drive];
901
902 /* If table was already allocated, just return */
903 if (dma_maps->dma_table)
904 return 0;
905
906 /* Allocate memory for the DMA tables and map it */
907 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
908 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
909 BUS_DMA_NOWAIT)) != 0) {
910 printf("%s:%d: unable to allocate table DMA for "
911 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
912 channel, drive, error);
913 return error;
914 }
915 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
916 dma_table_size,
917 (caddr_t *)&dma_maps->dma_table,
918 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
919 printf("%s:%d: unable to map table DMA for"
920 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
921 channel, drive, error);
922 return error;
923 }
924 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
925 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
926 (unsigned long)seg.ds_addr), DEBUG_PROBE);
927
928 /* Create and load table DMA map for this disk */
929 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
930 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
931 &dma_maps->dmamap_table)) != 0) {
932 printf("%s:%d: unable to create table DMA map for "
933 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
934 channel, drive, error);
935 return error;
936 }
937 if ((error = bus_dmamap_load(sc->sc_dmat,
938 dma_maps->dmamap_table,
939 dma_maps->dma_table,
940 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
941 printf("%s:%d: unable to load table DMA map for "
942 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
943 channel, drive, error);
944 return error;
945 }
946 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
947 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
948 DEBUG_PROBE);
949 /* Create a xfer DMA map for this drive */
950 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
951 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
952 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
953 &dma_maps->dmamap_xfer)) != 0) {
954 printf("%s:%d: unable to create xfer DMA map for "
955 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
956 channel, drive, error);
957 return error;
958 }
959 return 0;
960 }
961
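/*
 * Load the data buffer into the transfer DMA map, translate its segments
 * into PRD table entries, and program the bus-master registers for the
 * upcoming transfer.
 */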
962 int
963 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
964 void *v;
965 int channel, drive;
966 void *databuf;
967 size_t datalen;
968 int flags;
969 {
970 struct pciide_softc *sc = v;
971 int error, seg;
972 struct pciide_dma_maps *dma_maps =
973 &sc->pciide_channels[channel].dma_maps[drive];
974
975 error = bus_dmamap_load(sc->sc_dmat,
976 dma_maps->dmamap_xfer,
977 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
978 if (error) {
979 printf("%s:%d: unable to load xfer DMA map for"
980 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
981 channel, drive, error);
982 return error;
983 }
984
985 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
986 dma_maps->dmamap_xfer->dm_mapsize,
987 (flags & WDC_DMA_READ) ?
988 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
989
990 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
991 #ifdef DIAGNOSTIC
992 /* A segment must not cross a 64k boundary */
993 {
994 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
995 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
996 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
997 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
998 printf("pciide_dma: segment %d physical addr 0x%lx"
999 " len 0x%lx not properly aligned\n",
1000 seg, phys, len);
1001 panic("pciide_dma: buf align");
1002 }
1003 }
1004 #endif
1005 dma_maps->dma_table[seg].base_addr =
1006 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1007 dma_maps->dma_table[seg].byte_count =
1008 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1009 IDEDMA_BYTE_COUNT_MASK);
1010 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1011 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1012 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1013
1014 }
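/*
 * The last entry of the PRD table must have the end-of-table bit set,
 * as required by the Bus Master IDE specification.
 */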
1015 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
1016 htole32(IDEDMA_BYTE_COUNT_EOT);
1017
1018 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1019 dma_maps->dmamap_table->dm_mapsize,
1020 BUS_DMASYNC_PREWRITE);
1021
1022 /* Maps are ready. Start DMA function */
1023 #ifdef DIAGNOSTIC
1024 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1025 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1026 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1027 panic("pciide_dma_init: table align");
1028 }
1029 #endif
1030
1031 /* Clear status bits */
1032 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1033 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1034 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1035 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1036 /* Write table addr */
1037 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1038 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1039 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1040 /* set direction: on a disk read the bus master writes to memory */
1041 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1042 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1043 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1044 /* remember flags */
1045 dma_maps->dma_flags = flags;
1046 return 0;
1047 }
1048
1049 void
1050 pciide_dma_start(v, channel, drive)
1051 void *v;
1052 int channel, drive;
1053 {
1054 struct pciide_softc *sc = v;
1055
1056 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1057 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1058 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1059 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1060 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1061 }
1062
1063 int
1064 pciide_dma_finish(v, channel, drive, force)
1065 void *v;
1066 int channel, drive;
1067 int force;
1068 {
1069 struct pciide_softc *sc = v;
1070 u_int8_t status;
1071 int error = 0;
1072 struct pciide_dma_maps *dma_maps =
1073 &sc->pciide_channels[channel].dma_maps[drive];
1074
1075 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1076 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1077 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1078 DEBUG_XFERS);
1079
1080 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1081 return WDC_DMAST_NOIRQ;
1082
1083 /* stop DMA channel */
1084 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1085 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1086 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1087 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1088
1089 /* Unload the map of the data buffer */
1090 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1091 dma_maps->dmamap_xfer->dm_mapsize,
1092 (dma_maps->dma_flags & WDC_DMA_READ) ?
1093 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1094 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1095
1096 if ((status & IDEDMA_CTL_ERR) != 0) {
1097 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1098 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1099 error |= WDC_DMAST_ERR;
1100 }
1101
1102 if ((status & IDEDMA_CTL_INTR) == 0) {
1103 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1104 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1105 drive, status);
1106 error |= WDC_DMAST_NOIRQ;
1107 }
1108
1109 if ((status & IDEDMA_CTL_ACT) != 0) {
1110 /* data underrun, may be a valid condition for ATAPI */
1111 error |= WDC_DMAST_UNDER;
1112 }
1113 return error;
1114 }
1115
1116 void
1117 pciide_irqack(chp)
1118 struct channel_softc *chp;
1119 {
1120 struct pciide_channel *cp = (struct pciide_channel*)chp;
1121 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1122
1123 /* clear status bits in IDE DMA registers */
1124 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1125 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1126 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1127 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1128 }
1129
1130 /* some common code used by several chip_map */
1131 int
1132 pciide_chansetup(sc, channel, interface)
1133 struct pciide_softc *sc;
1134 int channel;
1135 pcireg_t interface;
1136 {
1137 struct pciide_channel *cp = &sc->pciide_channels[channel];
1138 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1139 cp->name = PCIIDE_CHANNEL_NAME(channel);
1140 cp->wdc_channel.channel = channel;
1141 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1142 cp->wdc_channel.ch_queue =
1143 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1144 if (cp->wdc_channel.ch_queue == NULL) {
1145 printf("%s %s channel: "
1146 "can't allocate memory for command queue",
1147 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1148 return 0;
1149 }
1150 printf("%s: %s channel %s to %s mode\n",
1151 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1152 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1153 "configured" : "wired",
1154 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1155 "native-PCI" : "compatibility");
1156 return 1;
1157 }
1158
1159 /* some common code used by several chip channel_map */
1160 void
1161 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1162 struct pci_attach_args *pa;
1163 struct pciide_channel *cp;
1164 pcireg_t interface;
1165 bus_size_t *cmdsizep, *ctlsizep;
1166 int (*pci_intr) __P((void *));
1167 {
1168 struct channel_softc *wdc_cp = &cp->wdc_channel;
1169
1170 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1171 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1172 pci_intr);
1173 else
1174 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1175 wdc_cp->channel, cmdsizep, ctlsizep);
1176
1177 if (cp->hw_ok == 0)
1178 return;
1179 wdc_cp->data32iot = wdc_cp->cmd_iot;
1180 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1181 wdcattach(wdc_cp);
1182 }
1183
1184 /*
1185 * Generic code to find out whether a channel can be disabled. Return 1
1186 * if the channel can be disabled, 0 if not.
1187 */
1188 int
1189 pciide_chan_candisable(cp)
1190 struct pciide_channel *cp;
1191 {
1192 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1193 struct channel_softc *wdc_cp = &cp->wdc_channel;
1194
1195 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1196 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1197 printf("%s: disabling %s channel (no drives)\n",
1198 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1199 cp->hw_ok = 0;
1200 return 1;
1201 }
1202 return 0;
1203 }
1204
1205 /*
1206 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1207 * Set hw_ok=0 on failure
1208 */
1209 void
1210 pciide_map_compat_intr(pa, cp, compatchan, interface)
1211 struct pci_attach_args *pa;
1212 struct pciide_channel *cp;
1213 int compatchan, interface;
1214 {
1215 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1216 struct channel_softc *wdc_cp = &cp->wdc_channel;
1217
1218 if (cp->hw_ok == 0)
1219 return;
1220 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1221 return;
1222
1223 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1224 pa, compatchan, pciide_compat_intr, cp);
1225 if (cp->ih == NULL) {
1226 printf("%s: no compatibility interrupt for use by %s "
1227 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1228 cp->hw_ok = 0;
1229 }
1230 }
1231
1232 void
1233 pciide_print_modes(cp)
1234 struct pciide_channel *cp;
1235 {
1236 wdc_print_modes(&cp->wdc_channel);
1237 }
1238
1239 void
1240 default_chip_map(sc, pa)
1241 struct pciide_softc *sc;
1242 struct pci_attach_args *pa;
1243 {
1244 struct pciide_channel *cp;
1245 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1246 pcireg_t csr;
1247 int channel, drive;
1248 struct ata_drive_datas *drvp;
1249 u_int8_t idedma_ctl;
1250 bus_size_t cmdsize, ctlsize;
1251 char *failreason;
1252
1253 if (pciide_chipen(sc, pa) == 0)
1254 return;
1255
1256 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1257 printf("%s: bus-master DMA support present",
1258 sc->sc_wdcdev.sc_dev.dv_xname);
1259 if (sc->sc_pp == &default_product_desc &&
1260 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1261 PCIIDE_OPTIONS_DMA) == 0) {
1262 printf(", but unused (no driver support)");
1263 sc->sc_dma_ok = 0;
1264 } else {
1265 pciide_mapreg_dma(sc, pa);
1266 if (sc->sc_dma_ok != 0)
1267 printf(", used without full driver "
1268 "support");
1269 }
1270 } else {
1271 printf("%s: hardware does not support DMA",
1272 sc->sc_wdcdev.sc_dev.dv_xname);
1273 sc->sc_dma_ok = 0;
1274 }
1275 printf("\n");
1276 if (sc->sc_dma_ok) {
1277 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1278 sc->sc_wdcdev.irqack = pciide_irqack;
1279 }
1280 sc->sc_wdcdev.PIO_cap = 0;
1281 sc->sc_wdcdev.DMA_cap = 0;
1282
1283 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1284 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1285 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1286
1287 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1288 cp = &sc->pciide_channels[channel];
1289 if (pciide_chansetup(sc, channel, interface) == 0)
1290 continue;
1291 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1292 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1293 &ctlsize, pciide_pci_intr);
1294 } else {
1295 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1296 channel, &cmdsize, &ctlsize);
1297 }
1298 if (cp->hw_ok == 0)
1299 continue;
1300 /*
1301 * Check to see if something appears to be there.
1302 */
1303 failreason = NULL;
1304 if (!wdcprobe(&cp->wdc_channel)) {
1305 failreason = "not responding; disabled or no drives?";
1306 goto next;
1307 }
1308 /*
1309 * Now, make sure it's actually attributable to this PCI IDE
1310 * channel by trying to access the channel again while the
1311 * PCI IDE controller's I/O space is disabled. (If the
1312 * channel no longer appears to be there, it belongs to
1313 * this controller.) YUCK!
1314 */
1315 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1316 PCI_COMMAND_STATUS_REG);
1317 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1318 csr & ~PCI_COMMAND_IO_ENABLE);
1319 if (wdcprobe(&cp->wdc_channel))
1320 failreason = "other hardware responding at addresses";
1321 pci_conf_write(sc->sc_pc, sc->sc_tag,
1322 PCI_COMMAND_STATUS_REG, csr);
1323 next:
1324 if (failreason) {
1325 printf("%s: %s channel ignored (%s)\n",
1326 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1327 failreason);
1328 cp->hw_ok = 0;
1329 bus_space_unmap(cp->wdc_channel.cmd_iot,
1330 cp->wdc_channel.cmd_ioh, cmdsize);
1331 bus_space_unmap(cp->wdc_channel.ctl_iot,
1332 cp->wdc_channel.ctl_ioh, ctlsize);
1333 } else {
1334 pciide_map_compat_intr(pa, cp, channel, interface);
1335 }
1336 if (cp->hw_ok) {
1337 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1338 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1339 wdcattach(&cp->wdc_channel);
1340 }
1341 }
1342
1343 if (sc->sc_dma_ok == 0)
1344 return;
1345
1346 /* Allocate DMA maps */
1347 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1348 idedma_ctl = 0;
1349 cp = &sc->pciide_channels[channel];
1350 for (drive = 0; drive < 2; drive++) {
1351 drvp = &cp->wdc_channel.ch_drive[drive];
1352 /* If no drive, skip */
1353 if ((drvp->drive_flags & DRIVE) == 0)
1354 continue;
1355 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1356 continue;
1357 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1358 /* Abort DMA setup */
1359 printf("%s:%d:%d: can't allocate DMA maps, "
1360 "using PIO transfers\n",
1361 sc->sc_wdcdev.sc_dev.dv_xname,
1362 channel, drive);
1363 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1364 }
1365 printf("%s:%d:%d: using DMA data transfers\n",
1366 sc->sc_wdcdev.sc_dev.dv_xname,
1367 channel, drive);
1368 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1369 }
1370 if (idedma_ctl != 0) {
1371 /* Add software bits in status register */
1372 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1373 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1374 idedma_ctl);
1375 }
1376 }
1377 }
1378
1379 void
1380 piix_chip_map(sc, pa)
1381 struct pciide_softc *sc;
1382 struct pci_attach_args *pa;
1383 {
1384 struct pciide_channel *cp;
1385 int channel;
1386 u_int32_t idetim;
1387 bus_size_t cmdsize, ctlsize;
1388
1389 if (pciide_chipen(sc, pa) == 0)
1390 return;
1391
1392 printf("%s: bus-master DMA support present",
1393 sc->sc_wdcdev.sc_dev.dv_xname);
1394 pciide_mapreg_dma(sc, pa);
1395 printf("\n");
1396 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1397 WDC_CAPABILITY_MODE;
1398 if (sc->sc_dma_ok) {
1399 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1400 sc->sc_wdcdev.irqack = pciide_irqack;
1401 switch(sc->sc_pp->ide_product) {
1402 case PCI_PRODUCT_INTEL_82371AB_IDE:
1403 case PCI_PRODUCT_INTEL_82440MX_IDE:
1404 case PCI_PRODUCT_INTEL_82801AA_IDE:
1405 case PCI_PRODUCT_INTEL_82801AB_IDE:
1406 case PCI_PRODUCT_INTEL_82801BA_IDE:
1407 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1408 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1409 }
1410 }
1411 sc->sc_wdcdev.PIO_cap = 4;
1412 sc->sc_wdcdev.DMA_cap = 2;
1413 switch(sc->sc_pp->ide_product) {
1414 case PCI_PRODUCT_INTEL_82801AA_IDE:
1415 sc->sc_wdcdev.UDMA_cap = 4;
1416 break;
1417 case PCI_PRODUCT_INTEL_82801BA_IDE:
1418 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1419 sc->sc_wdcdev.UDMA_cap = 5;
1420 break;
1421 default:
1422 sc->sc_wdcdev.UDMA_cap = 2;
1423 }
1424 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1425 sc->sc_wdcdev.set_modes = piix_setup_channel;
1426 else
1427 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1428 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1429 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1430
1431 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1432 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1433 DEBUG_PROBE);
1434 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1435 WDCDEBUG_PRINT((", sidetim=0x%x",
1436 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1437 DEBUG_PROBE);
1438 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1439 WDCDEBUG_PRINT((", udamreg 0x%x",
1440 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1441 DEBUG_PROBE);
1442 }
1443 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1444 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1445 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1446 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1447 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1448 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1449 DEBUG_PROBE);
1450 }
1451
1452 }
1453 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1454
1455 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1456 cp = &sc->pciide_channels[channel];
1457 /* PIIX is compat-only */
1458 if (pciide_chansetup(sc, channel, 0) == 0)
1459 continue;
1460 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1461 if ((PIIX_IDETIM_READ(idetim, channel) &
1462 PIIX_IDETIM_IDE) == 0) {
1463 printf("%s: %s channel ignored (disabled)\n",
1464 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1465 continue;
1466 }
1467 /* PIIXes are compat-only pciide devices */
1468 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1469 if (cp->hw_ok == 0)
1470 continue;
1471 if (pciide_chan_candisable(cp)) {
1472 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1473 channel);
1474 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1475 idetim);
1476 }
1477 pciide_map_compat_intr(pa, cp, channel, 0);
1478 if (cp->hw_ok == 0)
1479 continue;
1480 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1481 }
1482
1483 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1484 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1485 DEBUG_PROBE);
1486 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1487 WDCDEBUG_PRINT((", sidetim=0x%x",
1488 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1489 DEBUG_PROBE);
1490 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1491 WDCDEBUG_PRINT((", udamreg 0x%x",
1492 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1493 DEBUG_PROBE);
1494 }
1495 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1496 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1497 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1498 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1499 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1500 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1501 DEBUG_PROBE);
1502 }
1503 }
1504 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1505 }
1506
1507 void
1508 piix_setup_channel(chp)
1509 struct channel_softc *chp;
1510 {
1511 u_int8_t mode[2], drive;
1512 u_int32_t oidetim, idetim, idedma_ctl;
1513 struct pciide_channel *cp = (struct pciide_channel*)chp;
1514 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1515 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1516
1517 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1518 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1519 idedma_ctl = 0;
1520
1521 /* set up new idetim: Enable IDE registers decode */
1522 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1523 chp->channel);
1524
1525 /* setup DMA */
1526 pciide_channel_dma_setup(cp);
1527
1528 /*
1529 * Here we have to mess with the drive modes: the PIIX can't have
1530 * different timings for master and slave drives.
1531 * We need to find the best combination.
1532 */
1533
1534 /* If both drives support DMA, take the lower mode */
1535 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1536 (drvp[1].drive_flags & DRIVE_DMA)) {
1537 mode[0] = mode[1] =
1538 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1539 drvp[0].DMA_mode = mode[0];
1540 drvp[1].DMA_mode = mode[1];
1541 goto ok;
1542 }
1543 /*
1544 * If only one drive supports DMA, use its mode, and
1545 * put the other one in PIO mode 0 if its mode is not compatible.
1546 */
1547 if (drvp[0].drive_flags & DRIVE_DMA) {
1548 mode[0] = drvp[0].DMA_mode;
1549 mode[1] = drvp[1].PIO_mode;
1550 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1551 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1552 mode[1] = drvp[1].PIO_mode = 0;
1553 goto ok;
1554 }
1555 if (drvp[1].drive_flags & DRIVE_DMA) {
1556 mode[1] = drvp[1].DMA_mode;
1557 mode[0] = drvp[0].PIO_mode;
1558 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1559 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1560 mode[0] = drvp[0].PIO_mode = 0;
1561 goto ok;
1562 }
1563 /*
1564 * If neither drive is using DMA, take the lower mode, unless
1565 * one of them is in PIO mode < 2.
1566 */
1567 if (drvp[0].PIO_mode < 2) {
1568 mode[0] = drvp[0].PIO_mode = 0;
1569 mode[1] = drvp[1].PIO_mode;
1570 } else if (drvp[1].PIO_mode < 2) {
1571 mode[1] = drvp[1].PIO_mode = 0;
1572 mode[0] = drvp[0].PIO_mode;
1573 } else {
1574 mode[0] = mode[1] =
1575 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1576 drvp[0].PIO_mode = mode[0];
1577 drvp[1].PIO_mode = mode[1];
1578 }
1579 ok: /* The modes are setup */
1580 for (drive = 0; drive < 2; drive++) {
1581 if (drvp[drive].drive_flags & DRIVE_DMA) {
1582 idetim |= piix_setup_idetim_timings(
1583 mode[drive], 1, chp->channel);
1584 goto end;
1585 }
1586 }
1587 /* If we get here, neither drive is using DMA */
1588 if (mode[0] >= 2)
1589 idetim |= piix_setup_idetim_timings(
1590 mode[0], 0, chp->channel);
1591 else
1592 idetim |= piix_setup_idetim_timings(
1593 mode[1], 0, chp->channel);
1594 end: /*
1595 * The timing mode is now set up in the controller. Enable
1596 * it per-drive.
1597 */
1598 for (drive = 0; drive < 2; drive++) {
1599 /* If no drive, skip */
1600 if ((drvp[drive].drive_flags & DRIVE) == 0)
1601 continue;
1602 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1603 if (drvp[drive].drive_flags & DRIVE_DMA)
1604 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1605 }
1606 if (idedma_ctl != 0) {
1607 /* Add software bits in status register */
1608 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1609 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1610 idedma_ctl);
1611 }
1612 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1613 pciide_print_modes(cp);
1614 }
1615
1616 void
1617 piix3_4_setup_channel(chp)
1618 struct channel_softc *chp;
1619 {
1620 struct ata_drive_datas *drvp;
1621 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1622 struct pciide_channel *cp = (struct pciide_channel*)chp;
1623 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1624 int drive;
1625 int channel = chp->channel;
1626
1627 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1628 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1629 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1630 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1631 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1632 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1633 PIIX_SIDETIM_RTC_MASK(channel));
1634
1635 idedma_ctl = 0;
1636 /* If channel disabled, no need to go further */
1637 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1638 return;
1639 /* set up new idetim: Enable IDE registers decode */
1640 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1641
1642 /* setup DMA if needed */
1643 pciide_channel_dma_setup(cp);
1644
1645 for (drive = 0; drive < 2; drive++) {
1646 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1647 PIIX_UDMATIM_SET(0x3, channel, drive));
1648 drvp = &chp->ch_drive[drive];
1649 /* If no drive, skip */
1650 if ((drvp->drive_flags & DRIVE) == 0)
1651 continue;
1652 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1653 (drvp->drive_flags & DRIVE_UDMA) == 0))
1654 goto pio;
1655
1656 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1657 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1658 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1659 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1660 ideconf |= PIIX_CONFIG_PINGPONG;
1661 }
1662 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1663 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1664 /* setup Ultra/100 */
1665 if (drvp->UDMA_mode > 2 &&
1666 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1667 drvp->UDMA_mode = 2;
1668 if (drvp->UDMA_mode > 4) {
1669 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1670 } else {
1671 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1672 if (drvp->UDMA_mode > 2) {
1673 ideconf |= PIIX_CONFIG_UDMA66(channel,
1674 drive);
1675 } else {
1676 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1677 drive);
1678 }
1679 }
1680 }
1681 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1682 /* setup Ultra/66 */
1683 if (drvp->UDMA_mode > 2 &&
1684 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1685 drvp->UDMA_mode = 2;
1686 if (drvp->UDMA_mode > 2)
1687 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1688 else
1689 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1690 }
1691 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1692 (drvp->drive_flags & DRIVE_UDMA)) {
1693 /* use Ultra/DMA */
1694 drvp->drive_flags &= ~DRIVE_DMA;
1695 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1696 udmareg |= PIIX_UDMATIM_SET(
1697 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1698 } else {
1699 /* use Multiword DMA */
1700 drvp->drive_flags &= ~DRIVE_UDMA;
1701 if (drive == 0) {
1702 idetim |= piix_setup_idetim_timings(
1703 drvp->DMA_mode, 1, channel);
1704 } else {
1705 sidetim |= piix_setup_sidetim_timings(
1706 drvp->DMA_mode, 1, channel);
1707 idetim = PIIX_IDETIM_SET(idetim,
1708 PIIX_IDETIM_SITRE, channel);
1709 }
1710 }
1711 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1712
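/*
 * FALLTHROUGH: the PIO timings below must also be programmed
 * for DMA and UDMA drives.
 */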
1713 pio: /* use PIO mode */
1714 idetim |= piix_setup_idetim_drvs(drvp);
1715 if (drive == 0) {
1716 idetim |= piix_setup_idetim_timings(
1717 drvp->PIO_mode, 0, channel);
1718 } else {
1719 sidetim |= piix_setup_sidetim_timings(
1720 drvp->PIO_mode, 0, channel);
1721 idetim = PIIX_IDETIM_SET(idetim,
1722 PIIX_IDETIM_SITRE, channel);
1723 }
1724 }
1725 if (idedma_ctl != 0) {
1726 /* Add software bits in status register */
1727 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1728 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1729 idedma_ctl);
1730 }
1731 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1732 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1733 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1734 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1735 pciide_print_modes(cp);
1736 }
1737
1738
1739 /* setup ISP and RTC fields, based on mode */
1740 static u_int32_t
1741 piix_setup_idetim_timings(mode, dma, channel)
1742 u_int8_t mode;
1743 u_int8_t dma;
1744 u_int8_t channel;
1745 {
1746
1747 if (dma)
1748 return PIIX_IDETIM_SET(0,
1749 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1750 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1751 channel);
1752 else
1753 return PIIX_IDETIM_SET(0,
1754 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1755 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1756 channel);
1757 }
1758
1759 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1760 static u_int32_t
1761 piix_setup_idetim_drvs(drvp)
1762 struct ata_drive_datas *drvp;
1763 {
1764 u_int32_t ret = 0;
1765 struct channel_softc *chp = drvp->chnl_softc;
1766 u_int8_t channel = chp->channel;
1767 u_int8_t drive = drvp->drive;
1768
1769 /*
1770 * If the drive is using UDMA, the timing setups are independent,
1771 * so just check DMA and PIO here.
1772 */
1773 if (drvp->drive_flags & DRIVE_DMA) {
1774 /* if mode = DMA mode 0, use compatible timings */
1775 if ((drvp->drive_flags & DRIVE_DMA) &&
1776 drvp->DMA_mode == 0) {
1777 drvp->PIO_mode = 0;
1778 return ret;
1779 }
1780 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1781 /*
1782 * If PIO and DMA timings are the same, use fast timings for PIO
1783 * too; otherwise use compat timings.
1784 */
1785 if ((piix_isp_pio[drvp->PIO_mode] !=
1786 piix_isp_dma[drvp->DMA_mode]) ||
1787 (piix_rtc_pio[drvp->PIO_mode] !=
1788 piix_rtc_dma[drvp->DMA_mode]))
1789 drvp->PIO_mode = 0;
1790 /* if PIO mode <= 2, use compat timings for PIO */
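/*
 * PIIX_IDETIM_DTE presumably restricts the fast timings to DMA
 * transfers only, leaving PIO on compatible timings.
 */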
1791 if (drvp->PIO_mode <= 2) {
1792 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1793 channel);
1794 return ret;
1795 }
1796 }
1797
1798 /*
1799 * Now setup PIO modes. If mode < 2, use compat timings.
1800 * Else enable fast timings. Enable IORDY and prefetch/post
1801 * if PIO mode >= 3.
1802 */
1803
1804 if (drvp->PIO_mode < 2)
1805 return ret;
1806
1807 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1808 if (drvp->PIO_mode >= 3) {
1809 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1810 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1811 }
1812 return ret;
1813 }
1814
1815 /* setup values in SIDETIM registers, based on mode */
1816 static u_int32_t
1817 piix_setup_sidetim_timings(mode, dma, channel)
1818 u_int8_t mode;
1819 u_int8_t dma;
1820 u_int8_t channel;
1821 {
1822 if (dma)
1823 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1824 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1825 else
1826 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1827 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1828 }
1829
1830 void
1831 amd7x6_chip_map(sc, pa)
1832 struct pciide_softc *sc;
1833 struct pci_attach_args *pa;
1834 {
1835 struct pciide_channel *cp;
1836 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1837 int channel;
1838 pcireg_t chanenable;
1839 bus_size_t cmdsize, ctlsize;
1840
1841 if (pciide_chipen(sc, pa) == 0)
1842 return;
1843 printf("%s: bus-master DMA support present",
1844 sc->sc_wdcdev.sc_dev.dv_xname);
1845 pciide_mapreg_dma(sc, pa);
1846 printf("\n");
1847 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1848 WDC_CAPABILITY_MODE;
1849 if (sc->sc_dma_ok) {
1850 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1851 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1852 sc->sc_wdcdev.irqack = pciide_irqack;
1853 }
1854 sc->sc_wdcdev.PIO_cap = 4;
1855 sc->sc_wdcdev.DMA_cap = 2;
1856
1857 if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1858 sc->sc_wdcdev.UDMA_cap = 5;
1859 else
1860 sc->sc_wdcdev.UDMA_cap = 4;
1861 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1862 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1863 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1864 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1865
1866 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1867 DEBUG_PROBE);
1868 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1869 cp = &sc->pciide_channels[channel];
1870 if (pciide_chansetup(sc, channel, interface) == 0)
1871 continue;
1872
1873 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1874 printf("%s: %s channel ignored (disabled)\n",
1875 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1876 continue;
1877 }
1878 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1879 pciide_pci_intr);
1880
1881 if (pciide_chan_candisable(cp))
1882 chanenable &= ~AMD7X6_CHAN_EN(channel);
1883 pciide_map_compat_intr(pa, cp, channel, interface);
1884 if (cp->hw_ok == 0)
1885 continue;
1886
1887 amd7x6_setup_channel(&cp->wdc_channel);
1888 }
1889 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1890 chanenable);
1891 return;
1892 }
1893
1894 void
1895 amd7x6_setup_channel(chp)
1896 struct channel_softc *chp;
1897 {
1898 u_int32_t udmatim_reg, datatim_reg;
1899 u_int8_t idedma_ctl;
1900 int mode, drive;
1901 struct ata_drive_datas *drvp;
1902 struct pciide_channel *cp = (struct pciide_channel*)chp;
1903 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1904 #ifndef PCIIDE_AMD756_ENABLEDMA
1905 int rev = PCI_REVISION(
1906 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1907 #endif
1908
1909 idedma_ctl = 0;
1910 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1911 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1912 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1913 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1914
1915 /* setup DMA if needed */
1916 pciide_channel_dma_setup(cp);
1917
1918 for (drive = 0; drive < 2; drive++) {
1919 drvp = &chp->ch_drive[drive];
1920 /* If no drive, skip */
1921 if ((drvp->drive_flags & DRIVE) == 0)
1922 continue;
1923 /* add timing values, setup DMA if needed */
1924 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1925 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1926 mode = drvp->PIO_mode;
1927 goto pio;
1928 }
1929 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1930 (drvp->drive_flags & DRIVE_UDMA)) {
1931 /* use Ultra/DMA */
1932 drvp->drive_flags &= ~DRIVE_DMA;
1933 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
1934 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
1935 AMD7X6_UDMA_TIME(chp->channel, drive,
1936 amd7x6_udma_tim[drvp->UDMA_mode]);
1937 /* can use PIO timings, MW DMA unused */
1938 mode = drvp->PIO_mode;
1939 } else {
1940 /* use Multiword DMA, but only if revision is OK */
1941 drvp->drive_flags &= ~DRIVE_UDMA;
1942 #ifndef PCIIDE_AMD756_ENABLEDMA
1943 /*
1944 * The workaround doesn't seem to be necessary
1945 * with all drives, so it can be disabled with
1946 * PCIIDE_AMD756_ENABLEDMA. The underlying bug causes a
1947 * hard hang if triggered.
1948 */
1949 if (sc->sc_pp->ide_product ==
1950 PCI_PRODUCT_AMD_PBC756_IDE &&
1951 AMD756_CHIPREV_DISABLEDMA(rev)) {
1952 printf("%s:%d:%d: multi-word DMA disabled due "
1953 "to chip revision\n",
1954 sc->sc_wdcdev.sc_dev.dv_xname,
1955 chp->channel, drive);
1956 mode = drvp->PIO_mode;
1957 drvp->drive_flags &= ~DRIVE_DMA;
1958 goto pio;
1959 }
1960 #endif
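/*
 * The data timing register is shared by PIO and multiword DMA;
 * DMA mode n needs roughly the same cycle time as PIO mode n+2,
 * hence the min() below.
 */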
1961 /* mode = min(pio, dma+2) */
1962 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1963 mode = drvp->PIO_mode;
1964 else
1965 mode = drvp->DMA_mode + 2;
1966 }
1967 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1968
1969 pio: /* setup PIO mode */
1970 if (mode <= 2) {
1971 drvp->DMA_mode = 0;
1972 drvp->PIO_mode = 0;
1973 mode = 0;
1974 } else {
1975 drvp->PIO_mode = mode;
1976 drvp->DMA_mode = mode - 2;
1977 }
1978 datatim_reg |=
1979 AMD7X6_DATATIM_PULSE(chp->channel, drive,
1980 amd7x6_pio_set[mode]) |
1981 AMD7X6_DATATIM_RECOV(chp->channel, drive,
1982 amd7x6_pio_rec[mode]);
1983 }
1984 if (idedma_ctl != 0) {
1985 /* Add software bits in status register */
1986 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1987 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1988 idedma_ctl);
1989 }
1990 pciide_print_modes(cp);
1991 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
1992 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
1993 }
1994
1995 void
1996 apollo_chip_map(sc, pa)
1997 struct pciide_softc *sc;
1998 struct pci_attach_args *pa;
1999 {
2000 struct pciide_channel *cp;
2001 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2002 int channel;
2003 u_int32_t ideconf;
2004 bus_size_t cmdsize, ctlsize;
2005 pcitag_t pcib_tag;
2006 pcireg_t pcib_id, pcib_class;
2007
2008 if (pciide_chipen(sc, pa) == 0)
2009 return;
2010 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2011 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2012 /* and read ID and rev of the ISA bridge */
2013 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2014 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2015 printf(": VIA Technologies ");
2016 switch (PCI_PRODUCT(pcib_id)) {
2017 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2018 printf("VT82C586 (Apollo VP) ");
2019 if (PCI_REVISION(pcib_class) >= 0x02) {
2020 printf("ATA33 controller\n");
2021 sc->sc_wdcdev.UDMA_cap = 2;
2022 } else {
2023 printf("controller\n");
2024 sc->sc_wdcdev.UDMA_cap = 0;
2025 }
2026 break;
2027 case PCI_PRODUCT_VIATECH_VT82C596A:
2028 printf("VT82C596A (Apollo Pro) ");
2029 if (PCI_REVISION(pcib_class) >= 0x12) {
2030 printf("ATA66 controller\n");
2031 sc->sc_wdcdev.UDMA_cap = 4;
2032 } else {
2033 printf("ATA33 controller\n");
2034 sc->sc_wdcdev.UDMA_cap = 2;
2035 }
2036 break;
2037 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2038 printf("VT82C686A (Apollo KX133) ");
2039 if (PCI_REVISION(pcib_class) >= 0x40) {
2040 printf("ATA100 controller\n");
2041 sc->sc_wdcdev.UDMA_cap = 5;
2042 } else {
2043 printf("ATA66 controller\n");
2044 sc->sc_wdcdev.UDMA_cap = 4;
2045 }
2046 break;
2047 default:
2048 printf("unknown ATA controller\n");
2049 sc->sc_wdcdev.UDMA_cap = 0;
2050 }
2051
2052 printf("%s: bus-master DMA support present",
2053 sc->sc_wdcdev.sc_dev.dv_xname);
2054 pciide_mapreg_dma(sc, pa);
2055 printf("\n");
2056 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2057 WDC_CAPABILITY_MODE;
2058 if (sc->sc_dma_ok) {
2059 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2060 sc->sc_wdcdev.irqack = pciide_irqack;
2061 if (sc->sc_wdcdev.UDMA_cap > 0)
2062 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2063 }
2064 sc->sc_wdcdev.PIO_cap = 4;
2065 sc->sc_wdcdev.DMA_cap = 2;
2066 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2067 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2068 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2069
2070 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2071 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2072 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2073 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2074 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2075 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2076 DEBUG_PROBE);
2077
2078 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2079 cp = &sc->pciide_channels[channel];
2080 if (pciide_chansetup(sc, channel, interface) == 0)
2081 continue;
2082
2083 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2084 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2085 printf("%s: %s channel ignored (disabled)\n",
2086 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2087 continue;
2088 }
2089 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2090 pciide_pci_intr);
2091 if (cp->hw_ok == 0)
2092 continue;
2093 if (pciide_chan_candisable(cp)) {
2094 ideconf &= ~APO_IDECONF_EN(channel);
2095 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2096 ideconf);
2097 }
2098 pciide_map_compat_intr(pa, cp, channel, interface);
2099
2100 if (cp->hw_ok == 0)
2101 continue;
2102 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2103 }
2104 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2105 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2106 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2107 }
2108
2109 void
2110 apollo_setup_channel(chp)
2111 struct channel_softc *chp;
2112 {
2113 u_int32_t udmatim_reg, datatim_reg;
2114 u_int8_t idedma_ctl;
2115 int mode, drive;
2116 struct ata_drive_datas *drvp;
2117 struct pciide_channel *cp = (struct pciide_channel*)chp;
2118 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2119
2120 idedma_ctl = 0;
2121 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2122 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2123 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2124 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2125
2126 /* setup DMA if needed */
2127 pciide_channel_dma_setup(cp);
2128
2129 for (drive = 0; drive < 2; drive++) {
2130 drvp = &chp->ch_drive[drive];
2131 /* If no drive, skip */
2132 if ((drvp->drive_flags & DRIVE) == 0)
2133 continue;
2134 /* add timing values, setup DMA if needed */
2135 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2136 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2137 mode = drvp->PIO_mode;
2138 goto pio;
2139 }
2140 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2141 (drvp->drive_flags & DRIVE_UDMA)) {
2142 /* use Ultra/DMA */
2143 drvp->drive_flags &= ~DRIVE_DMA;
2144 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2145 APO_UDMA_EN_MTH(chp->channel, drive);
2146 if (sc->sc_wdcdev.UDMA_cap == 5) {
2147 /* 686b */
2148 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2149 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2150 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2151 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2152 /* 596b or 686a */
2153 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2154 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2155 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2156 } else {
2157 /* 596a or 586b */
2158 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2159 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2160 }
2161 /* can use PIO timings, MW DMA unused */
2162 mode = drvp->PIO_mode;
2163 } else {
2164 /* use Multiword DMA */
2165 drvp->drive_flags &= ~DRIVE_UDMA;
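/* PIO and multiword DMA share the data timing register here too */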
2166 /* mode = min(pio, dma+2) */
2167 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2168 mode = drvp->PIO_mode;
2169 else
2170 mode = drvp->DMA_mode + 2;
2171 }
2172 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2173
2174 pio: /* setup PIO mode */
2175 if (mode <= 2) {
2176 drvp->DMA_mode = 0;
2177 drvp->PIO_mode = 0;
2178 mode = 0;
2179 } else {
2180 drvp->PIO_mode = mode;
2181 drvp->DMA_mode = mode - 2;
2182 }
2183 datatim_reg |=
2184 APO_DATATIM_PULSE(chp->channel, drive,
2185 apollo_pio_set[mode]) |
2186 APO_DATATIM_RECOV(chp->channel, drive,
2187 apollo_pio_rec[mode]);
2188 }
2189 if (idedma_ctl != 0) {
2190 /* Add software bits in status register */
2191 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2192 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2193 idedma_ctl);
2194 }
2195 pciide_print_modes(cp);
2196 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2197 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2198 }
2199
2200 void
2201 cmd_channel_map(pa, sc, channel)
2202 struct pci_attach_args *pa;
2203 struct pciide_softc *sc;
2204 int channel;
2205 {
2206 struct pciide_channel *cp = &sc->pciide_channels[channel];
2207 bus_size_t cmdsize, ctlsize;
2208 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2209 int interface;
2210
2211 /*
2212 * The 0648/0649 can be told to identify as a RAID controller.
2213 * In this case, we have to fake the interface.
2214 */
2215 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2216 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2217 PCIIDE_INTERFACE_SETTABLE(1);
2218 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2219 CMD_CONF_DSA1)
2220 interface |= PCIIDE_INTERFACE_PCI(0) |
2221 PCIIDE_INTERFACE_PCI(1);
2222 } else {
2223 interface = PCI_INTERFACE(pa->pa_class);
2224 }
2225
2226 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2227 cp->name = PCIIDE_CHANNEL_NAME(channel);
2228 cp->wdc_channel.channel = channel;
2229 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2230
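/*
 * Both channels share one command queue, which serializes transfers
 * across them; presumably a workaround for the well-known CMD064x
 * problems with simultaneous activity on both channels.
 */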
2231 if (channel > 0) {
2232 cp->wdc_channel.ch_queue =
2233 sc->pciide_channels[0].wdc_channel.ch_queue;
2234 } else {
2235 cp->wdc_channel.ch_queue =
2236 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2237 }
2238 if (cp->wdc_channel.ch_queue == NULL) {
2239 printf("%s %s channel: "
2240 "can't allocate memory for command queue",
2241 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2242 return;
2243 }
2244
2245 printf("%s: %s channel %s to %s mode\n",
2246 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2247 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2248 "configured" : "wired",
2249 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2250 "native-PCI" : "compatibility");
2251
2252 /*
2253 * With a CMD PCI064x, if we get here, the first channel is enabled:
2254 * there's no way to disable the first channel without disabling
2255 * the whole device
2256 */
2257 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2258 printf("%s: %s channel ignored (disabled)\n",
2259 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2260 return;
2261 }
2262
2263 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2264 if (cp->hw_ok == 0)
2265 return;
2266 if (channel == 1) {
2267 if (pciide_chan_candisable(cp)) {
2268 ctrl &= ~CMD_CTRL_2PORT;
2269 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2270 CMD_CTRL, ctrl);
2271 }
2272 }
2273 pciide_map_compat_intr(pa, cp, channel, interface);
2274 }
2275
2276 int
2277 cmd_pci_intr(arg)
2278 void *arg;
2279 {
2280 struct pciide_softc *sc = arg;
2281 struct pciide_channel *cp;
2282 struct channel_softc *wdc_cp;
2283 int i, rv, crv;
2284 u_int32_t priirq, secirq;
2285
2286 rv = 0;
2287 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2288 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2289 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2290 cp = &sc->pciide_channels[i];
2291 wdc_cp = &cp->wdc_channel;
2292 /* If a compat channel, skip. */
2293 if (cp->compat)
2294 continue;
2295 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2296 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2297 crv = wdcintr(wdc_cp);
2298 if (crv == 0)
2299 printf("%s:%d: bogus intr\n",
2300 sc->sc_wdcdev.sc_dev.dv_xname, i);
2301 else
2302 rv = 1;
2303 }
2304 }
2305 return rv;
2306 }
2307
2308 void
2309 cmd_chip_map(sc, pa)
2310 struct pciide_softc *sc;
2311 struct pci_attach_args *pa;
2312 {
2313 int channel;
2314
2315 /*
2316 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2317 * and the base address registers can be disabled at the
2318 * hardware level. In this case, the device is wired
2319 * in compat mode and its first channel is always enabled,
2320 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2321 * In fact, it seems that the first channel of the CMD PCI0640
2322 * can't be disabled.
2323 */
2324
2325 #ifdef PCIIDE_CMD064x_DISABLE
2326 if (pciide_chipen(sc, pa) == 0)
2327 return;
2328 #endif
2329
2330 printf("%s: hardware does not support DMA\n",
2331 sc->sc_wdcdev.sc_dev.dv_xname);
2332 sc->sc_dma_ok = 0;
2333
2334 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2335 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2336 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2337
2338 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2339 cmd_channel_map(pa, sc, channel);
2340 }
2341 }
2342
2343 void
2344 cmd0643_9_chip_map(sc, pa)
2345 struct pciide_softc *sc;
2346 struct pci_attach_args *pa;
2347 {
2348 struct pciide_channel *cp;
2349 int channel;
2350 int rev = PCI_REVISION(
2351 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2352
2353 /*
2354 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2355 * and the base address registers can be disabled at the
2356 * hardware level. In this case, the device is wired
2357 * in compat mode and its first channel is always enabled,
2358 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2359 * In fact, it seems that the first channel of the CMD PCI0640
2360 * can't be disabled.
2361 */
2362
2363 #ifdef PCIIDE_CMD064x_DISABLE
2364 if (pciide_chipen(sc, pa) == 0)
2365 return;
2366 #endif
2367 printf("%s: bus-master DMA support present",
2368 sc->sc_wdcdev.sc_dev.dv_xname);
2369 pciide_mapreg_dma(sc, pa);
2370 printf("\n");
2371 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2372 WDC_CAPABILITY_MODE;
2373 if (sc->sc_dma_ok) {
2374 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2375 switch (sc->sc_pp->ide_product) {
2376 case PCI_PRODUCT_CMDTECH_649:
2377 case PCI_PRODUCT_CMDTECH_648:
2378 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2379 sc->sc_wdcdev.UDMA_cap = 4;
2380 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2381 break;
2382 case PCI_PRODUCT_CMDTECH_646:
2383 if (rev >= CMD0646U2_REV) {
2384 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2385 sc->sc_wdcdev.UDMA_cap = 2;
2386 } else if (rev >= CMD0646U_REV) {
2387 /*
2388 * Linux's driver claims that the 646U is broken
2389 * with UDMA. Only enable it if we know what we're
2390 * doing.
2391 */
2392 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2393 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2394 sc->sc_wdcdev.UDMA_cap = 2;
2395 #endif
2396 /* explicitly disable UDMA */
2397 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2398 CMD_UDMATIM(0), 0);
2399 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2400 CMD_UDMATIM(1), 0);
2401 }
2402 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2403 break;
2404 default:
2405 sc->sc_wdcdev.irqack = pciide_irqack;
2406 }
2407 }
2408
2409 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2410 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2411 sc->sc_wdcdev.PIO_cap = 4;
2412 sc->sc_wdcdev.DMA_cap = 2;
2413 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2414
2415 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2416 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2417 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2418 DEBUG_PROBE);
2419
2420 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2421 cp = &sc->pciide_channels[channel];
2422 cmd_channel_map(pa, sc, channel);
2423 if (cp->hw_ok == 0)
2424 continue;
2425 cmd0643_9_setup_channel(&cp->wdc_channel);
2426 }
2427 /*
2428 * Note: this also makes sure we clear the IRQ disable and reset
2429 * bits.
2430 */
2431 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2432 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2433 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2434 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2435 DEBUG_PROBE);
2436 }
2437
2438 void
2439 cmd0643_9_setup_channel(chp)
2440 struct channel_softc *chp;
2441 {
2442 struct ata_drive_datas *drvp;
2443 u_int8_t tim;
2444 u_int32_t idedma_ctl, udma_reg;
2445 int drive;
2446 struct pciide_channel *cp = (struct pciide_channel*)chp;
2447 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2448
2449 idedma_ctl = 0;
2450 /* setup DMA if needed */
2451 pciide_channel_dma_setup(cp);
2452
2453 for (drive = 0; drive < 2; drive++) {
2454 drvp = &chp->ch_drive[drive];
2455 /* If no drive, skip */
2456 if ((drvp->drive_flags & DRIVE) == 0)
2457 continue;
2458 /* add timing values, setup DMA if needed */
2459 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2460 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2461 if (drvp->drive_flags & DRIVE_UDMA) {
2462 /* UltraDMA on a 646U2, 0648 or 0649 */
2463 drvp->drive_flags &= ~DRIVE_DMA;
2464 udma_reg = pciide_pci_read(sc->sc_pc,
2465 sc->sc_tag, CMD_UDMATIM(chp->channel));
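/*
 * CMD_BICSR_80 presumably reports an 80-conductor cable; without
 * one, limit the drive to Ultra/33 (mode 2).
 */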
2466 if (drvp->UDMA_mode > 2 &&
2467 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2468 CMD_BICSR) &
2469 CMD_BICSR_80(chp->channel)) == 0)
2470 drvp->UDMA_mode = 2;
2471 if (drvp->UDMA_mode > 2)
2472 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2473 else if (sc->sc_wdcdev.UDMA_cap > 2)
2474 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2475 udma_reg |= CMD_UDMATIM_UDMA(drive);
2476 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2477 CMD_UDMATIM_TIM_OFF(drive));
2478 udma_reg |=
2479 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2480 CMD_UDMATIM_TIM_OFF(drive));
2481 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2482 CMD_UDMATIM(chp->channel), udma_reg);
2483 } else {
2484 /*
2485 * use Multiword DMA.
2486 * Timings will be used for both PIO and DMA,
2487 * so adjust DMA mode if needed.
2488 * If we have a 0646U2/8/9, turn off UDMA.
2489 */
2490 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2491 udma_reg = pciide_pci_read(sc->sc_pc,
2492 sc->sc_tag,
2493 CMD_UDMATIM(chp->channel));
2494 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2495 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2496 CMD_UDMATIM(chp->channel),
2497 udma_reg);
2498 }
2499 if (drvp->PIO_mode >= 3 &&
2500 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2501 drvp->DMA_mode = drvp->PIO_mode - 2;
2502 }
2503 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2504 }
2505 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2506 }
2507 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2508 CMD_DATA_TIM(chp->channel, drive), tim);
2509 }
2510 if (idedma_ctl != 0) {
2511 /* Add software bits in status register */
2512 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2513 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2514 idedma_ctl);
2515 }
2516 pciide_print_modes(cp);
2517 }
2518
2519 void
2520 cmd646_9_irqack(chp)
2521 struct channel_softc *chp;
2522 {
2523 u_int32_t priirq, secirq;
2524 struct pciide_channel *cp = (struct pciide_channel*)chp;
2525 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2526
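/*
 * Reading the register and writing the value back presumably clears
 * the pending-interrupt bit before the generic acknowledge below.
 */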
2527 if (chp->channel == 0) {
2528 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2529 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2530 } else {
2531 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2532 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2533 }
2534 pciide_irqack(chp);
2535 }
2536
2537 void
2538 cy693_chip_map(sc, pa)
2539 struct pciide_softc *sc;
2540 struct pci_attach_args *pa;
2541 {
2542 struct pciide_channel *cp;
2543 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2544 bus_size_t cmdsize, ctlsize;
2545
2546 if (pciide_chipen(sc, pa) == 0)
2547 return;
2548 /*
2549 * This chip has 2 PCI IDE functions, one for primary and one for
2550 * secondary. So we need to call pciide_mapregs_compat() with
2551 * the real channel
2552 */
2553 if (pa->pa_function == 1) {
2554 sc->sc_cy_compatchan = 0;
2555 } else if (pa->pa_function == 2) {
2556 sc->sc_cy_compatchan = 1;
2557 } else {
2558 printf("%s: unexpected PCI function %d\n",
2559 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2560 return;
2561 }
2562 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2563 printf("%s: bus-master DMA support present",
2564 sc->sc_wdcdev.sc_dev.dv_xname);
2565 pciide_mapreg_dma(sc, pa);
2566 } else {
2567 printf("%s: hardware does not support DMA",
2568 sc->sc_wdcdev.sc_dev.dv_xname);
2569 sc->sc_dma_ok = 0;
2570 }
2571 printf("\n");
2572
2573 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2574 if (sc->sc_cy_handle == NULL) {
2575 printf("%s: unable to map hyperCache control registers\n",
2576 sc->sc_wdcdev.sc_dev.dv_xname);
2577 sc->sc_dma_ok = 0;
2578 }
2579
2580 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2581 WDC_CAPABILITY_MODE;
2582 if (sc->sc_dma_ok) {
2583 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2584 sc->sc_wdcdev.irqack = pciide_irqack;
2585 }
2586 sc->sc_wdcdev.PIO_cap = 4;
2587 sc->sc_wdcdev.DMA_cap = 2;
2588 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2589
2590 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2591 sc->sc_wdcdev.nchannels = 1;
2592
2593 /* Only one channel for this chip; if we are here it's enabled */
2594 cp = &sc->pciide_channels[0];
2595 sc->wdc_chanarray[0] = &cp->wdc_channel;
2596 cp->name = PCIIDE_CHANNEL_NAME(0);
2597 cp->wdc_channel.channel = 0;
2598 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2599 cp->wdc_channel.ch_queue =
2600 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2601 if (cp->wdc_channel.ch_queue == NULL) {
2602 printf("%s primary channel: "
2603 "can't allocate memory for command queue",
2604 sc->sc_wdcdev.sc_dev.dv_xname);
2605 return;
2606 }
2607 printf("%s: primary channel %s to ",
2608 sc->sc_wdcdev.sc_dev.dv_xname,
2609 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2610 "configured" : "wired");
2611 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2612 printf("native-PCI");
2613 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2614 pciide_pci_intr);
2615 } else {
2616 printf("compatibility");
2617 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2618 &cmdsize, &ctlsize);
2619 }
2620 printf(" mode\n");
2621 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2622 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2623 wdcattach(&cp->wdc_channel);
2624 if (pciide_chan_candisable(cp)) {
2625 pci_conf_write(sc->sc_pc, sc->sc_tag,
2626 PCI_COMMAND_STATUS_REG, 0);
2627 }
2628 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2629 if (cp->hw_ok == 0)
2630 return;
2631 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2632 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2633 cy693_setup_channel(&cp->wdc_channel);
2634 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2635 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2636 }
2637
2638 void
2639 cy693_setup_channel(chp)
2640 struct channel_softc *chp;
2641 {
2642 struct ata_drive_datas *drvp;
2643 int drive;
2644 u_int32_t cy_cmd_ctrl;
2645 u_int32_t idedma_ctl;
2646 struct pciide_channel *cp = (struct pciide_channel*)chp;
2647 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2648 int dma_mode = -1;
2649
2650 cy_cmd_ctrl = idedma_ctl = 0;
2651
2652 /* setup DMA if needed */
2653 pciide_channel_dma_setup(cp);
2654
2655 for (drive = 0; drive < 2; drive++) {
2656 drvp = &chp->ch_drive[drive];
2657 /* If no drive, skip */
2658 if ((drvp->drive_flags & DRIVE) == 0)
2659 continue;
2660 /* add timing values, setup DMA if needed */
2661 if (drvp->drive_flags & DRIVE_DMA) {
2662 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2663 /* use Multiword DMA */
2664 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2665 dma_mode = drvp->DMA_mode;
2666 }
2667 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2668 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2669 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2670 CY_CMD_CTRL_IOW_REC_OFF(drive));
2671 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2672 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2673 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2674 CY_CMD_CTRL_IOR_REC_OFF(drive));
2675 }
2676 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
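/*
 * There is only one DMA timing setting per channel, so both drives
 * are forced to the mode of the slowest one.
 */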
2677 chp->ch_drive[0].DMA_mode = dma_mode;
2678 chp->ch_drive[1].DMA_mode = dma_mode;
2679
2680 if (dma_mode == -1)
2681 dma_mode = 0;
2682
2683 if (sc->sc_cy_handle != NULL) {
2684 /* Note: `multiple' is implied. */
2685 cy82c693_write(sc->sc_cy_handle,
2686 (sc->sc_cy_compatchan == 0) ?
2687 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2688 }
2689
2690 pciide_print_modes(cp);
2691
2692 if (idedma_ctl != 0) {
2693 /* Add software bits in status register */
2694 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2695 IDEDMA_CTL, idedma_ctl);
2696 }
2697 }
2698
2699 void
2700 sis_chip_map(sc, pa)
2701 struct pciide_softc *sc;
2702 struct pci_attach_args *pa;
2703 {
2704 struct pciide_channel *cp;
2705 int channel;
2706 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2707 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2708 pcireg_t rev = PCI_REVISION(pa->pa_class);
2709 bus_size_t cmdsize, ctlsize;
2710
2711 if (pciide_chipen(sc, pa) == 0)
2712 return;
2713 printf("%s: bus-master DMA support present",
2714 sc->sc_wdcdev.sc_dev.dv_xname);
2715 pciide_mapreg_dma(sc, pa);
2716 printf("\n");
2717 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2718 WDC_CAPABILITY_MODE;
2719 if (sc->sc_dma_ok) {
2720 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2721 sc->sc_wdcdev.irqack = pciide_irqack;
2722 if (rev > 0xd0)
2723 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2724 }
2725
2726 sc->sc_wdcdev.PIO_cap = 4;
2727 sc->sc_wdcdev.DMA_cap = 2;
2728 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2729 sc->sc_wdcdev.UDMA_cap = 2;
2730 sc->sc_wdcdev.set_modes = sis_setup_channel;
2731
2732 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2733 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2734
2735 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2736 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2737 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2738
2739 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2740 cp = &sc->pciide_channels[channel];
2741 if (pciide_chansetup(sc, channel, interface) == 0)
2742 continue;
2743 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2744 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2745 printf("%s: %s channel ignored (disabled)\n",
2746 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2747 continue;
2748 }
2749 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2750 pciide_pci_intr);
2751 if (cp->hw_ok == 0)
2752 continue;
2753 if (pciide_chan_candisable(cp)) {
2754 if (channel == 0)
2755 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2756 else
2757 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2758 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2759 sis_ctr0);
2760 }
2761 pciide_map_compat_intr(pa, cp, channel, interface);
2762 if (cp->hw_ok == 0)
2763 continue;
2764 sis_setup_channel(&cp->wdc_channel);
2765 }
2766 }
2767
2768 void
2769 sis_setup_channel(chp)
2770 struct channel_softc *chp;
2771 {
2772 struct ata_drive_datas *drvp;
2773 int drive;
2774 u_int32_t sis_tim;
2775 u_int32_t idedma_ctl;
2776 struct pciide_channel *cp = (struct pciide_channel*)chp;
2777 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2778
2779 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2780 "channel %d 0x%x\n", chp->channel,
2781 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2782 DEBUG_PROBE);
2783 sis_tim = 0;
2784 idedma_ctl = 0;
2785 /* setup DMA if needed */
2786 pciide_channel_dma_setup(cp);
2787
2788 for (drive = 0; drive < 2; drive++) {
2789 drvp = &chp->ch_drive[drive];
2790 /* If no drive, skip */
2791 if ((drvp->drive_flags & DRIVE) == 0)
2792 continue;
2793 /* add timing values, setup DMA if needed */
2794 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2795 (drvp->drive_flags & DRIVE_UDMA) == 0)
2796 goto pio;
2797
2798 if (drvp->drive_flags & DRIVE_UDMA) {
2799 /* use Ultra/DMA */
2800 drvp->drive_flags &= ~DRIVE_DMA;
2801 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2802 SIS_TIM_UDMA_TIME_OFF(drive);
2803 sis_tim |= SIS_TIM_UDMA_EN(drive);
2804 } else {
2805 /*
2806 * use Multiword DMA
2807 * Timings will be used for both PIO and DMA,
2808 * so adjust DMA mode if needed
2809 */
2810 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2811 drvp->PIO_mode = drvp->DMA_mode + 2;
2812 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2813 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2814 drvp->PIO_mode - 2 : 0;
2815 if (drvp->DMA_mode == 0)
2816 drvp->PIO_mode = 0;
2817 }
2818 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2819 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2820 SIS_TIM_ACT_OFF(drive);
2821 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2822 SIS_TIM_REC_OFF(drive);
2823 }
2824 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2825 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2826 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2827 if (idedma_ctl != 0) {
2828 /* Add software bits in status register */
2829 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2830 IDEDMA_CTL, idedma_ctl);
2831 }
2832 pciide_print_modes(cp);
2833 }
2834
2835 void
2836 acer_chip_map(sc, pa)
2837 struct pciide_softc *sc;
2838 struct pci_attach_args *pa;
2839 {
2840 struct pciide_channel *cp;
2841 int channel;
2842 pcireg_t cr, interface;
2843 bus_size_t cmdsize, ctlsize;
2844 pcireg_t rev = PCI_REVISION(pa->pa_class);
2845
2846 if (pciide_chipen(sc, pa) == 0)
2847 return;
2848 printf("%s: bus-master DMA support present",
2849 sc->sc_wdcdev.sc_dev.dv_xname);
2850 pciide_mapreg_dma(sc, pa);
2851 printf("\n");
2852 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2853 WDC_CAPABILITY_MODE;
2854 if (sc->sc_dma_ok) {
2855 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2856 if (rev >= 0x20)
2857 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2858 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2859 sc->sc_wdcdev.irqack = pciide_irqack;
2860 }
2861
2862 sc->sc_wdcdev.PIO_cap = 4;
2863 sc->sc_wdcdev.DMA_cap = 2;
2864 sc->sc_wdcdev.UDMA_cap = 2;
2865 sc->sc_wdcdev.set_modes = acer_setup_channel;
2866 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2867 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2868
2869 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2870 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2871 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2872
2873 /* Enable "microsoft register bits" R/W. */
2874 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2875 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2876 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2877 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2878 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2879 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2880 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2881 ~ACER_CHANSTATUSREGS_RO);
2882 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2883 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2884 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2885 /* Don't use cr, re-read the real register content instead */
2886 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2887 PCI_CLASS_REG));
2888
2889 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2890 cp = &sc->pciide_channels[channel];
2891 if (pciide_chansetup(sc, channel, interface) == 0)
2892 continue;
2893 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2894 printf("%s: %s channel ignored (disabled)\n",
2895 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2896 continue;
2897 }
2898 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2899 acer_pci_intr);
2900 if (cp->hw_ok == 0)
2901 continue;
2902 if (pciide_chan_candisable(cp)) {
2903 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2904 pci_conf_write(sc->sc_pc, sc->sc_tag,
2905 PCI_CLASS_REG, cr);
2906 }
2907 pciide_map_compat_intr(pa, cp, channel, interface);
2908 acer_setup_channel(&cp->wdc_channel);
2909 }
2910 }
2911
2912 void
2913 acer_setup_channel(chp)
2914 struct channel_softc *chp;
2915 {
2916 struct ata_drive_datas *drvp;
2917 int drive;
2918 u_int32_t acer_fifo_udma;
2919 u_int32_t idedma_ctl;
2920 struct pciide_channel *cp = (struct pciide_channel*)chp;
2921 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2922
2923 idedma_ctl = 0;
2924 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2925 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2926 acer_fifo_udma), DEBUG_PROBE);
2927 /* setup DMA if needed */
2928 pciide_channel_dma_setup(cp);
2929
2930 for (drive = 0; drive < 2; drive++) {
2931 drvp = &chp->ch_drive[drive];
2932 /* If no drive, skip */
2933 if ((drvp->drive_flags & DRIVE) == 0)
2934 continue;
2935 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2936 "channel %d drive %d 0x%x\n", chp->channel, drive,
2937 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2938 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2939 /* clear FIFO/DMA mode */
2940 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2941 ACER_UDMA_EN(chp->channel, drive) |
2942 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2943
2944 /* add timing values, setup DMA if needed */
2945 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2946 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2947 acer_fifo_udma |=
2948 ACER_FTH_OPL(chp->channel, drive, 0x1);
2949 goto pio;
2950 }
2951
2952 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2953 if (drvp->drive_flags & DRIVE_UDMA) {
2954 /* use Ultra/DMA */
2955 drvp->drive_flags &= ~DRIVE_DMA;
2956 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2957 acer_fifo_udma |=
2958 ACER_UDMA_TIM(chp->channel, drive,
2959 acer_udma[drvp->UDMA_mode]);
2960 } else {
2961 /*
2962 * use Multiword DMA
2963 * Timings will be used for both PIO and DMA,
2964 * so adjust DMA mode if needed
2965 */
2966 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2967 drvp->PIO_mode = drvp->DMA_mode + 2;
2968 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2969 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2970 drvp->PIO_mode - 2 : 0;
2971 if (drvp->DMA_mode == 0)
2972 drvp->PIO_mode = 0;
2973 }
2974 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2975 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2976 ACER_IDETIM(chp->channel, drive),
2977 acer_pio[drvp->PIO_mode]);
2978 }
2979 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2980 acer_fifo_udma), DEBUG_PROBE);
2981 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2982 if (idedma_ctl != 0) {
2983 /* Add software bits in status register */
2984 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2985 IDEDMA_CTL, idedma_ctl);
2986 }
2987 pciide_print_modes(cp);
2988 }
2989
2990 int
2991 acer_pci_intr(arg)
2992 void *arg;
2993 {
2994 struct pciide_softc *sc = arg;
2995 struct pciide_channel *cp;
2996 struct channel_softc *wdc_cp;
2997 int i, rv, crv;
2998 u_int32_t chids;
2999
3000 rv = 0;
3001 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3002 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3003 cp = &sc->pciide_channels[i];
3004 wdc_cp = &cp->wdc_channel;
3005 /* If a compat channel, skip. */
3006 if (cp->compat)
3007 continue;
3008 if (chids & ACER_CHIDS_INT(i)) {
3009 crv = wdcintr(wdc_cp);
3010 if (crv == 0)
3011 printf("%s:%d: bogus intr\n",
3012 sc->sc_wdcdev.sc_dev.dv_xname, i);
3013 else
3014 rv = 1;
3015 }
3016 }
3017 return rv;
3018 }
3019
3020 void
3021 hpt_chip_map(sc, pa)
3022 struct pciide_softc *sc;
3023 struct pci_attach_args *pa;
3024 {
3025 struct pciide_channel *cp;
3026 int i, compatchan, revision;
3027 pcireg_t interface;
3028 bus_size_t cmdsize, ctlsize;
3029
3030 if (pciide_chipen(sc, pa) == 0)
3031 return;
3032 revision = PCI_REVISION(pa->pa_class);
3033 printf(": Triones/Highpoint ");
3034 if (revision == HPT370_REV)
3035 printf("HPT370 IDE Controller\n");
3036 else
3037 printf("HPT366 IDE Controller\n");
3038
3039 /*
3040 * When the chip is in native mode it identifies itself as a
3041 * 'misc mass storage' device. Fake the interface in this case.
3042 */
3043 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3044 interface = PCI_INTERFACE(pa->pa_class);
3045 } else {
3046 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3047 PCIIDE_INTERFACE_PCI(0);
3048 if (revision == HPT370_REV)
3049 interface |= PCIIDE_INTERFACE_PCI(1);
3050 }
3051
3052 printf("%s: bus-master DMA support present",
3053 sc->sc_wdcdev.sc_dev.dv_xname);
3054 pciide_mapreg_dma(sc, pa);
3055 printf("\n");
3056 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3057 WDC_CAPABILITY_MODE;
3058 if (sc->sc_dma_ok) {
3059 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3060 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3061 sc->sc_wdcdev.irqack = pciide_irqack;
3062 }
3063 sc->sc_wdcdev.PIO_cap = 4;
3064 sc->sc_wdcdev.DMA_cap = 2;
3065
3066 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3067 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3068 if (revision == HPT366_REV) {
3069 sc->sc_wdcdev.UDMA_cap = 4;
3070 /*
3071 * The 366 has 2 PCI IDE functions, one for primary and one
3072 * for secondary. So we need to call pciide_mapregs_compat()
3073 * with the real channel
3074 */
3075 if (pa->pa_function == 0) {
3076 compatchan = 0;
3077 } else if (pa->pa_function == 1) {
3078 compatchan = 1;
3079 } else {
3080 printf("%s: unexpected PCI function %d\n",
3081 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3082 return;
3083 }
3084 sc->sc_wdcdev.nchannels = 1;
3085 } else {
3086 sc->sc_wdcdev.nchannels = 2;
3087 sc->sc_wdcdev.UDMA_cap = 5;
3088 }
3089 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3090 cp = &sc->pciide_channels[i];
3091 if (sc->sc_wdcdev.nchannels > 1) {
3092 compatchan = i;
3093 if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3094 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3095 printf("%s: %s channel ignored (disabled)\n",
3096 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3097 continue;
3098 }
3099 }
3100 if (pciide_chansetup(sc, i, interface) == 0)
3101 continue;
3102 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3103 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3104 &ctlsize, hpt_pci_intr);
3105 } else {
3106 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3107 &cmdsize, &ctlsize);
3108 }
3109 if (cp->hw_ok == 0)
3110 return;
3111 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3112 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3113 wdcattach(&cp->wdc_channel);
3114 hpt_setup_channel(&cp->wdc_channel);
3115 }
3116 if (revision == HPT370_REV) {
3117 /*
3118 * The HPT370 has a bit to disable interrupts; make sure
3119 * to clear it.
3120 */
3121 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3122 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3123 ~HPT_CSEL_IRQDIS);
3124 }
3125 return;
3126 }
3127
3128 void
3129 hpt_setup_channel(chp)
3130 struct channel_softc *chp;
3131 {
3132 struct ata_drive_datas *drvp;
3133 int drive;
3134 int cable;
3135 u_int32_t before, after;
3136 u_int32_t idedma_ctl;
3137 struct pciide_channel *cp = (struct pciide_channel*)chp;
3138 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3139
3140 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3141
3142 /* setup DMA if needed */
3143 pciide_channel_dma_setup(cp);
3144
3145 idedma_ctl = 0;
3146
3147 /* Per drive settings */
3148 for (drive = 0; drive < 2; drive++) {
3149 drvp = &chp->ch_drive[drive];
3150 /* If no drive, skip */
3151 if ((drvp->drive_flags & DRIVE) == 0)
3152 continue;
3153 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3154 HPT_IDETIM(chp->channel, drive));
3155
3156 /* add timing values, setup DMA if needed */
3157 if (drvp->drive_flags & DRIVE_UDMA) {
3158 /* use Ultra/DMA */
3159 drvp->drive_flags &= ~DRIVE_DMA;
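/*
 * HPT_CSEL_CBLID set presumably means a 40-conductor cable on this
 * channel, so limit the drive to Ultra/33.
 */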
3160 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3161 drvp->UDMA_mode > 2)
3162 drvp->UDMA_mode = 2;
3163 after = (sc->sc_wdcdev.nchannels == 2) ?
3164 hpt370_udma[drvp->UDMA_mode] :
3165 hpt366_udma[drvp->UDMA_mode];
3166 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3167 } else if (drvp->drive_flags & DRIVE_DMA) {
3168 /*
3169 * use Multiword DMA.
3170 * Timings will be used for both PIO and DMA, so adjust
3171 * DMA mode if needed
3172 */
3173 if (drvp->PIO_mode >= 3 &&
3174 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3175 drvp->DMA_mode = drvp->PIO_mode - 2;
3176 }
3177 after = (sc->sc_wdcdev.nchannels == 2) ?
3178 hpt370_dma[drvp->DMA_mode] :
3179 hpt366_dma[drvp->DMA_mode];
3180 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3181 } else {
3182 /* PIO only */
3183 after = (sc->sc_wdcdev.nchannels == 2) ?
3184 hpt370_pio[drvp->PIO_mode] :
3185 hpt366_pio[drvp->PIO_mode];
3186 }
3187 pci_conf_write(sc->sc_pc, sc->sc_tag,
3188 HPT_IDETIM(chp->channel, drive), after);
3189 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3190 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3191 after, before), DEBUG_PROBE);
3192 }
3193 if (idedma_ctl != 0) {
3194 /* Add software bits in status register */
3195 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3196 IDEDMA_CTL, idedma_ctl);
3197 }
3198 pciide_print_modes(cp);
3199 }
3200
3201 int
3202 hpt_pci_intr(arg)
3203 void *arg;
3204 {
3205 struct pciide_softc *sc = arg;
3206 struct pciide_channel *cp;
3207 struct channel_softc *wdc_cp;
3208 int rv = 0;
3209 int dmastat, i, crv;
3210
3211 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3212 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3213 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3214 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3215 continue;
3216 cp = &sc->pciide_channels[i];
3217 wdc_cp = &cp->wdc_channel;
3218 crv = wdcintr(wdc_cp);
3219 if (crv == 0) {
3220 printf("%s:%d: bogus intr\n",
3221 sc->sc_wdcdev.sc_dev.dv_xname, i);
3222 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3223 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3224 } else
3225 rv = 1;
3226 }
3227 return rv;
3228 }
3229
3230
3231 /* Macros to test product */
3232 #define PDC_IS_262(sc) \
3233 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3234 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3235 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3236 #define PDC_IS_265(sc) \
3237 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3238 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3239
3240 void
3241 pdc202xx_chip_map(sc, pa)
3242 struct pciide_softc *sc;
3243 struct pci_attach_args *pa;
3244 {
3245 struct pciide_channel *cp;
3246 int channel;
3247 pcireg_t interface, st, mode;
3248 bus_size_t cmdsize, ctlsize;
3249
3250 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3251 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3252 DEBUG_PROBE);
3253 if (pciide_chipen(sc, pa) == 0)
3254 return;
3255
3256 /* turn off RAID mode */
3257 st &= ~PDC2xx_STATE_IDERAID;
3258
3259 /*
3260 * We can't rely on the PCI_CLASS_REG content if the chip was in
3261 * RAID mode; we have to fake the interface.
3262 */
3263 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3264 if (st & PDC2xx_STATE_NATIVE)
3265 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3266
3267 printf("%s: bus-master DMA support present",
3268 sc->sc_wdcdev.sc_dev.dv_xname);
3269 pciide_mapreg_dma(sc, pa);
3270 printf("\n");
3271 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3272 WDC_CAPABILITY_MODE;
3273 if (sc->sc_dma_ok) {
3274 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3275 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3276 sc->sc_wdcdev.irqack = pciide_irqack;
3277 }
3278 sc->sc_wdcdev.PIO_cap = 4;
3279 sc->sc_wdcdev.DMA_cap = 2;
3280 if (PDC_IS_265(sc))
3281 sc->sc_wdcdev.UDMA_cap = 5;
3282 else if (PDC_IS_262(sc))
3283 sc->sc_wdcdev.UDMA_cap = 4;
3284 else
3285 sc->sc_wdcdev.UDMA_cap = 2;
3286 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3287 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3288 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3289
3290 /* setup failsafe defaults */
3291 mode = 0;
3292 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3293 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3294 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3295 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3296 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3297 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3298 "initial timings 0x%x, now 0x%x\n", channel,
3299 pci_conf_read(sc->sc_pc, sc->sc_tag,
3300 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3301 DEBUG_PROBE);
3302 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3303 mode | PDC2xx_TIM_IORDYp);
3304 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3305 "initial timings 0x%x, now 0x%x\n", channel,
3306 pci_conf_read(sc->sc_pc, sc->sc_tag,
3307 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3308 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3309 mode);
3310 }
3311
3312 mode = PDC2xx_SCR_DMA;
3313 if (PDC_IS_262(sc)) {
3314 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3315 } else {
3316 /* the BIOS set it up this way */
3317 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3318 }
3319 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3320 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3321 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3322 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3323 DEBUG_PROBE);
3324 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3325
3326 /* controller initial state register is OK even without BIOS */
3327 /* Set DMA mode to IDE DMA compatibility */
3328 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3329 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3330 DEBUG_PROBE);
3331 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3332 mode | 0x1);
3333 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3334 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3335 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3336 mode | 0x1);
3337
3338 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3339 cp = &sc->pciide_channels[channel];
3340 if (pciide_chansetup(sc, channel, interface) == 0)
3341 continue;
3342 if ((st & (PDC_IS_262(sc) ?
3343 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3344 printf("%s: %s channel ignored (disabled)\n",
3345 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3346 continue;
3347 }
3348 if (PDC_IS_265(sc))
3349 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3350 pdc20265_pci_intr);
3351 else
3352 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3353 pdc202xx_pci_intr);
3354 if (cp->hw_ok == 0)
3355 continue;
3356 if (pciide_chan_candisable(cp))
3357 st &= ~(PDC_IS_262(sc) ?
3358 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3359 pciide_map_compat_intr(pa, cp, channel, interface);
3360 pdc202xx_setup_channel(&cp->wdc_channel);
3361 }
3362 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3363 DEBUG_PROBE);
3364 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3365 return;
3366 }
3367
3368 void
3369 pdc202xx_setup_channel(chp)
3370 struct channel_softc *chp;
3371 {
3372 struct ata_drive_datas *drvp;
3373 int drive;
3374 pcireg_t mode, st;
3375 u_int32_t idedma_ctl, scr, atapi;
3376 struct pciide_channel *cp = (struct pciide_channel*)chp;
3377 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3378 int channel = chp->channel;
3379
3380 /* setup DMA if needed */
3381 pciide_channel_dma_setup(cp);
3382
3383 idedma_ctl = 0;
3384 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3385 sc->sc_wdcdev.sc_dev.dv_xname,
3386 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3387 DEBUG_PROBE);
3388
3389 /* Per channel settings */
3390 if (PDC_IS_262(sc)) {
3391 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3392 PDC262_U66);
3393 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3394 /* Trim UDMA mode */
3395 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3396 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3397 chp->ch_drive[0].UDMA_mode <= 2) ||
3398 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3399 chp->ch_drive[1].UDMA_mode <= 2)) {
3400 if (chp->ch_drive[0].UDMA_mode > 2)
3401 chp->ch_drive[0].UDMA_mode = 2;
3402 if (chp->ch_drive[1].UDMA_mode > 2)
3403 chp->ch_drive[1].UDMA_mode = 2;
3404 }
3405 /* Set U66 if needed */
3406 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3407 chp->ch_drive[0].UDMA_mode > 2) ||
3408 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3409 chp->ch_drive[1].UDMA_mode > 2))
3410 scr |= PDC262_U66_EN(channel);
3411 else
3412 scr &= ~PDC262_U66_EN(channel);
3413 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3414 PDC262_U66, scr);
3415 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3416 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3417 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3418 PDC262_ATAPI(channel))), DEBUG_PROBE);
3419 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3420 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
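/*
 * PDC262_ATAPI_UDMA is a per-channel setting; enable it unless one
 * drive uses UDMA while the other uses plain multiword DMA.
 */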
3421 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3422 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3423 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3424 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3425 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3426 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3427 atapi = 0;
3428 else
3429 atapi = PDC262_ATAPI_UDMA;
3430 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3431 PDC262_ATAPI(channel), atapi);
3432 }
3433 }
3434 for (drive = 0; drive < 2; drive++) {
3435 drvp = &chp->ch_drive[drive];
3436 /* If no drive, skip */
3437 if ((drvp->drive_flags & DRIVE) == 0)
3438 continue;
3439 mode = 0;
3440 if (drvp->drive_flags & DRIVE_UDMA) {
3441 /* use Ultra/DMA */
3442 drvp->drive_flags &= ~DRIVE_DMA;
3443 mode = PDC2xx_TIM_SET_MB(mode,
3444 pdc2xx_udma_mb[drvp->UDMA_mode]);
3445 mode = PDC2xx_TIM_SET_MC(mode,
3446 pdc2xx_udma_mc[drvp->UDMA_mode]);
3447 drvp->drive_flags &= ~DRIVE_DMA;
3448 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3449 } else if (drvp->drive_flags & DRIVE_DMA) {
3450 mode = PDC2xx_TIM_SET_MB(mode,
3451 pdc2xx_dma_mb[drvp->DMA_mode]);
3452 mode = PDC2xx_TIM_SET_MC(mode,
3453 pdc2xx_dma_mc[drvp->DMA_mode]);
3454 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3455 } else {
3456 mode = PDC2xx_TIM_SET_MB(mode,
3457 pdc2xx_dma_mb[0]);
3458 mode = PDC2xx_TIM_SET_MC(mode,
3459 pdc2xx_dma_mc[0]);
3460 }
3461 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3462 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3463 if (drvp->drive_flags & DRIVE_ATA)
3464 mode |= PDC2xx_TIM_PRE;
3465 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3466 if (drvp->PIO_mode >= 3) {
3467 mode |= PDC2xx_TIM_IORDY;
3468 if (drive == 0)
3469 mode |= PDC2xx_TIM_IORDYp;
3470 }
3471 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3472 "timings 0x%x\n",
3473 sc->sc_wdcdev.sc_dev.dv_xname,
3474 chp->channel, drive, mode), DEBUG_PROBE);
3475 pci_conf_write(sc->sc_pc, sc->sc_tag,
3476 PDC2xx_TIM(chp->channel, drive), mode);
3477 }
3478 if (idedma_ctl != 0) {
3479 /* Add software bits in status register */
3480 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3481 IDEDMA_CTL, idedma_ctl);
3482 }
3483 pciide_print_modes(cp);
3484 }
3485
3486 int
3487 pdc202xx_pci_intr(arg)
3488 void *arg;
3489 {
3490 struct pciide_softc *sc = arg;
3491 struct pciide_channel *cp;
3492 struct channel_softc *wdc_cp;
3493 int i, rv, crv;
3494 u_int32_t scr;
3495
3496 rv = 0;
3497 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
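	/*
	 * PDC2xx_SCR is read once and carries one interrupt-pending bit per
	 * channel; wdcintr() is called only for channels whose bit is set,
	 * and a set bit with no work pending is reported as a bogus
	 * interrupt.
	 */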
3498 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3499 cp = &sc->pciide_channels[i];
3500 wdc_cp = &cp->wdc_channel;
3501 		/* If a compat channel, skip. */
3502 if (cp->compat)
3503 continue;
3504 if (scr & PDC2xx_SCR_INT(i)) {
3505 crv = wdcintr(wdc_cp);
3506 if (crv == 0)
3507 printf("%s:%d: bogus intr (reg 0x%x)\n",
3508 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3509 else
3510 rv = 1;
3511 }
3512 }
3513 return rv;
3514 }
3515
3516 int
3517 pdc20265_pci_intr(arg)
3518 void *arg;
3519 {
3520 struct pciide_softc *sc = arg;
3521 struct pciide_channel *cp;
3522 struct channel_softc *wdc_cp;
3523 int i, rv, crv;
3524 u_int32_t dmastat;
3525
3526 rv = 0;
3527 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3528 cp = &sc->pciide_channels[i];
3529 wdc_cp = &cp->wdc_channel;
3530 		/* If a compat channel, skip. */
3531 if (cp->compat)
3532 continue;
3533 		/*
3534 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
3535 		 * but it does assert INTR in IDEDMA_CTL even for non-DMA
3536 		 * ops, so use that bit instead (two register reads per
3537 		 * channel instead of one, but there is no other way).
3538 		 */
3539 dmastat = bus_space_read_1(sc->sc_dma_iot,
3540 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3541 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3542 continue;
3543 crv = wdcintr(wdc_cp);
3544 if (crv == 0)
3545 printf("%s:%d: bogus intr\n",
3546 sc->sc_wdcdev.sc_dev.dv_xname, i);
3547 else
3548 rv = 1;
3549 }
3550 return rv;
3551 }
3552
3553 void
3554 opti_chip_map(sc, pa)
3555 struct pciide_softc *sc;
3556 struct pci_attach_args *pa;
3557 {
3558 struct pciide_channel *cp;
3559 bus_size_t cmdsize, ctlsize;
3560 pcireg_t interface;
3561 u_int8_t init_ctrl;
3562 int channel;
3563
3564 if (pciide_chipen(sc, pa) == 0)
3565 return;
3566 printf("%s: bus-master DMA support present",
3567 sc->sc_wdcdev.sc_dev.dv_xname);
3568 pciide_mapreg_dma(sc, pa);
3569 printf("\n");
3570
3571 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3572 WDC_CAPABILITY_MODE;
3573 sc->sc_wdcdev.PIO_cap = 4;
3574 if (sc->sc_dma_ok) {
3575 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3576 sc->sc_wdcdev.irqack = pciide_irqack;
3577 sc->sc_wdcdev.DMA_cap = 2;
3578 }
3579 sc->sc_wdcdev.set_modes = opti_setup_channel;
3580
3581 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3582 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3583
3584 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3585 OPTI_REG_INIT_CONTROL);
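	/*
	 * OPTI_REG_INIT_CONTROL is read once up front; its CH2_DISABLE bit
	 * is checked in the loop below so a strapped-off secondary channel
	 * is skipped rather than probed.
	 */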
3586
3587 interface = PCI_INTERFACE(pa->pa_class);
3588
3589 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3590 cp = &sc->pciide_channels[channel];
3591 if (pciide_chansetup(sc, channel, interface) == 0)
3592 continue;
3593 if (channel == 1 &&
3594 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3595 printf("%s: %s channel ignored (disabled)\n",
3596 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3597 continue;
3598 }
3599 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3600 pciide_pci_intr);
3601 if (cp->hw_ok == 0)
3602 continue;
3603 pciide_map_compat_intr(pa, cp, channel, interface);
3604 if (cp->hw_ok == 0)
3605 continue;
3606 opti_setup_channel(&cp->wdc_channel);
3607 }
3608 }
3609
3610 void
3611 opti_setup_channel(chp)
3612 struct channel_softc *chp;
3613 {
3614 struct ata_drive_datas *drvp;
3615 struct pciide_channel *cp = (struct pciide_channel*)chp;
3616 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3617 int drive, spd;
3618 int mode[2];
3619 u_int8_t rv, mr;
3620
3621 /*
3622 * The `Delay' and `Address Setup Time' fields of the
3623 * Miscellaneous Register are always zero initially.
3624 */
3625 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3626 mr &= ~(OPTI_MISC_DELAY_MASK |
3627 OPTI_MISC_ADDR_SETUP_MASK |
3628 OPTI_MISC_INDEX_MASK);
3629
3630 /* Prime the control register before setting timing values */
3631 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3632
3633 	/* Determine the clock rate of the PCI bus the chip is attached to */
3634 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3635 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3636
3637 /* setup DMA if needed */
3638 pciide_channel_dma_setup(cp);
3639
3640 for (drive = 0; drive < 2; drive++) {
3641 drvp = &chp->ch_drive[drive];
3642 /* If no drive, skip */
3643 if ((drvp->drive_flags & DRIVE) == 0) {
3644 mode[drive] = -1;
3645 continue;
3646 }
3647
3648 if ((drvp->drive_flags & DRIVE_DMA)) {
3649 /*
3650 * Timings will be used for both PIO and DMA,
3651 * so adjust DMA mode if needed
3652 */
3653 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3654 drvp->PIO_mode = drvp->DMA_mode + 2;
3655 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3656 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3657 drvp->PIO_mode - 2 : 0;
3658 if (drvp->DMA_mode == 0)
3659 drvp->PIO_mode = 0;
3660
3661 mode[drive] = drvp->DMA_mode + 5;
3662 } else
3663 mode[drive] = drvp->PIO_mode;
3664
3665 if (drive && mode[0] >= 0 &&
3666 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3667 /*
3668 * Can't have two drives using different values
3669 * for `Address Setup Time'.
3670 * Slow down the faster drive to compensate.
3671 */
3672 int d = (opti_tim_as[spd][mode[0]] >
3673 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3674
3675 mode[d] = mode[1-d];
3676 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3677 chp->ch_drive[d].DMA_mode = 0;
3678 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3679 }
3680 }
3681
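	/*
	 * Second pass: program the registers.  For each drive the address
	 * setup time goes into the Miscellaneous register (selected per
	 * drive via the index field), pulse width and recovery time go into
	 * the read and write cycle timing registers, and the mode itself is
	 * encoded into the Enhanced Mode register.
	 */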
3682 for (drive = 0; drive < 2; drive++) {
3683 int m;
3684 if ((m = mode[drive]) < 0)
3685 continue;
3686
3687 /* Set the Address Setup Time and select appropriate index */
3688 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3689 rv |= OPTI_MISC_INDEX(drive);
3690 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3691
3692 /* Set the pulse width and recovery timing parameters */
3693 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3694 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3695 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3696 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3697
3698 /* Set the Enhanced Mode register appropriately */
3699 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3700 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3701 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3702 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3703 }
3704
3705 /* Finally, enable the timings */
3706 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3707
3708 pciide_print_modes(cp);
3709 }
3710
3711 #define ACARD_IS_850(sc) \
3712 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
3713
3714 void
3715 acard_chip_map(sc, pa)
3716 struct pciide_softc *sc;
3717 struct pci_attach_args *pa;
3718 {
3719 struct pciide_channel *cp;
3720 	int i;
3721 pcireg_t interface;
3722 bus_size_t cmdsize, ctlsize;
3723
3724 if (pciide_chipen(sc, pa) == 0)
3725 return;
3726
3727 	/*
3728 	 * When the chip is in native mode it identifies itself as 'misc
3729 	 * mass storage' rather than IDE; fake the interface in that case.
3730 	 */
3731 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3732 interface = PCI_INTERFACE(pa->pa_class);
3733 } else {
3734 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3735 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3736 }
3737
3738 printf("%s: bus-master DMA support present",
3739 sc->sc_wdcdev.sc_dev.dv_xname);
3740 pciide_mapreg_dma(sc, pa);
3741 printf("\n");
3742 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3743 WDC_CAPABILITY_MODE;
3744
3745 if (sc->sc_dma_ok) {
3746 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3747 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3748 sc->sc_wdcdev.irqack = pciide_irqack;
3749 }
3750 sc->sc_wdcdev.PIO_cap = 4;
3751 sc->sc_wdcdev.DMA_cap = 2;
3752 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
3753
3754 sc->sc_wdcdev.set_modes = acard_setup_channel;
3755 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3756 sc->sc_wdcdev.nchannels = 2;
3757
3758 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3759 cp = &sc->pciide_channels[i];
3760 if (pciide_chansetup(sc, i, interface) == 0)
3761 continue;
3762 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3763 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3764 &ctlsize, pciide_pci_intr);
3765 } else {
3766 			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
3767 			    &cmdsize, &ctlsize);
3768 }
3769 if (cp->hw_ok == 0)
3770 return;
3771 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3772 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3773 wdcattach(&cp->wdc_channel);
3774 acard_setup_channel(&cp->wdc_channel);
3775 }
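	/*
	 * On 860-family chips the INT bit in the shared control register is
	 * cleared once the channels are attached; the ATP850 does not get
	 * this treatment.  (XXX the precise meaning of ATP860_CTRL_INT is
	 * not spelled out here.)
	 */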
3776 if (!ACARD_IS_850(sc)) {
3777 u_int32_t reg;
3778 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
3779 reg &= ~ATP860_CTRL_INT;
3780 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
3781 }
3782 }
3783
3784 void
3785 acard_setup_channel(chp)
3786 struct channel_softc *chp;
3787 {
3788 struct ata_drive_datas *drvp;
3789 struct pciide_channel *cp = (struct pciide_channel*)chp;
3790 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3791 int channel = chp->channel;
3792 int drive;
3793 u_int32_t idetime, udma_mode;
3794 u_int32_t idedma_ctl;
3795
3796 /* setup DMA if needed */
3797 pciide_channel_dma_setup(cp);
3798
3799 if (ACARD_IS_850(sc)) {
3800 idetime = 0;
3801 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
3802 udma_mode &= ~ATP850_UDMA_MASK(channel);
3803 } else {
3804 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
3805 idetime &= ~ATP860_SETTIME_MASK(channel);
3806 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
3807 udma_mode &= ~ATP860_UDMA_MASK(channel);
3808 }
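	/*
	 * The register layout differs between the two families: the ATP850
	 * has a separate IDETIME register per channel (plus per-channel
	 * fields in its UDMA register), while the 860-family packs both
	 * channels into single IDETIME and UDMA registers, hence the
	 * per-channel masks above and the different writes at the end of
	 * this function.
	 */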
3809
3810 idedma_ctl = 0;
3811
3812 /* Per drive settings */
3813 for (drive = 0; drive < 2; drive++) {
3814 drvp = &chp->ch_drive[drive];
3815 /* If no drive, skip */
3816 if ((drvp->drive_flags & DRIVE) == 0)
3817 continue;
3818 /* add timing values, setup DMA if needed */
3819 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
3820 (drvp->drive_flags & DRIVE_UDMA)) {
3821 /* use Ultra/DMA */
3822 if (ACARD_IS_850(sc)) {
3823 idetime |= ATP850_SETTIME(drive,
3824 acard_act_udma[drvp->UDMA_mode],
3825 acard_rec_udma[drvp->UDMA_mode]);
3826 udma_mode |= ATP850_UDMA_MODE(channel, drive,
3827 acard_udma_conf[drvp->UDMA_mode]);
3828 } else {
3829 idetime |= ATP860_SETTIME(channel, drive,
3830 acard_act_udma[drvp->UDMA_mode],
3831 acard_rec_udma[drvp->UDMA_mode]);
3832 udma_mode |= ATP860_UDMA_MODE(channel, drive,
3833 acard_udma_conf[drvp->UDMA_mode]);
3834 }
3835 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3836 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
3837 (drvp->drive_flags & DRIVE_DMA)) {
3838 /* use Multiword DMA */
3839 drvp->drive_flags &= ~DRIVE_UDMA;
3840 if (ACARD_IS_850(sc)) {
3841 idetime |= ATP850_SETTIME(drive,
3842 acard_act_dma[drvp->DMA_mode],
3843 acard_rec_dma[drvp->DMA_mode]);
3844 } else {
3845 idetime |= ATP860_SETTIME(channel, drive,
3846 acard_act_dma[drvp->DMA_mode],
3847 acard_rec_dma[drvp->DMA_mode]);
3848 }
3849 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3850 } else {
3851 /* PIO only */
3852 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
3853 if (ACARD_IS_850(sc)) {
3854 idetime |= ATP850_SETTIME(drive,
3855 acard_act_pio[drvp->PIO_mode],
3856 acard_rec_pio[drvp->PIO_mode]);
3857 } else {
3858 idetime |= ATP860_SETTIME(channel, drive,
3859 acard_act_pio[drvp->PIO_mode],
3860 acard_rec_pio[drvp->PIO_mode]);
3861 }
3862 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
3863 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
3864 | ATP8x0_CTRL_EN(channel));
3865 }
3866 }
3867
3868 if (idedma_ctl != 0) {
3869 /* Add software bits in status register */
3870 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3871 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
3872 }
3873 pciide_print_modes(cp);
3874
3875 if (ACARD_IS_850(sc)) {
3876 pci_conf_write(sc->sc_pc, sc->sc_tag,
3877 ATP850_IDETIME(channel), idetime);
3878 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
3879 } else {
3880 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
3881 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
3882 }
3883 }
3884
3885 int
3886 acard_pci_intr(arg)
3887 void *arg;
3888 {
3889 struct pciide_softc *sc = arg;
3890 struct pciide_channel *cp;
3891 struct channel_softc *wdc_cp;
3892 int rv = 0;
3893 int dmastat, i, crv;
3894
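	/*
	 * For each channel the DMA status register is checked first; if
	 * INTR isn't set the interrupt isn't ours.  If the channel isn't
	 * waiting for an interrupt (WDCF_IRQ_WAIT clear), the interrupt is
	 * handed to wdcintr(), acknowledged by writing the status back, and
	 * then skipped without being claimed.
	 */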
3895 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3896 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3897 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3898 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3899 continue;
3900 cp = &sc->pciide_channels[i];
3901 wdc_cp = &cp->wdc_channel;
3902 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
3903 (void)wdcintr(wdc_cp);
3904 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3905 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3906 continue;
3907 }
3908 crv = wdcintr(wdc_cp);
3909 if (crv == 0)
3910 printf("%s:%d: bogus intr\n",
3911 sc->sc_wdcdev.sc_dev.dv_xname, i);
3912 else if (crv == 1)
3913 rv = 1;
3914 else if (rv == 0)
3915 rv = crv;
3916 }
3917 return rv;
3918 }
3919