/*	$NetBSD: pciide.c,v 1.68.2.27 2001/11/13 21:48:11 he Exp $	*/


/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou
 *      for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#ifndef WDCDEBUG
#define WDCDEBUG
#endif

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10
#ifdef WDCDEBUG
int wdcdebug_pciide_mask = 0;
#define WDCDEBUG_PRINT(args, level) \
    if (wdcdebug_pciide_mask & (level)) printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <machine/endian.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/cy82c693var.h>

#include "opt_pciide.h"

/* inlines for reading/writing 8-bit PCI registers */
static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
    int));
static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
    int, u_int8_t));

static __inline u_int8_t
pciide_pci_read(pc, pa, reg)
    pci_chipset_tag_t pc;
    pcitag_t pa;
    int reg;
{

    return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
        ((reg & 0x03) * 8) & 0xff);
}

static __inline void
pciide_pci_write(pc, pa, reg, val)
    pci_chipset_tag_t pc;
    pcitag_t pa;
    int reg;
    u_int8_t val;
{
    pcireg_t pcival;

    pcival = pci_conf_read(pc, pa, (reg & ~0x03));
    pcival &= ~(0xff << ((reg & 0x03) * 8));
    pcival |= (val << ((reg & 0x03) * 8));
    pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}

void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));

void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void piix_setup_channel __P((struct channel_softc*));
void piix3_4_setup_channel __P((struct channel_softc*));
static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));

void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void amd7x6_setup_channel __P((struct channel_softc*));

void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void apollo_setup_channel __P((struct channel_softc*));

void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_setup_channel __P((struct channel_softc*));
void cmd_channel_map __P((struct pci_attach_args *,
    struct pciide_softc *, int));
int cmd_pci_intr __P((void *));
void cmd646_9_irqack __P((struct channel_softc *));

void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cy693_setup_channel __P((struct channel_softc*));

void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void sis_setup_channel __P((struct channel_softc*));

void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void acer_setup_channel __P((struct channel_softc*));
int acer_pci_intr __P((void *));

void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void pdc202xx_setup_channel __P((struct channel_softc*));
int pdc202xx_pci_intr __P((void *));
int pdc20265_pci_intr __P((void *));

void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void opti_setup_channel __P((struct channel_softc*));

void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void hpt_setup_channel __P((struct channel_softc*));
int hpt_pci_intr __P((void *));

void pciide_channel_dma_setup __P((struct pciide_channel *));
int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
int pciide_dma_init __P((void*, int, int, void *, size_t, int));
void pciide_dma_start __P((void*, int, int));
int pciide_dma_finish __P((void*, int, int, int));
void pciide_irqack __P((struct channel_softc *));
void pciide_print_modes __P((struct pciide_channel *));

struct pciide_product_desc {
    u_int32_t ide_product;
    int ide_flags;
    const char *ide_name;
    /* map and setup chip, probe drives */
    void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001 /* accept even if class != pciide */

/* Default product description for devices not known to this driver */
const struct pciide_product_desc default_product_desc = {
    0,
    0,
    "Generic PCI IDE controller",
    default_chip_map,
};

const struct pciide_product_desc pciide_intel_products[] = {
    { PCI_PRODUCT_INTEL_82092AA,
      0,
      "Intel 82092AA IDE controller",
      default_chip_map,
    },
    { PCI_PRODUCT_INTEL_82371FB_IDE,
      0,
      "Intel 82371FB IDE controller (PIIX)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82371SB_IDE,
      0,
      "Intel 82371SB IDE Interface (PIIX3)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82371AB_IDE,
      0,
      "Intel 82371AB IDE controller (PIIX4)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82801AA_IDE,
      0,
      "Intel 82801AA IDE Controller (ICH)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82801AB_IDE,
      0,
      "Intel 82801AB IDE Controller (ICH0)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82801BA_IDE,
      0,
      "Intel 82801BA IDE Controller (ICH2)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82801BAM_IDE,
      0,
      "Intel 82801BAM IDE Controller (ICH2)",
      piix_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_amd_products[] = {
    { PCI_PRODUCT_AMD_PBC756_IDE,
      0,
      "Advanced Micro Devices AMD756 IDE Controller",
      amd7x6_chip_map
    },
    { PCI_PRODUCT_AMD_PBC766_IDE,
      0,
      "Advanced Micro Devices AMD766 IDE Controller",
      amd7x6_chip_map
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_cmd_products[] = {
    { PCI_PRODUCT_CMDTECH_640,
      0,
      "CMD Technology PCI0640",
      cmd_chip_map
    },
    { PCI_PRODUCT_CMDTECH_643,
      0,
      "CMD Technology PCI0643",
      cmd0643_9_chip_map,
    },
    { PCI_PRODUCT_CMDTECH_646,
      0,
      "CMD Technology PCI0646",
      cmd0643_9_chip_map,
    },
    { PCI_PRODUCT_CMDTECH_648,
      IDE_PCI_CLASS_OVERRIDE,
      "CMD Technology PCI0648",
      cmd0643_9_chip_map,
    },
    { PCI_PRODUCT_CMDTECH_649,
      IDE_PCI_CLASS_OVERRIDE,
      "CMD Technology PCI0649",
      cmd0643_9_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_via_products[] = {
    { PCI_PRODUCT_VIATECH_VT82C586_IDE,
      0,
      NULL,
      apollo_chip_map,
    },
    { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
      0,
      NULL,
      apollo_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_cypress_products[] = {
    { PCI_PRODUCT_CONTAQ_82C693,
      0,
      "Cypress 82C693 IDE Controller",
      cy693_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_sis_products[] = {
    { PCI_PRODUCT_SIS_5597_IDE,
      0,
      "Silicon Integrated System 5597/5598 IDE controller",
      sis_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_acer_products[] = {
    { PCI_PRODUCT_ALI_M5229,
      0,
      "Acer Labs M5229 UDMA IDE Controller",
      acer_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_promise_products[] = {
    { PCI_PRODUCT_PROMISE_ULTRA33,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra33/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA66,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra66/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA100,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra100/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA100X,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra100/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_opti_products[] = {
    { PCI_PRODUCT_OPTI_82C621,
      0,
      "OPTi 82c621 PCI IDE controller",
      opti_chip_map,
    },
    { PCI_PRODUCT_OPTI_82C568,
      0,
      "OPTi 82c568 (82c621 compatible) PCI IDE controller",
      opti_chip_map,
    },
    { PCI_PRODUCT_OPTI_82D568,
      0,
      "OPTi 82d568 (82c621 compatible) PCI IDE controller",
      opti_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_triones_products[] = {
    { PCI_PRODUCT_TRIONES_HPT366,
      IDE_PCI_CLASS_OVERRIDE,
      NULL,
      hpt_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

struct pciide_vendor_desc {
    u_int32_t ide_vendor;
    const struct pciide_product_desc *ide_products;
};

const struct pciide_vendor_desc pciide_vendors[] = {
    { PCI_VENDOR_INTEL, pciide_intel_products },
    { PCI_VENDOR_CMDTECH, pciide_cmd_products },
    { PCI_VENDOR_VIATECH, pciide_via_products },
    { PCI_VENDOR_CONTAQ, pciide_cypress_products },
    { PCI_VENDOR_SIS, pciide_sis_products },
    { PCI_VENDOR_ALI, pciide_acer_products },
    { PCI_VENDOR_PROMISE, pciide_promise_products },
    { PCI_VENDOR_AMD, pciide_amd_products },
    { PCI_VENDOR_OPTI, pciide_opti_products },
    { PCI_VENDOR_TRIONES, pciide_triones_products },
    { 0, NULL }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01

int pciide_match __P((struct device *, struct cfdata *, void *));
void pciide_attach __P((struct device *, struct device *, void *));

struct cfattach pciide_ca = {
    sizeof(struct pciide_softc), pciide_match, pciide_attach
};
int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
int pciide_mapregs_compat __P((struct pci_attach_args *,
    struct pciide_channel *, int, bus_size_t *, bus_size_t *));
int pciide_mapregs_native __P((struct pci_attach_args *,
    struct pciide_channel *, bus_size_t *, bus_size_t *,
    int (*pci_intr) __P((void *))));
void pciide_mapreg_dma __P((struct pciide_softc *,
    struct pci_attach_args *));
int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
void pciide_mapchan __P((struct pci_attach_args *,
    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
    int (*pci_intr) __P((void *))));
int pciide_chan_candisable __P((struct pciide_channel *));
void pciide_map_compat_intr __P((struct pci_attach_args *,
    struct pciide_channel *, int, int));
int pciide_print __P((void *, const char *pnp));
int pciide_compat_intr __P((void *));
int pciide_pci_intr __P((void *));
const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));

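/*
 * Look up a PCI ID in the vendor/product tables above.  Returns the
 * matching product descriptor, or NULL if the device is not known.
 */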
const struct pciide_product_desc *
pciide_lookup_product(id)
    u_int32_t id;
{
    const struct pciide_product_desc *pp;
    const struct pciide_vendor_desc *vp;

    for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
        if (PCI_VENDOR(id) == vp->ide_vendor)
            break;

    if ((pp = vp->ide_products) == NULL)
        return NULL;

    for (; pp->chip_map != NULL; pp++)
        if (PCI_PRODUCT(id) == pp->ide_product)
            break;

    if (pp->chip_map == NULL)
        return NULL;
    return pp;
}

int
pciide_match(parent, match, aux)
    struct device *parent;
    struct cfdata *match;
    void *aux;
{
    struct pci_attach_args *pa = aux;
    const struct pciide_product_desc *pp;

    /*
     * Check the class register to see that it's a PCI IDE controller.
     * If it is, we assume that we can deal with it; it _should_
     * work in a standardized way...
     */
    if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
        PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
        return (1);
    }

    /*
     * Some controllers (e.g. the Promise Ultra-33) don't claim to be
     * PCI IDE controllers.  Let's see if we can deal with them anyway.
     */
    pp = pciide_lookup_product(pa->pa_id);
    if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
        return (1);
    }

    return (0);
}

void
pciide_attach(parent, self, aux)
    struct device *parent, *self;
    void *aux;
{
    struct pci_attach_args *pa = aux;
    pci_chipset_tag_t pc = pa->pa_pc;
    pcitag_t tag = pa->pa_tag;
    struct pciide_softc *sc = (struct pciide_softc *)self;
    pcireg_t csr;
    char devinfo[256];
    const char *displaydev;

    sc->sc_pp = pciide_lookup_product(pa->pa_id);
    if (sc->sc_pp == NULL) {
        sc->sc_pp = &default_product_desc;
        pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
        displaydev = devinfo;
    } else
        displaydev = sc->sc_pp->ide_name;

    /* if displaydev == NULL, printf is done in chip-specific map */
    if (displaydev)
        printf(": %s (rev. 0x%02x)\n", displaydev,
            PCI_REVISION(pa->pa_class));

    sc->sc_pc = pa->pa_pc;
    sc->sc_tag = pa->pa_tag;
#ifdef WDCDEBUG
    if (wdcdebug_pciide_mask & DEBUG_PROBE)
        pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
    sc->sc_pp->chip_map(sc, pa);

    if (sc->sc_dma_ok) {
        csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
        csr |= PCI_COMMAND_MASTER_ENABLE;
        pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
    }
    WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
        pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}

/* tell whether the chip is enabled or not */
int
pciide_chipen(sc, pa)
    struct pciide_softc *sc;
    struct pci_attach_args *pa;
{
    pcireg_t csr;
    if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
        csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
            PCI_COMMAND_STATUS_REG);
        printf("%s: device disabled (at %s)\n",
            sc->sc_wdcdev.sc_dev.dv_xname,
            (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
            "device" : "bridge");
        return 0;
    }
    return 1;
}

int
pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
    struct pci_attach_args *pa;
    struct pciide_channel *cp;
    int compatchan;
    bus_size_t *cmdsizep, *ctlsizep;
{
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct channel_softc *wdc_cp = &cp->wdc_channel;

    cp->compat = 1;
    *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
    *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

    wdc_cp->cmd_iot = pa->pa_iot;
    if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
        PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
        printf("%s: couldn't map %s channel cmd regs\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        return (0);
    }

    wdc_cp->ctl_iot = pa->pa_iot;
    if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
        PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
        printf("%s: couldn't map %s channel ctl regs\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
            PCIIDE_COMPAT_CMD_SIZE);
        return (0);
    }

    return (1);
}

int
pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
    struct pci_attach_args *pa;
    struct pciide_channel *cp;
    bus_size_t *cmdsizep, *ctlsizep;
    int (*pci_intr) __P((void *));
{
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct channel_softc *wdc_cp = &cp->wdc_channel;
    const char *intrstr;
    pci_intr_handle_t intrhandle;

    cp->compat = 0;

    if (sc->sc_pci_ih == NULL) {
        if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
            pa->pa_intrline, &intrhandle) != 0) {
            printf("%s: couldn't map native-PCI interrupt\n",
                sc->sc_wdcdev.sc_dev.dv_xname);
            return 0;
        }
        intrstr = pci_intr_string(pa->pa_pc, intrhandle);
        sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
            intrhandle, IPL_BIO, pci_intr, sc);
        if (sc->sc_pci_ih != NULL) {
            printf("%s: using %s for native-PCI interrupt\n",
                sc->sc_wdcdev.sc_dev.dv_xname,
                intrstr ? intrstr : "unknown interrupt");
        } else {
            printf("%s: couldn't establish native-PCI interrupt",
                sc->sc_wdcdev.sc_dev.dv_xname);
            if (intrstr != NULL)
                printf(" at %s", intrstr);
            printf("\n");
            return 0;
        }
    }
    cp->ih = sc->sc_pci_ih;
    if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
        PCI_MAPREG_TYPE_IO, 0,
        &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
        printf("%s: couldn't map %s channel cmd regs\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        return 0;
    }

    if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
        PCI_MAPREG_TYPE_IO, 0,
        &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
        printf("%s: couldn't map %s channel ctl regs\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
        return 0;
    }
    /*
     * In native mode, 4 bytes of I/O space are mapped for the control
     * register; the control register is at offset 2.  Pass the generic
     * code a handle for only one byte at the right offset.
     */
    if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
        &wdc_cp->ctl_ioh) != 0) {
        printf("%s: unable to subregion %s channel ctl regs\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
        bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
        return 0;
    }
    return (1);
}

void
pciide_mapreg_dma(sc, pa)
    struct pciide_softc *sc;
    struct pci_attach_args *pa;
{
    pcireg_t maptype;

    /*
     * Map DMA registers
     *
     * Note that sc_dma_ok is the right variable to test to see if
     * DMA can be done.  If the interface doesn't support DMA,
     * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
     * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
     * non-zero if the interface supports DMA and the registers
     * could be mapped.
     *
     * XXX Note that despite the fact that the Bus Master IDE specs
     * XXX say that "The bus master IDE function uses 16 bytes of IO
     * XXX space," some controllers (at least the United
     * XXX Microelectronics UM8886BF) place it in memory space.
     */
    maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
        PCIIDE_REG_BUS_MASTER_DMA);

    switch (maptype) {
    case PCI_MAPREG_TYPE_IO:
    case PCI_MAPREG_MEM_TYPE_32BIT:
        sc->sc_dma_ok = (pci_mapreg_map(pa,
            PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
            &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
        sc->sc_dmat = pa->pa_dmat;
        if (sc->sc_dma_ok == 0) {
            printf(", but unused (couldn't map registers)");
        } else {
            sc->sc_wdcdev.dma_arg = sc;
            sc->sc_wdcdev.dma_init = pciide_dma_init;
            sc->sc_wdcdev.dma_start = pciide_dma_start;
            sc->sc_wdcdev.dma_finish = pciide_dma_finish;
        }
        break;

    default:
        sc->sc_dma_ok = 0;
        printf(", but unsupported register maptype (0x%x)", maptype);
    }
}

int
pciide_compat_intr(arg)
    void *arg;
{
    struct pciide_channel *cp = arg;

#ifdef DIAGNOSTIC
    /* should only be called for a compat channel */
    if (cp->compat == 0)
        panic("pciide compat intr called for non-compat chan %p\n", cp);
#endif
    return (wdcintr(&cp->wdc_channel));
}

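/*
 * Native-PCI interrupt handler.  All native channels of a controller share
 * a single interrupt line, so poll every non-compat channel that is waiting
 * for an interrupt and let wdcintr() decide whether it was ours.
 */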
int
pciide_pci_intr(arg)
    void *arg;
{
    struct pciide_softc *sc = arg;
    struct pciide_channel *cp;
    struct channel_softc *wdc_cp;
    int i, rv, crv;

    rv = 0;
    for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
        cp = &sc->pciide_channels[i];
        wdc_cp = &cp->wdc_channel;

        /* If a compat channel, skip. */
        if (cp->compat)
            continue;
        /* if this channel is not waiting for an intr, skip */
        if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
            continue;

        crv = wdcintr(wdc_cp);
        if (crv == 0)
            ;           /* leave rv alone */
        else if (crv == 1)
            rv = 1;     /* claim the intr */
        else if (rv == 0)   /* crv should be -1 in this case */
            rv = crv;   /* if we've done no better, take it */
    }
    return (rv);
}

void
pciide_channel_dma_setup(cp)
    struct pciide_channel *cp;
{
    int drive;
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct ata_drive_datas *drvp;

    for (drive = 0; drive < 2; drive++) {
        drvp = &cp->wdc_channel.ch_drive[drive];
        /* If no drive, skip */
        if ((drvp->drive_flags & DRIVE) == 0)
            continue;
        /* setup DMA if needed */
        if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
            (drvp->drive_flags & DRIVE_UDMA) == 0) ||
            sc->sc_dma_ok == 0) {
            drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
            continue;
        }
        if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
            != 0) {
            /* Abort DMA setup */
            drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
            continue;
        }
    }
}

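/*
 * Allocate and map the descriptor table for one drive, and create the DMA
 * maps used to load the table and the data buffers.  Only does the work the
 * first time it is called for a drive; later calls just return success.
 */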
int
pciide_dma_table_setup(sc, channel, drive)
    struct pciide_softc *sc;
    int channel, drive;
{
    bus_dma_segment_t seg;
    int error, rseg;
    const bus_size_t dma_table_size =
        sizeof(struct idedma_table) * NIDEDMA_TABLES;
    struct pciide_dma_maps *dma_maps =
        &sc->pciide_channels[channel].dma_maps[drive];

    /* If table was already allocated, just return */
    if (dma_maps->dma_table)
        return 0;

    /* Allocate memory for the DMA tables and map it */
    if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
        IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
        BUS_DMA_NOWAIT)) != 0) {
        printf("%s:%d: unable to allocate table DMA for "
            "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
            channel, drive, error);
        return error;
    }
    if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
        dma_table_size,
        (caddr_t *)&dma_maps->dma_table,
        BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
        printf("%s:%d: unable to map table DMA for "
            "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
            channel, drive, error);
        return error;
    }
    WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
        "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
        seg.ds_addr), DEBUG_PROBE);

    /* Create and load table DMA map for this disk */
    if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
        1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
        &dma_maps->dmamap_table)) != 0) {
        printf("%s:%d: unable to create table DMA map for "
            "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
            channel, drive, error);
        return error;
    }
    if ((error = bus_dmamap_load(sc->sc_dmat,
        dma_maps->dmamap_table,
        dma_maps->dma_table,
        dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
        printf("%s:%d: unable to load table DMA map for "
            "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
            channel, drive, error);
        return error;
    }
    WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
        dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
    /* Create an xfer DMA map for this drive */
    if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
        NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
        BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
        &dma_maps->dmamap_xfer)) != 0) {
        printf("%s:%d: unable to create xfer DMA map for "
            "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
            channel, drive, error);
        return error;
    }
    return 0;
}

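/*
 * Prepare a DMA transfer: load the data buffer into the xfer map, fill in
 * the descriptor table, then program the bus-master registers with the
 * table address and transfer direction.
 */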
int
pciide_dma_init(v, channel, drive, databuf, datalen, flags)
    void *v;
    int channel, drive;
    void *databuf;
    size_t datalen;
    int flags;
{
    struct pciide_softc *sc = v;
    int error, seg;
    struct pciide_dma_maps *dma_maps =
        &sc->pciide_channels[channel].dma_maps[drive];

    error = bus_dmamap_load(sc->sc_dmat,
        dma_maps->dmamap_xfer,
        databuf, datalen, NULL, BUS_DMA_NOWAIT);
    if (error) {
        printf("%s:%d: unable to load xfer DMA map for "
            "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
            channel, drive, error);
        return error;
    }

    bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
        dma_maps->dmamap_xfer->dm_mapsize,
        (flags & WDC_DMA_READ) ?
        BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

    for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
        /* A segment must not cross a 64k boundary */
        {
        u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
        u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
        if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
            ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
            printf("pciide_dma: segment %d physical addr 0x%lx"
                " len 0x%lx not properly aligned\n",
                seg, phys, len);
            panic("pciide_dma: buf align");
        }
        }
#endif
        dma_maps->dma_table[seg].base_addr =
            htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
        dma_maps->dma_table[seg].byte_count =
            htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
            IDEDMA_BYTE_COUNT_MASK);
        WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
            seg, le32toh(dma_maps->dma_table[seg].byte_count),
            le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

    }
    dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
        htole32(IDEDMA_BYTE_COUNT_EOT);

    bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
        dma_maps->dmamap_table->dm_mapsize,
        BUS_DMASYNC_PREWRITE);

    /* Maps are ready.  Start DMA function */
#ifdef DIAGNOSTIC
    if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
        printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
            dma_maps->dmamap_table->dm_segs[0].ds_addr);
        panic("pciide_dma_init: table align");
    }
#endif

    /* Clear status bits */
    bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
        bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
    /* Write table addr */
    bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
        dma_maps->dmamap_table->dm_segs[0].ds_addr);
    /* set read/write */
    bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
        (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0);
    /* remember flags */
    dma_maps->dma_flags = flags;
    return 0;
}

void
pciide_dma_start(v, channel, drive)
    void *v;
    int channel, drive;
{
    struct pciide_softc *sc = v;

    WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS);
    bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
        bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
}

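/*
 * If the transfer completed (or 'force' is set), stop the bus-master
 * engine, sync and unload the data buffer map, and translate the status
 * register into WDC_DMAST_* flags for the caller.
 */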
int
pciide_dma_finish(v, channel, drive, force)
    void *v;
    int channel, drive;
    int force;
{
    struct pciide_softc *sc = v;
    u_int8_t status;
    int error = 0;
    struct pciide_dma_maps *dma_maps =
        &sc->pciide_channels[channel].dma_maps[drive];

    status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
    WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
        DEBUG_XFERS);

    if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
        return WDC_DMAST_NOIRQ;

    /* stop DMA channel */
    bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
        bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);

    /* Unload the map of the data buffer */
    bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
        dma_maps->dmamap_xfer->dm_mapsize,
        (dma_maps->dma_flags & WDC_DMA_READ) ?
        BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

    if ((status & IDEDMA_CTL_ERR) != 0) {
        printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
            sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
        error |= WDC_DMAST_ERR;
    }

    if ((status & IDEDMA_CTL_INTR) == 0) {
        printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
            "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
            drive, status);
        error |= WDC_DMAST_NOIRQ;
    }

    if ((status & IDEDMA_CTL_ACT) != 0) {
        /* data underrun, may be a valid condition for ATAPI */
        error |= WDC_DMAST_UNDER;
    }
    return error;
}

void
pciide_irqack(chp)
    struct channel_softc *chp;
{
    struct pciide_channel *cp = (struct pciide_channel *)chp;
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

    /* clear status bits in IDE DMA registers */
    bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
        bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
}

/* some common code used by several chip_map */
int
pciide_chansetup(sc, channel, interface)
    struct pciide_softc *sc;
    int channel;
    pcireg_t interface;
{
    struct pciide_channel *cp = &sc->pciide_channels[channel];
    sc->wdc_chanarray[channel] = &cp->wdc_channel;
    cp->name = PCIIDE_CHANNEL_NAME(channel);
    cp->wdc_channel.channel = channel;
    cp->wdc_channel.wdc = &sc->sc_wdcdev;
    cp->wdc_channel.ch_queue =
        malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
    if (cp->wdc_channel.ch_queue == NULL) {
        printf("%s %s channel: "
            "can't allocate memory for command queue\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        return 0;
    }
    printf("%s: %s channel %s to %s mode\n",
        sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
        (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
        "configured" : "wired",
        (interface & PCIIDE_INTERFACE_PCI(channel)) ?
        "native-PCI" : "compatibility");
    return 1;
}

/* some common code used by several chip channel_map */
void
pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
    struct pci_attach_args *pa;
    struct pciide_channel *cp;
    pcireg_t interface;
    bus_size_t *cmdsizep, *ctlsizep;
    int (*pci_intr) __P((void *));
{
    struct channel_softc *wdc_cp = &cp->wdc_channel;

    if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
        cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
            pci_intr);
    else
        cp->hw_ok = pciide_mapregs_compat(pa, cp,
            wdc_cp->channel, cmdsizep, ctlsizep);

    if (cp->hw_ok == 0)
        return;
    wdc_cp->data32iot = wdc_cp->cmd_iot;
    wdc_cp->data32ioh = wdc_cp->cmd_ioh;
    wdcattach(wdc_cp);
}

/*
 * Generic code to call to know if a channel can be disabled.  Return 1
 * if channel can be disabled, 0 if not
 */
int
pciide_chan_candisable(cp)
    struct pciide_channel *cp;
{
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct channel_softc *wdc_cp = &cp->wdc_channel;

    if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
        (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
        printf("%s: disabling %s channel (no drives)\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        cp->hw_ok = 0;
        return 1;
    }
    return 0;
}

/*
 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
 * Set hw_ok=0 on failure
 */
void
pciide_map_compat_intr(pa, cp, compatchan, interface)
    struct pci_attach_args *pa;
    struct pciide_channel *cp;
    int compatchan, interface;
{
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct channel_softc *wdc_cp = &cp->wdc_channel;

    if (cp->hw_ok == 0)
        return;
    if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
        return;

    cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
        pa, compatchan, pciide_compat_intr, cp);
    if (cp->ih == NULL) {
        printf("%s: no compatibility interrupt for use by %s "
            "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        cp->hw_ok = 0;
    }
}

void
pciide_print_modes(cp)
    struct pciide_channel *cp;
{
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    int drive;
    struct channel_softc *chp;
    struct ata_drive_datas *drvp;

    chp = &cp->wdc_channel;
    for (drive = 0; drive < 2; drive++) {
        drvp = &chp->ch_drive[drive];
        if ((drvp->drive_flags & DRIVE) == 0)
            continue;
        printf("%s(%s:%d:%d): using PIO mode %d",
            drvp->drv_softc->dv_xname,
            sc->sc_wdcdev.sc_dev.dv_xname,
            chp->channel, drive, drvp->PIO_mode);
        if (drvp->drive_flags & DRIVE_DMA)
            printf(", DMA mode %d", drvp->DMA_mode);
        if (drvp->drive_flags & DRIVE_UDMA)
            printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
        if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
            printf(" (using DMA data transfers)");
        printf("\n");
    }
}

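/*
 * Generic chip_map, used for controllers with no chip-specific support.
 * Channels are probed and attached as-is; for unknown controllers,
 * bus-master DMA is only used if enabled with the PCIIDE_OPTIONS_DMA
 * config flag.
 */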
void
default_chip_map(sc, pa)
    struct pciide_softc *sc;
    struct pci_attach_args *pa;
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);
    pcireg_t csr;
    int channel, drive;
    struct ata_drive_datas *drvp;
    u_int8_t idedma_ctl;
    bus_size_t cmdsize, ctlsize;
    char *failreason;

    if (pciide_chipen(sc, pa) == 0)
        return;

    if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
        printf("%s: bus-master DMA support present",
            sc->sc_wdcdev.sc_dev.dv_xname);
        if (sc->sc_pp == &default_product_desc &&
            (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
            PCIIDE_OPTIONS_DMA) == 0) {
            printf(", but unused (no driver support)");
            sc->sc_dma_ok = 0;
        } else {
            pciide_mapreg_dma(sc, pa);
            if (sc->sc_dma_ok != 0)
                printf(", used without full driver "
                    "support");
        }
    } else {
        printf("%s: hardware does not support DMA",
            sc->sc_wdcdev.sc_dev.dv_xname);
        sc->sc_dma_ok = 0;
    }
    printf("\n");
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.PIO_cap = 0;
    sc->sc_wdcdev.DMA_cap = 0;

    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;

    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        if (interface & PCIIDE_INTERFACE_PCI(channel)) {
            cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
                &ctlsize, pciide_pci_intr);
        } else {
            cp->hw_ok = pciide_mapregs_compat(pa, cp,
                channel, &cmdsize, &ctlsize);
        }
        if (cp->hw_ok == 0)
            continue;
        /*
         * Check to see if something appears to be there.
         */
        failreason = NULL;
        if (!wdcprobe(&cp->wdc_channel)) {
            failreason = "not responding; disabled or no drives?";
            goto next;
        }
        /*
         * Now, make sure it's actually attributable to this PCI IDE
         * channel by trying to access the channel again while the
         * PCI IDE controller's I/O space is disabled.  (If the
         * channel no longer appears to be there, it belongs to
         * this controller.)  YUCK!
         */
        csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
            PCI_COMMAND_STATUS_REG);
        pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
            csr & ~PCI_COMMAND_IO_ENABLE);
        if (wdcprobe(&cp->wdc_channel))
            failreason = "other hardware responding at addresses";
        pci_conf_write(sc->sc_pc, sc->sc_tag,
            PCI_COMMAND_STATUS_REG, csr);
next:
        if (failreason) {
            printf("%s: %s channel ignored (%s)\n",
                sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
                failreason);
            cp->hw_ok = 0;
            bus_space_unmap(cp->wdc_channel.cmd_iot,
                cp->wdc_channel.cmd_ioh, cmdsize);
            bus_space_unmap(cp->wdc_channel.ctl_iot,
                cp->wdc_channel.ctl_ioh, ctlsize);
        } else {
            pciide_map_compat_intr(pa, cp, channel, interface);
        }
        if (cp->hw_ok) {
            cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
            cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
            wdcattach(&cp->wdc_channel);
        }
    }

    if (sc->sc_dma_ok == 0)
        return;

    /* Allocate DMA maps */
    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        idedma_ctl = 0;
        cp = &sc->pciide_channels[channel];
        for (drive = 0; drive < 2; drive++) {
            drvp = &cp->wdc_channel.ch_drive[drive];
            /* If no drive, skip */
            if ((drvp->drive_flags & DRIVE) == 0)
                continue;
            if ((drvp->drive_flags & DRIVE_DMA) == 0)
                continue;
            if (pciide_dma_table_setup(sc, channel, drive) != 0) {
                /* Abort DMA setup */
                printf("%s:%d:%d: can't allocate DMA maps, "
                    "using PIO transfers\n",
                    sc->sc_wdcdev.sc_dev.dv_xname,
                    channel, drive);
                drvp->drive_flags &= ~DRIVE_DMA;
            }
            printf("%s:%d:%d: using DMA data transfers\n",
                sc->sc_wdcdev.sc_dev.dv_xname,
                channel, drive);
            idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
        }
        if (idedma_ctl != 0) {
            /* Add software bits in status register */
            bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
                IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
                idedma_ctl);
        }
    }
}

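/*
 * Intel PIIX/PIIX3/PIIX4/ICH family.  These controllers are always in
 * compatibility mode; timings are programmed through the IDETIM (and, on
 * PIIX3 and later, SIDETIM/UDMAREG/CONFIG) configuration registers.
 */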
void
piix_chip_map(sc, pa)
    struct pciide_softc *sc;
    struct pci_attach_args *pa;
{
    struct pciide_channel *cp;
    int channel;
    u_int32_t idetim;
    bus_size_t cmdsize, ctlsize;

    if (pciide_chipen(sc, pa) == 0)
        return;

    printf("%s: bus-master DMA support present",
        sc->sc_wdcdev.sc_dev.dv_xname);
    pciide_mapreg_dma(sc, pa);
    printf("\n");
    sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
        WDC_CAPABILITY_MODE;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.irqack = pciide_irqack;
        switch (sc->sc_pp->ide_product) {
        case PCI_PRODUCT_INTEL_82371AB_IDE:
        case PCI_PRODUCT_INTEL_82801AA_IDE:
        case PCI_PRODUCT_INTEL_82801AB_IDE:
        case PCI_PRODUCT_INTEL_82801BA_IDE:
        case PCI_PRODUCT_INTEL_82801BAM_IDE:
            sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
        }
    }
    sc->sc_wdcdev.PIO_cap = 4;
    sc->sc_wdcdev.DMA_cap = 2;
    switch (sc->sc_pp->ide_product) {
    case PCI_PRODUCT_INTEL_82801AA_IDE:
        sc->sc_wdcdev.UDMA_cap = 4;
        break;
    case PCI_PRODUCT_INTEL_82801BA_IDE:
    case PCI_PRODUCT_INTEL_82801BAM_IDE:
        sc->sc_wdcdev.UDMA_cap = 5;
        break;
    default:
        sc->sc_wdcdev.UDMA_cap = 2;
    }
    if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
        sc->sc_wdcdev.set_modes = piix_setup_channel;
    else
        sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

    WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
        pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
        DEBUG_PROBE);
    if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
        WDCDEBUG_PRINT((", sidetim=0x%x",
            pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
            DEBUG_PROBE);
        if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
            WDCDEBUG_PRINT((", udmareg 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
                DEBUG_PROBE);
        }
        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
            WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
                DEBUG_PROBE);
        }

    }
    WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        cp = &sc->pciide_channels[channel];
        /* PIIX is compat-only */
        if (pciide_chansetup(sc, channel, 0) == 0)
            continue;
        idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
        if ((PIIX_IDETIM_READ(idetim, channel) &
            PIIX_IDETIM_IDE) == 0) {
            printf("%s: %s channel ignored (disabled)\n",
                sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
            continue;
        }
        /* PIIX are compat-only pciide devices */
        pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
        if (cp->hw_ok == 0)
            continue;
        if (pciide_chan_candisable(cp)) {
            idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
                channel);
            pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
                idetim);
        }
        pciide_map_compat_intr(pa, cp, channel, 0);
        if (cp->hw_ok == 0)
            continue;
        sc->sc_wdcdev.set_modes(&cp->wdc_channel);
    }

    WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
        pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
        DEBUG_PROBE);
    if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
        WDCDEBUG_PRINT((", sidetim=0x%x",
            pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
            DEBUG_PROBE);
        if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
            WDCDEBUG_PRINT((", udmareg 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
                DEBUG_PROBE);
        }
        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
            WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
                DEBUG_PROBE);
        }
    }
    WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}

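/*
 * Set up timings for the original PIIX (82371FB).  It has a single timing
 * register per channel, so the best mode both drives can share has to be
 * picked first.
 */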
void
piix_setup_channel(chp)
    struct channel_softc *chp;
{
    u_int8_t mode[2], drive;
    u_int32_t oidetim, idetim, idedma_ctl;
    struct pciide_channel *cp = (struct pciide_channel *)chp;
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

    oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
    idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
    idedma_ctl = 0;

    /* set up new idetim: Enable IDE registers decode */
    idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
        chp->channel);

    /* setup DMA */
    pciide_channel_dma_setup(cp);

    /*
     * Here we have to mess with the drives' modes: the PIIX can't have
     * different timings for master and slave drives.
     * We need to find the best combination.
     */

    /* If both drives support DMA, take the lower mode */
    if ((drvp[0].drive_flags & DRIVE_DMA) &&
        (drvp[1].drive_flags & DRIVE_DMA)) {
        mode[0] = mode[1] =
            min(drvp[0].DMA_mode, drvp[1].DMA_mode);
        drvp[0].DMA_mode = mode[0];
        drvp[1].DMA_mode = mode[1];
        goto ok;
    }
    /*
     * If only one drive supports DMA, use its mode, and
     * put the other one in PIO mode 0 if its mode is not compatible
     */
    if (drvp[0].drive_flags & DRIVE_DMA) {
        mode[0] = drvp[0].DMA_mode;
        mode[1] = drvp[1].PIO_mode;
        if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
            piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
            mode[1] = drvp[1].PIO_mode = 0;
        goto ok;
    }
    if (drvp[1].drive_flags & DRIVE_DMA) {
        mode[1] = drvp[1].DMA_mode;
        mode[0] = drvp[0].PIO_mode;
        if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
            piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
            mode[0] = drvp[0].PIO_mode = 0;
        goto ok;
    }
    /*
     * If neither drive is using DMA, take the lower mode, unless
     * one of them is in PIO mode < 2
     */
    if (drvp[0].PIO_mode < 2) {
        mode[0] = drvp[0].PIO_mode = 0;
        mode[1] = drvp[1].PIO_mode;
    } else if (drvp[1].PIO_mode < 2) {
        mode[1] = drvp[1].PIO_mode = 0;
        mode[0] = drvp[0].PIO_mode;
    } else {
        mode[0] = mode[1] =
            min(drvp[1].PIO_mode, drvp[0].PIO_mode);
        drvp[0].PIO_mode = mode[0];
        drvp[1].PIO_mode = mode[1];
    }
ok: /* The modes are set up */
    for (drive = 0; drive < 2; drive++) {
        if (drvp[drive].drive_flags & DRIVE_DMA) {
            idetim |= piix_setup_idetim_timings(
                mode[drive], 1, chp->channel);
            goto end;
        }
    }
    /* If we get here, neither drive is using DMA */
    if (mode[0] >= 2)
        idetim |= piix_setup_idetim_timings(
            mode[0], 0, chp->channel);
    else
        idetim |= piix_setup_idetim_timings(
            mode[1], 0, chp->channel);
end: /*
     * timing mode is now set up in the controller.  Enable
     * it per-drive
     */
    for (drive = 0; drive < 2; drive++) {
        /* If no drive, skip */
        if ((drvp[drive].drive_flags & DRIVE) == 0)
            continue;
        idetim |= piix_setup_idetim_drvs(&drvp[drive]);
        if (drvp[drive].drive_flags & DRIVE_DMA)
            idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
    }
    if (idedma_ctl != 0) {
        /* Add software bits in status register */
        bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
            IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
            idedma_ctl);
    }
    pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
    pciide_print_modes(cp);
}

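/*
 * Set up timings for PIIX3/PIIX4/ICH.  These chips have the slave IDE
 * timing register (SIDETIM) and, on UDMA-capable parts, UDMAREG/CONFIG,
 * so master and slave timings can be programmed independently.
 */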
void
piix3_4_setup_channel(chp)
    struct channel_softc *chp;
{
    struct ata_drive_datas *drvp;
    u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
    struct pciide_channel *cp = (struct pciide_channel *)chp;
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    int drive;
    int channel = chp->channel;

    oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
    sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
    udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
    ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
    idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
    sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
        PIIX_SIDETIM_RTC_MASK(channel));

    idedma_ctl = 0;
    /* If channel disabled, no need to go further */
    if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
        return;
    /* set up new idetim: Enable IDE registers decode */
    idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);

    /* setup DMA if needed */
    pciide_channel_dma_setup(cp);

    for (drive = 0; drive < 2; drive++) {
        udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
            PIIX_UDMATIM_SET(0x3, channel, drive));
        drvp = &chp->ch_drive[drive];
        /* If no drive, skip */
        if ((drvp->drive_flags & DRIVE) == 0)
            continue;
        if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
            (drvp->drive_flags & DRIVE_UDMA) == 0))
            goto pio;

        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
            ideconf |= PIIX_CONFIG_PINGPONG;
        }
        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
            /* setup Ultra/100 */
            if (drvp->UDMA_mode > 2 &&
                (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
                drvp->UDMA_mode = 2;
            if (drvp->UDMA_mode > 4) {
                ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
            } else {
                ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
                if (drvp->UDMA_mode > 2) {
                    ideconf |= PIIX_CONFIG_UDMA66(channel,
                        drive);
                } else {
                    ideconf &= ~PIIX_CONFIG_UDMA66(channel,
                        drive);
                }
            }
        }
        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
            /* setup Ultra/66 */
            if (drvp->UDMA_mode > 2 &&
                (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
                drvp->UDMA_mode = 2;
            if (drvp->UDMA_mode > 2)
                ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
            else
                ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
        }
        if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
            (drvp->drive_flags & DRIVE_UDMA)) {
            /* use Ultra/DMA */
            drvp->drive_flags &= ~DRIVE_DMA;
            udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
            udmareg |= PIIX_UDMATIM_SET(
                piix4_sct_udma[drvp->UDMA_mode], channel, drive);
        } else {
            /* use Multiword DMA */
            drvp->drive_flags &= ~DRIVE_UDMA;
            if (drive == 0) {
                idetim |= piix_setup_idetim_timings(
                    drvp->DMA_mode, 1, channel);
            } else {
                sidetim |= piix_setup_sidetim_timings(
                    drvp->DMA_mode, 1, channel);
                idetim = PIIX_IDETIM_SET(idetim,
                    PIIX_IDETIM_SITRE, channel);
            }
        }
        idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:        /* use PIO mode */
        idetim |= piix_setup_idetim_drvs(drvp);
        if (drive == 0) {
            idetim |= piix_setup_idetim_timings(
                drvp->PIO_mode, 0, channel);
        } else {
            sidetim |= piix_setup_sidetim_timings(
                drvp->PIO_mode, 0, channel);
            idetim = PIIX_IDETIM_SET(idetim,
                PIIX_IDETIM_SITRE, channel);
        }
    }
    if (idedma_ctl != 0) {
        /* Add software bits in status register */
        bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
            IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
            idedma_ctl);
    }
    pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
    pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
    pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
    pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
    pciide_print_modes(cp);
}


/* setup ISP and RTC fields, based on mode */
static u_int32_t
piix_setup_idetim_timings(mode, dma, channel)
    u_int8_t mode;
    u_int8_t dma;
    u_int8_t channel;
{

    if (dma)
        return PIIX_IDETIM_SET(0,
            PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
            PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
            channel);
    else
        return PIIX_IDETIM_SET(0,
            PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
            PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
            channel);
}

/* setup DTE, PPE, IE and TIME field based on PIO mode */
static u_int32_t
piix_setup_idetim_drvs(drvp)
    struct ata_drive_datas *drvp;
{
    u_int32_t ret = 0;
    struct channel_softc *chp = drvp->chnl_softc;
    u_int8_t channel = chp->channel;
    u_int8_t drive = drvp->drive;

    /*
     * If the drive is using UDMA, the timing setup is independent,
     * so just check DMA and PIO here.
     */
    if (drvp->drive_flags & DRIVE_DMA) {
        /* if mode = DMA mode 0, use compatible timings */
        if ((drvp->drive_flags & DRIVE_DMA) &&
            drvp->DMA_mode == 0) {
            drvp->PIO_mode = 0;
            return ret;
        }
        ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
        /*
         * If PIO and DMA timings are the same, use fast timings for
         * PIO too; otherwise use compat timings.
         */
        if ((piix_isp_pio[drvp->PIO_mode] !=
            piix_isp_dma[drvp->DMA_mode]) ||
            (piix_rtc_pio[drvp->PIO_mode] !=
            piix_rtc_dma[drvp->DMA_mode]))
            drvp->PIO_mode = 0;
        /* if PIO mode <= 2, use compat timings for PIO */
        if (drvp->PIO_mode <= 2) {
            ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
                channel);
            return ret;
        }
    }

    /*
     * Now setup PIO modes.  If mode < 2, use compat timings.
     * Else enable fast timings.  Enable IORDY and prefetch/post
     * if PIO mode >= 3.
     */

    if (drvp->PIO_mode < 2)
        return ret;

    ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
    if (drvp->PIO_mode >= 3) {
        ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
        ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
    }
    return ret;
}

1784 /* setup values in SIDETIM registers, based on mode */
1785 static u_int32_t
1786 piix_setup_sidetim_timings(mode, dma, channel)
1787 u_int8_t mode;
1788 u_int8_t dma;
1789 u_int8_t channel;
1790 {
1791 if (dma)
1792 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1793 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1794 else
1795 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1796 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1797 }
1798
1799 void
1800 amd7x6_chip_map(sc, pa)
1801 struct pciide_softc *sc;
1802 struct pci_attach_args *pa;
1803 {
1804 struct pciide_channel *cp;
1805 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1806 int channel;
1807 pcireg_t chanenable;
1808 bus_size_t cmdsize, ctlsize;
1809
1810 if (pciide_chipen(sc, pa) == 0)
1811 return;
1812 printf("%s: bus-master DMA support present",
1813 sc->sc_wdcdev.sc_dev.dv_xname);
1814 pciide_mapreg_dma(sc, pa);
1815 printf("\n");
1816 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1817 WDC_CAPABILITY_MODE;
1818 if (sc->sc_dma_ok) {
1819 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1820 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1821 sc->sc_wdcdev.irqack = pciide_irqack;
1822 }
1823 sc->sc_wdcdev.PIO_cap = 4;
1824 sc->sc_wdcdev.DMA_cap = 2;
1825
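/* the AMD-766 supports UDMA mode 5 (ATA/100); older parts top out at mode 4 */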
1826 if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1827 sc->sc_wdcdev.UDMA_cap = 5;
1828 else
1829 sc->sc_wdcdev.UDMA_cap = 4;
1830 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1831 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1832 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1833 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1834
1835 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1836 DEBUG_PROBE);
1837 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1838 cp = &sc->pciide_channels[channel];
1839 if (pciide_chansetup(sc, channel, interface) == 0)
1840 continue;
1841
1842 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1843 printf("%s: %s channel ignored (disabled)\n",
1844 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1845 continue;
1846 }
1847 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1848 pciide_pci_intr);
1849
1850 if (pciide_chan_candisable(cp))
1851 chanenable &= ~AMD7X6_CHAN_EN(channel);
1852 pciide_map_compat_intr(pa, cp, channel, interface);
1853 if (cp->hw_ok == 0)
1854 continue;
1855
1856 amd7x6_setup_channel(&cp->wdc_channel);
1857 }
1858 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1859 chanenable);
1860 return;
1861 }
1862
1863 void
1864 amd7x6_setup_channel(chp)
1865 struct channel_softc *chp;
1866 {
1867 u_int32_t udmatim_reg, datatim_reg;
1868 u_int8_t idedma_ctl;
1869 int mode, drive;
1870 struct ata_drive_datas *drvp;
1871 struct pciide_channel *cp = (struct pciide_channel*)chp;
1872 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1873 #ifndef PCIIDE_AMD756_ENABLEDMA
1874 int rev = PCI_REVISION(
1875 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1876 #endif
1877
1878 idedma_ctl = 0;
1879 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1880 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1881 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1882 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1883
1884 /* setup DMA if needed */
1885 pciide_channel_dma_setup(cp);
1886
1887 for (drive = 0; drive < 2; drive++) {
1888 drvp = &chp->ch_drive[drive];
1889 /* If no drive, skip */
1890 if ((drvp->drive_flags & DRIVE) == 0)
1891 continue;
1892 /* add timing values, setup DMA if needed */
1893 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1894 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1895 mode = drvp->PIO_mode;
1896 goto pio;
1897 }
1898 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1899 (drvp->drive_flags & DRIVE_UDMA)) {
1900 /* use Ultra/DMA */
1901 drvp->drive_flags &= ~DRIVE_DMA;
1902 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
1903 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
1904 AMD7X6_UDMA_TIME(chp->channel, drive,
1905 amd7x6_udma_tim[drvp->UDMA_mode]);
1906 /* can use PIO timings, MW DMA unused */
1907 mode = drvp->PIO_mode;
1908 } else {
1909 /* use Multiword DMA, but only if revision is OK */
1910 drvp->drive_flags &= ~DRIVE_UDMA;
1911 #ifndef PCIIDE_AMD756_ENABLEDMA
1912 /*
1913 * The workaround doesn't seem to be necessary
1914 * with all drives, so it can be disabled by
1915 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
1916 * triggered.
1917 */
1918 if (sc->sc_pp->ide_product ==
1919 PCI_PRODUCT_AMD_PBC756_IDE &&
1920 AMD756_CHIPREV_DISABLEDMA(rev)) {
1921 printf("%s:%d:%d: multi-word DMA disabled due "
1922 "to chip revision\n",
1923 sc->sc_wdcdev.sc_dev.dv_xname,
1924 chp->channel, drive);
1925 mode = drvp->PIO_mode;
1926 drvp->drive_flags &= ~DRIVE_DMA;
1927 goto pio;
1928 }
1929 #endif
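/*
 * The data timing register below is shared by PIO and multiword DMA
 * cycles, so program it for the slower of the two modes.
 */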
1930 /* mode = min(pio, dma+2) */
1931 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1932 mode = drvp->PIO_mode;
1933 else
1934 mode = drvp->DMA_mode + 2;
1935 }
1936 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1937
1938 pio: /* setup PIO mode */
1939 if (mode <= 2) {
1940 drvp->DMA_mode = 0;
1941 drvp->PIO_mode = 0;
1942 mode = 0;
1943 } else {
1944 drvp->PIO_mode = mode;
1945 drvp->DMA_mode = mode - 2;
1946 }
1947 datatim_reg |=
1948 AMD7X6_DATATIM_PULSE(chp->channel, drive,
1949 amd7x6_pio_set[mode]) |
1950 AMD7X6_DATATIM_RECOV(chp->channel, drive,
1951 amd7x6_pio_rec[mode]);
1952 }
1953 if (idedma_ctl != 0) {
1954 /* Add software bits in status register */
1955 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1956 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1957 idedma_ctl);
1958 }
1959 pciide_print_modes(cp);
1960 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
1961 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
1962 }
1963
1964 void
1965 apollo_chip_map(sc, pa)
1966 struct pciide_softc *sc;
1967 struct pci_attach_args *pa;
1968 {
1969 struct pciide_channel *cp;
1970 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1971 int channel;
1972 u_int32_t ideconf;
1973 bus_size_t cmdsize, ctlsize;
1974 pcitag_t pcib_tag;
1975 pcireg_t pcib_id, pcib_class;
1976
1977 if (pciide_chipen(sc, pa) == 0)
1978 return;
1979 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
1980 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
1981 /* and read ID and rev of the ISA bridge */
1982 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
1983 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
1984 printf(": VIA Technologies ");
1985 switch (PCI_PRODUCT(pcib_id)) {
1986 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
1987 printf("VT82C586 (Apollo VP) ");
1988 if (PCI_REVISION(pcib_class) >= 0x02) {
1989 printf("ATA33 controller\n");
1990 sc->sc_wdcdev.UDMA_cap = 2;
1991 } else {
1992 printf("controller\n");
1993 sc->sc_wdcdev.UDMA_cap = 0;
1994 }
1995 break;
1996 case PCI_PRODUCT_VIATECH_VT82C596A:
1997 printf("VT82C596A (Apollo Pro) ");
1998 if (PCI_REVISION(pcib_class) >= 0x12) {
1999 printf("ATA66 controller\n");
2000 sc->sc_wdcdev.UDMA_cap = 4;
2001 } else {
2002 printf("ATA33 controller\n");
2003 sc->sc_wdcdev.UDMA_cap = 2;
2004 }
2005 break;
2006 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2007 printf("VT82C686A (Apollo KX133) ");
2008 if (PCI_REVISION(pcib_class) >= 0x40) {
2009 printf("ATA100 controller\n");
2010 sc->sc_wdcdev.UDMA_cap = 5;
2011 } else {
2012 printf("ATA66 controller\n");
2013 sc->sc_wdcdev.UDMA_cap = 4;
2014 }
2015 break;
2016 default:
2017 printf("unknown ATA controller\n");
2018 sc->sc_wdcdev.UDMA_cap = 0;
2019 }
2020
2021 printf("%s: bus-master DMA support present",
2022 sc->sc_wdcdev.sc_dev.dv_xname);
2023 pciide_mapreg_dma(sc, pa);
2024 printf("\n");
2025 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2026 WDC_CAPABILITY_MODE;
2027 if (sc->sc_dma_ok) {
2028 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2029 sc->sc_wdcdev.irqack = pciide_irqack;
2030 if (sc->sc_wdcdev.UDMA_cap > 0)
2031 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2032 }
2033 sc->sc_wdcdev.PIO_cap = 4;
2034 sc->sc_wdcdev.DMA_cap = 2;
2035 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2036 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2037 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2038
2039 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2040 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2041 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2042 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2043 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2044 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2045 DEBUG_PROBE);
2046
2047 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2048 cp = &sc->pciide_channels[channel];
2049 if (pciide_chansetup(sc, channel, interface) == 0)
2050 continue;
2051
2052 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2053 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2054 printf("%s: %s channel ignored (disabled)\n",
2055 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2056 continue;
2057 }
2058 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2059 pciide_pci_intr);
2060 if (cp->hw_ok == 0)
2061 continue;
2062 if (pciide_chan_candisable(cp)) {
2063 ideconf &= ~APO_IDECONF_EN(channel);
2064 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2065 ideconf);
2066 }
2067 pciide_map_compat_intr(pa, cp, channel, interface);
2068
2069 if (cp->hw_ok == 0)
2070 continue;
2071 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2072 }
2073 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2074 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2075 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2076 }
2077
2078 void
2079 apollo_setup_channel(chp)
2080 struct channel_softc *chp;
2081 {
2082 u_int32_t udmatim_reg, datatim_reg;
2083 u_int8_t idedma_ctl;
2084 int mode, drive;
2085 struct ata_drive_datas *drvp;
2086 struct pciide_channel *cp = (struct pciide_channel*)chp;
2087 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2088
2089 idedma_ctl = 0;
2090 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2091 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2092 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2093 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2094
2095 /* setup DMA if needed */
2096 pciide_channel_dma_setup(cp);
2097
2098 for (drive = 0; drive < 2; drive++) {
2099 drvp = &chp->ch_drive[drive];
2100 /* If no drive, skip */
2101 if ((drvp->drive_flags & DRIVE) == 0)
2102 continue;
2103 /* add timing values, setup DMA if needed */
2104 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2105 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2106 mode = drvp->PIO_mode;
2107 goto pio;
2108 }
2109 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2110 (drvp->drive_flags & DRIVE_UDMA)) {
2111 /* use Ultra/DMA */
2112 drvp->drive_flags &= ~DRIVE_DMA;
2113 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2114 APO_UDMA_EN_MTH(chp->channel, drive);
2115 if (sc->sc_wdcdev.UDMA_cap == 5) {
2116 /* 686b */
2117 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2118 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2119 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2120 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2121 /* 596b or 686a */
2122 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2123 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2124 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2125 } else {
2126 /* 596a or 586b */
2127 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2128 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2129 }
2130 /* can use PIO timings, MW DMA unused */
2131 mode = drvp->PIO_mode;
2132 } else {
2133 /* use Multiword DMA */
2134 drvp->drive_flags &= ~DRIVE_UDMA;
2135 /* mode = min(pio, dma+2) */
2136 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2137 mode = drvp->PIO_mode;
2138 else
2139 mode = drvp->DMA_mode + 2;
2140 }
2141 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2142
2143 pio: /* setup PIO mode */
2144 if (mode <= 2) {
2145 drvp->DMA_mode = 0;
2146 drvp->PIO_mode = 0;
2147 mode = 0;
2148 } else {
2149 drvp->PIO_mode = mode;
2150 drvp->DMA_mode = mode - 2;
2151 }
2152 datatim_reg |=
2153 APO_DATATIM_PULSE(chp->channel, drive,
2154 apollo_pio_set[mode]) |
2155 APO_DATATIM_RECOV(chp->channel, drive,
2156 apollo_pio_rec[mode]);
2157 }
2158 if (idedma_ctl != 0) {
2159 /* Add software bits in status register */
2160 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2161 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2162 idedma_ctl);
2163 }
2164 pciide_print_modes(cp);
2165 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2166 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2167 }
2168
2169 void
2170 cmd_channel_map(pa, sc, channel)
2171 struct pci_attach_args *pa;
2172 struct pciide_softc *sc;
2173 int channel;
2174 {
2175 struct pciide_channel *cp = &sc->pciide_channels[channel];
2176 bus_size_t cmdsize, ctlsize;
2177 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2178 int interface;
2179
2180 /*
2181 * The 0648/0649 can be told to identify as a RAID controller.
2182 * In this case, we have to fake the interface.
2183 */
2184 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2185 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2186 PCIIDE_INTERFACE_SETTABLE(1);
2187 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2188 CMD_CONF_DSA1)
2189 interface |= PCIIDE_INTERFACE_PCI(0) |
2190 PCIIDE_INTERFACE_PCI(1);
2191 } else {
2192 interface = PCI_INTERFACE(pa->pa_class);
2193 }
2194
2195 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2196 cp->name = PCIIDE_CHANNEL_NAME(channel);
2197 cp->wdc_channel.channel = channel;
2198 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2199
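/*
 * Both channels share the primary channel's command queue, which
 * serializes commands across the two channels; this is apparently
 * required on these CMD controllers.
 */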
2200 if (channel > 0) {
2201 cp->wdc_channel.ch_queue =
2202 sc->pciide_channels[0].wdc_channel.ch_queue;
2203 } else {
2204 cp->wdc_channel.ch_queue =
2205 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2206 }
2207 if (cp->wdc_channel.ch_queue == NULL) {
2208 printf("%s %s channel: "
2209 "can't allocate memory for command queue",
2210 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2211 return;
2212 }
2213
2214 printf("%s: %s channel %s to %s mode\n",
2215 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2216 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2217 "configured" : "wired",
2218 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2219 "native-PCI" : "compatibility");
2220
2221 /*
2222 * with a CMD PCI64x, if we get here, the first channel is enabled:
2223 * there's no way to disable the first channel without disabling
2224 * the whole device
2225 */
2226 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2227 printf("%s: %s channel ignored (disabled)\n",
2228 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2229 return;
2230 }
2231
2232 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2233 if (cp->hw_ok == 0)
2234 return;
2235 if (channel == 1) {
2236 if (pciide_chan_candisable(cp)) {
2237 ctrl &= ~CMD_CTRL_2PORT;
2238 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2239 CMD_CTRL, ctrl);
2240 }
2241 }
2242 pciide_map_compat_intr(pa, cp, channel, interface);
2243 }
2244
2245 int
2246 cmd_pci_intr(arg)
2247 void *arg;
2248 {
2249 struct pciide_softc *sc = arg;
2250 struct pciide_channel *cp;
2251 struct channel_softc *wdc_cp;
2252 int i, rv, crv;
2253 u_int32_t priirq, secirq;
2254
2255 rv = 0;
2256 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2257 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2258 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2259 cp = &sc->pciide_channels[i];
2260 wdc_cp = &cp->wdc_channel;
2261 /* If a compat channel skip. */
2262 if (cp->compat)
2263 continue;
2264 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2265 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2266 crv = wdcintr(wdc_cp);
2267 if (crv == 0)
2268 printf("%s:%d: bogus intr\n",
2269 sc->sc_wdcdev.sc_dev.dv_xname, i);
2270 else
2271 rv = 1;
2272 }
2273 }
2274 return rv;
2275 }
2276
2277 void
2278 cmd_chip_map(sc, pa)
2279 struct pciide_softc *sc;
2280 struct pci_attach_args *pa;
2281 {
2282 int channel;
2283
2284 /*
2285 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2286 * and base address registers can be disabled at the
2287 * hardware level. In this case, the device is wired
2288 * in compat mode and its first channel is always enabled,
2289 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2290 * In fact, it seems that the first channel of the CMD PCI0640
2291 * can't be disabled.
2292 */
2293
2294 #ifdef PCIIDE_CMD064x_DISABLE
2295 if (pciide_chipen(sc, pa) == 0)
2296 return;
2297 #endif
2298
2299 printf("%s: hardware does not support DMA\n",
2300 sc->sc_wdcdev.sc_dev.dv_xname);
2301 sc->sc_dma_ok = 0;
2302
2303 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2304 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2305 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2306
2307 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2308 cmd_channel_map(pa, sc, channel);
2309 }
2310 }
2311
2312 void
2313 cmd0643_9_chip_map(sc, pa)
2314 struct pciide_softc *sc;
2315 struct pci_attach_args *pa;
2316 {
2317 struct pciide_channel *cp;
2318 int channel;
2319 int rev = PCI_REVISION(
2320 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2321
2322 /*
2323 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2324 * and base address registers can be disabled at the
2325 * hardware level. In this case, the device is wired
2326 * in compat mode and its first channel is always enabled,
2327 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2328 * In fact, it seems that the first channel of the CMD PCI0640
2329 * can't be disabled.
2330 */
2331
2332 #ifdef PCIIDE_CMD064x_DISABLE
2333 if (pciide_chipen(sc, pa) == 0)
2334 return;
2335 #endif
2336 printf("%s: bus-master DMA support present",
2337 sc->sc_wdcdev.sc_dev.dv_xname);
2338 pciide_mapreg_dma(sc, pa);
2339 printf("\n");
2340 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2341 WDC_CAPABILITY_MODE;
2342 if (sc->sc_dma_ok) {
2343 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2344 switch (sc->sc_pp->ide_product) {
2345 case PCI_PRODUCT_CMDTECH_649:
2346 case PCI_PRODUCT_CMDTECH_648:
2347 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2348 sc->sc_wdcdev.UDMA_cap = 4;
2349 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2350 break;
2351 case PCI_PRODUCT_CMDTECH_646:
2352 if (rev >= CMD0646U2_REV) {
2353 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2354 sc->sc_wdcdev.UDMA_cap = 2;
2355 } else if (rev >= CMD0646U_REV) {
2356 /*
2357 * Linux's driver claims that the 646U is broken
2358 * with UDMA. Only enable it if we know what we're
2359 * doing.
2360 */
2361 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2362 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2363 sc->sc_wdcdev.UDMA_cap = 2;
2364 #endif
2365 /* explicitly disable UDMA */
2366 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2367 CMD_UDMATIM(0), 0);
2368 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2369 CMD_UDMATIM(1), 0);
2370 }
2371 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2372 break;
2373 default:
2374 sc->sc_wdcdev.irqack = pciide_irqack;
2375 }
2376 }
2377
2378 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2379 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2380 sc->sc_wdcdev.PIO_cap = 4;
2381 sc->sc_wdcdev.DMA_cap = 2;
2382 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2383
2384 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2385 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2386 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2387 DEBUG_PROBE);
2388
2389 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2390 cp = &sc->pciide_channels[channel];
2391 cmd_channel_map(pa, sc, channel);
2392 if (cp->hw_ok == 0)
2393 continue;
2394 cmd0643_9_setup_channel(&cp->wdc_channel);
2395 }
2396 /*
2397 * note - this also makes sure we clear the irq disable and reset
2398 * bits
2399 */
2400 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2401 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2402 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2403 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2404 DEBUG_PROBE);
2405 }
2406
2407 void
2408 cmd0643_9_setup_channel(chp)
2409 struct channel_softc *chp;
2410 {
2411 struct ata_drive_datas *drvp;
2412 u_int8_t tim;
2413 u_int32_t idedma_ctl, udma_reg;
2414 int drive;
2415 struct pciide_channel *cp = (struct pciide_channel*)chp;
2416 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2417
2418 idedma_ctl = 0;
2419 /* setup DMA if needed */
2420 pciide_channel_dma_setup(cp);
2421
2422 for (drive = 0; drive < 2; drive++) {
2423 drvp = &chp->ch_drive[drive];
2424 /* If no drive, skip */
2425 if ((drvp->drive_flags & DRIVE) == 0)
2426 continue;
2427 /* add timing values, setup DMA if needed */
2428 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2429 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2430 if (drvp->drive_flags & DRIVE_UDMA) {
2431 /* UltraDMA on a 646U2, 0648 or 0649 */
2432 drvp->drive_flags &= ~DRIVE_DMA;
2433 udma_reg = pciide_pci_read(sc->sc_pc,
2434 sc->sc_tag, CMD_UDMATIM(chp->channel));
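/*
 * UDMA modes above 2 need an 80-wire cable; clamp to mode 2 unless
 * the BICSR register reports such a cable on this channel.
 */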
2435 if (drvp->UDMA_mode > 2 &&
2436 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2437 CMD_BICSR) &
2438 CMD_BICSR_80(chp->channel)) == 0)
2439 drvp->UDMA_mode = 2;
2440 if (drvp->UDMA_mode > 2)
2441 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2442 else if (sc->sc_wdcdev.UDMA_cap > 2)
2443 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2444 udma_reg |= CMD_UDMATIM_UDMA(drive);
2445 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2446 CMD_UDMATIM_TIM_OFF(drive));
2447 udma_reg |=
2448 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2449 CMD_UDMATIM_TIM_OFF(drive));
2450 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2451 CMD_UDMATIM(chp->channel), udma_reg);
2452 } else {
2453 /*
2454 * use Multiword DMA.
2455 * Timings will be used for both PIO and DMA,
2456 * so adjust DMA mode if needed.
2457 * If we have a 0646U2/8/9, turn off UDMA.
2458 */
2459 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2460 udma_reg = pciide_pci_read(sc->sc_pc,
2461 sc->sc_tag,
2462 CMD_UDMATIM(chp->channel));
2463 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2464 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2465 CMD_UDMATIM(chp->channel),
2466 udma_reg);
2467 }
2468 if (drvp->PIO_mode >= 3 &&
2469 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2470 drvp->DMA_mode = drvp->PIO_mode - 2;
2471 }
2472 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2473 }
2474 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2475 }
2476 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2477 CMD_DATA_TIM(chp->channel, drive), tim);
2478 }
2479 if (idedma_ctl != 0) {
2480 /* Add software bits in status register */
2481 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2482 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2483 idedma_ctl);
2484 }
2485 pciide_print_modes(cp);
2486 }
2487
2488 void
2489 cmd646_9_irqack(chp)
2490 struct channel_softc *chp;
2491 {
2492 u_int32_t priirq, secirq;
2493 struct pciide_channel *cp = (struct pciide_channel*)chp;
2494 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2495
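/*
 * Reading the register holding this channel's interrupt bit and writing
 * the same value back apparently clears the pending interrupt before
 * the generic bus-master ack below.
 */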
2496 if (chp->channel == 0) {
2497 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2498 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2499 } else {
2500 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2501 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2502 }
2503 pciide_irqack(chp);
2504 }
2505
2506 void
2507 cy693_chip_map(sc, pa)
2508 struct pciide_softc *sc;
2509 struct pci_attach_args *pa;
2510 {
2511 struct pciide_channel *cp;
2512 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2513 bus_size_t cmdsize, ctlsize;
2514
2515 if (pciide_chipen(sc, pa) == 0)
2516 return;
2517 /*
2518 * this chip has 2 PCI IDE functions, one for primary and one for
2519 * secondary. So we need to call pciide_mapregs_compat() with
2520 * the real channel
2521 */
2522 if (pa->pa_function == 1) {
2523 sc->sc_cy_compatchan = 0;
2524 } else if (pa->pa_function == 2) {
2525 sc->sc_cy_compatchan = 1;
2526 } else {
2527 printf("%s: unexpected PCI function %d\n",
2528 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2529 return;
2530 }
2531 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2532 printf("%s: bus-master DMA support present",
2533 sc->sc_wdcdev.sc_dev.dv_xname);
2534 pciide_mapreg_dma(sc, pa);
2535 } else {
2536 printf("%s: hardware does not support DMA",
2537 sc->sc_wdcdev.sc_dev.dv_xname);
2538 sc->sc_dma_ok = 0;
2539 }
2540 printf("\n");
2541
2542 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2543 if (sc->sc_cy_handle == NULL) {
2544 printf("%s: unable to map hyperCache control registers\n",
2545 sc->sc_wdcdev.sc_dev.dv_xname);
2546 sc->sc_dma_ok = 0;
2547 }
2548
2549 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2550 WDC_CAPABILITY_MODE;
2551 if (sc->sc_dma_ok) {
2552 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2553 sc->sc_wdcdev.irqack = pciide_irqack;
2554 }
2555 sc->sc_wdcdev.PIO_cap = 4;
2556 sc->sc_wdcdev.DMA_cap = 2;
2557 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2558
2559 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2560 sc->sc_wdcdev.nchannels = 1;
2561
2562 /* Only one channel for this chip; if we are here it's enabled */
2563 cp = &sc->pciide_channels[0];
2564 sc->wdc_chanarray[0] = &cp->wdc_channel;
2565 cp->name = PCIIDE_CHANNEL_NAME(0);
2566 cp->wdc_channel.channel = 0;
2567 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2568 cp->wdc_channel.ch_queue =
2569 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2570 if (cp->wdc_channel.ch_queue == NULL) {
2571 printf("%s primary channel: "
2572 "can't allocate memory for command queue",
2573 sc->sc_wdcdev.sc_dev.dv_xname);
2574 return;
2575 }
2576 printf("%s: primary channel %s to ",
2577 sc->sc_wdcdev.sc_dev.dv_xname,
2578 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2579 "configured" : "wired");
2580 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2581 printf("native-PCI");
2582 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2583 pciide_pci_intr);
2584 } else {
2585 printf("compatibility");
2586 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2587 &cmdsize, &ctlsize);
2588 }
2589 printf(" mode\n");
2590 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2591 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2592 wdcattach(&cp->wdc_channel);
2593 if (pciide_chan_candisable(cp)) {
2594 pci_conf_write(sc->sc_pc, sc->sc_tag,
2595 PCI_COMMAND_STATUS_REG, 0);
2596 }
2597 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2598 if (cp->hw_ok == 0)
2599 return;
2600 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2601 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2602 cy693_setup_channel(&cp->wdc_channel);
2603 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2604 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2605 }
2606
2607 void
2608 cy693_setup_channel(chp)
2609 struct channel_softc *chp;
2610 {
2611 struct ata_drive_datas *drvp;
2612 int drive;
2613 u_int32_t cy_cmd_ctrl;
2614 u_int32_t idedma_ctl;
2615 struct pciide_channel *cp = (struct pciide_channel*)chp;
2616 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2617 int dma_mode = -1;
2618
2619 cy_cmd_ctrl = idedma_ctl = 0;
2620
2621 /* setup DMA if needed */
2622 pciide_channel_dma_setup(cp);
2623
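/*
 * dma_mode tracks the slowest multiword DMA mode of the two drives;
 * the cy82c693 apparently has a single DMA timing setting per channel,
 * written through the hyperCache registers below.
 */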
2624 for (drive = 0; drive < 2; drive++) {
2625 drvp = &chp->ch_drive[drive];
2626 /* If no drive, skip */
2627 if ((drvp->drive_flags & DRIVE) == 0)
2628 continue;
2629 /* add timing values, setup DMA if needed */
2630 if (drvp->drive_flags & DRIVE_DMA) {
2631 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2632 /* use Multiword DMA */
2633 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2634 dma_mode = drvp->DMA_mode;
2635 }
2636 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2637 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2638 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2639 CY_CMD_CTRL_IOW_REC_OFF(drive));
2640 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2641 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2642 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2643 CY_CMD_CTRL_IOR_REC_OFF(drive));
2644 }
2645 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2646 chp->ch_drive[0].DMA_mode = dma_mode;
2647 chp->ch_drive[1].DMA_mode = dma_mode;
2648
2649 if (dma_mode == -1)
2650 dma_mode = 0;
2651
2652 if (sc->sc_cy_handle != NULL) {
2653 /* Note: `multiple' is implied. */
2654 cy82c693_write(sc->sc_cy_handle,
2655 (sc->sc_cy_compatchan == 0) ?
2656 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2657 }
2658
2659 pciide_print_modes(cp);
2660
2661 if (idedma_ctl != 0) {
2662 /* Add software bits in status register */
2663 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2664 IDEDMA_CTL, idedma_ctl);
2665 }
2666 }
2667
2668 void
2669 sis_chip_map(sc, pa)
2670 struct pciide_softc *sc;
2671 struct pci_attach_args *pa;
2672 {
2673 struct pciide_channel *cp;
2674 int channel;
2675 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2676 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2677 pcireg_t rev = PCI_REVISION(pa->pa_class);
2678 bus_size_t cmdsize, ctlsize;
2679 pcitag_t pchb_tag;
2680 pcireg_t pchb_id, pchb_class;
2681
2682 if (pciide_chipen(sc, pa) == 0)
2683 return;
2684 printf("%s: bus-master DMA support present",
2685 sc->sc_wdcdev.sc_dev.dv_xname);
2686 pciide_mapreg_dma(sc, pa);
2687 printf("\n");
2688
2689 /* get a PCI tag for the host bridge (function 0 of the same device) */
2690 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2691 /* and read ID and rev of the ISA bridge */
2692 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2693 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2694
2695 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2696 WDC_CAPABILITY_MODE;
2697 if (sc->sc_dma_ok) {
2698 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2699 sc->sc_wdcdev.irqack = pciide_irqack;
2700 /*
2701 * controllers associated with a rev 0x2 530 host-to-PCI bridge
2702 * have problems with UDMA (info provided by Christos).
2703 */
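/*
 * UDMA is therefore only enabled on controller revisions 0xd0 and
 * later, and never when paired with such a bridge.
 */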
2704 if (rev >= 0xd0 &&
2705 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2706 PCI_REVISION(pchb_class) >= 0x03))
2707 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2708 }
2709
2710 sc->sc_wdcdev.PIO_cap = 4;
2711 sc->sc_wdcdev.DMA_cap = 2;
2712 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2713 sc->sc_wdcdev.UDMA_cap = 2;
2714 sc->sc_wdcdev.set_modes = sis_setup_channel;
2715
2716 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2717 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2718
2719 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2720 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2721 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2722
2723 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2724 cp = &sc->pciide_channels[channel];
2725 if (pciide_chansetup(sc, channel, interface) == 0)
2726 continue;
2727 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2728 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2729 printf("%s: %s channel ignored (disabled)\n",
2730 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2731 continue;
2732 }
2733 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2734 pciide_pci_intr);
2735 if (cp->hw_ok == 0)
2736 continue;
2737 if (pciide_chan_candisable(cp)) {
2738 if (channel == 0)
2739 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2740 else
2741 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2742 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2743 sis_ctr0);
2744 }
2745 pciide_map_compat_intr(pa, cp, channel, interface);
2746 if (cp->hw_ok == 0)
2747 continue;
2748 sis_setup_channel(&cp->wdc_channel);
2749 }
2750 }
2751
2752 void
2753 sis_setup_channel(chp)
2754 struct channel_softc *chp;
2755 {
2756 struct ata_drive_datas *drvp;
2757 int drive;
2758 u_int32_t sis_tim;
2759 u_int32_t idedma_ctl;
2760 struct pciide_channel *cp = (struct pciide_channel*)chp;
2761 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2762
2763 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2764 "channel %d 0x%x\n", chp->channel,
2765 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2766 DEBUG_PROBE);
2767 sis_tim = 0;
2768 idedma_ctl = 0;
2769 /* setup DMA if needed */
2770 pciide_channel_dma_setup(cp);
2771
2772 for (drive = 0; drive < 2; drive++) {
2773 drvp = &chp->ch_drive[drive];
2774 /* If no drive, skip */
2775 if ((drvp->drive_flags & DRIVE) == 0)
2776 continue;
2777 /* add timing values, setup DMA if needed */
2778 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2779 (drvp->drive_flags & DRIVE_UDMA) == 0)
2780 goto pio;
2781
2782 if (drvp->drive_flags & DRIVE_UDMA) {
2783 /* use Ultra/DMA */
2784 drvp->drive_flags &= ~DRIVE_DMA;
2785 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2786 SIS_TIM_UDMA_TIME_OFF(drive);
2787 sis_tim |= SIS_TIM_UDMA_EN(drive);
2788 } else {
2789 /*
2790 * use Multiword DMA
2791 * Timings will be used for both PIO and DMA,
2792 * so adjust DMA mode if needed
2793 */
2794 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2795 drvp->PIO_mode = drvp->DMA_mode + 2;
2796 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2797 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2798 drvp->PIO_mode - 2 : 0;
2799 if (drvp->DMA_mode == 0)
2800 drvp->PIO_mode = 0;
2801 }
2802 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2803 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2804 SIS_TIM_ACT_OFF(drive);
2805 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2806 SIS_TIM_REC_OFF(drive);
2807 }
2808 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2809 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2810 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2811 if (idedma_ctl != 0) {
2812 /* Add software bits in status register */
2813 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2814 IDEDMA_CTL, idedma_ctl);
2815 }
2816 pciide_print_modes(cp);
2817 }
2818
2819 void
2820 acer_chip_map(sc, pa)
2821 struct pciide_softc *sc;
2822 struct pci_attach_args *pa;
2823 {
2824 struct pciide_channel *cp;
2825 int channel;
2826 pcireg_t cr, interface;
2827 bus_size_t cmdsize, ctlsize;
2828 pcireg_t rev = PCI_REVISION(pa->pa_class);
2829
2830 if (pciide_chipen(sc, pa) == 0)
2831 return;
2832 printf("%s: bus-master DMA support present",
2833 sc->sc_wdcdev.sc_dev.dv_xname);
2834 pciide_mapreg_dma(sc, pa);
2835 printf("\n");
2836 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2837 WDC_CAPABILITY_MODE;
2838 if (sc->sc_dma_ok) {
2839 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2840 if (rev >= 0x20)
2841 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2842 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2843 sc->sc_wdcdev.irqack = pciide_irqack;
2844 }
2845
2846 sc->sc_wdcdev.PIO_cap = 4;
2847 sc->sc_wdcdev.DMA_cap = 2;
2848 sc->sc_wdcdev.UDMA_cap = 2;
2849 sc->sc_wdcdev.set_modes = acer_setup_channel;
2850 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2851 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2852
2853 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2854 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2855 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2856
2857 /* Enable "microsoft register bits" R/W. */
2858 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2859 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2860 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2861 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2862 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2863 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2864 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2865 ~ACER_CHANSTATUSREGS_RO);
2866 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2867 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2868 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2869 /* Don't use cr, re-read the real register content instead */
2870 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2871 PCI_CLASS_REG));
2872
2873 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2874 cp = &sc->pciide_channels[channel];
2875 if (pciide_chansetup(sc, channel, interface) == 0)
2876 continue;
2877 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2878 printf("%s: %s channel ignored (disabled)\n",
2879 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2880 continue;
2881 }
2882 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2883 acer_pci_intr);
2884 if (cp->hw_ok == 0)
2885 continue;
2886 if (pciide_chan_candisable(cp)) {
2887 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2888 pci_conf_write(sc->sc_pc, sc->sc_tag,
2889 PCI_CLASS_REG, cr);
2890 }
2891 pciide_map_compat_intr(pa, cp, channel, interface);
2892 acer_setup_channel(&cp->wdc_channel);
2893 }
2894 }
2895
2896 void
2897 acer_setup_channel(chp)
2898 struct channel_softc *chp;
2899 {
2900 struct ata_drive_datas *drvp;
2901 int drive;
2902 u_int32_t acer_fifo_udma;
2903 u_int32_t idedma_ctl;
2904 struct pciide_channel *cp = (struct pciide_channel*)chp;
2905 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2906
2907 idedma_ctl = 0;
2908 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2909 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2910 acer_fifo_udma), DEBUG_PROBE);
2911 /* setup DMA if needed */
2912 pciide_channel_dma_setup(cp);
2913
2914 for (drive = 0; drive < 2; drive++) {
2915 drvp = &chp->ch_drive[drive];
2916 /* If no drive, skip */
2917 if ((drvp->drive_flags & DRIVE) == 0)
2918 continue;
2919 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2920 "channel %d drive %d 0x%x\n", chp->channel, drive,
2921 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2922 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2923 /* clear FIFO/DMA mode */
2924 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2925 ACER_UDMA_EN(chp->channel, drive) |
2926 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2927
2928 /* add timing values, setup DMA if needed */
2929 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2930 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2931 acer_fifo_udma |=
2932 ACER_FTH_OPL(chp->channel, drive, 0x1);
2933 goto pio;
2934 }
2935
2936 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2937 if (drvp->drive_flags & DRIVE_UDMA) {
2938 /* use Ultra/DMA */
2939 drvp->drive_flags &= ~DRIVE_DMA;
2940 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2941 acer_fifo_udma |=
2942 ACER_UDMA_TIM(chp->channel, drive,
2943 acer_udma[drvp->UDMA_mode]);
2944 } else {
2945 /*
2946 * use Multiword DMA
2947 * Timings will be used for both PIO and DMA,
2948 * so adjust DMA mode if needed
2949 */
2950 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2951 drvp->PIO_mode = drvp->DMA_mode + 2;
2952 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2953 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2954 drvp->PIO_mode - 2 : 0;
2955 if (drvp->DMA_mode == 0)
2956 drvp->PIO_mode = 0;
2957 }
2958 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2959 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2960 ACER_IDETIM(chp->channel, drive),
2961 acer_pio[drvp->PIO_mode]);
2962 }
2963 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2964 acer_fifo_udma), DEBUG_PROBE);
2965 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2966 if (idedma_ctl != 0) {
2967 /* Add software bits in status register */
2968 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2969 IDEDMA_CTL, idedma_ctl);
2970 }
2971 pciide_print_modes(cp);
2972 }
2973
2974 int
2975 acer_pci_intr(arg)
2976 void *arg;
2977 {
2978 struct pciide_softc *sc = arg;
2979 struct pciide_channel *cp;
2980 struct channel_softc *wdc_cp;
2981 int i, rv, crv;
2982 u_int32_t chids;
2983
2984 rv = 0;
2985 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2986 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2987 cp = &sc->pciide_channels[i];
2988 wdc_cp = &cp->wdc_channel;
2989 /* If a compat channel skip. */
2990 if (cp->compat)
2991 continue;
2992 if (chids & ACER_CHIDS_INT(i)) {
2993 crv = wdcintr(wdc_cp);
2994 if (crv == 0)
2995 printf("%s:%d: bogus intr\n",
2996 sc->sc_wdcdev.sc_dev.dv_xname, i);
2997 else
2998 rv = 1;
2999 }
3000 }
3001 return rv;
3002 }
3003
3004 void
3005 hpt_chip_map(sc, pa)
3006 struct pciide_softc *sc;
3007 struct pci_attach_args *pa;
3008 {
3009 struct pciide_channel *cp;
3010 int i, compatchan, revision;
3011 pcireg_t interface;
3012 bus_size_t cmdsize, ctlsize;
3013
3014 if (pciide_chipen(sc, pa) == 0)
3015 return;
3016 revision = PCI_REVISION(pa->pa_class);
3017 printf(": Triones/Highpoint ");
3018 if (revision == HPT370_REV)
3019 printf("HPT370 IDE Controller\n");
3020 else
3021 printf("HPT366 IDE Controller\n");
3022
3023 /*
3024 * when the chip is in native mode it identifies itself as a
3025 * 'misc mass storage'. Fake the interface in this case.
3026 */
3027 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3028 interface = PCI_INTERFACE(pa->pa_class);
3029 } else {
3030 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3031 PCIIDE_INTERFACE_PCI(0);
3032 if (revision == HPT370_REV)
3033 interface |= PCIIDE_INTERFACE_PCI(1);
3034 }
3035
3036 printf("%s: bus-master DMA support present",
3037 sc->sc_wdcdev.sc_dev.dv_xname);
3038 pciide_mapreg_dma(sc, pa);
3039 printf("\n");
3040 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3041 WDC_CAPABILITY_MODE;
3042 if (sc->sc_dma_ok) {
3043 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3044 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3045 sc->sc_wdcdev.irqack = pciide_irqack;
3046 }
3047 sc->sc_wdcdev.PIO_cap = 4;
3048 sc->sc_wdcdev.DMA_cap = 2;
3049
3050 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3051 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3052 if (revision == HPT366_REV) {
3053 sc->sc_wdcdev.UDMA_cap = 4;
3054 /*
3055 * The 366 has 2 PCI IDE functions, one for primary and one
3056 * for secondary. So we need to call pciide_mapregs_compat()
3057 * with the real channel
3058 */
3059 if (pa->pa_function == 0) {
3060 compatchan = 0;
3061 } else if (pa->pa_function == 1) {
3062 compatchan = 1;
3063 } else {
3064 printf("%s: unexpected PCI function %d\n",
3065 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3066 return;
3067 }
3068 sc->sc_wdcdev.nchannels = 1;
3069 } else {
3070 sc->sc_wdcdev.nchannels = 2;
3071 sc->sc_wdcdev.UDMA_cap = 5;
3072 }
3073 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3074 cp = &sc->pciide_channels[i];
3075 if (sc->sc_wdcdev.nchannels > 1) {
3076 compatchan = i;
3077 if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3078 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3079 printf("%s: %s channel ignored (disabled)\n",
3080 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3081 continue;
3082 }
3083 }
3084 if (pciide_chansetup(sc, i, interface) == 0)
3085 continue;
3086 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3087 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3088 &ctlsize, hpt_pci_intr);
3089 } else {
3090 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3091 &cmdsize, &ctlsize);
3092 }
3093 if (cp->hw_ok == 0)
3094 return;
3095 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3096 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3097 wdcattach(&cp->wdc_channel);
3098 hpt_setup_channel(&cp->wdc_channel);
3099 }
3100 if (revision == HPT370_REV) {
3101 /*
3102 * The HPT370 has a bit to disable interrupts; make sure
3103 * to clear it.
3104 */
3105 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3106 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3107 ~HPT_CSEL_IRQDIS);
3108 }
3109 return;
3110 }
3111
3112 void
3113 hpt_setup_channel(chp)
3114 struct channel_softc *chp;
3115 {
3116 struct ata_drive_datas *drvp;
3117 int drive;
3118 int cable;
3119 u_int32_t before, after;
3120 u_int32_t idedma_ctl;
3121 struct pciide_channel *cp = (struct pciide_channel*)chp;
3122 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3123
3124 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
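/*
 * HPT_CSEL holds the cable-detect bits; a set CBLID bit apparently
 * means no 80-wire cable on that channel, so UDMA modes above 2 are
 * clamped below.
 */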
3125
3126 /* setup DMA if needed */
3127 pciide_channel_dma_setup(cp);
3128
3129 idedma_ctl = 0;
3130
3131 /* Per drive settings */
3132 for (drive = 0; drive < 2; drive++) {
3133 drvp = &chp->ch_drive[drive];
3134 /* If no drive, skip */
3135 if ((drvp->drive_flags & DRIVE) == 0)
3136 continue;
3137 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3138 HPT_IDETIM(chp->channel, drive));
3139
3140 /* add timing values, setup DMA if needed */
3141 if (drvp->drive_flags & DRIVE_UDMA) {
3142 /* use Ultra/DMA */
3143 drvp->drive_flags &= ~DRIVE_DMA;
3144 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3145 drvp->UDMA_mode > 2)
3146 drvp->UDMA_mode = 2;
3147 after = (sc->sc_wdcdev.nchannels == 2) ?
3148 hpt370_udma[drvp->UDMA_mode] :
3149 hpt366_udma[drvp->UDMA_mode];
3150 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3151 } else if (drvp->drive_flags & DRIVE_DMA) {
3152 /*
3153 * use Multiword DMA.
3154 * Timings will be used for both PIO and DMA, so adjust
3155 * DMA mode if needed
3156 */
3157 if (drvp->PIO_mode >= 3 &&
3158 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3159 drvp->DMA_mode = drvp->PIO_mode - 2;
3160 }
3161 after = (sc->sc_wdcdev.nchannels == 2) ?
3162 hpt370_dma[drvp->DMA_mode] :
3163 hpt366_dma[drvp->DMA_mode];
3164 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3165 } else {
3166 /* PIO only */
3167 after = (sc->sc_wdcdev.nchannels == 2) ?
3168 hpt370_pio[drvp->PIO_mode] :
3169 hpt366_pio[drvp->PIO_mode];
3170 }
3171 pci_conf_write(sc->sc_pc, sc->sc_tag,
3172 HPT_IDETIM(chp->channel, drive), after);
3173 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3174 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3175 after, before), DEBUG_PROBE);
3176 }
3177 if (idedma_ctl != 0) {
3178 /* Add software bits in status register */
3179 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3180 IDEDMA_CTL, idedma_ctl);
3181 }
3182 pciide_print_modes(cp);
3183 }
3184
3185 int
3186 hpt_pci_intr(arg)
3187 void *arg;
3188 {
3189 struct pciide_softc *sc = arg;
3190 struct pciide_channel *cp;
3191 struct channel_softc *wdc_cp;
3192 int rv = 0;
3193 int dmastat, i, crv;
3194
3195 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3196 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3197 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3198 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3199 continue;
3200 cp = &sc->pciide_channels[i];
3201 wdc_cp = &cp->wdc_channel;
3202 crv = wdcintr(wdc_cp);
3203 if (crv == 0) {
3204 printf("%s:%d: bogus intr\n",
3205 sc->sc_wdcdev.sc_dev.dv_xname, i);
3206 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3207 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3208 } else
3209 rv = 1;
3210 }
3211 return rv;
3212 }
3213
3214
3215 /* Macros to test product */
3216 #define PDC_IS_262(sc) \
3217 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3218 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3219 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3220 #define PDC_IS_265(sc) \
3221 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3222 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3223
3224 void
3225 pdc202xx_chip_map(sc, pa)
3226 struct pciide_softc *sc;
3227 struct pci_attach_args *pa;
3228 {
3229 struct pciide_channel *cp;
3230 int channel;
3231 pcireg_t interface, st, mode;
3232 bus_size_t cmdsize, ctlsize;
3233
3234 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3235 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3236 DEBUG_PROBE);
3237 if (pciide_chipen(sc, pa) == 0)
3238 return;
3239
3240 /* turn off RAID mode */
3241 st &= ~PDC2xx_STATE_IDERAID;
3242
3243 /*
3244 * can't rely on the PCI_CLASS_REG content if the chip was in raid
3245 * mode. We have to fake the interface.
3246 */
3247 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3248 if (st & PDC2xx_STATE_NATIVE)
3249 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3250
3251 printf("%s: bus-master DMA support present",
3252 sc->sc_wdcdev.sc_dev.dv_xname);
3253 pciide_mapreg_dma(sc, pa);
3254 printf("\n");
3255 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3256 WDC_CAPABILITY_MODE;
3257 if (sc->sc_dma_ok) {
3258 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3259 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3260 sc->sc_wdcdev.irqack = pciide_irqack;
3261 }
3262 sc->sc_wdcdev.PIO_cap = 4;
3263 sc->sc_wdcdev.DMA_cap = 2;
3264 if (PDC_IS_265(sc))
3265 sc->sc_wdcdev.UDMA_cap = 5;
3266 else if (PDC_IS_262(sc))
3267 sc->sc_wdcdev.UDMA_cap = 4;
3268 else
3269 sc->sc_wdcdev.UDMA_cap = 2;
3270 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3271 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3272 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3273
3274 /* setup failsafe defaults */
3275 mode = 0;
3276 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3277 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3278 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3279 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
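/*
 * Program conservative mode-0 timings for every drive so the controller
 * is usable even if the BIOS never set it up.
 */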
3280 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3281 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3282 "initial timings 0x%x, now 0x%x\n", channel,
3283 pci_conf_read(sc->sc_pc, sc->sc_tag,
3284 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3285 DEBUG_PROBE);
3286 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3287 mode | PDC2xx_TIM_IORDYp);
3288 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3289 "initial timings 0x%x, now 0x%x\n", channel,
3290 pci_conf_read(sc->sc_pc, sc->sc_tag,
3291 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3292 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3293 mode);
3294 }
3295
3296 mode = PDC2xx_SCR_DMA;
3297 if (PDC_IS_262(sc)) {
3298 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3299 } else {
3300 /* the BIOS set it up this way */
3301 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3302 }
3303 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3304 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3305 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3306 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3307 DEBUG_PROBE);
3308 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3309
3310 /* controller initial state register is OK even without BIOS */
3311 /* Set DMA mode to IDE DMA compatibility */
3312 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3313 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3314 DEBUG_PROBE);
3315 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3316 mode | 0x1);
3317 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3318 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode), DEBUG_PROBE);
3319 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3320 mode | 0x1);
3321
3322 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3323 cp = &sc->pciide_channels[channel];
3324 if (pciide_chansetup(sc, channel, interface) == 0)
3325 continue;
3326 if ((st & (PDC_IS_262(sc) ?
3327 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3328 printf("%s: %s channel ignored (disabled)\n",
3329 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3330 continue;
3331 }
3332 if (PDC_IS_265(sc))
3333 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3334 pdc20265_pci_intr);
3335 else
3336 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3337 pdc202xx_pci_intr);
3338 if (cp->hw_ok == 0)
3339 continue;
3340 if (pciide_chan_candisable(cp))
3341 st &= ~(PDC_IS_262(sc) ?
3342 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3343 pciide_map_compat_intr(pa, cp, channel, interface);
3344 pdc202xx_setup_channel(&cp->wdc_channel);
3345 }
3346 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3347 DEBUG_PROBE);
3348 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3349 return;
3350 }
3351
3352 void
3353 pdc202xx_setup_channel(chp)
3354 struct channel_softc *chp;
3355 {
3356 struct ata_drive_datas *drvp;
3357 int drive;
3358 pcireg_t mode, st;
3359 u_int32_t idedma_ctl, scr, atapi;
3360 struct pciide_channel *cp = (struct pciide_channel*)chp;
3361 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3362 int channel = chp->channel;
3363
3364 /* setup DMA if needed */
3365 pciide_channel_dma_setup(cp);
3366
3367 idedma_ctl = 0;
3368 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3369 sc->sc_wdcdev.sc_dev.dv_xname,
3370 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3371 DEBUG_PROBE);
3372
3373 /* Per channel settings */
3374 if (PDC_IS_262(sc)) {
3375 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3376 PDC262_U66);
3377 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3378 /* Trim UDMA mode */
3379 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3380 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3381 chp->ch_drive[0].UDMA_mode <= 2) ||
3382 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3383 chp->ch_drive[1].UDMA_mode <= 2)) {
3384 if (chp->ch_drive[0].UDMA_mode > 2)
3385 chp->ch_drive[0].UDMA_mode = 2;
3386 if (chp->ch_drive[1].UDMA_mode > 2)
3387 chp->ch_drive[1].UDMA_mode = 2;
3388 }
3389 /* Set U66 if needed */
3390 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3391 chp->ch_drive[0].UDMA_mode > 2) ||
3392 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3393 chp->ch_drive[1].UDMA_mode > 2))
3394 scr |= PDC262_U66_EN(channel);
3395 else
3396 scr &= ~PDC262_U66_EN(channel);
3397 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3398 PDC262_U66, scr);
3399 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3400 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3401 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3402 PDC262_ATAPI(channel))), DEBUG_PROBE);
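/*
 * With at least one ATAPI device on the channel, the per-channel ATAPI
 * UDMA bit is set unless one drive runs Ultra/DMA while the other runs
 * plain multiword DMA.
 */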
3403 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3404 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3405 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3406 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3407 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3408 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3409 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3410 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3411 atapi = 0;
3412 else
3413 atapi = PDC262_ATAPI_UDMA;
3414 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3415 PDC262_ATAPI(channel), atapi);
3416 }
3417 }
3418 for (drive = 0; drive < 2; drive++) {
3419 drvp = &chp->ch_drive[drive];
3420 /* If no drive, skip */
3421 if ((drvp->drive_flags & DRIVE) == 0)
3422 continue;
3423 mode = 0;
3424 if (drvp->drive_flags & DRIVE_UDMA) {
3425 /* use Ultra/DMA */
3426 drvp->drive_flags &= ~DRIVE_DMA;
3427 mode = PDC2xx_TIM_SET_MB(mode,
3428 pdc2xx_udma_mb[drvp->UDMA_mode]);
3429 mode = PDC2xx_TIM_SET_MC(mode,
3430 pdc2xx_udma_mc[drvp->UDMA_mode]);
3431 drvp->drive_flags &= ~DRIVE_DMA;
3432 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3433 } else if (drvp->drive_flags & DRIVE_DMA) {
3434 mode = PDC2xx_TIM_SET_MB(mode,
3435 pdc2xx_dma_mb[drvp->DMA_mode]);
3436 mode = PDC2xx_TIM_SET_MC(mode,
3437 pdc2xx_dma_mc[drvp->DMA_mode]);
3438 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3439 } else {
3440 mode = PDC2xx_TIM_SET_MB(mode,
3441 pdc2xx_dma_mb[0]);
3442 mode = PDC2xx_TIM_SET_MC(mode,
3443 pdc2xx_dma_mc[0]);
3444 }
3445 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3446 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3447 if (drvp->drive_flags & DRIVE_ATA)
3448 mode |= PDC2xx_TIM_PRE;
3449 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3450 if (drvp->PIO_mode >= 3) {
3451 mode |= PDC2xx_TIM_IORDY;
3452 if (drive == 0)
3453 mode |= PDC2xx_TIM_IORDYp;
3454 }
3455 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3456 "timings 0x%x\n",
3457 sc->sc_wdcdev.sc_dev.dv_xname,
3458 chp->channel, drive, mode), DEBUG_PROBE);
3459 pci_conf_write(sc->sc_pc, sc->sc_tag,
3460 PDC2xx_TIM(chp->channel, drive), mode);
3461 }
3462 if (idedma_ctl != 0) {
3463 /* Add software bits in status register */
3464 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3465 IDEDMA_CTL, idedma_ctl);
3466 }
3467 pciide_print_modes(cp);
3468 }
3469
3470 int
3471 pdc202xx_pci_intr(arg)
3472 void *arg;
3473 {
3474 struct pciide_softc *sc = arg;
3475 struct pciide_channel *cp;
3476 struct channel_softc *wdc_cp;
3477 int i, rv, crv;
3478 u_int32_t scr;
3479
3480 rv = 0;
3481 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3482 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3483 cp = &sc->pciide_channels[i];
3484 wdc_cp = &cp->wdc_channel;
3485 		/* Skip compat channels. */
3486 if (cp->compat)
3487 continue;
3488 if (scr & PDC2xx_SCR_INT(i)) {
3489 crv = wdcintr(wdc_cp);
3490 if (crv == 0)
3491 printf("%s:%d: bogus intr (reg 0x%x)\n",
3492 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3493 else
3494 rv = 1;
3495 }
3496 }
3497 return rv;
3498 }
3499
3500 int
3501 pdc20265_pci_intr(arg)
3502 void *arg;
3503 {
3504 struct pciide_softc *sc = arg;
3505 struct pciide_channel *cp;
3506 struct channel_softc *wdc_cp;
3507 int i, rv, crv;
3508 u_int32_t dmastat;
3509
3510 rv = 0;
3511 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3512 cp = &sc->pciide_channels[i];
3513 wdc_cp = &cp->wdc_channel;
3514 		/* Skip compat channels. */
3515 if (cp->compat)
3516 continue;
3517 		/*
3518 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously.
3519 		 * However, it asserts INT in IDEDMA_CTL even for non-DMA ops,
3520 		 * so use that instead (it requires 2 reg reads instead of 1,
3521 		 * but we can't do it another way).
3522 		 */
3523 dmastat = bus_space_read_1(sc->sc_dma_iot,
3524 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3525 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3526 continue;
3527 crv = wdcintr(wdc_cp);
3528 if (crv == 0)
3529 printf("%s:%d: bogus intr\n",
3530 sc->sc_wdcdev.sc_dev.dv_xname, i);
3531 else
3532 rv = 1;
3533 }
3534 return rv;
3535 }
3536
3537 void
3538 opti_chip_map(sc, pa)
3539 struct pciide_softc *sc;
3540 struct pci_attach_args *pa;
3541 {
3542 struct pciide_channel *cp;
3543 bus_size_t cmdsize, ctlsize;
3544 pcireg_t interface;
3545 u_int8_t init_ctrl;
3546 int channel;
3547
3548 if (pciide_chipen(sc, pa) == 0)
3549 return;
3550 printf("%s: bus-master DMA support present",
3551 sc->sc_wdcdev.sc_dev.dv_xname);
3552
3553 /*
3554 * XXXSCW:
3555 * There seem to be a couple of buggy revisions/implementations
3556 * of the OPTi pciide chipset. This kludge seems to fix one of
3557 * the reported problems (PR/11644) but still fails for the
3558 * other (PR/13151), although the latter may be due to other
3559 * issues too...
3560 */
3561 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3562 printf(" but disabled due to chip rev. <= 0x12");
3563 sc->sc_dma_ok = 0;
3564 sc->sc_wdcdev.cap = 0;
3565 } else {
3566 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3567 pciide_mapreg_dma(sc, pa);
3568 }
3569 printf("\n");
3570
3571 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3572 sc->sc_wdcdev.PIO_cap = 4;
3573 if (sc->sc_dma_ok) {
3574 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3575 sc->sc_wdcdev.irqack = pciide_irqack;
3576 sc->sc_wdcdev.DMA_cap = 2;
3577 }
3578 sc->sc_wdcdev.set_modes = opti_setup_channel;
3579
3580 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3581 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3582
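	/*
	 * Fetch the initialization-control register; it is checked below to
	 * see whether the second channel has been disabled in hardware.
	 */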
3583 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3584 OPTI_REG_INIT_CONTROL);
3585
3586 interface = PCI_INTERFACE(pa->pa_class);
3587
3588 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3589 cp = &sc->pciide_channels[channel];
3590 if (pciide_chansetup(sc, channel, interface) == 0)
3591 continue;
3592 if (channel == 1 &&
3593 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3594 printf("%s: %s channel ignored (disabled)\n",
3595 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3596 continue;
3597 }
3598 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3599 pciide_pci_intr);
3600 if (cp->hw_ok == 0)
3601 continue;
3602 pciide_map_compat_intr(pa, cp, channel, interface);
3603 if (cp->hw_ok == 0)
3604 continue;
3605 opti_setup_channel(&cp->wdc_channel);
3606 }
3607 }
3608
3609 void
3610 opti_setup_channel(chp)
3611 struct channel_softc *chp;
3612 {
3613 struct ata_drive_datas *drvp;
3614 struct pciide_channel *cp = (struct pciide_channel*)chp;
3615 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3616 int drive, spd;
3617 int mode[2];
3618 u_int8_t rv, mr;
3619
3620 	/*
3621 	 * Clear the `Delay', `Address Setup Time' and index fields of the
3622 	 * Miscellaneous Register; Address Setup and index are set per-drive.
3623 	 */
3624 	mr = opti_read_config(chp, OPTI_REG_MISC);
3625 mr &= ~(OPTI_MISC_DELAY_MASK |
3626 OPTI_MISC_ADDR_SETUP_MASK |
3627 OPTI_MISC_INDEX_MASK);
3628
3629 /* Prime the control register before setting timing values */
3630 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3631
3632 	/* Determine the clock rate of the PCI bus the chip is attached to */
3633 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3634 spd &= OPTI_STRAP_PCI_SPEED_MASK;
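	/* spd selects the per-clock-rate row of the opti_tim_* tables below */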
3635
3636 /* setup DMA if needed */
3637 pciide_channel_dma_setup(cp);
3638
3639 for (drive = 0; drive < 2; drive++) {
3640 drvp = &chp->ch_drive[drive];
3641 /* If no drive, skip */
3642 if ((drvp->drive_flags & DRIVE) == 0) {
3643 mode[drive] = -1;
3644 continue;
3645 }
3646
3647 if ((drvp->drive_flags & DRIVE_DMA)) {
3648 /*
3649 * Timings will be used for both PIO and DMA,
3650 * so adjust DMA mode if needed
3651 */
3652 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3653 drvp->PIO_mode = drvp->DMA_mode + 2;
3654 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3655 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3656 drvp->PIO_mode - 2 : 0;
3657 if (drvp->DMA_mode == 0)
3658 drvp->PIO_mode = 0;
3659
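			/*
			 * The opti_tim_* tables apparently list the five PIO
			 * modes first, so DMA modes are indexed at offset 5.
			 */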
3660 mode[drive] = drvp->DMA_mode + 5;
3661 } else
3662 mode[drive] = drvp->PIO_mode;
3663
3664 if (drive && mode[0] >= 0 &&
3665 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3666 /*
3667 * Can't have two drives using different values
3668 * for `Address Setup Time'.
3669 * Slow down the faster drive to compensate.
3670 */
3671 int d = (opti_tim_as[spd][mode[0]] >
3672 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3673
3674 mode[d] = mode[1-d];
3675 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3676 chp->ch_drive[d].DMA_mode = 0;
3677 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3678 }
3679 }
3680
3681 for (drive = 0; drive < 2; drive++) {
3682 int m;
3683 if ((m = mode[drive]) < 0)
3684 continue;
3685
3686 /* Set the Address Setup Time and select appropriate index */
3687 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3688 rv |= OPTI_MISC_INDEX(drive);
3689 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3690
3691 /* Set the pulse width and recovery timing parameters */
3692 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3693 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3694 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3695 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3696
3697 /* Set the Enhanced Mode register appropriately */
3698 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3699 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3700 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3701 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3702 }
3703
3704 /* Finally, enable the timings */
3705 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3706
3707 pciide_print_modes(cp);
3708 }
3709