/*	$NetBSD: pciide.c,v 1.68.2.29 2002/01/05 18:05:58 he Exp $	*/

4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
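
/*
 * Debugging example (sketch): to trace probing and DMA table setup one
 * could set, e.g. from ddb or by patching the kernel image,
 *
 *	wdcdebug_pciide_mask = DEBUG_PROBE | DEBUG_DMA;
 *
 * before the pciide devices attach; the default of 0 keeps the driver quiet.
 */
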
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/cy82c693var.h>
121
122 #include "opt_pciide.h"
123
124 /* inlines for reading/writing 8-bit PCI registers */
125 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
126 int));
127 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
128 int, u_int8_t));
129
130 static __inline u_int8_t
131 pciide_pci_read(pc, pa, reg)
132 pci_chipset_tag_t pc;
133 pcitag_t pa;
134 int reg;
135 {
136
137 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
138 ((reg & 0x03) * 8) & 0xff);
139 }
140
141 static __inline void
142 pciide_pci_write(pc, pa, reg, val)
143 pci_chipset_tag_t pc;
144 pcitag_t pa;
145 int reg;
146 u_int8_t val;
147 {
148 pcireg_t pcival;
149
150 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
151 pcival &= ~(0xff << ((reg & 0x03) * 8));
152 pcival |= (val << ((reg & 0x03) * 8));
153 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
154 }
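
/*
 * Usage sketch (hypothetical register offset): PCI configuration space is
 * accessed 32 bits at a time, so these helpers read-modify-write the
 * enclosing dword; e.g.
 *
 *	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x53, 0x20);
 *
 * would change only byte 3 of the configuration dword at offset 0x50.
 */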
155
156 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157
158 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159 void piix_setup_channel __P((struct channel_softc*));
160 void piix3_4_setup_channel __P((struct channel_softc*));
161 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
163 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164
165 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
166 void amd7x6_setup_channel __P((struct channel_softc*));
167
168 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
169 void apollo_setup_channel __P((struct channel_softc*));
170
171 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
173 void cmd0643_9_setup_channel __P((struct channel_softc*));
174 void cmd_channel_map __P((struct pci_attach_args *,
175 struct pciide_softc *, int));
176 int cmd_pci_intr __P((void *));
177 void cmd646_9_irqack __P((struct channel_softc *));
178
179 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
180 void cy693_setup_channel __P((struct channel_softc*));
181
182 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void sis_setup_channel __P((struct channel_softc*));
184
185 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void acer_setup_channel __P((struct channel_softc*));
187 int acer_pci_intr __P((void *));
188
189 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
190 void pdc202xx_setup_channel __P((struct channel_softc*));
191 int pdc202xx_pci_intr __P((void *));
192 int pdc20265_pci_intr __P((void *));
193
194 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
195 void opti_setup_channel __P((struct channel_softc*));
196
197 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
198 void hpt_setup_channel __P((struct channel_softc*));
199 int hpt_pci_intr __P((void *));
200
201 void pciide_channel_dma_setup __P((struct pciide_channel *));
202 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
203 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
204 void pciide_dma_start __P((void*, int, int));
205 int pciide_dma_finish __P((void*, int, int, int));
206 void pciide_irqack __P((struct channel_softc *));
207 void pciide_print_modes __P((struct pciide_channel *));
208
209 struct pciide_product_desc {
210 u_int32_t ide_product;
211 int ide_flags;
212 const char *ide_name;
213 /* map and setup chip, probe drives */
214 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
215 };
216
217 /* Flags for ide_flags */
218 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
219
/* Default product description for devices not known to this driver */
221 const struct pciide_product_desc default_product_desc = {
222 0,
223 0,
224 "Generic PCI IDE controller",
225 default_chip_map,
226 };
227
228 const struct pciide_product_desc pciide_intel_products[] = {
229 { PCI_PRODUCT_INTEL_82092AA,
230 0,
231 "Intel 82092AA IDE controller",
232 default_chip_map,
233 },
234 { PCI_PRODUCT_INTEL_82371FB_IDE,
235 0,
236 "Intel 82371FB IDE controller (PIIX)",
237 piix_chip_map,
238 },
239 { PCI_PRODUCT_INTEL_82371SB_IDE,
240 0,
241 "Intel 82371SB IDE Interface (PIIX3)",
242 piix_chip_map,
243 },
244 { PCI_PRODUCT_INTEL_82371AB_IDE,
245 0,
246 "Intel 82371AB IDE controller (PIIX4)",
247 piix_chip_map,
248 },
249 { PCI_PRODUCT_INTEL_82801AA_IDE,
250 0,
251 "Intel 82801AA IDE Controller (ICH)",
252 piix_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82801AB_IDE,
255 0,
256 "Intel 82801AB IDE Controller (ICH0)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82801BA_IDE,
260 0,
261 "Intel 82801BA IDE Controller (ICH2)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82801BAM_IDE,
265 0,
266 "Intel 82801BAM IDE Controller (ICH2)",
267 piix_chip_map,
268 },
269 { 0,
270 0,
271 NULL,
272 NULL
273 }
274 };
275
276 const struct pciide_product_desc pciide_amd_products[] = {
277 { PCI_PRODUCT_AMD_PBC756_IDE,
278 0,
279 "Advanced Micro Devices AMD756 IDE Controller",
280 amd7x6_chip_map
281 },
282 { PCI_PRODUCT_AMD_PBC766_IDE,
283 0,
284 "Advanced Micro Devices AMD766 IDE Controller",
285 amd7x6_chip_map
286 },
287 { 0,
288 0,
289 NULL,
290 NULL
291 }
292 };
293
294 const struct pciide_product_desc pciide_cmd_products[] = {
295 { PCI_PRODUCT_CMDTECH_640,
296 0,
297 "CMD Technology PCI0640",
298 cmd_chip_map
299 },
300 { PCI_PRODUCT_CMDTECH_643,
301 0,
302 "CMD Technology PCI0643",
303 cmd0643_9_chip_map,
304 },
305 { PCI_PRODUCT_CMDTECH_646,
306 0,
307 "CMD Technology PCI0646",
308 cmd0643_9_chip_map,
309 },
310 { PCI_PRODUCT_CMDTECH_648,
311 IDE_PCI_CLASS_OVERRIDE,
312 "CMD Technology PCI0648",
313 cmd0643_9_chip_map,
314 },
315 { PCI_PRODUCT_CMDTECH_649,
316 IDE_PCI_CLASS_OVERRIDE,
317 "CMD Technology PCI0649",
318 cmd0643_9_chip_map,
319 },
320 { 0,
321 0,
322 NULL,
323 NULL
324 }
325 };
326
327 const struct pciide_product_desc pciide_via_products[] = {
328 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
329 0,
330 NULL,
331 apollo_chip_map,
332 },
333 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
334 0,
335 NULL,
336 apollo_chip_map,
337 },
338 { 0,
339 0,
340 NULL,
341 NULL
342 }
343 };
344
345 const struct pciide_product_desc pciide_cypress_products[] = {
346 { PCI_PRODUCT_CONTAQ_82C693,
347 0,
348 "Cypress 82C693 IDE Controller",
349 cy693_chip_map,
350 },
351 { 0,
352 0,
353 NULL,
354 NULL
355 }
356 };
357
358 const struct pciide_product_desc pciide_sis_products[] = {
359 { PCI_PRODUCT_SIS_5597_IDE,
360 0,
361 "Silicon Integrated System 5597/5598 IDE controller",
362 sis_chip_map,
363 },
364 { 0,
365 0,
366 NULL,
367 NULL
368 }
369 };
370
371 const struct pciide_product_desc pciide_acer_products[] = {
372 { PCI_PRODUCT_ALI_M5229,
373 0,
374 "Acer Labs M5229 UDMA IDE Controller",
375 acer_chip_map,
376 },
377 { 0,
378 0,
379 NULL,
380 NULL
381 }
382 };
383
384 const struct pciide_product_desc pciide_promise_products[] = {
385 { PCI_PRODUCT_PROMISE_ULTRA33,
386 IDE_PCI_CLASS_OVERRIDE,
387 "Promise Ultra33/ATA Bus Master IDE Accelerator",
388 pdc202xx_chip_map,
389 },
390 { PCI_PRODUCT_PROMISE_ULTRA66,
391 IDE_PCI_CLASS_OVERRIDE,
392 "Promise Ultra66/ATA Bus Master IDE Accelerator",
393 pdc202xx_chip_map,
394 },
395 { PCI_PRODUCT_PROMISE_ULTRA100,
396 IDE_PCI_CLASS_OVERRIDE,
397 "Promise Ultra100/ATA Bus Master IDE Accelerator",
398 pdc202xx_chip_map,
399 },
400 { PCI_PRODUCT_PROMISE_ULTRA100X,
401 IDE_PCI_CLASS_OVERRIDE,
402 "Promise Ultra100/ATA Bus Master IDE Accelerator",
403 pdc202xx_chip_map,
404 },
405 { 0,
406 0,
407 NULL,
408 NULL
409 }
410 };
411
412 const struct pciide_product_desc pciide_opti_products[] = {
413 { PCI_PRODUCT_OPTI_82C621,
414 0,
415 "OPTi 82c621 PCI IDE controller",
416 opti_chip_map,
417 },
418 { PCI_PRODUCT_OPTI_82C568,
419 0,
420 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
421 opti_chip_map,
422 },
423 { PCI_PRODUCT_OPTI_82D568,
424 0,
425 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
426 opti_chip_map,
427 },
428 { 0,
429 0,
430 NULL,
431 NULL
432 }
433 };
434
435 const struct pciide_product_desc pciide_triones_products[] = {
436 { PCI_PRODUCT_TRIONES_HPT366,
437 IDE_PCI_CLASS_OVERRIDE,
438 NULL,
439 hpt_chip_map,
440 },
441 { 0,
442 0,
443 NULL,
444 NULL
445 }
446 };
447
448 struct pciide_vendor_desc {
449 u_int32_t ide_vendor;
450 const struct pciide_product_desc *ide_products;
451 };
452
453 const struct pciide_vendor_desc pciide_vendors[] = {
454 { PCI_VENDOR_INTEL, pciide_intel_products },
455 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
456 { PCI_VENDOR_VIATECH, pciide_via_products },
457 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
458 { PCI_VENDOR_SIS, pciide_sis_products },
459 { PCI_VENDOR_ALI, pciide_acer_products },
460 { PCI_VENDOR_PROMISE, pciide_promise_products },
461 { PCI_VENDOR_AMD, pciide_amd_products },
462 { PCI_VENDOR_OPTI, pciide_opti_products },
463 { PCI_VENDOR_TRIONES, pciide_triones_products },
464 { 0, NULL }
465 };
466
467 /* options passed via the 'flags' config keyword */
468 #define PCIIDE_OPTIONS_DMA 0x01
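
/*
 * Example (kernel config sketch): a line such as
 *
 *	pciide* at pci? dev ? function ? flags 0x0001
 *
 * would set PCIIDE_OPTIONS_DMA in cf_flags and let default_chip_map() try
 * bus-master DMA even on controllers this driver has no specific support for.
 */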
469
470 int pciide_match __P((struct device *, struct cfdata *, void *));
471 void pciide_attach __P((struct device *, struct device *, void *));
472
473 struct cfattach pciide_ca = {
474 sizeof(struct pciide_softc), pciide_match, pciide_attach
475 };
476 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
477 int pciide_mapregs_compat __P(( struct pci_attach_args *,
478 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
479 int pciide_mapregs_native __P((struct pci_attach_args *,
480 struct pciide_channel *, bus_size_t *, bus_size_t *,
481 int (*pci_intr) __P((void *))));
482 void pciide_mapreg_dma __P((struct pciide_softc *,
483 struct pci_attach_args *));
484 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
485 void pciide_mapchan __P((struct pci_attach_args *,
486 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
487 int (*pci_intr) __P((void *))));
488 int pciide_chan_candisable __P((struct pciide_channel *));
489 void pciide_map_compat_intr __P(( struct pci_attach_args *,
490 struct pciide_channel *, int, int));
491 int pciide_print __P((void *, const char *pnp));
492 int pciide_compat_intr __P((void *));
493 int pciide_pci_intr __P((void *));
494 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
495
496 const struct pciide_product_desc *
497 pciide_lookup_product(id)
498 u_int32_t id;
499 {
500 const struct pciide_product_desc *pp;
501 const struct pciide_vendor_desc *vp;
502
503 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
504 if (PCI_VENDOR(id) == vp->ide_vendor)
505 break;
506
507 if ((pp = vp->ide_products) == NULL)
508 return NULL;
509
510 for (; pp->chip_map != NULL; pp++)
511 if (PCI_PRODUCT(id) == pp->ide_product)
512 break;
513
514 if (pp->chip_map == NULL)
515 return NULL;
516 return pp;
517 }
518
519 int
520 pciide_match(parent, match, aux)
521 struct device *parent;
522 struct cfdata *match;
523 void *aux;
524 {
525 struct pci_attach_args *pa = aux;
526 const struct pciide_product_desc *pp;
527
528 /*
529 * Check the ID register to see that it's a PCI IDE controller.
530 * If it is, we assume that we can deal with it; it _should_
531 * work in a standardized way...
532 */
533 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
534 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
535 return (1);
536 }
537
	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers.  Let's see if we can deal with them anyway.
	 */
542 pp = pciide_lookup_product(pa->pa_id);
543 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
544 return (1);
545 }
546
547 return (0);
548 }
549
550 void
551 pciide_attach(parent, self, aux)
552 struct device *parent, *self;
553 void *aux;
554 {
555 struct pci_attach_args *pa = aux;
556 pci_chipset_tag_t pc = pa->pa_pc;
557 pcitag_t tag = pa->pa_tag;
558 struct pciide_softc *sc = (struct pciide_softc *)self;
559 pcireg_t csr;
560 char devinfo[256];
561 const char *displaydev;
562
563 sc->sc_pp = pciide_lookup_product(pa->pa_id);
564 if (sc->sc_pp == NULL) {
565 sc->sc_pp = &default_product_desc;
566 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
567 displaydev = devinfo;
568 } else
569 displaydev = sc->sc_pp->ide_name;
570
571 /* if displaydev == NULL, printf is done in chip-specific map */
572 if (displaydev)
573 printf(": %s (rev. 0x%02x)\n", displaydev,
574 PCI_REVISION(pa->pa_class));
575
576 sc->sc_pc = pa->pa_pc;
577 sc->sc_tag = pa->pa_tag;
578 #ifdef WDCDEBUG
579 if (wdcdebug_pciide_mask & DEBUG_PROBE)
580 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
581 #endif
582 sc->sc_pp->chip_map(sc, pa);
583
584 if (sc->sc_dma_ok) {
585 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
586 csr |= PCI_COMMAND_MASTER_ENABLE;
587 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
588 }
589 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
590 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
591 }
592
/* tell whether the chip is enabled or not */
594 int
595 pciide_chipen(sc, pa)
596 struct pciide_softc *sc;
597 struct pci_attach_args *pa;
598 {
599 pcireg_t csr;
600 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
601 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
602 PCI_COMMAND_STATUS_REG);
603 printf("%s: device disabled (at %s)\n",
604 sc->sc_wdcdev.sc_dev.dv_xname,
605 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
606 "device" : "bridge");
607 return 0;
608 }
609 return 1;
610 }
611
612 int
613 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
614 struct pci_attach_args *pa;
615 struct pciide_channel *cp;
616 int compatchan;
617 bus_size_t *cmdsizep, *ctlsizep;
618 {
619 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
620 struct channel_softc *wdc_cp = &cp->wdc_channel;
621
622 cp->compat = 1;
623 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
624 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
625
626 wdc_cp->cmd_iot = pa->pa_iot;
627 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
628 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
629 printf("%s: couldn't map %s channel cmd regs\n",
630 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
631 return (0);
632 }
633
634 wdc_cp->ctl_iot = pa->pa_iot;
635 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
636 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
637 printf("%s: couldn't map %s channel ctl regs\n",
638 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
639 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
640 PCIIDE_COMPAT_CMD_SIZE);
641 return (0);
642 }
643
644 return (1);
645 }
646
647 int
648 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
649 struct pci_attach_args * pa;
650 struct pciide_channel *cp;
651 bus_size_t *cmdsizep, *ctlsizep;
652 int (*pci_intr) __P((void *));
653 {
654 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
655 struct channel_softc *wdc_cp = &cp->wdc_channel;
656 const char *intrstr;
657 pci_intr_handle_t intrhandle;
658
659 cp->compat = 0;
660
661 if (sc->sc_pci_ih == NULL) {
662 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
663 pa->pa_intrline, &intrhandle) != 0) {
664 printf("%s: couldn't map native-PCI interrupt\n",
665 sc->sc_wdcdev.sc_dev.dv_xname);
666 return 0;
667 }
668 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
669 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
670 intrhandle, IPL_BIO, pci_intr, sc);
671 if (sc->sc_pci_ih != NULL) {
672 printf("%s: using %s for native-PCI interrupt\n",
673 sc->sc_wdcdev.sc_dev.dv_xname,
674 intrstr ? intrstr : "unknown interrupt");
675 } else {
676 printf("%s: couldn't establish native-PCI interrupt",
677 sc->sc_wdcdev.sc_dev.dv_xname);
678 if (intrstr != NULL)
679 printf(" at %s", intrstr);
680 printf("\n");
681 return 0;
682 }
683 }
684 cp->ih = sc->sc_pci_ih;
685 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
686 PCI_MAPREG_TYPE_IO, 0,
687 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
688 printf("%s: couldn't map %s channel cmd regs\n",
689 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
690 return 0;
691 }
692
693 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
694 PCI_MAPREG_TYPE_IO, 0,
695 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
696 printf("%s: couldn't map %s channel ctl regs\n",
697 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
698 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
699 return 0;
700 }
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register; the control register is at offset 2.  Pass the generic
	 * code a handle for only one byte at the right offset.
	 */
706 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
707 &wdc_cp->ctl_ioh) != 0) {
708 printf("%s: unable to subregion %s channel ctl regs\n",
709 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
710 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
712 return 0;
713 }
714 return (1);
715 }
716
717 void
718 pciide_mapreg_dma(sc, pa)
719 struct pciide_softc *sc;
720 struct pci_attach_args *pa;
721 {
722 pcireg_t maptype;
723
724 /*
725 * Map DMA registers
726 *
727 * Note that sc_dma_ok is the right variable to test to see if
728 * DMA can be done. If the interface doesn't support DMA,
729 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
730 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
731 * non-zero if the interface supports DMA and the registers
732 * could be mapped.
733 *
734 * XXX Note that despite the fact that the Bus Master IDE specs
735 * XXX say that "The bus master IDE function uses 16 bytes of IO
736 * XXX space," some controllers (at least the United
737 * XXX Microelectronics UM8886BF) place it in memory space.
738 */
739 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
740 PCIIDE_REG_BUS_MASTER_DMA);
741
742 switch (maptype) {
743 case PCI_MAPREG_TYPE_IO:
744 case PCI_MAPREG_MEM_TYPE_32BIT:
745 sc->sc_dma_ok = (pci_mapreg_map(pa,
746 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
747 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
748 sc->sc_dmat = pa->pa_dmat;
749 if (sc->sc_dma_ok == 0) {
750 printf(", but unused (couldn't map registers)");
751 } else {
752 sc->sc_wdcdev.dma_arg = sc;
753 sc->sc_wdcdev.dma_init = pciide_dma_init;
754 sc->sc_wdcdev.dma_start = pciide_dma_start;
755 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
756 }
757 break;
758
759 default:
760 sc->sc_dma_ok = 0;
761 printf(", but unsupported register maptype (0x%x)", maptype);
762 }
763 }
764
765 int
766 pciide_compat_intr(arg)
767 void *arg;
768 {
769 struct pciide_channel *cp = arg;
770
771 #ifdef DIAGNOSTIC
772 /* should only be called for a compat channel */
773 if (cp->compat == 0)
774 panic("pciide compat intr called for non-compat chan %p\n", cp);
775 #endif
776 return (wdcintr(&cp->wdc_channel));
777 }
778
779 int
780 pciide_pci_intr(arg)
781 void *arg;
782 {
783 struct pciide_softc *sc = arg;
784 struct pciide_channel *cp;
785 struct channel_softc *wdc_cp;
786 int i, rv, crv;
787
788 rv = 0;
789 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
790 cp = &sc->pciide_channels[i];
791 wdc_cp = &cp->wdc_channel;
792
		/* If it's a compat channel, skip it. */
		if (cp->compat)
			continue;
		/* If this channel isn't waiting for an interrupt, skip it. */
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
			continue;
799
800 crv = wdcintr(wdc_cp);
801 if (crv == 0)
802 ; /* leave rv alone */
803 else if (crv == 1)
804 rv = 1; /* claim the intr */
805 else if (rv == 0) /* crv should be -1 in this case */
806 rv = crv; /* if we've done no better, take it */
807 }
808 return (rv);
809 }
810
811 void
812 pciide_channel_dma_setup(cp)
813 struct pciide_channel *cp;
814 {
815 int drive;
816 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
817 struct ata_drive_datas *drvp;
818
819 for (drive = 0; drive < 2; drive++) {
820 drvp = &cp->wdc_channel.ch_drive[drive];
821 /* If no drive, skip */
822 if ((drvp->drive_flags & DRIVE) == 0)
823 continue;
824 /* setup DMA if needed */
825 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
826 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
827 sc->sc_dma_ok == 0) {
828 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
829 continue;
830 }
831 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
832 != 0) {
833 /* Abort DMA setup */
834 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
835 continue;
836 }
837 }
838 }
839
840 int
841 pciide_dma_table_setup(sc, channel, drive)
842 struct pciide_softc *sc;
843 int channel, drive;
844 {
845 bus_dma_segment_t seg;
846 int error, rseg;
847 const bus_size_t dma_table_size =
848 sizeof(struct idedma_table) * NIDEDMA_TABLES;
849 struct pciide_dma_maps *dma_maps =
850 &sc->pciide_channels[channel].dma_maps[drive];
851
852 /* If table was already allocated, just return */
853 if (dma_maps->dma_table)
854 return 0;
855
856 /* Allocate memory for the DMA tables and map it */
857 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
858 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
859 BUS_DMA_NOWAIT)) != 0) {
860 printf("%s:%d: unable to allocate table DMA for "
861 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
862 channel, drive, error);
863 return error;
864 }
865 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
866 dma_table_size,
867 (caddr_t *)&dma_maps->dma_table,
868 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
869 printf("%s:%d: unable to map table DMA for"
870 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
871 channel, drive, error);
872 return error;
873 }
874 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
875 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
876 seg.ds_addr), DEBUG_PROBE);
877
878 /* Create and load table DMA map for this disk */
879 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
880 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
881 &dma_maps->dmamap_table)) != 0) {
882 printf("%s:%d: unable to create table DMA map for "
883 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
884 channel, drive, error);
885 return error;
886 }
887 if ((error = bus_dmamap_load(sc->sc_dmat,
888 dma_maps->dmamap_table,
889 dma_maps->dma_table,
890 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
891 printf("%s:%d: unable to load table DMA map for "
892 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
893 channel, drive, error);
894 return error;
895 }
896 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
897 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
898 /* Create a xfer DMA map for this drive */
899 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
900 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
901 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
902 &dma_maps->dmamap_xfer)) != 0) {
903 printf("%s:%d: unable to create xfer DMA map for "
904 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
905 channel, drive, error);
906 return error;
907 }
908 return 0;
909 }
910
911 int
912 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
913 void *v;
914 int channel, drive;
915 void *databuf;
916 size_t datalen;
917 int flags;
918 {
919 struct pciide_softc *sc = v;
920 int error, seg;
921 struct pciide_dma_maps *dma_maps =
922 &sc->pciide_channels[channel].dma_maps[drive];
923
924 error = bus_dmamap_load(sc->sc_dmat,
925 dma_maps->dmamap_xfer,
926 databuf, datalen, NULL, BUS_DMA_NOWAIT);
927 if (error) {
928 printf("%s:%d: unable to load xfer DMA map for"
929 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
930 channel, drive, error);
931 return error;
932 }
933
934 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
935 dma_maps->dmamap_xfer->dm_mapsize,
936 (flags & WDC_DMA_READ) ?
937 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
938
939 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
940 #ifdef DIAGNOSTIC
941 /* A segment must not cross a 64k boundary */
942 {
943 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
944 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
945 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
946 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
947 printf("pciide_dma: segment %d physical addr 0x%lx"
948 " len 0x%lx not properly aligned\n",
949 seg, phys, len);
950 panic("pciide_dma: buf align");
951 }
952 }
953 #endif
954 dma_maps->dma_table[seg].base_addr =
955 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
956 dma_maps->dma_table[seg].byte_count =
957 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
958 IDEDMA_BYTE_COUNT_MASK);
959 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
960 seg, le32toh(dma_maps->dma_table[seg].byte_count),
961 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
962
963 }
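	/*
	 * The bus-master IDE spec requires the end-of-table bit to be set
	 * in the last physical region descriptor.
	 */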
964 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
965 htole32(IDEDMA_BYTE_COUNT_EOT);
966
967 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
968 dma_maps->dmamap_table->dm_mapsize,
969 BUS_DMASYNC_PREWRITE);
970
971 /* Maps are ready. Start DMA function */
972 #ifdef DIAGNOSTIC
973 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
974 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
975 dma_maps->dmamap_table->dm_segs[0].ds_addr);
976 panic("pciide_dma_init: table align");
977 }
978 #endif
979
980 /* Clear status bits */
981 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
982 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
983 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
984 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
985 /* Write table addr */
986 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
987 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
988 dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/*
	 * Set the transfer direction.  Note that IDEDMA_CMD_WRITE means the
	 * bus master writes to memory, i.e. a disk read.
	 */
990 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
991 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
992 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
993 /* remember flags */
994 dma_maps->dma_flags = flags;
995 return 0;
996 }
997
998 void
999 pciide_dma_start(v, channel, drive)
1000 void *v;
1001 int channel, drive;
1002 {
1003 struct pciide_softc *sc = v;
1004
1005 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1006 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1007 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1008 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1009 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1010 }
1011
1012 int
1013 pciide_dma_finish(v, channel, drive, force)
1014 void *v;
1015 int channel, drive;
1016 int force;
1017 {
1018 struct pciide_softc *sc = v;
1019 u_int8_t status;
1020 int error = 0;
1021 struct pciide_dma_maps *dma_maps =
1022 &sc->pciide_channels[channel].dma_maps[drive];
1023
1024 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1025 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1026 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1027 DEBUG_XFERS);
1028
1029 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1030 return WDC_DMAST_NOIRQ;
1031
1032 /* stop DMA channel */
1033 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1034 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1035 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1036 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1037
1038 /* Unload the map of the data buffer */
1039 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1040 dma_maps->dmamap_xfer->dm_mapsize,
1041 (dma_maps->dma_flags & WDC_DMA_READ) ?
1042 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1043 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1044
1045 if ((status & IDEDMA_CTL_ERR) != 0) {
1046 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1047 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1048 error |= WDC_DMAST_ERR;
1049 }
1050
1051 if ((status & IDEDMA_CTL_INTR) == 0) {
1052 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1053 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1054 drive, status);
1055 error |= WDC_DMAST_NOIRQ;
1056 }
1057
1058 if ((status & IDEDMA_CTL_ACT) != 0) {
1059 /* data underrun, may be a valid condition for ATAPI */
1060 error |= WDC_DMAST_UNDER;
1061 }
1062 return error;
1063 }
1064
1065 void
1066 pciide_irqack(chp)
1067 struct channel_softc *chp;
1068 {
1069 struct pciide_channel *cp = (struct pciide_channel*)chp;
1070 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1071
1072 /* clear status bits in IDE DMA registers */
1073 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1074 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1075 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1076 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1077 }
1078
1079 /* some common code used by several chip_map */
1080 int
1081 pciide_chansetup(sc, channel, interface)
1082 struct pciide_softc *sc;
1083 int channel;
1084 pcireg_t interface;
1085 {
1086 struct pciide_channel *cp = &sc->pciide_channels[channel];
1087 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1088 cp->name = PCIIDE_CHANNEL_NAME(channel);
1089 cp->wdc_channel.channel = channel;
1090 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1091 cp->wdc_channel.ch_queue =
1092 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1093 if (cp->wdc_channel.ch_queue == NULL) {
1094 printf("%s %s channel: "
1095 "can't allocate memory for command queue",
1096 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1097 return 0;
1098 }
1099 printf("%s: %s channel %s to %s mode\n",
1100 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1101 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1102 "configured" : "wired",
1103 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1104 "native-PCI" : "compatibility");
1105 return 1;
1106 }
1107
1108 /* some common code used by several chip channel_map */
1109 void
1110 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1111 struct pci_attach_args *pa;
1112 struct pciide_channel *cp;
1113 pcireg_t interface;
1114 bus_size_t *cmdsizep, *ctlsizep;
1115 int (*pci_intr) __P((void *));
1116 {
1117 struct channel_softc *wdc_cp = &cp->wdc_channel;
1118
1119 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1120 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1121 pci_intr);
1122 else
1123 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1124 wdc_cp->channel, cmdsizep, ctlsizep);
1125
1126 if (cp->hw_ok == 0)
1127 return;
1128 wdc_cp->data32iot = wdc_cp->cmd_iot;
1129 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1130 wdcattach(wdc_cp);
1131 }
1132
/*
 * Generic code used to decide whether a channel can be disabled.  Returns 1
 * if the channel can be disabled, 0 if not.
 */
1137 int
1138 pciide_chan_candisable(cp)
1139 struct pciide_channel *cp;
1140 {
1141 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1142 struct channel_softc *wdc_cp = &cp->wdc_channel;
1143
1144 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1145 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1146 printf("%s: disabling %s channel (no drives)\n",
1147 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1148 cp->hw_ok = 0;
1149 return 1;
1150 }
1151 return 0;
1152 }
1153
/*
 * Generic code to map the compat interrupt if hw_ok=1 and this is a compat
 * channel.  Sets hw_ok=0 on failure.
 */
1158 void
1159 pciide_map_compat_intr(pa, cp, compatchan, interface)
1160 struct pci_attach_args *pa;
1161 struct pciide_channel *cp;
1162 int compatchan, interface;
1163 {
1164 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1165 struct channel_softc *wdc_cp = &cp->wdc_channel;
1166
1167 if (cp->hw_ok == 0)
1168 return;
1169 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1170 return;
1171
1172 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1173 pa, compatchan, pciide_compat_intr, cp);
1174 if (cp->ih == NULL) {
1175 printf("%s: no compatibility interrupt for use by %s "
1176 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1177 cp->hw_ok = 0;
1178 }
1179 }
1180
1181 void
1182 pciide_print_modes(cp)
1183 struct pciide_channel *cp;
1184 {
1185 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1186 int drive;
1187 struct channel_softc *chp;
1188 struct ata_drive_datas *drvp;
1189
1190 chp = &cp->wdc_channel;
1191 for (drive = 0; drive < 2; drive++) {
1192 drvp = &chp->ch_drive[drive];
1193 if ((drvp->drive_flags & DRIVE) == 0)
1194 continue;
1195 printf("%s(%s:%d:%d): using PIO mode %d",
1196 drvp->drv_softc->dv_xname,
1197 sc->sc_wdcdev.sc_dev.dv_xname,
1198 chp->channel, drive, drvp->PIO_mode);
1199 if (drvp->drive_flags & DRIVE_DMA)
1200 printf(", DMA mode %d", drvp->DMA_mode);
1201 if (drvp->drive_flags & DRIVE_UDMA)
1202 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1203 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1204 printf(" (using DMA data transfers)");
1205 printf("\n");
1206 }
1207 }
1208
1209 void
1210 default_chip_map(sc, pa)
1211 struct pciide_softc *sc;
1212 struct pci_attach_args *pa;
1213 {
1214 struct pciide_channel *cp;
1215 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1216 pcireg_t csr;
1217 int channel, drive;
1218 struct ata_drive_datas *drvp;
1219 u_int8_t idedma_ctl;
1220 bus_size_t cmdsize, ctlsize;
1221 char *failreason;
1222
1223 if (pciide_chipen(sc, pa) == 0)
1224 return;
1225
1226 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1227 printf("%s: bus-master DMA support present",
1228 sc->sc_wdcdev.sc_dev.dv_xname);
1229 if (sc->sc_pp == &default_product_desc &&
1230 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1231 PCIIDE_OPTIONS_DMA) == 0) {
1232 printf(", but unused (no driver support)");
1233 sc->sc_dma_ok = 0;
1234 } else {
1235 pciide_mapreg_dma(sc, pa);
1236 if (sc->sc_dma_ok != 0)
1237 printf(", used without full driver "
1238 "support");
1239 }
1240 } else {
1241 printf("%s: hardware does not support DMA",
1242 sc->sc_wdcdev.sc_dev.dv_xname);
1243 sc->sc_dma_ok = 0;
1244 }
1245 printf("\n");
1246 if (sc->sc_dma_ok) {
1247 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1248 sc->sc_wdcdev.irqack = pciide_irqack;
1249 }
1250 sc->sc_wdcdev.PIO_cap = 0;
1251 sc->sc_wdcdev.DMA_cap = 0;
1252
1253 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1254 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1255 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1256
1257 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1258 cp = &sc->pciide_channels[channel];
1259 if (pciide_chansetup(sc, channel, interface) == 0)
1260 continue;
1261 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1262 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1263 &ctlsize, pciide_pci_intr);
1264 } else {
1265 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1266 channel, &cmdsize, &ctlsize);
1267 }
1268 if (cp->hw_ok == 0)
1269 continue;
1270 /*
1271 * Check to see if something appears to be there.
1272 */
1273 failreason = NULL;
1274 if (!wdcprobe(&cp->wdc_channel)) {
1275 failreason = "not responding; disabled or no drives?";
1276 goto next;
1277 }
1278 /*
1279 * Now, make sure it's actually attributable to this PCI IDE
1280 * channel by trying to access the channel again while the
1281 * PCI IDE controller's I/O space is disabled. (If the
1282 * channel no longer appears to be there, it belongs to
1283 * this controller.) YUCK!
1284 */
1285 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1286 PCI_COMMAND_STATUS_REG);
1287 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1288 csr & ~PCI_COMMAND_IO_ENABLE);
1289 if (wdcprobe(&cp->wdc_channel))
1290 failreason = "other hardware responding at addresses";
1291 pci_conf_write(sc->sc_pc, sc->sc_tag,
1292 PCI_COMMAND_STATUS_REG, csr);
1293 next:
1294 if (failreason) {
1295 printf("%s: %s channel ignored (%s)\n",
1296 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1297 failreason);
1298 cp->hw_ok = 0;
1299 bus_space_unmap(cp->wdc_channel.cmd_iot,
1300 cp->wdc_channel.cmd_ioh, cmdsize);
1301 bus_space_unmap(cp->wdc_channel.ctl_iot,
1302 cp->wdc_channel.ctl_ioh, ctlsize);
1303 } else {
1304 pciide_map_compat_intr(pa, cp, channel, interface);
1305 }
1306 if (cp->hw_ok) {
1307 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1308 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1309 wdcattach(&cp->wdc_channel);
1310 }
1311 }
1312
1313 if (sc->sc_dma_ok == 0)
1314 return;
1315
1316 /* Allocate DMA maps */
1317 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1318 idedma_ctl = 0;
1319 cp = &sc->pciide_channels[channel];
1320 for (drive = 0; drive < 2; drive++) {
1321 drvp = &cp->wdc_channel.ch_drive[drive];
1322 /* If no drive, skip */
1323 if ((drvp->drive_flags & DRIVE) == 0)
1324 continue;
1325 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1326 continue;
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup and fall back to PIO */
				printf("%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive);
				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
			}
1335 printf("%s:%d:%d: using DMA data transfers\n",
1336 sc->sc_wdcdev.sc_dev.dv_xname,
1337 channel, drive);
1338 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1339 }
1340 if (idedma_ctl != 0) {
1341 /* Add software bits in status register */
1342 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1343 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1344 idedma_ctl);
1345 }
1346 }
1347 }
1348
1349 void
1350 piix_chip_map(sc, pa)
1351 struct pciide_softc *sc;
1352 struct pci_attach_args *pa;
1353 {
1354 struct pciide_channel *cp;
1355 int channel;
1356 u_int32_t idetim;
1357 bus_size_t cmdsize, ctlsize;
1358
1359 if (pciide_chipen(sc, pa) == 0)
1360 return;
1361
1362 printf("%s: bus-master DMA support present",
1363 sc->sc_wdcdev.sc_dev.dv_xname);
1364 pciide_mapreg_dma(sc, pa);
1365 printf("\n");
1366 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1367 WDC_CAPABILITY_MODE;
1368 if (sc->sc_dma_ok) {
1369 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1370 sc->sc_wdcdev.irqack = pciide_irqack;
1371 switch(sc->sc_pp->ide_product) {
1372 case PCI_PRODUCT_INTEL_82371AB_IDE:
1373 case PCI_PRODUCT_INTEL_82801AA_IDE:
1374 case PCI_PRODUCT_INTEL_82801AB_IDE:
1375 case PCI_PRODUCT_INTEL_82801BA_IDE:
1376 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1377 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1378 }
1379 }
1380 sc->sc_wdcdev.PIO_cap = 4;
1381 sc->sc_wdcdev.DMA_cap = 2;
1382 switch(sc->sc_pp->ide_product) {
1383 case PCI_PRODUCT_INTEL_82801AA_IDE:
1384 sc->sc_wdcdev.UDMA_cap = 4;
1385 break;
1386 case PCI_PRODUCT_INTEL_82801BA_IDE:
1387 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1388 sc->sc_wdcdev.UDMA_cap = 5;
1389 break;
1390 default:
1391 sc->sc_wdcdev.UDMA_cap = 2;
1392 }
1393 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1394 sc->sc_wdcdev.set_modes = piix_setup_channel;
1395 else
1396 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1397 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1398 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1399
1400 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1401 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1402 DEBUG_PROBE);
1403 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1404 WDCDEBUG_PRINT((", sidetim=0x%x",
1405 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1406 DEBUG_PROBE);
1407 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1408 WDCDEBUG_PRINT((", udamreg 0x%x",
1409 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1410 DEBUG_PROBE);
1411 }
1412 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1413 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1414 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1415 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1416 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1417 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1418 DEBUG_PROBE);
1419 }
1420
1421 }
1422 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1423
1424 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1425 cp = &sc->pciide_channels[channel];
1426 /* PIIX is compat-only */
1427 if (pciide_chansetup(sc, channel, 0) == 0)
1428 continue;
1429 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1430 if ((PIIX_IDETIM_READ(idetim, channel) &
1431 PIIX_IDETIM_IDE) == 0) {
1432 printf("%s: %s channel ignored (disabled)\n",
1433 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1434 continue;
1435 }
1436 /* PIIX are compat-only pciide devices */
1437 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1438 if (cp->hw_ok == 0)
1439 continue;
1440 if (pciide_chan_candisable(cp)) {
1441 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1442 channel);
1443 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1444 idetim);
1445 }
1446 pciide_map_compat_intr(pa, cp, channel, 0);
1447 if (cp->hw_ok == 0)
1448 continue;
1449 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1450 }
1451
1452 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1453 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1454 DEBUG_PROBE);
1455 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1456 WDCDEBUG_PRINT((", sidetim=0x%x",
1457 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1458 DEBUG_PROBE);
1459 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1460 WDCDEBUG_PRINT((", udamreg 0x%x",
1461 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1462 DEBUG_PROBE);
1463 }
1464 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1465 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1466 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1467 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1468 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1469 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1470 DEBUG_PROBE);
1471 }
1472 }
1473 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1474 }
1475
1476 void
1477 piix_setup_channel(chp)
1478 struct channel_softc *chp;
1479 {
1480 u_int8_t mode[2], drive;
1481 u_int32_t oidetim, idetim, idedma_ctl;
1482 struct pciide_channel *cp = (struct pciide_channel*)chp;
1483 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1484 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1485
1486 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1487 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1488 idedma_ctl = 0;
1489
1490 /* set up new idetim: Enable IDE registers decode */
1491 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1492 chp->channel);
1493
1494 /* setup DMA */
1495 pciide_channel_dma_setup(cp);
1496
	/*
	 * Here we have to juggle the drive modes: the PIIX can't use
	 * different timings for the master and slave drives.
	 * We need to find the best combination.
	 */
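
	/*
	 * Summary of the policy implemented below: if both drives do DMA,
	 * use the lower of the two DMA modes; if only one does, keep its DMA
	 * mode and drop the other drive to PIO mode 0 when the timings don't
	 * match; otherwise use the lower PIO mode, or PIO mode 0 if either
	 * drive is below PIO mode 2.
	 */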
1502
	/* If both drives support DMA, take the lower mode */
1504 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1505 (drvp[1].drive_flags & DRIVE_DMA)) {
1506 mode[0] = mode[1] =
1507 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1508 drvp[0].DMA_mode = mode[0];
1509 drvp[1].DMA_mode = mode[1];
1510 goto ok;
1511 }
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if its mode is not compatible.
	 */
1516 if (drvp[0].drive_flags & DRIVE_DMA) {
1517 mode[0] = drvp[0].DMA_mode;
1518 mode[1] = drvp[1].PIO_mode;
1519 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1520 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1521 mode[1] = drvp[1].PIO_mode = 0;
1522 goto ok;
1523 }
1524 if (drvp[1].drive_flags & DRIVE_DMA) {
1525 mode[1] = drvp[1].DMA_mode;
1526 mode[0] = drvp[0].PIO_mode;
1527 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1528 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1529 mode[0] = drvp[0].PIO_mode = 0;
1530 goto ok;
1531 }
	/*
	 * If neither drive uses DMA, take the lower PIO mode, unless
	 * one of them is below PIO mode 2.
	 */
1536 if (drvp[0].PIO_mode < 2) {
1537 mode[0] = drvp[0].PIO_mode = 0;
1538 mode[1] = drvp[1].PIO_mode;
1539 } else if (drvp[1].PIO_mode < 2) {
1540 mode[1] = drvp[1].PIO_mode = 0;
1541 mode[0] = drvp[0].PIO_mode;
1542 } else {
1543 mode[0] = mode[1] =
1544 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1545 drvp[0].PIO_mode = mode[0];
1546 drvp[1].PIO_mode = mode[1];
1547 }
ok:	/* The modes are set up */
1549 for (drive = 0; drive < 2; drive++) {
1550 if (drvp[drive].drive_flags & DRIVE_DMA) {
1551 idetim |= piix_setup_idetim_timings(
1552 mode[drive], 1, chp->channel);
1553 goto end;
1554 }
1555 }
	/* If we get here, neither drive uses DMA */
1557 if (mode[0] >= 2)
1558 idetim |= piix_setup_idetim_timings(
1559 mode[0], 0, chp->channel);
1560 else
1561 idetim |= piix_setup_idetim_timings(
1562 mode[1], 0, chp->channel);
1563 end: /*
1564 * timing mode is now set up in the controller. Enable
1565 * it per-drive
1566 */
1567 for (drive = 0; drive < 2; drive++) {
1568 /* If no drive, skip */
1569 if ((drvp[drive].drive_flags & DRIVE) == 0)
1570 continue;
1571 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1572 if (drvp[drive].drive_flags & DRIVE_DMA)
1573 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1574 }
1575 if (idedma_ctl != 0) {
1576 /* Add software bits in status register */
1577 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1578 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1579 idedma_ctl);
1580 }
1581 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1582 pciide_print_modes(cp);
1583 }
1584
1585 void
1586 piix3_4_setup_channel(chp)
1587 struct channel_softc *chp;
1588 {
1589 struct ata_drive_datas *drvp;
1590 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1591 struct pciide_channel *cp = (struct pciide_channel*)chp;
1592 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1593 int drive;
1594 int channel = chp->channel;
1595
1596 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1597 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1598 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1599 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1600 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1601 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1602 PIIX_SIDETIM_RTC_MASK(channel));
1603
1604 idedma_ctl = 0;
1605 /* If channel disabled, no need to go further */
1606 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1607 return;
1608 /* set up new idetim: Enable IDE registers decode */
1609 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1610
1611 /* setup DMA if needed */
1612 pciide_channel_dma_setup(cp);
1613
1614 for (drive = 0; drive < 2; drive++) {
1615 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1616 PIIX_UDMATIM_SET(0x3, channel, drive));
1617 drvp = &chp->ch_drive[drive];
1618 /* If no drive, skip */
1619 if ((drvp->drive_flags & DRIVE) == 0)
1620 continue;
1621 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1622 (drvp->drive_flags & DRIVE_UDMA) == 0))
1623 goto pio;
1624
1625 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1626 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1627 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1628 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1629 ideconf |= PIIX_CONFIG_PINGPONG;
1630 }
1631 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1632 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1633 /* setup Ultra/100 */
1634 if (drvp->UDMA_mode > 2 &&
1635 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1636 drvp->UDMA_mode = 2;
1637 if (drvp->UDMA_mode > 4) {
1638 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1639 } else {
1640 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1641 if (drvp->UDMA_mode > 2) {
1642 ideconf |= PIIX_CONFIG_UDMA66(channel,
1643 drive);
1644 } else {
1645 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1646 drive);
1647 }
1648 }
1649 }
1650 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1651 /* setup Ultra/66 */
1652 if (drvp->UDMA_mode > 2 &&
1653 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1654 drvp->UDMA_mode = 2;
1655 if (drvp->UDMA_mode > 2)
1656 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1657 else
1658 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1659 }
1660 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1661 (drvp->drive_flags & DRIVE_UDMA)) {
1662 /* use Ultra/DMA */
1663 drvp->drive_flags &= ~DRIVE_DMA;
1664 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
1665 udmareg |= PIIX_UDMATIM_SET(
1666 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1667 } else {
1668 /* use Multiword DMA */
1669 drvp->drive_flags &= ~DRIVE_UDMA;
1670 if (drive == 0) {
1671 idetim |= piix_setup_idetim_timings(
1672 drvp->DMA_mode, 1, channel);
1673 } else {
1674 sidetim |= piix_setup_sidetim_timings(
1675 drvp->DMA_mode, 1, channel);
				idetim = PIIX_IDETIM_SET(idetim,
1677 PIIX_IDETIM_SITRE, channel);
1678 }
1679 }
1680 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1681
1682 pio: /* use PIO mode */
1683 idetim |= piix_setup_idetim_drvs(drvp);
1684 if (drive == 0) {
1685 idetim |= piix_setup_idetim_timings(
1686 drvp->PIO_mode, 0, channel);
1687 } else {
1688 sidetim |= piix_setup_sidetim_timings(
1689 drvp->PIO_mode, 0, channel);
			idetim = PIIX_IDETIM_SET(idetim,
1691 PIIX_IDETIM_SITRE, channel);
1692 }
1693 }
1694 if (idedma_ctl != 0) {
1695 /* Add software bits in status register */
1696 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1697 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1698 idedma_ctl);
1699 }
1700 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1701 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1702 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1703 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1704 pciide_print_modes(cp);
1705 }
1706
1707
1708 /* setup ISP and RTC fields, based on mode */
1709 static u_int32_t
1710 piix_setup_idetim_timings(mode, dma, channel)
1711 u_int8_t mode;
1712 u_int8_t dma;
1713 u_int8_t channel;
1714 {
1715
1716 if (dma)
1717 return PIIX_IDETIM_SET(0,
1718 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1719 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1720 channel);
1721 else
1722 return PIIX_IDETIM_SET(0,
1723 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1724 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1725 channel);
1726 }
1727
1728 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1729 static u_int32_t
1730 piix_setup_idetim_drvs(drvp)
1731 struct ata_drive_datas *drvp;
1732 {
1733 u_int32_t ret = 0;
1734 struct channel_softc *chp = drvp->chnl_softc;
1735 u_int8_t channel = chp->channel;
1736 u_int8_t drive = drvp->drive;
1737
	/*
	 * If the drive is using UDMA, the timing setup is independent,
	 * so just check DMA and PIO here.
	 */
1742 if (drvp->drive_flags & DRIVE_DMA) {
1743 /* if mode = DMA mode 0, use compatible timings */
1744 if ((drvp->drive_flags & DRIVE_DMA) &&
1745 drvp->DMA_mode == 0) {
1746 drvp->PIO_mode = 0;
1747 return ret;
1748 }
1749 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
		/*
		 * If PIO and DMA timings are the same, use fast timings for
		 * PIO too; otherwise use compat timings.
		 */
1754 if ((piix_isp_pio[drvp->PIO_mode] !=
1755 piix_isp_dma[drvp->DMA_mode]) ||
1756 (piix_rtc_pio[drvp->PIO_mode] !=
1757 piix_rtc_dma[drvp->DMA_mode]))
1758 drvp->PIO_mode = 0;
1759 /* if PIO mode <= 2, use compat timings for PIO */
1760 if (drvp->PIO_mode <= 2) {
1761 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1762 channel);
1763 return ret;
1764 }
1765 }
1766
1767 /*
1768 * Now setup PIO modes. If mode < 2, use compat timings.
1769 * Else enable fast timings. Enable IORDY and prefetch/post
1770 * if PIO mode >= 3.
1771 */
1772
1773 if (drvp->PIO_mode < 2)
1774 return ret;
1775
1776 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1777 if (drvp->PIO_mode >= 3) {
1778 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1779 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1780 }
1781 return ret;
1782 }
1783
1784 /* setup values in SIDETIM registers, based on mode */
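/*
 * SIDETIM holds separate ISP/RTC values for the slave drive; it only takes
 * effect when SITRE is set in IDETIM for the channel.
 */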
1785 static u_int32_t
1786 piix_setup_sidetim_timings(mode, dma, channel)
1787 u_int8_t mode;
1788 u_int8_t dma;
1789 u_int8_t channel;
1790 {
1791 if (dma)
1792 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1793 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1794 else
1795 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1796 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1797 }
1798
1799 void
1800 amd7x6_chip_map(sc, pa)
1801 struct pciide_softc *sc;
1802 struct pci_attach_args *pa;
1803 {
1804 struct pciide_channel *cp;
1805 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1806 int channel;
1807 pcireg_t chanenable;
1808 bus_size_t cmdsize, ctlsize;
1809
1810 if (pciide_chipen(sc, pa) == 0)
1811 return;
1812 printf("%s: bus-master DMA support present",
1813 sc->sc_wdcdev.sc_dev.dv_xname);
1814 pciide_mapreg_dma(sc, pa);
1815 printf("\n");
1816 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1817 WDC_CAPABILITY_MODE;
1818 if (sc->sc_dma_ok) {
1819 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1820 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1821 sc->sc_wdcdev.irqack = pciide_irqack;
1822 }
1823 sc->sc_wdcdev.PIO_cap = 4;
1824 sc->sc_wdcdev.DMA_cap = 2;
1825
1826 if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1827 sc->sc_wdcdev.UDMA_cap = 5;
1828 else
1829 sc->sc_wdcdev.UDMA_cap = 4;
1830 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1831 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1832 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1833 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1834
1835 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1836 DEBUG_PROBE);
1837 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1838 cp = &sc->pciide_channels[channel];
1839 if (pciide_chansetup(sc, channel, interface) == 0)
1840 continue;
1841
1842 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1843 printf("%s: %s channel ignored (disabled)\n",
1844 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1845 continue;
1846 }
1847 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1848 pciide_pci_intr);
1849
1850 if (pciide_chan_candisable(cp))
1851 chanenable &= ~AMD7X6_CHAN_EN(channel);
1852 pciide_map_compat_intr(pa, cp, channel, interface);
1853 if (cp->hw_ok == 0)
1854 continue;
1855
1856 amd7x6_setup_channel(&cp->wdc_channel);
1857 }
1858 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1859 chanenable);
1860 return;
1861 }
1862
1863 void
1864 amd7x6_setup_channel(chp)
1865 struct channel_softc *chp;
1866 {
1867 u_int32_t udmatim_reg, datatim_reg;
1868 u_int8_t idedma_ctl;
1869 int mode, drive;
1870 struct ata_drive_datas *drvp;
1871 struct pciide_channel *cp = (struct pciide_channel*)chp;
1872 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1873 #ifndef PCIIDE_AMD756_ENABLEDMA
1874 int rev = PCI_REVISION(
1875 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1876 #endif
1877
1878 idedma_ctl = 0;
1879 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1880 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1881 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1882 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1883
1884 /* setup DMA if needed */
1885 pciide_channel_dma_setup(cp);
1886
1887 for (drive = 0; drive < 2; drive++) {
1888 drvp = &chp->ch_drive[drive];
1889 /* If no drive, skip */
1890 if ((drvp->drive_flags & DRIVE) == 0)
1891 continue;
1892 /* add timing values, setup DMA if needed */
1893 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1894 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1895 mode = drvp->PIO_mode;
1896 goto pio;
1897 }
1898 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1899 (drvp->drive_flags & DRIVE_UDMA)) {
1900 /* use Ultra/DMA */
1901 drvp->drive_flags &= ~DRIVE_DMA;
1902 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
1903 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
1904 AMD7X6_UDMA_TIME(chp->channel, drive,
1905 amd7x6_udma_tim[drvp->UDMA_mode]);
1906 /* can use PIO timings, MW DMA unused */
1907 mode = drvp->PIO_mode;
1908 } else {
1909 /* use Multiword DMA, but only if revision is OK */
1910 drvp->drive_flags &= ~DRIVE_UDMA;
1911 #ifndef PCIIDE_AMD756_ENABLEDMA
1912 /*
1913 			 * The chip bug is not triggered by all drives, so the
1914 			 * workaround can be disabled by defining
1915 			 * PCIIDE_AMD756_ENABLEDMA. The bug causes a hard hang
1916 			 * when it is triggered.
1917 */
1918 if (sc->sc_pp->ide_product ==
1919 PCI_PRODUCT_AMD_PBC756_IDE &&
1920 AMD756_CHIPREV_DISABLEDMA(rev)) {
1921 printf("%s:%d:%d: multi-word DMA disabled due "
1922 "to chip revision\n",
1923 sc->sc_wdcdev.sc_dev.dv_xname,
1924 chp->channel, drive);
1925 mode = drvp->PIO_mode;
1926 drvp->drive_flags &= ~DRIVE_DMA;
1927 goto pio;
1928 }
1929 #endif
1930 /* mode = min(pio, dma+2) */
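			/*
			 * A single timing entry is shared by PIO and MW DMA;
			 * MW DMA mode n uses timings comparable to PIO mode
			 * n+2 (e.g. MW DMA 2 vs PIO 4), so pick the slower
			 * of the two so the shared entry is safe for both.
			 */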
1931 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1932 mode = drvp->PIO_mode;
1933 else
1934 mode = drvp->DMA_mode + 2;
1935 }
1936 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1937
1938 pio: /* setup PIO mode */
1939 if (mode <= 2) {
1940 drvp->DMA_mode = 0;
1941 drvp->PIO_mode = 0;
1942 mode = 0;
1943 } else {
1944 drvp->PIO_mode = mode;
1945 drvp->DMA_mode = mode - 2;
1946 }
1947 datatim_reg |=
1948 AMD7X6_DATATIM_PULSE(chp->channel, drive,
1949 amd7x6_pio_set[mode]) |
1950 AMD7X6_DATATIM_RECOV(chp->channel, drive,
1951 amd7x6_pio_rec[mode]);
1952 }
1953 if (idedma_ctl != 0) {
1954 /* Add software bits in status register */
1955 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1956 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1957 idedma_ctl);
1958 }
1959 pciide_print_modes(cp);
1960 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
1961 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
1962 }
1963
1964 void
1965 apollo_chip_map(sc, pa)
1966 struct pciide_softc *sc;
1967 struct pci_attach_args *pa;
1968 {
1969 struct pciide_channel *cp;
1970 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1971 int channel;
1972 u_int32_t ideconf;
1973 bus_size_t cmdsize, ctlsize;
1974 pcitag_t pcib_tag;
1975 pcireg_t pcib_id, pcib_class;
1976
1977 if (pciide_chipen(sc, pa) == 0)
1978 return;
1979 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
1980 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
1981 /* and read ID and rev of the ISA bridge */
1982 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
1983 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
1984 printf(": VIA Technologies ");
1985 switch (PCI_PRODUCT(pcib_id)) {
1986 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
1987 printf("VT82C586 (Apollo VP) ");
1988 		if (PCI_REVISION(pcib_class) >= 0x02) {
1989 printf("ATA33 controller\n");
1990 sc->sc_wdcdev.UDMA_cap = 2;
1991 } else {
1992 printf("controller\n");
1993 sc->sc_wdcdev.UDMA_cap = 0;
1994 }
1995 break;
1996 case PCI_PRODUCT_VIATECH_VT82C596A:
1997 printf("VT82C596A (Apollo Pro) ");
1998 if (PCI_REVISION(pcib_class) >= 0x12) {
1999 printf("ATA66 controller\n");
2000 sc->sc_wdcdev.UDMA_cap = 4;
2001 } else {
2002 printf("ATA33 controller\n");
2003 sc->sc_wdcdev.UDMA_cap = 2;
2004 }
2005 break;
2006 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2007 printf("VT82C686A (Apollo KX133) ");
2008 if (PCI_REVISION(pcib_class) >= 0x40) {
2009 printf("ATA100 controller\n");
2010 sc->sc_wdcdev.UDMA_cap = 5;
2011 } else {
2012 printf("ATA66 controller\n");
2013 sc->sc_wdcdev.UDMA_cap = 4;
2014 }
2015 break;
2016 default:
2017 printf("unknown ATA controller\n");
2018 sc->sc_wdcdev.UDMA_cap = 0;
2019 }
2020
2021 printf("%s: bus-master DMA support present",
2022 sc->sc_wdcdev.sc_dev.dv_xname);
2023 pciide_mapreg_dma(sc, pa);
2024 printf("\n");
2025 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2026 WDC_CAPABILITY_MODE;
2027 if (sc->sc_dma_ok) {
2028 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2029 sc->sc_wdcdev.irqack = pciide_irqack;
2030 if (sc->sc_wdcdev.UDMA_cap > 0)
2031 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2032 }
2033 sc->sc_wdcdev.PIO_cap = 4;
2034 sc->sc_wdcdev.DMA_cap = 2;
2035 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2036 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2037 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2038
2039 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2040 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2041 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2042 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2043 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2044 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2045 DEBUG_PROBE);
2046
2047 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2048 cp = &sc->pciide_channels[channel];
2049 if (pciide_chansetup(sc, channel, interface) == 0)
2050 continue;
2051
2052 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2053 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2054 printf("%s: %s channel ignored (disabled)\n",
2055 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2056 continue;
2057 }
2058 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2059 pciide_pci_intr);
2060 if (cp->hw_ok == 0)
2061 continue;
2062 if (pciide_chan_candisable(cp)) {
2063 ideconf &= ~APO_IDECONF_EN(channel);
2064 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2065 ideconf);
2066 }
2067 pciide_map_compat_intr(pa, cp, channel, interface);
2068
2069 if (cp->hw_ok == 0)
2070 continue;
2071 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2072 }
2073 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2074 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2075 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2076 }
2077
2078 void
2079 apollo_setup_channel(chp)
2080 struct channel_softc *chp;
2081 {
2082 u_int32_t udmatim_reg, datatim_reg;
2083 u_int8_t idedma_ctl;
2084 int mode, drive;
2085 struct ata_drive_datas *drvp;
2086 struct pciide_channel *cp = (struct pciide_channel*)chp;
2087 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2088
2089 idedma_ctl = 0;
2090 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2091 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2092 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2093 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2094
2095 /* setup DMA if needed */
2096 pciide_channel_dma_setup(cp);
2097
2098 for (drive = 0; drive < 2; drive++) {
2099 drvp = &chp->ch_drive[drive];
2100 /* If no drive, skip */
2101 if ((drvp->drive_flags & DRIVE) == 0)
2102 continue;
2103 /* add timing values, setup DMA if needed */
2104 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2105 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2106 mode = drvp->PIO_mode;
2107 goto pio;
2108 }
2109 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2110 (drvp->drive_flags & DRIVE_UDMA)) {
2111 /* use Ultra/DMA */
2112 drvp->drive_flags &= ~DRIVE_DMA;
2113 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2114 APO_UDMA_EN_MTH(chp->channel, drive);
2115 if (sc->sc_wdcdev.UDMA_cap == 5) {
2116 /* 686b */
2117 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2118 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2119 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2120 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2121 /* 596b or 686a */
2122 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2123 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2124 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2125 } else {
2126 /* 596a or 586b */
2127 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2128 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2129 }
2130 /* can use PIO timings, MW DMA unused */
2131 mode = drvp->PIO_mode;
2132 } else {
2133 /* use Multiword DMA */
2134 drvp->drive_flags &= ~DRIVE_UDMA;
2135 /* mode = min(pio, dma+2) */
2136 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2137 mode = drvp->PIO_mode;
2138 else
2139 mode = drvp->DMA_mode + 2;
2140 }
2141 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2142
2143 pio: /* setup PIO mode */
2144 if (mode <= 2) {
2145 drvp->DMA_mode = 0;
2146 drvp->PIO_mode = 0;
2147 mode = 0;
2148 } else {
2149 drvp->PIO_mode = mode;
2150 drvp->DMA_mode = mode - 2;
2151 }
2152 datatim_reg |=
2153 APO_DATATIM_PULSE(chp->channel, drive,
2154 apollo_pio_set[mode]) |
2155 APO_DATATIM_RECOV(chp->channel, drive,
2156 apollo_pio_rec[mode]);
2157 }
2158 if (idedma_ctl != 0) {
2159 /* Add software bits in status register */
2160 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2161 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2162 idedma_ctl);
2163 }
2164 pciide_print_modes(cp);
2165 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2166 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2167 }
2168
2169 void
2170 cmd_channel_map(pa, sc, channel)
2171 struct pci_attach_args *pa;
2172 struct pciide_softc *sc;
2173 int channel;
2174 {
2175 struct pciide_channel *cp = &sc->pciide_channels[channel];
2176 bus_size_t cmdsize, ctlsize;
2177 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2178 int interface;
2179
2180 /*
2181 * The 0648/0649 can be told to identify as a RAID controller.
2182 	 * In this case, we have to fake the interface.
2183 */
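	/*
	 * The faked value marks both channels as settable, and as native-PCI
	 * when CMD_CONF_DSA1 is set, so the rest of the attach code can
	 * treat the device like a normal IDE-class function.
	 */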
2184 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2185 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2186 PCIIDE_INTERFACE_SETTABLE(1);
2187 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2188 CMD_CONF_DSA1)
2189 interface |= PCIIDE_INTERFACE_PCI(0) |
2190 PCIIDE_INTERFACE_PCI(1);
2191 } else {
2192 interface = PCI_INTERFACE(pa->pa_class);
2193 }
2194
2195 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2196 cp->name = PCIIDE_CHANNEL_NAME(channel);
2197 cp->wdc_channel.channel = channel;
2198 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2199
2200 if (channel > 0) {
2201 cp->wdc_channel.ch_queue =
2202 sc->pciide_channels[0].wdc_channel.ch_queue;
2203 } else {
2204 cp->wdc_channel.ch_queue =
2205 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2206 }
2207 if (cp->wdc_channel.ch_queue == NULL) {
2208 printf("%s %s channel: "
2209 		    "can't allocate memory for command queue\n",
2210 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2211 return;
2212 }
2213
2214 printf("%s: %s channel %s to %s mode\n",
2215 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2216 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2217 "configured" : "wired",
2218 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2219 "native-PCI" : "compatibility");
2220
2221 /*
2222 * with a CMD PCI64x, if we get here, the first channel is enabled:
2223 * there's no way to disable the first channel without disabling
2224 * the whole device
2225 */
2226 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2227 printf("%s: %s channel ignored (disabled)\n",
2228 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2229 return;
2230 }
2231
2232 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2233 if (cp->hw_ok == 0)
2234 return;
2235 if (channel == 1) {
2236 if (pciide_chan_candisable(cp)) {
2237 ctrl &= ~CMD_CTRL_2PORT;
2238 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2239 CMD_CTRL, ctrl);
2240 }
2241 }
2242 pciide_map_compat_intr(pa, cp, channel, interface);
2243 }
2244
2245 int
2246 cmd_pci_intr(arg)
2247 void *arg;
2248 {
2249 struct pciide_softc *sc = arg;
2250 struct pciide_channel *cp;
2251 struct channel_softc *wdc_cp;
2252 int i, rv, crv;
2253 u_int32_t priirq, secirq;
2254
2255 rv = 0;
2256 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2257 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2258 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2259 cp = &sc->pciide_channels[i];
2260 wdc_cp = &cp->wdc_channel;
2261 /* If a compat channel skip. */
2262 if (cp->compat)
2263 continue;
2264 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2265 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2266 crv = wdcintr(wdc_cp);
2267 if (crv == 0)
2268 printf("%s:%d: bogus intr\n",
2269 sc->sc_wdcdev.sc_dev.dv_xname, i);
2270 else
2271 rv = 1;
2272 }
2273 }
2274 return rv;
2275 }
2276
2277 void
2278 cmd_chip_map(sc, pa)
2279 struct pciide_softc *sc;
2280 struct pci_attach_args *pa;
2281 {
2282 int channel;
2283
2284 /*
2285 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2286 	 * and base address registers can be disabled at the
2287 * hardware level. In this case, the device is wired
2288 * in compat mode and its first channel is always enabled,
2289 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2290 * In fact, it seems that the first channel of the CMD PCI0640
2291 * can't be disabled.
2292 */
2293
2294 #ifdef PCIIDE_CMD064x_DISABLE
2295 if (pciide_chipen(sc, pa) == 0)
2296 return;
2297 #endif
2298
2299 printf("%s: hardware does not support DMA\n",
2300 sc->sc_wdcdev.sc_dev.dv_xname);
2301 sc->sc_dma_ok = 0;
2302
2303 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2304 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2305 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2306
2307 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2308 cmd_channel_map(pa, sc, channel);
2309 }
2310 }
2311
2312 void
2313 cmd0643_9_chip_map(sc, pa)
2314 struct pciide_softc *sc;
2315 struct pci_attach_args *pa;
2316 {
2317 struct pciide_channel *cp;
2318 int channel;
2319 int rev = PCI_REVISION(
2320 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2321
2322 /*
2323 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2324 	 * and base address registers can be disabled at the
2325 * hardware level. In this case, the device is wired
2326 * in compat mode and its first channel is always enabled,
2327 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2328 * In fact, it seems that the first channel of the CMD PCI0640
2329 * can't be disabled.
2330 */
2331
2332 #ifdef PCIIDE_CMD064x_DISABLE
2333 if (pciide_chipen(sc, pa) == 0)
2334 return;
2335 #endif
2336 printf("%s: bus-master DMA support present",
2337 sc->sc_wdcdev.sc_dev.dv_xname);
2338 pciide_mapreg_dma(sc, pa);
2339 printf("\n");
2340 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2341 WDC_CAPABILITY_MODE;
2342 if (sc->sc_dma_ok) {
2343 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2344 switch (sc->sc_pp->ide_product) {
2345 case PCI_PRODUCT_CMDTECH_649:
2346 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2347 sc->sc_wdcdev.UDMA_cap = 5;
2348 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2349 break;
2350 case PCI_PRODUCT_CMDTECH_648:
2351 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2352 sc->sc_wdcdev.UDMA_cap = 4;
2353 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2354 break;
2355 case PCI_PRODUCT_CMDTECH_646:
2356 if (rev >= CMD0646U2_REV) {
2357 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2358 sc->sc_wdcdev.UDMA_cap = 2;
2359 } else if (rev >= CMD0646U_REV) {
2360 /*
2361 * Linux's driver claims that the 646U is broken
2362 * with UDMA. Only enable it if we know what we're
2363 * doing
2364 */
2365 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2366 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2367 sc->sc_wdcdev.UDMA_cap = 2;
2368 #endif
2369 			/* explicitly disable UDMA */
2370 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2371 CMD_UDMATIM(0), 0);
2372 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2373 CMD_UDMATIM(1), 0);
2374 }
2375 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2376 break;
2377 default:
2378 sc->sc_wdcdev.irqack = pciide_irqack;
2379 }
2380 }
2381
2382 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2383 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2384 sc->sc_wdcdev.PIO_cap = 4;
2385 sc->sc_wdcdev.DMA_cap = 2;
2386 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2387
2388 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2389 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2390 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2391 DEBUG_PROBE);
2392
2393 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2394 cp = &sc->pciide_channels[channel];
2395 cmd_channel_map(pa, sc, channel);
2396 if (cp->hw_ok == 0)
2397 continue;
2398 cmd0643_9_setup_channel(&cp->wdc_channel);
2399 }
2400 /*
2401 * note - this also makes sure we clear the irq disable and reset
2402 * bits
2403 */
2404 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2405 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2406 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2407 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2408 DEBUG_PROBE);
2409 }
2410
2411 void
2412 cmd0643_9_setup_channel(chp)
2413 struct channel_softc *chp;
2414 {
2415 struct ata_drive_datas *drvp;
2416 u_int8_t tim;
2417 u_int32_t idedma_ctl, udma_reg;
2418 int drive;
2419 struct pciide_channel *cp = (struct pciide_channel*)chp;
2420 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2421
2422 idedma_ctl = 0;
2423 /* setup DMA if needed */
2424 pciide_channel_dma_setup(cp);
2425
2426 for (drive = 0; drive < 2; drive++) {
2427 drvp = &chp->ch_drive[drive];
2428 /* If no drive, skip */
2429 if ((drvp->drive_flags & DRIVE) == 0)
2430 continue;
2431 /* add timing values, setup DMA if needed */
2432 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2433 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2434 if (drvp->drive_flags & DRIVE_UDMA) {
2435 /* UltraDMA on a 646U2, 0648 or 0649 */
2436 drvp->drive_flags &= ~DRIVE_DMA;
2437 udma_reg = pciide_pci_read(sc->sc_pc,
2438 sc->sc_tag, CMD_UDMATIM(chp->channel));
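				/*
				 * Drop back to UDMA2 (ATA/33) when BICSR
				 * does not report an 80-conductor cable on
				 * this channel.
				 */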
2439 if (drvp->UDMA_mode > 2 &&
2440 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2441 CMD_BICSR) &
2442 CMD_BICSR_80(chp->channel)) == 0)
2443 drvp->UDMA_mode = 2;
2444 if (drvp->UDMA_mode > 2)
2445 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2446 else if (sc->sc_wdcdev.UDMA_cap > 2)
2447 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2448 udma_reg |= CMD_UDMATIM_UDMA(drive);
2449 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2450 CMD_UDMATIM_TIM_OFF(drive));
2451 udma_reg |=
2452 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2453 CMD_UDMATIM_TIM_OFF(drive));
2454 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2455 CMD_UDMATIM(chp->channel), udma_reg);
2456 } else {
2457 /*
2458 * use Multiword DMA.
2459 * Timings will be used for both PIO and DMA,
2460 			 * so adjust the DMA mode if needed.
2461 			 * If we have a 0646U2/8/9, turn off UDMA.
2462 */
2463 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2464 udma_reg = pciide_pci_read(sc->sc_pc,
2465 sc->sc_tag,
2466 CMD_UDMATIM(chp->channel));
2467 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2468 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2469 CMD_UDMATIM(chp->channel),
2470 udma_reg);
2471 }
2472 if (drvp->PIO_mode >= 3 &&
2473 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2474 drvp->DMA_mode = drvp->PIO_mode - 2;
2475 }
2476 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2477 }
2478 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2479 }
2480 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2481 CMD_DATA_TIM(chp->channel, drive), tim);
2482 }
2483 if (idedma_ctl != 0) {
2484 /* Add software bits in status register */
2485 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2486 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2487 idedma_ctl);
2488 }
2489 pciide_print_modes(cp);
2490 }
2491
2492 void
2493 cmd646_9_irqack(chp)
2494 struct channel_softc *chp;
2495 {
2496 u_int32_t priirq, secirq;
2497 struct pciide_channel *cp = (struct pciide_channel*)chp;
2498 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2499
2500 if (chp->channel == 0) {
2501 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2502 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2503 } else {
2504 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2505 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2506 }
2507 pciide_irqack(chp);
2508 }
2509
2510 void
2511 cy693_chip_map(sc, pa)
2512 struct pciide_softc *sc;
2513 struct pci_attach_args *pa;
2514 {
2515 struct pciide_channel *cp;
2516 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2517 bus_size_t cmdsize, ctlsize;
2518
2519 if (pciide_chipen(sc, pa) == 0)
2520 return;
2521 /*
2522 * this chip has 2 PCI IDE functions, one for primary and one for
2523 * secondary. So we need to call pciide_mapregs_compat() with
2524 * the real channel
2525 */
2526 if (pa->pa_function == 1) {
2527 sc->sc_cy_compatchan = 0;
2528 } else if (pa->pa_function == 2) {
2529 sc->sc_cy_compatchan = 1;
2530 } else {
2531 printf("%s: unexpected PCI function %d\n",
2532 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2533 return;
2534 }
2535 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2536 printf("%s: bus-master DMA support present",
2537 sc->sc_wdcdev.sc_dev.dv_xname);
2538 pciide_mapreg_dma(sc, pa);
2539 } else {
2540 printf("%s: hardware does not support DMA",
2541 sc->sc_wdcdev.sc_dev.dv_xname);
2542 sc->sc_dma_ok = 0;
2543 }
2544 printf("\n");
2545
2546 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2547 if (sc->sc_cy_handle == NULL) {
2548 printf("%s: unable to map hyperCache control registers\n",
2549 sc->sc_wdcdev.sc_dev.dv_xname);
2550 sc->sc_dma_ok = 0;
2551 }
2552
2553 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2554 WDC_CAPABILITY_MODE;
2555 if (sc->sc_dma_ok) {
2556 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2557 sc->sc_wdcdev.irqack = pciide_irqack;
2558 }
2559 sc->sc_wdcdev.PIO_cap = 4;
2560 sc->sc_wdcdev.DMA_cap = 2;
2561 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2562
2563 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2564 sc->sc_wdcdev.nchannels = 1;
2565
2566 /* Only one channel for this chip; if we are here it's enabled */
2567 cp = &sc->pciide_channels[0];
2568 sc->wdc_chanarray[0] = &cp->wdc_channel;
2569 cp->name = PCIIDE_CHANNEL_NAME(0);
2570 cp->wdc_channel.channel = 0;
2571 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2572 cp->wdc_channel.ch_queue =
2573 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2574 if (cp->wdc_channel.ch_queue == NULL) {
2575 printf("%s primary channel: "
2576 		    "can't allocate memory for command queue\n",
2577 sc->sc_wdcdev.sc_dev.dv_xname);
2578 return;
2579 }
2580 printf("%s: primary channel %s to ",
2581 sc->sc_wdcdev.sc_dev.dv_xname,
2582 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2583 "configured" : "wired");
2584 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2585 printf("native-PCI");
2586 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2587 pciide_pci_intr);
2588 } else {
2589 printf("compatibility");
2590 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2591 &cmdsize, &ctlsize);
2592 }
2593 printf(" mode\n");
2594 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2595 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2596 wdcattach(&cp->wdc_channel);
2597 if (pciide_chan_candisable(cp)) {
2598 pci_conf_write(sc->sc_pc, sc->sc_tag,
2599 PCI_COMMAND_STATUS_REG, 0);
2600 }
2601 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2602 if (cp->hw_ok == 0)
2603 return;
2604 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2605 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2606 cy693_setup_channel(&cp->wdc_channel);
2607 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2608 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2609 }
2610
2611 void
2612 cy693_setup_channel(chp)
2613 struct channel_softc *chp;
2614 {
2615 struct ata_drive_datas *drvp;
2616 int drive;
2617 u_int32_t cy_cmd_ctrl;
2618 u_int32_t idedma_ctl;
2619 struct pciide_channel *cp = (struct pciide_channel*)chp;
2620 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2621 int dma_mode = -1;
2622
2623 cy_cmd_ctrl = idedma_ctl = 0;
2624
2625 /* setup DMA if needed */
2626 pciide_channel_dma_setup(cp);
2627
2628 for (drive = 0; drive < 2; drive++) {
2629 drvp = &chp->ch_drive[drive];
2630 /* If no drive, skip */
2631 if ((drvp->drive_flags & DRIVE) == 0)
2632 continue;
2633 /* add timing values, setup DMA if needed */
2634 if (drvp->drive_flags & DRIVE_DMA) {
2635 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2636 /* use Multiword DMA */
2637 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2638 dma_mode = drvp->DMA_mode;
2639 }
2640 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2641 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2642 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2643 CY_CMD_CTRL_IOW_REC_OFF(drive));
2644 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2645 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2646 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2647 CY_CMD_CTRL_IOR_REC_OFF(drive));
2648 }
2649 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2650 chp->ch_drive[0].DMA_mode = dma_mode;
2651 chp->ch_drive[1].DMA_mode = dma_mode;
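	/*
	 * There is a single DMA timing setting per channel, so both drives
	 * are forced to the slowest Multiword DMA mode selected above.
	 */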
2652
2653 if (dma_mode == -1)
2654 dma_mode = 0;
2655
2656 if (sc->sc_cy_handle != NULL) {
2657 /* Note: `multiple' is implied. */
2658 cy82c693_write(sc->sc_cy_handle,
2659 (sc->sc_cy_compatchan == 0) ?
2660 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2661 }
2662
2663 pciide_print_modes(cp);
2664
2665 if (idedma_ctl != 0) {
2666 /* Add software bits in status register */
2667 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2668 IDEDMA_CTL, idedma_ctl);
2669 }
2670 }
2671
2672 void
2673 sis_chip_map(sc, pa)
2674 struct pciide_softc *sc;
2675 struct pci_attach_args *pa;
2676 {
2677 struct pciide_channel *cp;
2678 int channel;
2679 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2680 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2681 pcireg_t rev = PCI_REVISION(pa->pa_class);
2682 bus_size_t cmdsize, ctlsize;
2683 pcitag_t pchb_tag;
2684 pcireg_t pchb_id, pchb_class;
2685
2686 if (pciide_chipen(sc, pa) == 0)
2687 return;
2688 printf("%s: bus-master DMA support present",
2689 sc->sc_wdcdev.sc_dev.dv_xname);
2690 pciide_mapreg_dma(sc, pa);
2691 printf("\n");
2692
2693 /* get a PCI tag for the host bridge (function 0 of the same device) */
2694 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2695 /* and read ID and rev of the ISA bridge */
2696 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2697 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2698
2699 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2700 WDC_CAPABILITY_MODE;
2701 if (sc->sc_dma_ok) {
2702 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2703 sc->sc_wdcdev.irqack = pciide_irqack;
2704 /*
2705 		 * controllers associated with a rev 0x2 530 Host to PCI Bridge
2706 * have problems with UDMA (info provided by Christos)
2707 */
2708 if (rev >= 0xd0 &&
2709 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2710 PCI_REVISION(pchb_class) >= 0x03))
2711 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2712 }
2713
2714 sc->sc_wdcdev.PIO_cap = 4;
2715 sc->sc_wdcdev.DMA_cap = 2;
2716 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2717 sc->sc_wdcdev.UDMA_cap = 2;
2718 sc->sc_wdcdev.set_modes = sis_setup_channel;
2719
2720 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2721 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2722
2723 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2724 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2725 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2726
2727 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2728 cp = &sc->pciide_channels[channel];
2729 if (pciide_chansetup(sc, channel, interface) == 0)
2730 continue;
2731 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2732 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2733 printf("%s: %s channel ignored (disabled)\n",
2734 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2735 continue;
2736 }
2737 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2738 pciide_pci_intr);
2739 if (cp->hw_ok == 0)
2740 continue;
2741 if (pciide_chan_candisable(cp)) {
2742 if (channel == 0)
2743 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2744 else
2745 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2746 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2747 sis_ctr0);
2748 }
2749 pciide_map_compat_intr(pa, cp, channel, interface);
2750 if (cp->hw_ok == 0)
2751 continue;
2752 sis_setup_channel(&cp->wdc_channel);
2753 }
2754 }
2755
2756 void
2757 sis_setup_channel(chp)
2758 struct channel_softc *chp;
2759 {
2760 struct ata_drive_datas *drvp;
2761 int drive;
2762 u_int32_t sis_tim;
2763 u_int32_t idedma_ctl;
2764 struct pciide_channel *cp = (struct pciide_channel*)chp;
2765 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2766
2767 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2768 "channel %d 0x%x\n", chp->channel,
2769 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2770 DEBUG_PROBE);
2771 sis_tim = 0;
2772 idedma_ctl = 0;
2773 /* setup DMA if needed */
2774 pciide_channel_dma_setup(cp);
2775
2776 for (drive = 0; drive < 2; drive++) {
2777 drvp = &chp->ch_drive[drive];
2778 /* If no drive, skip */
2779 if ((drvp->drive_flags & DRIVE) == 0)
2780 continue;
2781 /* add timing values, setup DMA if needed */
2782 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2783 (drvp->drive_flags & DRIVE_UDMA) == 0)
2784 goto pio;
2785
2786 if (drvp->drive_flags & DRIVE_UDMA) {
2787 /* use Ultra/DMA */
2788 drvp->drive_flags &= ~DRIVE_DMA;
2789 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2790 SIS_TIM_UDMA_TIME_OFF(drive);
2791 sis_tim |= SIS_TIM_UDMA_EN(drive);
2792 } else {
2793 /*
2794 * use Multiword DMA
2795 * Timings will be used for both PIO and DMA,
2796 * so adjust DMA mode if needed
2797 */
2798 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2799 drvp->PIO_mode = drvp->DMA_mode + 2;
2800 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2801 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2802 drvp->PIO_mode - 2 : 0;
2803 if (drvp->DMA_mode == 0)
2804 drvp->PIO_mode = 0;
2805 }
2806 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2807 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2808 SIS_TIM_ACT_OFF(drive);
2809 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2810 SIS_TIM_REC_OFF(drive);
2811 }
2812 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2813 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2814 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2815 if (idedma_ctl != 0) {
2816 /* Add software bits in status register */
2817 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2818 IDEDMA_CTL, idedma_ctl);
2819 }
2820 pciide_print_modes(cp);
2821 }
2822
2823 void
2824 acer_chip_map(sc, pa)
2825 struct pciide_softc *sc;
2826 struct pci_attach_args *pa;
2827 {
2828 struct pciide_channel *cp;
2829 int channel;
2830 pcireg_t cr, interface;
2831 bus_size_t cmdsize, ctlsize;
2832 pcireg_t rev = PCI_REVISION(pa->pa_class);
2833
2834 if (pciide_chipen(sc, pa) == 0)
2835 return;
2836 printf("%s: bus-master DMA support present",
2837 sc->sc_wdcdev.sc_dev.dv_xname);
2838 pciide_mapreg_dma(sc, pa);
2839 printf("\n");
2840 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2841 WDC_CAPABILITY_MODE;
2842 if (sc->sc_dma_ok) {
2843 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2844 if (rev >= 0x20)
2845 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2846 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2847 sc->sc_wdcdev.irqack = pciide_irqack;
2848 }
2849
2850 sc->sc_wdcdev.PIO_cap = 4;
2851 sc->sc_wdcdev.DMA_cap = 2;
2852 sc->sc_wdcdev.UDMA_cap = 2;
2853 sc->sc_wdcdev.set_modes = acer_setup_channel;
2854 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2855 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2856
2857 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2858 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2859 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2860
2861 /* Enable "microsoft register bits" R/W. */
2862 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2863 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2864 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2865 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2866 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2867 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2868 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2869 ~ACER_CHANSTATUSREGS_RO);
2870 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2871 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2872 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2873 /* Don't use cr, re-read the real register content instead */
2874 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2875 PCI_CLASS_REG));
2876
2877 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2878 cp = &sc->pciide_channels[channel];
2879 if (pciide_chansetup(sc, channel, interface) == 0)
2880 continue;
2881 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2882 printf("%s: %s channel ignored (disabled)\n",
2883 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2884 continue;
2885 }
2886 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2887 acer_pci_intr);
2888 if (cp->hw_ok == 0)
2889 continue;
2890 if (pciide_chan_candisable(cp)) {
2891 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2892 pci_conf_write(sc->sc_pc, sc->sc_tag,
2893 PCI_CLASS_REG, cr);
2894 }
2895 pciide_map_compat_intr(pa, cp, channel, interface);
2896 acer_setup_channel(&cp->wdc_channel);
2897 }
2898 }
2899
2900 void
2901 acer_setup_channel(chp)
2902 struct channel_softc *chp;
2903 {
2904 struct ata_drive_datas *drvp;
2905 int drive;
2906 u_int32_t acer_fifo_udma;
2907 u_int32_t idedma_ctl;
2908 struct pciide_channel *cp = (struct pciide_channel*)chp;
2909 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2910
2911 idedma_ctl = 0;
2912 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2913 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2914 acer_fifo_udma), DEBUG_PROBE);
2915 /* setup DMA if needed */
2916 pciide_channel_dma_setup(cp);
2917
2918 for (drive = 0; drive < 2; drive++) {
2919 drvp = &chp->ch_drive[drive];
2920 /* If no drive, skip */
2921 if ((drvp->drive_flags & DRIVE) == 0)
2922 continue;
2923 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2924 "channel %d drive %d 0x%x\n", chp->channel, drive,
2925 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2926 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2927 /* clear FIFO/DMA mode */
2928 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2929 ACER_UDMA_EN(chp->channel, drive) |
2930 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2931
2932 /* add timing values, setup DMA if needed */
2933 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2934 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2935 acer_fifo_udma |=
2936 ACER_FTH_OPL(chp->channel, drive, 0x1);
2937 goto pio;
2938 }
2939
2940 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2941 if (drvp->drive_flags & DRIVE_UDMA) {
2942 /* use Ultra/DMA */
2943 drvp->drive_flags &= ~DRIVE_DMA;
2944 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2945 acer_fifo_udma |=
2946 ACER_UDMA_TIM(chp->channel, drive,
2947 acer_udma[drvp->UDMA_mode]);
2948 } else {
2949 /*
2950 * use Multiword DMA
2951 * Timings will be used for both PIO and DMA,
2952 * so adjust DMA mode if needed
2953 */
2954 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2955 drvp->PIO_mode = drvp->DMA_mode + 2;
2956 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2957 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2958 drvp->PIO_mode - 2 : 0;
2959 if (drvp->DMA_mode == 0)
2960 drvp->PIO_mode = 0;
2961 }
2962 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2963 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2964 ACER_IDETIM(chp->channel, drive),
2965 acer_pio[drvp->PIO_mode]);
2966 }
2967 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2968 acer_fifo_udma), DEBUG_PROBE);
2969 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2970 if (idedma_ctl != 0) {
2971 /* Add software bits in status register */
2972 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2973 IDEDMA_CTL, idedma_ctl);
2974 }
2975 pciide_print_modes(cp);
2976 }
2977
2978 int
2979 acer_pci_intr(arg)
2980 void *arg;
2981 {
2982 struct pciide_softc *sc = arg;
2983 struct pciide_channel *cp;
2984 struct channel_softc *wdc_cp;
2985 int i, rv, crv;
2986 u_int32_t chids;
2987
2988 rv = 0;
2989 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2990 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2991 cp = &sc->pciide_channels[i];
2992 wdc_cp = &cp->wdc_channel;
2993 /* If a compat channel skip. */
2994 if (cp->compat)
2995 continue;
2996 if (chids & ACER_CHIDS_INT(i)) {
2997 crv = wdcintr(wdc_cp);
2998 if (crv == 0)
2999 printf("%s:%d: bogus intr\n",
3000 sc->sc_wdcdev.sc_dev.dv_xname, i);
3001 else
3002 rv = 1;
3003 }
3004 }
3005 return rv;
3006 }
3007
3008 void
3009 hpt_chip_map(sc, pa)
3010 struct pciide_softc *sc;
3011 struct pci_attach_args *pa;
3012 {
3013 struct pciide_channel *cp;
3014 int i, compatchan, revision;
3015 pcireg_t interface;
3016 bus_size_t cmdsize, ctlsize;
3017
3018 if (pciide_chipen(sc, pa) == 0)
3019 return;
3020 revision = PCI_REVISION(pa->pa_class);
3021 printf(": Triones/Highpoint ");
3022 if (revision == HPT370_REV)
3023 printf("HPT370 IDE Controller\n");
3024 else if (revision == HPT370A_REV)
3025 printf("HPT370A IDE Controller\n");
3026 else if (revision == HPT366_REV)
3027 printf("HPT366 IDE Controller\n");
3028 else
3029 printf("unknown HPT IDE controller rev %d\n", revision);
3030
3031 /*
3032 * when the chip is in native mode it identifies itself as a
3033 	 * 'misc mass storage'. Fake the interface in this case.
3034 */
3035 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3036 interface = PCI_INTERFACE(pa->pa_class);
3037 } else {
3038 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3039 PCIIDE_INTERFACE_PCI(0);
3040 if (revision == HPT370_REV || revision == HPT370A_REV)
3041 interface |= PCIIDE_INTERFACE_PCI(1);
3042 }
3043
3044 printf("%s: bus-master DMA support present",
3045 sc->sc_wdcdev.sc_dev.dv_xname);
3046 pciide_mapreg_dma(sc, pa);
3047 printf("\n");
3048 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3049 WDC_CAPABILITY_MODE;
3050 if (sc->sc_dma_ok) {
3051 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3052 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3053 sc->sc_wdcdev.irqack = pciide_irqack;
3054 }
3055 sc->sc_wdcdev.PIO_cap = 4;
3056 sc->sc_wdcdev.DMA_cap = 2;
3057
3058 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3059 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3060 if (revision == HPT366_REV) {
3061 sc->sc_wdcdev.UDMA_cap = 4;
3062 /*
3063 * The 366 has 2 PCI IDE functions, one for primary and one
3064 * for secondary. So we need to call pciide_mapregs_compat()
3065 * with the real channel
3066 */
3067 if (pa->pa_function == 0) {
3068 compatchan = 0;
3069 } else if (pa->pa_function == 1) {
3070 compatchan = 1;
3071 } else {
3072 printf("%s: unexpected PCI function %d\n",
3073 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3074 return;
3075 }
3076 sc->sc_wdcdev.nchannels = 1;
3077 } else {
3078 sc->sc_wdcdev.nchannels = 2;
3079 sc->sc_wdcdev.UDMA_cap = 5;
3080 }
3081 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3082 cp = &sc->pciide_channels[i];
3083 if (sc->sc_wdcdev.nchannels > 1) {
3084 compatchan = i;
3085 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3086 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3087 printf("%s: %s channel ignored (disabled)\n",
3088 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3089 continue;
3090 }
3091 }
3092 if (pciide_chansetup(sc, i, interface) == 0)
3093 continue;
3094 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3095 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3096 &ctlsize, hpt_pci_intr);
3097 } else {
3098 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3099 &cmdsize, &ctlsize);
3100 }
3101 if (cp->hw_ok == 0)
3102 return;
3103 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3104 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3105 wdcattach(&cp->wdc_channel);
3106 hpt_setup_channel(&cp->wdc_channel);
3107 }
3108 if (revision == HPT370_REV || revision == HPT370A_REV) {
3109 /*
3110 		 * The HPT370 has a bit to disable interrupts; make sure
3111 		 * to clear it.
3112 */
3113 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3114 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3115 ~HPT_CSEL_IRQDIS);
3116 }
3117 return;
3118 }
3119
3120 void
3121 hpt_setup_channel(chp)
3122 struct channel_softc *chp;
3123 {
3124 struct ata_drive_datas *drvp;
3125 int drive;
3126 int cable;
3127 u_int32_t before, after;
3128 u_int32_t idedma_ctl;
3129 struct pciide_channel *cp = (struct pciide_channel*)chp;
3130 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3131
3132 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3133
3134 /* setup DMA if needed */
3135 pciide_channel_dma_setup(cp);
3136
3137 idedma_ctl = 0;
3138
3139 /* Per drive settings */
3140 for (drive = 0; drive < 2; drive++) {
3141 drvp = &chp->ch_drive[drive];
3142 /* If no drive, skip */
3143 if ((drvp->drive_flags & DRIVE) == 0)
3144 continue;
3145 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3146 HPT_IDETIM(chp->channel, drive));
3147
3148 /* add timing values, setup DMA if needed */
3149 if (drvp->drive_flags & DRIVE_UDMA) {
3150 /* use Ultra/DMA */
3151 drvp->drive_flags &= ~DRIVE_DMA;
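			/*
			 * The CSEL cable-ID bit being set apparently means
			 * no 80-conductor cable was sensed on this channel,
			 * so limit UDMA to mode 2 (ATA/33).
			 */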
3152 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3153 drvp->UDMA_mode > 2)
3154 drvp->UDMA_mode = 2;
3155 after = (sc->sc_wdcdev.nchannels == 2) ?
3156 hpt370_udma[drvp->UDMA_mode] :
3157 hpt366_udma[drvp->UDMA_mode];
3158 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3159 } else if (drvp->drive_flags & DRIVE_DMA) {
3160 /*
3161 * use Multiword DMA.
3162 * Timings will be used for both PIO and DMA, so adjust
3163 * DMA mode if needed
3164 */
3165 if (drvp->PIO_mode >= 3 &&
3166 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3167 drvp->DMA_mode = drvp->PIO_mode - 2;
3168 }
3169 after = (sc->sc_wdcdev.nchannels == 2) ?
3170 hpt370_dma[drvp->DMA_mode] :
3171 hpt366_dma[drvp->DMA_mode];
3172 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3173 } else {
3174 /* PIO only */
3175 after = (sc->sc_wdcdev.nchannels == 2) ?
3176 hpt370_pio[drvp->PIO_mode] :
3177 hpt366_pio[drvp->PIO_mode];
3178 }
3179 pci_conf_write(sc->sc_pc, sc->sc_tag,
3180 HPT_IDETIM(chp->channel, drive), after);
3181 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3182 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3183 after, before), DEBUG_PROBE);
3184 }
3185 if (idedma_ctl != 0) {
3186 /* Add software bits in status register */
3187 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3188 IDEDMA_CTL, idedma_ctl);
3189 }
3190 pciide_print_modes(cp);
3191 }
3192
3193 int
3194 hpt_pci_intr(arg)
3195 void *arg;
3196 {
3197 struct pciide_softc *sc = arg;
3198 struct pciide_channel *cp;
3199 struct channel_softc *wdc_cp;
3200 int rv = 0;
3201 int dmastat, i, crv;
3202
3203 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3204 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3205 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3206 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3207 continue;
3208 cp = &sc->pciide_channels[i];
3209 wdc_cp = &cp->wdc_channel;
3210 crv = wdcintr(wdc_cp);
3211 if (crv == 0) {
3212 printf("%s:%d: bogus intr\n",
3213 sc->sc_wdcdev.sc_dev.dv_xname, i);
3214 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3215 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3216 } else
3217 rv = 1;
3218 }
3219 return rv;
3220 }
3221
3222
3223 /* Macros to test product */
3224 #define PDC_IS_262(sc) \
3225 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3226 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3227 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3228 #define PDC_IS_265(sc) \
3229 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3230 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
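/*
 * The 262/265 in the macro names refer to the PDC2026x generations:
 * PDC_IS_262 matches the Ultra/66-class and newer parts, PDC_IS_265 only
 * the Ultra/100-class parts (assumed mapping, inferred from the product
 * lists above).
 */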
3231
3232 void
3233 pdc202xx_chip_map(sc, pa)
3234 struct pciide_softc *sc;
3235 struct pci_attach_args *pa;
3236 {
3237 struct pciide_channel *cp;
3238 int channel;
3239 pcireg_t interface, st, mode;
3240 bus_size_t cmdsize, ctlsize;
3241
3242 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3243 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3244 DEBUG_PROBE);
3245 if (pciide_chipen(sc, pa) == 0)
3246 return;
3247
3248 /* turn off RAID mode */
3249 st &= ~PDC2xx_STATE_IDERAID;
3250
3251 /*
3252 * can't rely on the PCI_CLASS_REG content if the chip was in raid
3253 	 * mode. We have to fake the interface.
3254 */
3255 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3256 if (st & PDC2xx_STATE_NATIVE)
3257 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3258
3259 printf("%s: bus-master DMA support present",
3260 sc->sc_wdcdev.sc_dev.dv_xname);
3261 pciide_mapreg_dma(sc, pa);
3262 printf("\n");
3263 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3264 WDC_CAPABILITY_MODE;
3265 if (sc->sc_dma_ok) {
3266 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3267 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3268 sc->sc_wdcdev.irqack = pciide_irqack;
3269 }
3270 sc->sc_wdcdev.PIO_cap = 4;
3271 sc->sc_wdcdev.DMA_cap = 2;
3272 if (PDC_IS_265(sc))
3273 sc->sc_wdcdev.UDMA_cap = 5;
3274 else if (PDC_IS_262(sc))
3275 sc->sc_wdcdev.UDMA_cap = 4;
3276 else
3277 sc->sc_wdcdev.UDMA_cap = 2;
3278 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3279 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3280 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3281
3282 /* setup failsafe defaults */
3283 mode = 0;
3284 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3285 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3286 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3287 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3288 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3289 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3290 "initial timings 0x%x, now 0x%x\n", channel,
3291 pci_conf_read(sc->sc_pc, sc->sc_tag,
3292 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3293 DEBUG_PROBE);
3294 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3295 mode | PDC2xx_TIM_IORDYp);
3296 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3297 "initial timings 0x%x, now 0x%x\n", channel,
3298 pci_conf_read(sc->sc_pc, sc->sc_tag,
3299 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3300 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3301 mode);
3302 }
3303
3304 mode = PDC2xx_SCR_DMA;
3305 if (PDC_IS_262(sc)) {
3306 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3307 } else {
3308 /* the BIOS set it up this way */
3309 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3310 }
3311 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3312 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3313 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3314 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3315 DEBUG_PROBE);
3316 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3317
3318 /* controller initial state register is OK even without BIOS */
3319 /* Set DMA mode to IDE DMA compatibility */
3320 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3321 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3322 DEBUG_PROBE);
3323 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3324 mode | 0x1);
3325 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3326 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3327 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3328 mode | 0x1);
3329
3330 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3331 cp = &sc->pciide_channels[channel];
3332 if (pciide_chansetup(sc, channel, interface) == 0)
3333 continue;
3334 if ((st & (PDC_IS_262(sc) ?
3335 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3336 printf("%s: %s channel ignored (disabled)\n",
3337 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3338 continue;
3339 }
3340 if (PDC_IS_265(sc))
3341 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3342 pdc20265_pci_intr);
3343 else
3344 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3345 pdc202xx_pci_intr);
3346 if (cp->hw_ok == 0)
3347 continue;
3348 if (pciide_chan_candisable(cp))
3349 st &= ~(PDC_IS_262(sc) ?
3350 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3351 pciide_map_compat_intr(pa, cp, channel, interface);
3352 pdc202xx_setup_channel(&cp->wdc_channel);
3353 }
3354 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3355 DEBUG_PROBE);
3356 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3357 return;
3358 }
3359
3360 void
3361 pdc202xx_setup_channel(chp)
3362 struct channel_softc *chp;
3363 {
3364 struct ata_drive_datas *drvp;
3365 int drive;
3366 pcireg_t mode, st;
3367 u_int32_t idedma_ctl, scr, atapi;
3368 struct pciide_channel *cp = (struct pciide_channel*)chp;
3369 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3370 int channel = chp->channel;
3371
3372 /* setup DMA if needed */
3373 pciide_channel_dma_setup(cp);
3374
3375 idedma_ctl = 0;
3376 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3377 sc->sc_wdcdev.sc_dev.dv_xname,
3378 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3379 DEBUG_PROBE);
3380
3381 /* Per channel settings */
3382 if (PDC_IS_262(sc)) {
3383 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3384 PDC262_U66);
3385 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3386 		/* Trim UDMA mode */
3387 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3388 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3389 chp->ch_drive[0].UDMA_mode <= 2) ||
3390 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3391 chp->ch_drive[1].UDMA_mode <= 2)) {
3392 if (chp->ch_drive[0].UDMA_mode > 2)
3393 chp->ch_drive[0].UDMA_mode = 2;
3394 if (chp->ch_drive[1].UDMA_mode > 2)
3395 chp->ch_drive[1].UDMA_mode = 2;
3396 }
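		/*
		 * Both drives are clamped together because the 66MHz UDMA
		 * clock (PDC262_U66_EN below) is selected per channel, not
		 * per drive.
		 */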
3397 /* Set U66 if needed */
3398 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3399 chp->ch_drive[0].UDMA_mode > 2) ||
3400 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3401 chp->ch_drive[1].UDMA_mode > 2))
3402 scr |= PDC262_U66_EN(channel);
3403 else
3404 scr &= ~PDC262_U66_EN(channel);
3405 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3406 PDC262_U66, scr);
3407 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3408 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3409 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3410 PDC262_ATAPI(channel))), DEBUG_PROBE);
3411 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3412 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3413 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3414 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3415 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3416 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3417 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3418 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3419 atapi = 0;
3420 else
3421 atapi = PDC262_ATAPI_UDMA;
3422 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3423 PDC262_ATAPI(channel), atapi);
3424 }
3425 }
3426 for (drive = 0; drive < 2; drive++) {
3427 drvp = &chp->ch_drive[drive];
3428 /* If no drive, skip */
3429 if ((drvp->drive_flags & DRIVE) == 0)
3430 continue;
3431 mode = 0;
3432 if (drvp->drive_flags & DRIVE_UDMA) {
3433 /* use Ultra/DMA */
3434 drvp->drive_flags &= ~DRIVE_DMA;
3435 mode = PDC2xx_TIM_SET_MB(mode,
3436 pdc2xx_udma_mb[drvp->UDMA_mode]);
3437 mode = PDC2xx_TIM_SET_MC(mode,
3438 pdc2xx_udma_mc[drvp->UDMA_mode]);
3440 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3441 } else if (drvp->drive_flags & DRIVE_DMA) {
3442 mode = PDC2xx_TIM_SET_MB(mode,
3443 pdc2xx_dma_mb[drvp->DMA_mode]);
3444 mode = PDC2xx_TIM_SET_MC(mode,
3445 pdc2xx_dma_mc[drvp->DMA_mode]);
3446 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3447 } else {
3448 mode = PDC2xx_TIM_SET_MB(mode,
3449 pdc2xx_dma_mb[0]);
3450 mode = PDC2xx_TIM_SET_MC(mode,
3451 pdc2xx_dma_mc[0]);
3452 }
3453 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3454 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3455 if (drvp->drive_flags & DRIVE_ATA)
3456 mode |= PDC2xx_TIM_PRE;
3457 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3458 if (drvp->PIO_mode >= 3) {
3459 mode |= PDC2xx_TIM_IORDY;
3460 if (drive == 0)
3461 mode |= PDC2xx_TIM_IORDYp;
3462 }
3463 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3464 "timings 0x%x\n",
3465 sc->sc_wdcdev.sc_dev.dv_xname,
3466 chp->channel, drive, mode), DEBUG_PROBE);
3467 pci_conf_write(sc->sc_pc, sc->sc_tag,
3468 PDC2xx_TIM(chp->channel, drive), mode);
3469 }
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}

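/*
 * Interrupt handler for PDC202xx controllers: read the chip's global
 * status register and dispatch to wdcintr() for each native-mode channel
 * whose interrupt bit is set.
 */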
int
pdc202xx_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;
	u_int32_t scr;

	rv = 0;
	scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip it. */
		if (cp->compat)
			continue;
		if (scr & PDC2xx_SCR_INT(i)) {
			crv = wdcintr(wdc_cp);
			if (crv == 0)
				printf("%s:%d: bogus intr (reg 0x%x)\n",
				    sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
			else
				rv = 1;
		}
	}
	return rv;
}

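/*
 * Interrupt handler for the PDC20265/20267: the per-channel bus-master
 * DMA control register is polled for the interrupt bit instead of the
 * global status register (see the comment in the loop below).
 */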
int
pdc20265_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;
	u_int32_t dmastat;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip it. */
		if (cp->compat)
			continue;
		/*
		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
		 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
		 * So use it instead (this requires 2 register reads instead
		 * of 1, but we can't do it another way).
		 */
		dmastat = bus_space_read_1(sc->sc_dma_iot,
		    sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
		if ((dmastat & IDEDMA_CTL_INTR) == 0)
			continue;
		crv = wdcintr(wdc_cp);
		if (crv == 0)
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
		else
			rv = 1;
	}
	return rv;
}

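/*
 * Attach glue for OPTi IDE controllers: map the bus-master DMA registers
 * (unless the chip revision is known to be buggy), then set up each
 * channel and program its initial timings.
 */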
void
opti_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	u_int8_t init_ctrl;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);

	/*
	 * XXXSCW:
	 * There seem to be a couple of buggy revisions/implementations
	 * of the OPTi pciide chipset. This kludge seems to fix one of
	 * the reported problems (PR/11644) but still fails for the
	 * other (PR/13151), although the latter may be due to other
	 * issues too...
	 */
	if (PCI_REVISION(pa->pa_class) <= 0x12) {
		printf(" but disabled due to chip rev. <= 0x12");
		sc->sc_dma_ok = 0;
		sc->sc_wdcdev.cap = 0;
	} else {
		sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
		pciide_mapreg_dma(sc, pa);
	}
	printf("\n");

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = opti_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
	    OPTI_REG_INIT_CONTROL);

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (channel == 1 &&
		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		opti_setup_channel(&cp->wdc_channel);
	}
}

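/*
 * Per-channel mode setup for OPTi controllers: pick a timing-table index
 * for each drive (reconciling PIO and DMA modes), then program the
 * address setup, pulse width/recovery and enhanced mode registers.
 */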
void
opti_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive, spd;
	int mode[2];
	u_int8_t rv, mr;

	/*
	 * The `Delay' and `Address Setup Time' fields of the
	 * Miscellaneous Register are always zero initially.
	 */
	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
	mr &= ~(OPTI_MISC_DELAY_MASK |
	    OPTI_MISC_ADDR_SETUP_MASK |
	    OPTI_MISC_INDEX_MASK);

	/* Prime the control register before setting timing values */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);

	/* Determine the clock rate of the PCI bus the chip is attached to */
	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
	spd &= OPTI_STRAP_PCI_SPEED_MASK;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

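	/* First pass: choose a timing-table index (mode[]) for each drive. */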
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0) {
			mode[drive] = -1;
			continue;
		}

		if ((drvp->drive_flags & DRIVE_DMA)) {
			/*
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;

			mode[drive] = drvp->DMA_mode + 5;
		} else
			mode[drive] = drvp->PIO_mode;

		if (drive && mode[0] >= 0 &&
		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
			/*
			 * Can't have two drives using different values
			 * for `Address Setup Time'.
			 * Slow down the faster drive to compensate.
			 */
			int d = (opti_tim_as[spd][mode[0]] >
			    opti_tim_as[spd][mode[1]]) ? 0 : 1;

			mode[d] = mode[1-d];
			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
			chp->ch_drive[d].DMA_mode = 0;
			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
		}
	}

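	/* Second pass: program the timing registers from the chosen indices. */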
	for (drive = 0; drive < 2; drive++) {
		int m;
		if ((m = mode[drive]) < 0)
			continue;

		/* Set the Address Setup Time and select appropriate index */
		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
		rv |= OPTI_MISC_INDEX(drive);
		opti_write_config(chp, OPTI_REG_MISC, mr | rv);

		/* Set the pulse width and recovery timing parameters */
		rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);

		/* Set the Enhanced Mode register appropriately */
		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
	}

	/* Finally, enable the timings */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);

	pciide_print_modes(cp);
}