pciide.c revision 1.68.2.22 1 /* $NetBSD: pciide.c,v 1.68.2.22 2001/03/13 21:23:35 he Exp $ */
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
#ifndef WDCDEBUG
#define WDCDEBUG
#endif

#define DEBUG_DMA   0x01
#define DEBUG_XFERS  0x02
#define DEBUG_FUNCS  0x08
#define DEBUG_PROBE  0x10
#ifdef WDCDEBUG
/* Bitmask of DEBUG_* values selecting which debug printfs are active. */
int wdcdebug_pciide_mask = 0;
/*
 * Conditional debug printf.  Wrapped in do { } while (0) so that the
 * macro behaves as a single statement: the previous bare-if expansion
 * would silently capture a following "else" (dangling-else hazard).
 */
#define WDCDEBUG_PRINT(args, level) \
	do { \
		if (wdcdebug_pciide_mask & (level)) \
			printf args; \
	} while (0)
#else
#define WDCDEBUG_PRINT(args, level) do { } while (0)
#endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/cy82c693var.h>
121
122 #include "opt_pciide.h"
123
124 /* inlines for reading/writing 8-bit PCI registers */
125 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
126 int));
127 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
128 int, u_int8_t));
129
130 static __inline u_int8_t
131 pciide_pci_read(pc, pa, reg)
132 pci_chipset_tag_t pc;
133 pcitag_t pa;
134 int reg;
135 {
136
137 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
138 ((reg & 0x03) * 8) & 0xff);
139 }
140
141 static __inline void
142 pciide_pci_write(pc, pa, reg, val)
143 pci_chipset_tag_t pc;
144 pcitag_t pa;
145 int reg;
146 u_int8_t val;
147 {
148 pcireg_t pcival;
149
150 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
151 pcival &= ~(0xff << ((reg & 0x03) * 8));
152 pcival |= (val << ((reg & 0x03) * 8));
153 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
154 }
155
156 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157
158 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159 void piix_setup_channel __P((struct channel_softc*));
160 void piix3_4_setup_channel __P((struct channel_softc*));
161 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
163 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164
165 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
166 void amd756_setup_channel __P((struct channel_softc*));
167
168 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
169 void apollo_setup_channel __P((struct channel_softc*));
170
171 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
173 void cmd0643_9_setup_channel __P((struct channel_softc*));
174 void cmd_channel_map __P((struct pci_attach_args *,
175 struct pciide_softc *, int));
176 int cmd_pci_intr __P((void *));
177 void cmd646_9_irqack __P((struct channel_softc *));
178
179 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
180 void cy693_setup_channel __P((struct channel_softc*));
181
182 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void sis_setup_channel __P((struct channel_softc*));
184
185 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void acer_setup_channel __P((struct channel_softc*));
187 int acer_pci_intr __P((void *));
188
189 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
190 void pdc202xx_setup_channel __P((struct channel_softc*));
191 int pdc202xx_pci_intr __P((void *));
192 int pdc20265_pci_intr __P((void *));
193
194 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
195 void opti_setup_channel __P((struct channel_softc*));
196
197 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
198 void hpt_setup_channel __P((struct channel_softc*));
199 int hpt_pci_intr __P((void *));
200
201 void pciide_channel_dma_setup __P((struct pciide_channel *));
202 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
203 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
204 void pciide_dma_start __P((void*, int, int));
205 int pciide_dma_finish __P((void*, int, int, int));
206 void pciide_irqack __P((struct channel_softc *));
207 void pciide_print_modes __P((struct pciide_channel *));
208
/*
 * Per-product description: how to identify a supported controller and
 * which chip-specific mapping routine drives it.
 */
struct pciide_product_desc {
	u_int32_t ide_product;	/* PCI product ID to match */
	int ide_flags;		/* IDE_PCI_* flags, see below */
	const char *ide_name;	/* name printed at attach time */
	/* map and setup chip, probe drives */
	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001 /* accept even if class != pciide */

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,	/* product ID 0: never matched by pciide_lookup_product */
	0,
	"Generic PCI IDE controller",
	default_chip_map,
};

/* Intel controllers; all but the 82092AA use the PIIX timing code. */
const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_82092AA,
	  0,
	  "Intel 82092AA IDE controller",
	  default_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371FB_IDE,
	  0,
	  "Intel 82371FB IDE controller (PIIX)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371SB_IDE,
	  0,
	  "Intel 82371SB IDE Interface (PIIX3)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371AB_IDE,
	  0,
	  "Intel 82371AB IDE controller (PIIX4)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801AA_IDE,
	  0,
	  "Intel 82801AA IDE Controller (ICH)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801AB_IDE,
	  0,
	  "Intel 82801AB IDE Controller (ICH0)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801BA_IDE,
	  0,
	  "Intel 82801BA IDE Controller (ICH2)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801BAM_IDE,
	  0,
	  "Intel 82801BAM IDE Controller (ICH2)",
	  piix_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,
	  0,
	  "Advanced Micro Devices AMD756 IDE Controller",
	  amd756_chip_map
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/*
 * CMD Technology controllers.  The 0648/0649 don't advertise the PCI
 * IDE class code, hence the class override.
 */
const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  0,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  0,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  0,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
	  0,
	  "VIA Tech VT82C586 IDE Controller",
	  apollo_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
	  0,
	  "VIA Tech VT82C586A IDE Controller",
	  apollo_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693,
	  0,
	  "Cypress 82C693 IDE Controller",
	  cy693_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5597_IDE,
	  0,
	  "Silicon Integrated System 5597/5598 IDE controller",
	  sis_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,
	  0,
	  "Acer Labs M5229 UDMA IDE Controller",
	  acer_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* Promise controllers never claim the PCI IDE class; always override. */
const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_ULTRA33,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA66,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100X,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621,
	  0,
	  "OPTi 82c621 PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82C568,
	  0,
	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82D568,
	  0,
	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Triones/Highpoint HPT366/370 IDE Controller",
	  hpt_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* Maps a PCI vendor ID to that vendor's product table above. */
struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
};

/* Master list searched by pciide_lookup_product(). */
const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products },
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
	{ PCI_VENDOR_VIATECH, pciide_via_products },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
	{ PCI_VENDOR_SIS, pciide_sis_products },
	{ PCI_VENDOR_ALI, pciide_acer_products },
	{ PCI_VENDOR_PROMISE, pciide_promise_products },
	{ PCI_VENDOR_AMD, pciide_amd_products },
	{ PCI_VENDOR_OPTI, pciide_opti_products },
	{ PCI_VENDOR_TRIONES, pciide_triones_products },
	{ 0, NULL }	/* sentinel */
};
451
/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01	/* allow DMA on known-flaky chips */

int	pciide_match __P((struct device *, struct cfdata *, void *));
void	pciide_attach __P((struct device *, struct device *, void *));

/* Autoconfiguration glue: softc size plus match/attach entry points. */
struct cfattach pciide_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};
461 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
462 int pciide_mapregs_compat __P(( struct pci_attach_args *,
463 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
464 int pciide_mapregs_native __P((struct pci_attach_args *,
465 struct pciide_channel *, bus_size_t *, bus_size_t *,
466 int (*pci_intr) __P((void *))));
467 void pciide_mapreg_dma __P((struct pciide_softc *,
468 struct pci_attach_args *));
469 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
470 void pciide_mapchan __P((struct pci_attach_args *,
471 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
472 int (*pci_intr) __P((void *))));
473 int pciide_chan_candisable __P((struct pciide_channel *));
474 void pciide_map_compat_intr __P(( struct pci_attach_args *,
475 struct pciide_channel *, int, int));
476 int pciide_print __P((void *, const char *pnp));
477 int pciide_compat_intr __P((void *));
478 int pciide_pci_intr __P((void *));
479 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
480
481 const struct pciide_product_desc *
482 pciide_lookup_product(id)
483 u_int32_t id;
484 {
485 const struct pciide_product_desc *pp;
486 const struct pciide_vendor_desc *vp;
487
488 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
489 if (PCI_VENDOR(id) == vp->ide_vendor)
490 break;
491
492 if ((pp = vp->ide_products) == NULL)
493 return NULL;
494
495 for (; pp->ide_name != NULL; pp++)
496 if (PCI_PRODUCT(id) == pp->ide_product)
497 break;
498
499 if (pp->ide_name == NULL)
500 return NULL;
501 return pp;
502 }
503
504 int
505 pciide_match(parent, match, aux)
506 struct device *parent;
507 struct cfdata *match;
508 void *aux;
509 {
510 struct pci_attach_args *pa = aux;
511 const struct pciide_product_desc *pp;
512
513 /*
514 * Check the ID register to see that it's a PCI IDE controller.
515 * If it is, we assume that we can deal with it; it _should_
516 * work in a standardized way...
517 */
518 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
519 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
520 return (1);
521 }
522
523 /*
524 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE
525 * controllers. Let see if we can deal with it anyway.
526 */
527 pp = pciide_lookup_product(pa->pa_id);
528 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
529 return (1);
530 }
531
532 return (0);
533 }
534
535 void
536 pciide_attach(parent, self, aux)
537 struct device *parent, *self;
538 void *aux;
539 {
540 struct pci_attach_args *pa = aux;
541 pci_chipset_tag_t pc = pa->pa_pc;
542 pcitag_t tag = pa->pa_tag;
543 struct pciide_softc *sc = (struct pciide_softc *)self;
544 pcireg_t csr;
545 char devinfo[256];
546 const char *displaydev;
547
548 sc->sc_pp = pciide_lookup_product(pa->pa_id);
549 if (sc->sc_pp == NULL) {
550 sc->sc_pp = &default_product_desc;
551 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
552 displaydev = devinfo;
553 } else
554 displaydev = sc->sc_pp->ide_name;
555
556 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
557
558 sc->sc_pc = pa->pa_pc;
559 sc->sc_tag = pa->pa_tag;
560 #ifdef WDCDEBUG
561 if (wdcdebug_pciide_mask & DEBUG_PROBE)
562 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
563 #endif
564 sc->sc_pp->chip_map(sc, pa);
565
566 if (sc->sc_dma_ok) {
567 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
568 csr |= PCI_COMMAND_MASTER_ENABLE;
569 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
570 }
571 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
572 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
573 }
574
/* Tell whether the chip is enabled or not. */
576 int
577 pciide_chipen(sc, pa)
578 struct pciide_softc *sc;
579 struct pci_attach_args *pa;
580 {
581 pcireg_t csr;
582 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
583 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
584 PCI_COMMAND_STATUS_REG);
585 printf("%s: device disabled (at %s)\n",
586 sc->sc_wdcdev.sc_dev.dv_xname,
587 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
588 "device" : "bridge");
589 return 0;
590 }
591 return 1;
592 }
593
594 int
595 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
596 struct pci_attach_args *pa;
597 struct pciide_channel *cp;
598 int compatchan;
599 bus_size_t *cmdsizep, *ctlsizep;
600 {
601 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
602 struct channel_softc *wdc_cp = &cp->wdc_channel;
603
604 cp->compat = 1;
605 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
606 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
607
608 wdc_cp->cmd_iot = pa->pa_iot;
609 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
610 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
611 printf("%s: couldn't map %s channel cmd regs\n",
612 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
613 return (0);
614 }
615
616 wdc_cp->ctl_iot = pa->pa_iot;
617 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
618 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
619 printf("%s: couldn't map %s channel ctl regs\n",
620 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
621 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
622 PCIIDE_COMPAT_CMD_SIZE);
623 return (0);
624 }
625
626 return (1);
627 }
628
629 int
630 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
631 struct pci_attach_args * pa;
632 struct pciide_channel *cp;
633 bus_size_t *cmdsizep, *ctlsizep;
634 int (*pci_intr) __P((void *));
635 {
636 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
637 struct channel_softc *wdc_cp = &cp->wdc_channel;
638 const char *intrstr;
639 pci_intr_handle_t intrhandle;
640
641 cp->compat = 0;
642
643 if (sc->sc_pci_ih == NULL) {
644 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
645 pa->pa_intrline, &intrhandle) != 0) {
646 printf("%s: couldn't map native-PCI interrupt\n",
647 sc->sc_wdcdev.sc_dev.dv_xname);
648 return 0;
649 }
650 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
651 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
652 intrhandle, IPL_BIO, pci_intr, sc);
653 if (sc->sc_pci_ih != NULL) {
654 printf("%s: using %s for native-PCI interrupt\n",
655 sc->sc_wdcdev.sc_dev.dv_xname,
656 intrstr ? intrstr : "unknown interrupt");
657 } else {
658 printf("%s: couldn't establish native-PCI interrupt",
659 sc->sc_wdcdev.sc_dev.dv_xname);
660 if (intrstr != NULL)
661 printf(" at %s", intrstr);
662 printf("\n");
663 return 0;
664 }
665 }
666 cp->ih = sc->sc_pci_ih;
667 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
668 PCI_MAPREG_TYPE_IO, 0,
669 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
670 printf("%s: couldn't map %s channel cmd regs\n",
671 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
672 return 0;
673 }
674
675 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
676 PCI_MAPREG_TYPE_IO, 0,
677 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
678 printf("%s: couldn't map %s channel ctl regs\n",
679 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
680 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
681 return 0;
682 }
683 /*
684 * In native mode, 4 bytes of I/O space are mapped for the control
685 * register, the control register is at offset 2. Pass the generic
686 * code a handle for only one byte at the rigth offset.
687 */
688 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
689 &wdc_cp->ctl_ioh) != 0) {
690 printf("%s: unable to subregion %s channel ctl regs\n",
691 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
692 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
693 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
694 return 0;
695 }
696 return (1);
697 }
698
699 void
700 pciide_mapreg_dma(sc, pa)
701 struct pciide_softc *sc;
702 struct pci_attach_args *pa;
703 {
704 pcireg_t maptype;
705
706 /*
707 * Map DMA registers
708 *
709 * Note that sc_dma_ok is the right variable to test to see if
710 * DMA can be done. If the interface doesn't support DMA,
711 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
712 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
713 * non-zero if the interface supports DMA and the registers
714 * could be mapped.
715 *
716 * XXX Note that despite the fact that the Bus Master IDE specs
717 * XXX say that "The bus master IDE function uses 16 bytes of IO
718 * XXX space," some controllers (at least the United
719 * XXX Microelectronics UM8886BF) place it in memory space.
720 */
721 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
722 PCIIDE_REG_BUS_MASTER_DMA);
723
724 switch (maptype) {
725 case PCI_MAPREG_TYPE_IO:
726 case PCI_MAPREG_MEM_TYPE_32BIT:
727 sc->sc_dma_ok = (pci_mapreg_map(pa,
728 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
729 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
730 sc->sc_dmat = pa->pa_dmat;
731 if (sc->sc_dma_ok == 0) {
732 printf(", but unused (couldn't map registers)");
733 } else {
734 sc->sc_wdcdev.dma_arg = sc;
735 sc->sc_wdcdev.dma_init = pciide_dma_init;
736 sc->sc_wdcdev.dma_start = pciide_dma_start;
737 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
738 }
739 break;
740
741 default:
742 sc->sc_dma_ok = 0;
743 printf(", but unsupported register maptype (0x%x)", maptype);
744 }
745 }
746
747 int
748 pciide_compat_intr(arg)
749 void *arg;
750 {
751 struct pciide_channel *cp = arg;
752
753 #ifdef DIAGNOSTIC
754 /* should only be called for a compat channel */
755 if (cp->compat == 0)
756 panic("pciide compat intr called for non-compat chan %p\n", cp);
757 #endif
758 return (wdcintr(&cp->wdc_channel));
759 }
760
761 int
762 pciide_pci_intr(arg)
763 void *arg;
764 {
765 struct pciide_softc *sc = arg;
766 struct pciide_channel *cp;
767 struct channel_softc *wdc_cp;
768 int i, rv, crv;
769
770 rv = 0;
771 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
772 cp = &sc->pciide_channels[i];
773 wdc_cp = &cp->wdc_channel;
774
775 /* If a compat channel skip. */
776 if (cp->compat)
777 continue;
778 /* if this channel not waiting for intr, skip */
779 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
780 continue;
781
782 crv = wdcintr(wdc_cp);
783 if (crv == 0)
784 ; /* leave rv alone */
785 else if (crv == 1)
786 rv = 1; /* claim the intr */
787 else if (rv == 0) /* crv should be -1 in this case */
788 rv = crv; /* if we've done no better, take it */
789 }
790 return (rv);
791 }
792
793 void
794 pciide_channel_dma_setup(cp)
795 struct pciide_channel *cp;
796 {
797 int drive;
798 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
799 struct ata_drive_datas *drvp;
800
801 for (drive = 0; drive < 2; drive++) {
802 drvp = &cp->wdc_channel.ch_drive[drive];
803 /* If no drive, skip */
804 if ((drvp->drive_flags & DRIVE) == 0)
805 continue;
806 /* setup DMA if needed */
807 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
808 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
809 sc->sc_dma_ok == 0) {
810 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
811 continue;
812 }
813 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
814 != 0) {
815 /* Abort DMA setup */
816 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
817 continue;
818 }
819 }
820 }
821
822 int
823 pciide_dma_table_setup(sc, channel, drive)
824 struct pciide_softc *sc;
825 int channel, drive;
826 {
827 bus_dma_segment_t seg;
828 int error, rseg;
829 const bus_size_t dma_table_size =
830 sizeof(struct idedma_table) * NIDEDMA_TABLES;
831 struct pciide_dma_maps *dma_maps =
832 &sc->pciide_channels[channel].dma_maps[drive];
833
834 /* If table was already allocated, just return */
835 if (dma_maps->dma_table)
836 return 0;
837
838 /* Allocate memory for the DMA tables and map it */
839 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
840 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
841 BUS_DMA_NOWAIT)) != 0) {
842 printf("%s:%d: unable to allocate table DMA for "
843 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
844 channel, drive, error);
845 return error;
846 }
847 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
848 dma_table_size,
849 (caddr_t *)&dma_maps->dma_table,
850 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
851 printf("%s:%d: unable to map table DMA for"
852 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
853 channel, drive, error);
854 return error;
855 }
856 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
857 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
858 seg.ds_addr), DEBUG_PROBE);
859
860 /* Create and load table DMA map for this disk */
861 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
862 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
863 &dma_maps->dmamap_table)) != 0) {
864 printf("%s:%d: unable to create table DMA map for "
865 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
866 channel, drive, error);
867 return error;
868 }
869 if ((error = bus_dmamap_load(sc->sc_dmat,
870 dma_maps->dmamap_table,
871 dma_maps->dma_table,
872 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
873 printf("%s:%d: unable to load table DMA map for "
874 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
875 channel, drive, error);
876 return error;
877 }
878 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
879 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
880 /* Create a xfer DMA map for this drive */
881 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
882 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
883 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
884 &dma_maps->dmamap_xfer)) != 0) {
885 printf("%s:%d: unable to create xfer DMA map for "
886 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
887 channel, drive, error);
888 return error;
889 }
890 return 0;
891 }
892
893 int
894 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
895 void *v;
896 int channel, drive;
897 void *databuf;
898 size_t datalen;
899 int flags;
900 {
901 struct pciide_softc *sc = v;
902 int error, seg;
903 struct pciide_dma_maps *dma_maps =
904 &sc->pciide_channels[channel].dma_maps[drive];
905
906 error = bus_dmamap_load(sc->sc_dmat,
907 dma_maps->dmamap_xfer,
908 databuf, datalen, NULL, BUS_DMA_NOWAIT);
909 if (error) {
910 printf("%s:%d: unable to load xfer DMA map for"
911 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
912 channel, drive, error);
913 return error;
914 }
915
916 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
917 dma_maps->dmamap_xfer->dm_mapsize,
918 (flags & WDC_DMA_READ) ?
919 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
920
921 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
922 #ifdef DIAGNOSTIC
923 /* A segment must not cross a 64k boundary */
924 {
925 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
926 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
927 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
928 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
929 printf("pciide_dma: segment %d physical addr 0x%lx"
930 " len 0x%lx not properly aligned\n",
931 seg, phys, len);
932 panic("pciide_dma: buf align");
933 }
934 }
935 #endif
936 dma_maps->dma_table[seg].base_addr =
937 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
938 dma_maps->dma_table[seg].byte_count =
939 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
940 IDEDMA_BYTE_COUNT_MASK);
941 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
942 seg, le32toh(dma_maps->dma_table[seg].byte_count),
943 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
944
945 }
946 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
947 htole32(IDEDMA_BYTE_COUNT_EOT);
948
949 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
950 dma_maps->dmamap_table->dm_mapsize,
951 BUS_DMASYNC_PREWRITE);
952
953 /* Maps are ready. Start DMA function */
954 #ifdef DIAGNOSTIC
955 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
956 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
957 dma_maps->dmamap_table->dm_segs[0].ds_addr);
958 panic("pciide_dma_init: table align");
959 }
960 #endif
961
962 /* Clear status bits */
963 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
964 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
965 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
966 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
967 /* Write table addr */
968 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
969 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
970 dma_maps->dmamap_table->dm_segs[0].ds_addr);
971 /* set read/write */
972 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
973 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
974 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
975 /* remember flags */
976 dma_maps->dma_flags = flags;
977 return 0;
978 }
979
980 void
981 pciide_dma_start(v, channel, drive)
982 void *v;
983 int channel, drive;
984 {
985 struct pciide_softc *sc = v;
986
987 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
988 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
989 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
990 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
991 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
992 }
993
/*
 * Complete (or poll) a bus-master DMA transfer on channel/drive.
 * If 'force' is 0 and the controller's interrupt bit isn't set, the
 * interrupt wasn't ours and WDC_DMAST_NOIRQ is returned without
 * touching the transfer.  Otherwise the channel is stopped, the data
 * map unloaded, and a WDC_DMAST_* status mask is returned.
 */
int
pciide_dma_finish(v, channel, drive, force)
	void *v;
	int channel, drive;
	int force;
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* Snapshot the bus-master status register for this channel. */
	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	/* Not forced and no interrupt pending: not our interrupt. */
	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	/* Controller flagged a bus-master error during the transfer. */
	if ((status & IDEDMA_CTL_ERR) != 0) {
		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
		error |= WDC_DMAST_ERR;
	}

	/* Forced completion without the interrupt bit: report it. */
	if ((status & IDEDMA_CTL_INTR) == 0) {
		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}
1046
1047 void
1048 pciide_irqack(chp)
1049 struct channel_softc *chp;
1050 {
1051 struct pciide_channel *cp = (struct pciide_channel*)chp;
1052 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1053
1054 /* clear status bits in IDE DMA registers */
1055 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1056 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1057 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1058 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1059 }
1060
1061 /* some common code used by several chip_map */
1062 int
1063 pciide_chansetup(sc, channel, interface)
1064 struct pciide_softc *sc;
1065 int channel;
1066 pcireg_t interface;
1067 {
1068 struct pciide_channel *cp = &sc->pciide_channels[channel];
1069 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1070 cp->name = PCIIDE_CHANNEL_NAME(channel);
1071 cp->wdc_channel.channel = channel;
1072 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1073 cp->wdc_channel.ch_queue =
1074 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1075 if (cp->wdc_channel.ch_queue == NULL) {
1076 printf("%s %s channel: "
1077 "can't allocate memory for command queue",
1078 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1079 return 0;
1080 }
1081 printf("%s: %s channel %s to %s mode\n",
1082 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1083 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1084 "configured" : "wired",
1085 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1086 "native-PCI" : "compatibility");
1087 return 1;
1088 }
1089
1090 /* some common code used by several chip channel_map */
1091 void
1092 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1093 struct pci_attach_args *pa;
1094 struct pciide_channel *cp;
1095 pcireg_t interface;
1096 bus_size_t *cmdsizep, *ctlsizep;
1097 int (*pci_intr) __P((void *));
1098 {
1099 struct channel_softc *wdc_cp = &cp->wdc_channel;
1100
1101 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1102 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1103 pci_intr);
1104 else
1105 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1106 wdc_cp->channel, cmdsizep, ctlsizep);
1107
1108 if (cp->hw_ok == 0)
1109 return;
1110 wdc_cp->data32iot = wdc_cp->cmd_iot;
1111 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1112 wdcattach(wdc_cp);
1113 }
1114
1115 /*
1116 * Generic code to call to know if a channel can be disabled. Return 1
1117 * if channel can be disabled, 0 if not
1118 */
1119 int
1120 pciide_chan_candisable(cp)
1121 struct pciide_channel *cp;
1122 {
1123 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1124 struct channel_softc *wdc_cp = &cp->wdc_channel;
1125
1126 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1127 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1128 printf("%s: disabling %s channel (no drives)\n",
1129 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1130 cp->hw_ok = 0;
1131 return 1;
1132 }
1133 return 0;
1134 }
1135
1136 /*
1137 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1138 * Set hw_ok=0 on failure
1139 */
1140 void
1141 pciide_map_compat_intr(pa, cp, compatchan, interface)
1142 struct pci_attach_args *pa;
1143 struct pciide_channel *cp;
1144 int compatchan, interface;
1145 {
1146 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1147 struct channel_softc *wdc_cp = &cp->wdc_channel;
1148
1149 if (cp->hw_ok == 0)
1150 return;
1151 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1152 return;
1153
1154 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1155 pa, compatchan, pciide_compat_intr, cp);
1156 if (cp->ih == NULL) {
1157 printf("%s: no compatibility interrupt for use by %s "
1158 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1159 cp->hw_ok = 0;
1160 }
1161 }
1162
1163 void
1164 pciide_print_modes(cp)
1165 struct pciide_channel *cp;
1166 {
1167 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1168 int drive;
1169 struct channel_softc *chp;
1170 struct ata_drive_datas *drvp;
1171
1172 chp = &cp->wdc_channel;
1173 for (drive = 0; drive < 2; drive++) {
1174 drvp = &chp->ch_drive[drive];
1175 if ((drvp->drive_flags & DRIVE) == 0)
1176 continue;
1177 printf("%s(%s:%d:%d): using PIO mode %d",
1178 drvp->drv_softc->dv_xname,
1179 sc->sc_wdcdev.sc_dev.dv_xname,
1180 chp->channel, drive, drvp->PIO_mode);
1181 if (drvp->drive_flags & DRIVE_DMA)
1182 printf(", DMA mode %d", drvp->DMA_mode);
1183 if (drvp->drive_flags & DRIVE_UDMA)
1184 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1185 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1186 printf(" (using DMA data transfers)");
1187 printf("\n");
1188 }
1189 }
1190
1191 void
1192 default_chip_map(sc, pa)
1193 struct pciide_softc *sc;
1194 struct pci_attach_args *pa;
1195 {
1196 struct pciide_channel *cp;
1197 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1198 pcireg_t csr;
1199 int channel, drive;
1200 struct ata_drive_datas *drvp;
1201 u_int8_t idedma_ctl;
1202 bus_size_t cmdsize, ctlsize;
1203 char *failreason;
1204
1205 if (pciide_chipen(sc, pa) == 0)
1206 return;
1207
1208 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1209 printf("%s: bus-master DMA support present",
1210 sc->sc_wdcdev.sc_dev.dv_xname);
1211 if (sc->sc_pp == &default_product_desc &&
1212 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1213 PCIIDE_OPTIONS_DMA) == 0) {
1214 printf(", but unused (no driver support)");
1215 sc->sc_dma_ok = 0;
1216 } else {
1217 pciide_mapreg_dma(sc, pa);
1218 if (sc->sc_dma_ok != 0)
1219 printf(", used without full driver "
1220 "support");
1221 }
1222 } else {
1223 printf("%s: hardware does not support DMA",
1224 sc->sc_wdcdev.sc_dev.dv_xname);
1225 sc->sc_dma_ok = 0;
1226 }
1227 printf("\n");
1228 if (sc->sc_dma_ok) {
1229 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1230 sc->sc_wdcdev.irqack = pciide_irqack;
1231 }
1232 sc->sc_wdcdev.PIO_cap = 0;
1233 sc->sc_wdcdev.DMA_cap = 0;
1234
1235 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1236 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1237 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1238
1239 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1240 cp = &sc->pciide_channels[channel];
1241 if (pciide_chansetup(sc, channel, interface) == 0)
1242 continue;
1243 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1244 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1245 &ctlsize, pciide_pci_intr);
1246 } else {
1247 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1248 channel, &cmdsize, &ctlsize);
1249 }
1250 if (cp->hw_ok == 0)
1251 continue;
1252 /*
1253 * Check to see if something appears to be there.
1254 */
1255 failreason = NULL;
1256 if (!wdcprobe(&cp->wdc_channel)) {
1257 failreason = "not responding; disabled or no drives?";
1258 goto next;
1259 }
1260 /*
1261 * Now, make sure it's actually attributable to this PCI IDE
1262 * channel by trying to access the channel again while the
1263 * PCI IDE controller's I/O space is disabled. (If the
1264 * channel no longer appears to be there, it belongs to
1265 * this controller.) YUCK!
1266 */
1267 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1268 PCI_COMMAND_STATUS_REG);
1269 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1270 csr & ~PCI_COMMAND_IO_ENABLE);
1271 if (wdcprobe(&cp->wdc_channel))
1272 failreason = "other hardware responding at addresses";
1273 pci_conf_write(sc->sc_pc, sc->sc_tag,
1274 PCI_COMMAND_STATUS_REG, csr);
1275 next:
1276 if (failreason) {
1277 printf("%s: %s channel ignored (%s)\n",
1278 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1279 failreason);
1280 cp->hw_ok = 0;
1281 bus_space_unmap(cp->wdc_channel.cmd_iot,
1282 cp->wdc_channel.cmd_ioh, cmdsize);
1283 bus_space_unmap(cp->wdc_channel.ctl_iot,
1284 cp->wdc_channel.ctl_ioh, ctlsize);
1285 } else {
1286 pciide_map_compat_intr(pa, cp, channel, interface);
1287 }
1288 if (cp->hw_ok) {
1289 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1290 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1291 wdcattach(&cp->wdc_channel);
1292 }
1293 }
1294
1295 if (sc->sc_dma_ok == 0)
1296 return;
1297
1298 /* Allocate DMA maps */
1299 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1300 idedma_ctl = 0;
1301 cp = &sc->pciide_channels[channel];
1302 for (drive = 0; drive < 2; drive++) {
1303 drvp = &cp->wdc_channel.ch_drive[drive];
1304 /* If no drive, skip */
1305 if ((drvp->drive_flags & DRIVE) == 0)
1306 continue;
1307 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1308 continue;
1309 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1310 /* Abort DMA setup */
1311 printf("%s:%d:%d: can't allocate DMA maps, "
1312 "using PIO transfers\n",
1313 sc->sc_wdcdev.sc_dev.dv_xname,
1314 channel, drive);
1315 drvp->drive_flags &= ~DRIVE_DMA;
1316 }
1317 printf("%s:%d:%d: using DMA data transfers\n",
1318 sc->sc_wdcdev.sc_dev.dv_xname,
1319 channel, drive);
1320 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1321 }
1322 if (idedma_ctl != 0) {
1323 /* Add software bits in status register */
1324 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1325 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1326 idedma_ctl);
1327 }
1328 }
1329 }
1330
/*
 * chip_map for the Intel PIIX/PIIX3/PIIX4/ICH family: advertise
 * capabilities based on the exact product, map the (compat-only)
 * channels, and program the per-channel timing registers.
 */
void
piix_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* Only these products are Ultra-DMA capable. */
		switch(sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
		case PCI_PRODUCT_INTEL_82801BA_IDE:
		case PCI_PRODUCT_INTEL_82801BAM_IDE:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		}
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	/* Maximum UDMA mode depends on the product generation. */
	switch(sc->sc_pp->ide_product) {
	case PCI_PRODUCT_INTEL_82801AA_IDE:
		sc->sc_wdcdev.UDMA_cap = 4;
		break;
	case PCI_PRODUCT_INTEL_82801BA_IDE:
	case PCI_PRODUCT_INTEL_82801BAM_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	default:
		sc->sc_wdcdev.UDMA_cap = 2;
	}
	/* The original PIIX has a simpler timing-setup routine. */
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* Dump the timing registers before reconfiguration (debug only). */
	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udamreg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		/* PIIX is compat-only */
		if (pciide_chansetup(sc, channel, 0) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		/* Skip channels turned off in the IDETIM register. */
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* PIIX are compat-only pciide devices */
		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		/* If no drives were found, disable the channel decode. */
		if (pciide_chan_candisable(cp)) {
			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
		}
		pciide_map_compat_intr(pa, cp, channel, 0);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}

	/* Dump the timing registers after reconfiguration (debug only). */
	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udamreg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}
1457
/*
 * set_modes routine for the original PIIX (82371FB).  This chip has
 * only one timing register shared by both drives on a channel, so the
 * two drives' modes must be reconciled to a common timing first.
 */
void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess up with drives mode: PIIX can't have
	 * different timings for master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives supports DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		    drvp[0].DMA_mode = mode[0];
		    drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if mode not compatible
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		/* Shared timing must match both drives' mode tables. */
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If both drives are not DMA, takes the lower mode, unless
	 * one of them is PIO mode < 2
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are setup */
	/* Use DMA timings if either drive will run DMA. */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we are there, none of the drives are DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller. Enable
	 * it per-drive
	 */
	for (drive = 0; drive < 2; drive++) {
		/* If no drive, skip */
		if ((drvp[drive].drive_flags & DRIVE) == 0)
			continue;
		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
		if (drvp[drive].drive_flags & DRIVE_DMA)
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pciide_print_modes(cp);
}
1566
/*
 * set_modes routine for PIIX3/PIIX4/ICH.  These chips have per-drive
 * slave timing (SIDETIM) and Ultra-DMA registers, so each drive can
 * be programmed independently.
 */
void
piix3_4_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive;
	int channel = chp->channel;

	/* Read the current timing configuration registers. */
	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
	    PIIX_SIDETIM_RTC_MASK(channel));

	idedma_ctl = 0;
	/* If channel disabled, no need to go further */
	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
		return;
	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		/* Start with this drive's UDMA enable/timing bits cleared. */
		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
		    PIIX_UDMATIM_SET(0x3, channel, drive));
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0))
			goto pio;

		/* ICH-family chips support ping-pong buffering. */
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
			ideconf |= PIIX_CONFIG_PINGPONG;
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
			/* setup Ultra/100 */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 4) {
				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
			} else {
				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
				if (drvp->UDMA_mode > 2) {
					ideconf |= PIIX_CONFIG_UDMA66(channel,
					    drive);
				} else {
					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
					    drive);
				}
			}
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
			/* setup Ultra/66 */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 2)
				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
			else
				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
			udmareg |= PIIX_UDMATIM_SET(
			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			if (drive == 0) {
				idetim |= piix_setup_idetim_timings(
				    drvp->DMA_mode, 1, channel);
			} else {
				sidetim |= piix_setup_sidetim_timings(
					drvp->DMA_mode, 1, channel);
				idetim =PIIX_IDETIM_SET(idetim,
				    PIIX_IDETIM_SITRE, channel);
			}
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* use PIO mode */
		idetim |= piix_setup_idetim_drvs(drvp);
		if (drive == 0) {
			idetim |= piix_setup_idetim_timings(
			    drvp->PIO_mode, 0, channel);
		} else {
			sidetim |= piix_setup_sidetim_timings(
				drvp->PIO_mode, 0, channel);
			idetim =PIIX_IDETIM_SET(idetim,
			    PIIX_IDETIM_SITRE, channel);
		}
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
		    idedma_ctl);
	}
	/* Commit the new timing configuration to the chip. */
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
	pciide_print_modes(cp);
}
1688
1689
1690 /* setup ISP and RTC fields, based on mode */
1691 static u_int32_t
1692 piix_setup_idetim_timings(mode, dma, channel)
1693 u_int8_t mode;
1694 u_int8_t dma;
1695 u_int8_t channel;
1696 {
1697
1698 if (dma)
1699 return PIIX_IDETIM_SET(0,
1700 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1701 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1702 channel);
1703 else
1704 return PIIX_IDETIM_SET(0,
1705 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1706 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1707 channel);
1708 }
1709
1710 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1711 static u_int32_t
1712 piix_setup_idetim_drvs(drvp)
1713 struct ata_drive_datas *drvp;
1714 {
1715 u_int32_t ret = 0;
1716 struct channel_softc *chp = drvp->chnl_softc;
1717 u_int8_t channel = chp->channel;
1718 u_int8_t drive = drvp->drive;
1719
1720 /*
1721 * If drive is using UDMA, timings setups are independant
1722 * So just check DMA and PIO here.
1723 */
1724 if (drvp->drive_flags & DRIVE_DMA) {
1725 /* if mode = DMA mode 0, use compatible timings */
1726 if ((drvp->drive_flags & DRIVE_DMA) &&
1727 drvp->DMA_mode == 0) {
1728 drvp->PIO_mode = 0;
1729 return ret;
1730 }
1731 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1732 /*
1733 * PIO and DMA timings are the same, use fast timings for PIO
1734 * too, else use compat timings.
1735 */
1736 if ((piix_isp_pio[drvp->PIO_mode] !=
1737 piix_isp_dma[drvp->DMA_mode]) ||
1738 (piix_rtc_pio[drvp->PIO_mode] !=
1739 piix_rtc_dma[drvp->DMA_mode]))
1740 drvp->PIO_mode = 0;
1741 /* if PIO mode <= 2, use compat timings for PIO */
1742 if (drvp->PIO_mode <= 2) {
1743 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1744 channel);
1745 return ret;
1746 }
1747 }
1748
1749 /*
1750 * Now setup PIO modes. If mode < 2, use compat timings.
1751 * Else enable fast timings. Enable IORDY and prefetch/post
1752 * if PIO mode >= 3.
1753 */
1754
1755 if (drvp->PIO_mode < 2)
1756 return ret;
1757
1758 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1759 if (drvp->PIO_mode >= 3) {
1760 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1761 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1762 }
1763 return ret;
1764 }
1765
1766 /* setup values in SIDETIM registers, based on mode */
1767 static u_int32_t
1768 piix_setup_sidetim_timings(mode, dma, channel)
1769 u_int8_t mode;
1770 u_int8_t dma;
1771 u_int8_t channel;
1772 {
1773 if (dma)
1774 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1775 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1776 else
1777 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1778 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1779 }
1780
/*
 * chip_map for the AMD-756 controller: advertise DMA/UDMA support,
 * map the channels honoring the chip's channel-enable register, and
 * program the timings for each attached channel.
 */
void
amd756_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;
	pcireg_t chanenable;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 4;
	sc->sc_wdcdev.set_modes = amd756_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	/* Per-channel enable bits live in AMD756_CHANSTATUS_EN. */
	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);

	WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
	    DEBUG_PROBE);
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		/* Skip channels disabled in hardware. */
		if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);

		/* Disable the channel if it has no drives. */
		if (pciide_chan_candisable(cp))
			chanenable &= ~AMD756_CHAN_EN(channel);
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;

		amd756_setup_channel(&cp->wdc_channel);
	}
	/* Write back the (possibly updated) channel-enable bits. */
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
	    chanenable);
	return;
}
1840
/*
 * set_modes routine for the AMD-756: compute and program the data
 * timing (DATATIM) and Ultra-DMA (UDMA) registers for both drives on
 * a channel.  Multi-word DMA is disabled on some chip revisions
 * unless PCIIDE_AMD756_ENABLEDMA is defined.
 */
void
amd756_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
#ifndef PCIIDE_AMD756_ENABLEDMA
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
#endif

	idedma_ctl = 0;
	/* Clear this channel's fields in the timing registers. */
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
	datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
			mode = drvp->PIO_mode;
			goto pio;
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
			    AMD756_UDMA_EN_MTH(chp->channel, drive) |
			    AMD756_UDMA_TIME(chp->channel, drive,
				amd756_udma_tim[drvp->UDMA_mode]);
			/* can use PIO timings, MW DMA unused */
			mode = drvp->PIO_mode;
		} else {
			/* use Multiword DMA, but only if revision is OK */
			drvp->drive_flags &= ~DRIVE_UDMA;
#ifndef PCIIDE_AMD756_ENABLEDMA
			/*
			 * The workaround doesn't seem to be necessary
			 * with all drives, so it can be disabled by
			 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
			 * triggered.
			 */
			if (AMD756_CHIPREV_DISABLEDMA(rev)) {
				printf("%s:%d:%d: multi-word DMA disabled due "
				    "to chip revision\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    chp->channel, drive);
				mode = drvp->PIO_mode;
				drvp->drive_flags &= ~DRIVE_DMA;
				goto pio;
			}
#endif
			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* setup PIO mode */
		/* Modes below 3 all use the compatible (mode 0) timings. */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		datatim_reg |=
		    AMD756_DATATIM_PULSE(chp->channel, drive,
			amd756_pio_set[mode]) |
		    AMD756_DATATIM_RECOV(chp->channel, drive,
			amd756_pio_rec[mode]);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
	/* Commit the computed timings to the chip. */
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
}
1939
1940 void
1941 apollo_chip_map(sc, pa)
1942 struct pciide_softc *sc;
1943 struct pci_attach_args *pa;
1944 {
1945 struct pciide_channel *cp;
1946 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1947 int rev = PCI_REVISION(pa->pa_class);
1948 int channel;
1949 u_int32_t ideconf, udma_conf, old_udma_conf;
1950 bus_size_t cmdsize, ctlsize;
1951
1952 if (pciide_chipen(sc, pa) == 0)
1953 return;
1954 printf("%s: bus-master DMA support present",
1955 sc->sc_wdcdev.sc_dev.dv_xname);
1956 pciide_mapreg_dma(sc, pa);
1957 printf("\n");
1958 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1959 WDC_CAPABILITY_MODE;
1960 if (sc->sc_dma_ok) {
1961 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1962 sc->sc_wdcdev.irqack = pciide_irqack;
1963 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE
1964 && rev >= 6)
1965 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1966 }
1967 sc->sc_wdcdev.PIO_cap = 4;
1968 sc->sc_wdcdev.DMA_cap = 2;
1969 sc->sc_wdcdev.UDMA_cap = 2;
1970 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1971 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1972 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1973
1974 old_udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1975 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1976 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1977 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1978 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1979 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1980 old_udma_conf),
1981 DEBUG_PROBE);
1982 pci_conf_write(sc->sc_pc, sc->sc_tag,
1983 old_udma_conf | (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1984 APO_UDMA_EN_MTH(0, 0) | APO_UDMA_CLK66(0)),
1985 APO_UDMA);
1986 udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1987 WDCDEBUG_PRINT(("apollo_chip_map: APO_UDMA now 0x%x\n", udma_conf),
1988 DEBUG_PROBE);
1989 if ((udma_conf & (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1990 APO_UDMA_EN_MTH(0, 0))) ==
1991 (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1992 APO_UDMA_EN_MTH(0, 0))) {
1993 if ((udma_conf & APO_UDMA_CLK66(0)) ==
1994 APO_UDMA_CLK66(0)) {
1995 printf("%s: Ultra/66 capable\n",
1996 sc->sc_wdcdev.sc_dev.dv_xname);
1997 sc->sc_wdcdev.UDMA_cap = 4;
1998 } else {
1999 printf("%s: Ultra/33 capable\n",
2000 sc->sc_wdcdev.sc_dev.dv_xname);
2001 sc->sc_wdcdev.UDMA_cap = 2;
2002 }
2003 } else {
2004 sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_UDMA;
2005 }
2006 pci_conf_write(sc->sc_pc, sc->sc_tag, old_udma_conf, APO_UDMA);
2007
2008 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2009 cp = &sc->pciide_channels[channel];
2010 if (pciide_chansetup(sc, channel, interface) == 0)
2011 continue;
2012
2013 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2014 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2015 printf("%s: %s channel ignored (disabled)\n",
2016 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2017 continue;
2018 }
2019 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2020 pciide_pci_intr);
2021 if (cp->hw_ok == 0)
2022 continue;
2023 if (pciide_chan_candisable(cp)) {
2024 ideconf &= ~APO_IDECONF_EN(channel);
2025 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2026 ideconf);
2027 }
2028 pciide_map_compat_intr(pa, cp, channel, interface);
2029
2030 if (cp->hw_ok == 0)
2031 continue;
2032 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2033 }
2034 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2035 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2036 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2037 }
2038
2039 void
2040 apollo_setup_channel(chp)
2041 struct channel_softc *chp;
2042 {
2043 u_int32_t udmatim_reg, datatim_reg;
2044 u_int8_t idedma_ctl;
2045 int mode, drive;
2046 struct ata_drive_datas *drvp;
2047 struct pciide_channel *cp = (struct pciide_channel*)chp;
2048 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2049
2050 idedma_ctl = 0;
2051 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2052 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2053 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2054 udmatim_reg &= ~AP0_UDMA_MASK(chp->channel);
2055
2056 /* setup DMA if needed */
2057 pciide_channel_dma_setup(cp);
2058
2059 /*
2060 * We can't mix Ultra/33 and Ultra/66 on the same channel, so
2061 * downgrade to Ultra/33 if needed
2062 */
2063 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2064 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
2065 /* both drives UDMA */
2066 if (chp->ch_drive[0].UDMA_mode > 2 &&
2067 chp->ch_drive[1].UDMA_mode <= 2) {
2068 /* drive 0 Ultra/66, drive 1 Ultra/33 */
2069 chp->ch_drive[0].UDMA_mode = 2;
2070 } else if (chp->ch_drive[1].UDMA_mode > 2 &&
2071 chp->ch_drive[0].UDMA_mode <= 2) {
2072 /* drive 1 Ultra/66, drive 0 Ultra/33 */
2073 chp->ch_drive[1].UDMA_mode = 2;
2074 }
2075 }
2076
2077 for (drive = 0; drive < 2; drive++) {
2078 drvp = &chp->ch_drive[drive];
2079 /* If no drive, skip */
2080 if ((drvp->drive_flags & DRIVE) == 0)
2081 continue;
2082 /* add timing values, setup DMA if needed */
2083 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2084 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2085 mode = drvp->PIO_mode;
2086 goto pio;
2087 }
2088 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2089 (drvp->drive_flags & DRIVE_UDMA)) {
2090 /* use Ultra/DMA */
2091 drvp->drive_flags &= ~DRIVE_DMA;
2092 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2093 APO_UDMA_EN_MTH(chp->channel, drive) |
2094 APO_UDMA_TIME(chp->channel, drive,
2095 apollo_udma_tim[drvp->UDMA_mode]);
2096 if (drvp->UDMA_mode > 2)
2097 udmatim_reg |=
2098 APO_UDMA_CLK66(chp->channel);
2099 /* can use PIO timings, MW DMA unused */
2100 mode = drvp->PIO_mode;
2101 } else {
2102 /* use Multiword DMA */
2103 drvp->drive_flags &= ~DRIVE_UDMA;
2104 /* mode = min(pio, dma+2) */
2105 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2106 mode = drvp->PIO_mode;
2107 else
2108 mode = drvp->DMA_mode + 2;
2109 }
2110 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2111
2112 pio: /* setup PIO mode */
2113 if (mode <= 2) {
2114 drvp->DMA_mode = 0;
2115 drvp->PIO_mode = 0;
2116 mode = 0;
2117 } else {
2118 drvp->PIO_mode = mode;
2119 drvp->DMA_mode = mode - 2;
2120 }
2121 datatim_reg |=
2122 APO_DATATIM_PULSE(chp->channel, drive,
2123 apollo_pio_set[mode]) |
2124 APO_DATATIM_RECOV(chp->channel, drive,
2125 apollo_pio_rec[mode]);
2126 }
2127 if (idedma_ctl != 0) {
2128 /* Add software bits in status register */
2129 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2130 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2131 idedma_ctl);
2132 }
2133 pciide_print_modes(cp);
2134 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2135 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2136 }
2137
2138 void
2139 cmd_channel_map(pa, sc, channel)
2140 struct pci_attach_args *pa;
2141 struct pciide_softc *sc;
2142 int channel;
2143 {
2144 struct pciide_channel *cp = &sc->pciide_channels[channel];
2145 bus_size_t cmdsize, ctlsize;
2146 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2147 int interface;
2148
2149 /*
2150 * The 0648/0649 can be told to identify as a RAID controller.
2151 * In this case, we have to fake interface
2152 */
2153 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2154 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2155 PCIIDE_INTERFACE_SETTABLE(1);
2156 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2157 CMD_CONF_DSA1)
2158 interface |= PCIIDE_INTERFACE_PCI(0) |
2159 PCIIDE_INTERFACE_PCI(1);
2160 } else {
2161 interface = PCI_INTERFACE(pa->pa_class);
2162 }
2163
2164 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2165 cp->name = PCIIDE_CHANNEL_NAME(channel);
2166 cp->wdc_channel.channel = channel;
2167 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2168
2169 if (channel > 0) {
2170 cp->wdc_channel.ch_queue =
2171 sc->pciide_channels[0].wdc_channel.ch_queue;
2172 } else {
2173 cp->wdc_channel.ch_queue =
2174 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2175 }
2176 if (cp->wdc_channel.ch_queue == NULL) {
2177 printf("%s %s channel: "
2178 "can't allocate memory for command queue",
2179 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2180 return;
2181 }
2182
2183 printf("%s: %s channel %s to %s mode\n",
2184 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2185 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2186 "configured" : "wired",
2187 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2188 "native-PCI" : "compatibility");
2189
2190 /*
2191 * with a CMD PCI64x, if we get here, the first channel is enabled:
2192 * there's no way to disable the first channel without disabling
2193 * the whole device
2194 */
2195 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2196 printf("%s: %s channel ignored (disabled)\n",
2197 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2198 return;
2199 }
2200
2201 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2202 if (cp->hw_ok == 0)
2203 return;
2204 if (channel == 1) {
2205 if (pciide_chan_candisable(cp)) {
2206 ctrl &= ~CMD_CTRL_2PORT;
2207 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2208 CMD_CTRL, ctrl);
2209 }
2210 }
2211 pciide_map_compat_intr(pa, cp, channel, interface);
2212 }
2213
2214 int
2215 cmd_pci_intr(arg)
2216 void *arg;
2217 {
2218 struct pciide_softc *sc = arg;
2219 struct pciide_channel *cp;
2220 struct channel_softc *wdc_cp;
2221 int i, rv, crv;
2222 u_int32_t priirq, secirq;
2223
2224 rv = 0;
2225 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2226 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2227 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2228 cp = &sc->pciide_channels[i];
2229 wdc_cp = &cp->wdc_channel;
2230 /* If a compat channel skip. */
2231 if (cp->compat)
2232 continue;
2233 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2234 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2235 crv = wdcintr(wdc_cp);
2236 if (crv == 0)
2237 printf("%s:%d: bogus intr\n",
2238 sc->sc_wdcdev.sc_dev.dv_xname, i);
2239 else
2240 rv = 1;
2241 }
2242 }
2243 return rv;
2244 }
2245
2246 void
2247 cmd_chip_map(sc, pa)
2248 struct pciide_softc *sc;
2249 struct pci_attach_args *pa;
2250 {
2251 int channel;
2252
2253 /*
2254 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2255 * and base adresses registers can be disabled at
2256 * hardware level. In this case, the device is wired
2257 * in compat mode and its first channel is always enabled,
2258 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2259 * In fact, it seems that the first channel of the CMD PCI0640
2260 * can't be disabled.
2261 */
2262
2263 #ifdef PCIIDE_CMD064x_DISABLE
2264 if (pciide_chipen(sc, pa) == 0)
2265 return;
2266 #endif
2267
2268 printf("%s: hardware does not support DMA\n",
2269 sc->sc_wdcdev.sc_dev.dv_xname);
2270 sc->sc_dma_ok = 0;
2271
2272 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2273 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2274 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2275
2276 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2277 cmd_channel_map(pa, sc, channel);
2278 }
2279 }
2280
/*
 * Attach-time setup for CMD 0643/0646/0648/0649 bus-master IDE
 * controllers.  Capabilities depend on the product, and for the 0646
 * on the chip revision.
 */
void
cmd0643_9_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and base addresses registers can be disabled at
	 * hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_CMDTECH_649:
		case PCI_PRODUCT_CMDTECH_648:
			/* 0648/0649 always support UDMA, up to mode 4 */
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			sc->sc_wdcdev.UDMA_cap = 4;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_646:
			/* UDMA support on the 0646 depends on its revision */
			if (rev >= CMD0646U2_REV) {
				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
				sc->sc_wdcdev.UDMA_cap = 2;
			} else if (rev >= CMD0646U_REV) {
			/*
			 * Linux's driver claims that the 646U is broken
			 * with UDMA. Only enable it if we know what we're
			 * doing
			 */
#ifdef PCIIDE_CMD0646U_ENABLEUDMA
				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
				sc->sc_wdcdev.UDMA_cap = 2;
#endif
				/* explicitly disable UDMA */
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(0), 0);
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(1), 0);
			}
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		default:
			/* plain 0643: DMA but no UDMA */
			sc->sc_wdcdev.irqack = pciide_irqack;
		}
	}

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;

	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
		DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		cmd_channel_map(pa, sc, channel);
		if (cp->hw_ok == 0)
			continue;
		cmd0643_9_setup_channel(&cp->wdc_channel);
	}
	/*
	 * note - this also makes sure we clear the irq disable and reset
	 * bits
	 */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
	    DEBUG_PROBE);
}
2375
/*
 * Per-channel mode setup for CMD 0643/6/8/9: program the shared
 * PIO/DMA data timing register for each drive and, where supported,
 * the Ultra-DMA timing/enable bits.
 */
void
cmd0643_9_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int8_t tim;
	u_int32_t idedma_ctl, udma_reg;
	int drive;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
			if (drvp->drive_flags & DRIVE_UDMA) {
				/* UltraDMA on a 646U2, 0648 or 0649 */
				drvp->drive_flags &= ~DRIVE_DMA;
				udma_reg = pciide_pci_read(sc->sc_pc,
				    sc->sc_tag, CMD_UDMATIM(chp->channel));
				/*
				 * modes > 2 need an 80-wire cable; fall
				 * back to mode 2 if the chip doesn't see
				 * one on this channel
				 */
				if (drvp->UDMA_mode > 2 &&
				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
				    CMD_BICSR) &
				    CMD_BICSR_80(chp->channel)) == 0)
					drvp->UDMA_mode = 2;
				if (drvp->UDMA_mode > 2)
					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
				else if (sc->sc_wdcdev.UDMA_cap > 2)
					udma_reg |= CMD_UDMATIM_UDMA33(drive);
				udma_reg |= CMD_UDMATIM_UDMA(drive);
				/* replace this drive's timing field */
				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
				    CMD_UDMATIM_TIM_OFF(drive));
				udma_reg |=
				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
				    CMD_UDMATIM_TIM_OFF(drive));
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(chp->channel), udma_reg);
			} else {
				/*
				 * use Multiword DMA.
				 * Timings will be used for both PIO and DMA,
				 * so adjust DMA mode if needed
				 * if we have a 0646U2/8/9, turn off UDMA
				 */
				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
					udma_reg = pciide_pci_read(sc->sc_pc,
					    sc->sc_tag,
					    CMD_UDMATIM(chp->channel));
					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
					pciide_pci_write(sc->sc_pc, sc->sc_tag,
					    CMD_UDMATIM(chp->channel),
					    udma_reg);
				}
				if (drvp->PIO_mode >= 3 &&
				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
					drvp->DMA_mode = drvp->PIO_mode - 2;
				}
				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		/* program the (possibly DMA-derived) data timings */
		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    CMD_DATA_TIM(chp->channel, drive), tim);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
}
2456
2457 void
2458 cmd646_9_irqack(chp)
2459 struct channel_softc *chp;
2460 {
2461 u_int32_t priirq, secirq;
2462 struct pciide_channel *cp = (struct pciide_channel*)chp;
2463 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2464
2465 if (chp->channel == 0) {
2466 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2467 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2468 } else {
2469 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2470 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2471 }
2472 pciide_irqack(chp);
2473 }
2474
2475 void
2476 cy693_chip_map(sc, pa)
2477 struct pciide_softc *sc;
2478 struct pci_attach_args *pa;
2479 {
2480 struct pciide_channel *cp;
2481 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2482 bus_size_t cmdsize, ctlsize;
2483
2484 if (pciide_chipen(sc, pa) == 0)
2485 return;
2486 /*
2487 * this chip has 2 PCI IDE functions, one for primary and one for
2488 * secondary. So we need to call pciide_mapregs_compat() with
2489 * the real channel
2490 */
2491 if (pa->pa_function == 1) {
2492 sc->sc_cy_compatchan = 0;
2493 } else if (pa->pa_function == 2) {
2494 sc->sc_cy_compatchan = 1;
2495 } else {
2496 printf("%s: unexpected PCI function %d\n",
2497 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2498 return;
2499 }
2500 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2501 printf("%s: bus-master DMA support present",
2502 sc->sc_wdcdev.sc_dev.dv_xname);
2503 pciide_mapreg_dma(sc, pa);
2504 } else {
2505 printf("%s: hardware does not support DMA",
2506 sc->sc_wdcdev.sc_dev.dv_xname);
2507 sc->sc_dma_ok = 0;
2508 }
2509 printf("\n");
2510
2511 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2512 if (sc->sc_cy_handle == NULL) {
2513 printf("%s: unable to map hyperCache control registers\n",
2514 sc->sc_wdcdev.sc_dev.dv_xname);
2515 sc->sc_dma_ok = 0;
2516 }
2517
2518 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2519 WDC_CAPABILITY_MODE;
2520 if (sc->sc_dma_ok) {
2521 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2522 sc->sc_wdcdev.irqack = pciide_irqack;
2523 }
2524 sc->sc_wdcdev.PIO_cap = 4;
2525 sc->sc_wdcdev.DMA_cap = 2;
2526 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2527
2528 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2529 sc->sc_wdcdev.nchannels = 1;
2530
2531 /* Only one channel for this chip; if we are here it's enabled */
2532 cp = &sc->pciide_channels[0];
2533 sc->wdc_chanarray[0] = &cp->wdc_channel;
2534 cp->name = PCIIDE_CHANNEL_NAME(0);
2535 cp->wdc_channel.channel = 0;
2536 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2537 cp->wdc_channel.ch_queue =
2538 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2539 if (cp->wdc_channel.ch_queue == NULL) {
2540 printf("%s primary channel: "
2541 "can't allocate memory for command queue",
2542 sc->sc_wdcdev.sc_dev.dv_xname);
2543 return;
2544 }
2545 printf("%s: primary channel %s to ",
2546 sc->sc_wdcdev.sc_dev.dv_xname,
2547 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2548 "configured" : "wired");
2549 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2550 printf("native-PCI");
2551 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2552 pciide_pci_intr);
2553 } else {
2554 printf("compatibility");
2555 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2556 &cmdsize, &ctlsize);
2557 }
2558 printf(" mode\n");
2559 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2560 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2561 wdcattach(&cp->wdc_channel);
2562 if (pciide_chan_candisable(cp)) {
2563 pci_conf_write(sc->sc_pc, sc->sc_tag,
2564 PCI_COMMAND_STATUS_REG, 0);
2565 }
2566 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2567 if (cp->hw_ok == 0)
2568 return;
2569 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2570 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2571 cy693_setup_channel(&cp->wdc_channel);
2572 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2573 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2574 }
2575
2576 void
2577 cy693_setup_channel(chp)
2578 struct channel_softc *chp;
2579 {
2580 struct ata_drive_datas *drvp;
2581 int drive;
2582 u_int32_t cy_cmd_ctrl;
2583 u_int32_t idedma_ctl;
2584 struct pciide_channel *cp = (struct pciide_channel*)chp;
2585 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2586 int dma_mode = -1;
2587
2588 cy_cmd_ctrl = idedma_ctl = 0;
2589
2590 /* setup DMA if needed */
2591 pciide_channel_dma_setup(cp);
2592
2593 for (drive = 0; drive < 2; drive++) {
2594 drvp = &chp->ch_drive[drive];
2595 /* If no drive, skip */
2596 if ((drvp->drive_flags & DRIVE) == 0)
2597 continue;
2598 /* add timing values, setup DMA if needed */
2599 if (drvp->drive_flags & DRIVE_DMA) {
2600 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2601 /* use Multiword DMA */
2602 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2603 dma_mode = drvp->DMA_mode;
2604 }
2605 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2606 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2607 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2608 CY_CMD_CTRL_IOW_REC_OFF(drive));
2609 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2610 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2611 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2612 CY_CMD_CTRL_IOR_REC_OFF(drive));
2613 }
2614 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2615 chp->ch_drive[0].DMA_mode = dma_mode;
2616 chp->ch_drive[1].DMA_mode = dma_mode;
2617
2618 if (dma_mode == -1)
2619 dma_mode = 0;
2620
2621 if (sc->sc_cy_handle != NULL) {
2622 /* Note: `multiple' is implied. */
2623 cy82c693_write(sc->sc_cy_handle,
2624 (sc->sc_cy_compatchan == 0) ?
2625 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2626 }
2627
2628 pciide_print_modes(cp);
2629
2630 if (idedma_ctl != 0) {
2631 /* Add software bits in status register */
2632 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2633 IDEDMA_CTL, idedma_ctl);
2634 }
2635 }
2636
/*
 * Attach-time setup for SiS 5597/5598 IDE controllers: determine
 * capabilities from the chip revision, select the faster timing mode,
 * then map and configure each channel.
 */
void
sis_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* revisions 0xd0 and later support Ultra-DMA */
		if (rev >= 0xd0)
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
		sc->sc_wdcdev.UDMA_cap = 2;
	sc->sc_wdcdev.set_modes = sis_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* select the fast timing set and the larger FIFO */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		/* skip channels turned off in the chip's control register */
		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		/* if nothing attached, disable the channel in hardware */
		if (pciide_chan_candisable(cp)) {
			if (channel == 0)
				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
			else
				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
			    sis_ctr0);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		sis_setup_channel(&cp->wdc_channel);
	}
}
2705
2706 void
2707 sis_setup_channel(chp)
2708 struct channel_softc *chp;
2709 {
2710 struct ata_drive_datas *drvp;
2711 int drive;
2712 u_int32_t sis_tim;
2713 u_int32_t idedma_ctl;
2714 struct pciide_channel *cp = (struct pciide_channel*)chp;
2715 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2716
2717 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2718 "channel %d 0x%x\n", chp->channel,
2719 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2720 DEBUG_PROBE);
2721 sis_tim = 0;
2722 idedma_ctl = 0;
2723 /* setup DMA if needed */
2724 pciide_channel_dma_setup(cp);
2725
2726 for (drive = 0; drive < 2; drive++) {
2727 drvp = &chp->ch_drive[drive];
2728 /* If no drive, skip */
2729 if ((drvp->drive_flags & DRIVE) == 0)
2730 continue;
2731 /* add timing values, setup DMA if needed */
2732 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2733 (drvp->drive_flags & DRIVE_UDMA) == 0)
2734 goto pio;
2735
2736 if (drvp->drive_flags & DRIVE_UDMA) {
2737 /* use Ultra/DMA */
2738 drvp->drive_flags &= ~DRIVE_DMA;
2739 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2740 SIS_TIM_UDMA_TIME_OFF(drive);
2741 sis_tim |= SIS_TIM_UDMA_EN(drive);
2742 } else {
2743 /*
2744 * use Multiword DMA
2745 * Timings will be used for both PIO and DMA,
2746 * so adjust DMA mode if needed
2747 */
2748 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2749 drvp->PIO_mode = drvp->DMA_mode + 2;
2750 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2751 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2752 drvp->PIO_mode - 2 : 0;
2753 if (drvp->DMA_mode == 0)
2754 drvp->PIO_mode = 0;
2755 }
2756 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2757 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2758 SIS_TIM_ACT_OFF(drive);
2759 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2760 SIS_TIM_REC_OFF(drive);
2761 }
2762 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2763 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2764 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2765 if (idedma_ctl != 0) {
2766 /* Add software bits in status register */
2767 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2768 IDEDMA_CTL, idedma_ctl);
2769 }
2770 pciide_print_modes(cp);
2771 }
2772
/*
 * Attach-time setup for Acer Labs (ALi) M5229 IDE controllers: enable
 * DMA/FIFO, unlock the timing registers, then map and configure each
 * channel.
 */
void
acer_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t cr, interface;
	bus_size_t cmdsize, ctlsize;
	pcireg_t rev = PCI_REVISION(pa->pa_class);

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
		/* revisions 0x20 and later support Ultra-DMA */
		if (rev >= 0x20)
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 2;
	sc->sc_wdcdev.set_modes = acer_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* enable CD-ROM DMA and make sure the FIFO is not disabled */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
	    ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

	/* Enable "microsoft register bits" R/W. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
	    ~ACER_CHANSTATUSREGS_RO);
	/* expose the per-channel enable bits in the interface byte */
	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
	/* Don't use cr, re-read the real register content instead */
	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_CLASS_REG));

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    acer_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		/* if nothing attached, disable the channel in hardware */
		if (pciide_chan_candisable(cp)) {
			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PCI_CLASS_REG, cr);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		/*
		 * NOTE(review): unlike the other chip_map routines, hw_ok
		 * is not re-checked after pciide_map_compat_intr() before
		 * calling acer_setup_channel() -- confirm this is intended.
		 */
		acer_setup_channel(&cp->wdc_channel);
	}
}
2849
/*
 * Program PIO/DMA/UDMA timings for both drives of an Acer (ALi)
 * channel: per-drive FIFO threshold and UDMA bits in ACER_FTH_UDMA,
 * per-drive PIO timings in ACER_IDETIM.
 */
void
acer_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t acer_fifo_udma;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
		    "channel %d drive %d 0x%x\n", chp->channel, drive,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
		/* clear FIFO/DMA mode */
		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
		    ACER_UDMA_EN(chp->channel, drive) |
		    ACER_UDMA_TIM(chp->channel, drive, 0x7));

		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
			/* PIO only: low FIFO threshold, skip DMA setup */
			acer_fifo_udma |=
			    ACER_FTH_OPL(chp->channel, drive, 0x1);
			goto pio;
		}

		/* DMA-capable drive: use the larger FIFO threshold */
		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
			acer_fifo_udma |=
			    ACER_UDMA_TIM(chp->channel, drive,
				acer_udma[drvp->UDMA_mode]);
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		/* PIO timings are programmed in every case */
pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->channel, drive),
		    acer_pio[drvp->PIO_mode]);
	}
	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}
2927
2928 int
2929 acer_pci_intr(arg)
2930 void *arg;
2931 {
2932 struct pciide_softc *sc = arg;
2933 struct pciide_channel *cp;
2934 struct channel_softc *wdc_cp;
2935 int i, rv, crv;
2936 u_int32_t chids;
2937
2938 rv = 0;
2939 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2940 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2941 cp = &sc->pciide_channels[i];
2942 wdc_cp = &cp->wdc_channel;
2943 /* If a compat channel skip. */
2944 if (cp->compat)
2945 continue;
2946 if (chids & ACER_CHIDS_INT(i)) {
2947 crv = wdcintr(wdc_cp);
2948 if (crv == 0)
2949 printf("%s:%d: bogus intr\n",
2950 sc->sc_wdcdev.sc_dev.dv_xname, i);
2951 else
2952 rv = 1;
2953 }
2954 }
2955 return rv;
2956 }
2957
/*
 * Attach glue for the HighPoint HPT366/HPT370 controllers: map the
 * bus-master DMA registers, advertise the controller's capabilities,
 * and map/attach every enabled channel.
 */
void
hpt_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int i, compatchan, revision;
	pcireg_t interface;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	/* Revision distinguishes the HPT366 from the HPT370. */
	revision = PCI_REVISION(pa->pa_class);

	/*
	 * when the chip is in native mode it identifies itself as a
	 * 'misc mass storage'. Fake interface in this case.
	 */
	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCI_INTERFACE(pa->pa_class);
	} else {
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0);
		/* The HPT370 carries both channels on one function. */
		if (revision == HPT370_REV)
			interface |= PCIIDE_INTERFACE_PCI(1);
	}

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;

	sc->sc_wdcdev.set_modes = hpt_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	if (revision == HPT366_REV) {
		sc->sc_wdcdev.UDMA_cap = 4;
		/*
		 * The 366 has 2 PCI IDE functions, one for primary and one
		 * for secondary. So we need to call pciide_mapregs_compat()
		 * with the real channel
		 */
		if (pa->pa_function == 0) {
			compatchan = 0;
		} else if (pa->pa_function == 1) {
			compatchan = 1;
		} else {
			printf("%s: unexpected PCI function %d\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
			return;
		}
		sc->sc_wdcdev.nchannels = 1;
	} else {
		/* HPT370: both channels on this function, UDMA up to 5. */
		sc->sc_wdcdev.nchannels = 2;
		sc->sc_wdcdev.UDMA_cap = 5;
	}
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		if (sc->sc_wdcdev.nchannels > 1) {
			/* HPT370 path: channel may be disabled in CTRL1. */
			compatchan = i;
			if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
			   HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
				printf("%s: %s channel ignored (disabled)\n",
				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
				continue;
			}
		}
		if (pciide_chansetup(sc, i, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(i)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, hpt_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
			    &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			return;
		/* 32-bit data transfers go through the command registers. */
		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
		wdcattach(&cp->wdc_channel);
		hpt_setup_channel(&cp->wdc_channel);
	}
	if (revision == HPT370_REV) {
		/*
		 * HPT370_REV has a bit to disable interrupts, make sure
		 * to clear it
		 */
		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
		    ~HPT_CSEL_IRQDIS);
	}
	return;
}
3060
/*
 * Per-channel timing setup for the HPT366/370: pick Ultra-DMA,
 * multiword DMA or PIO timings for each drive and write them to the
 * HPT_IDETIM config registers.  The HPT370/366 distinction is made
 * via sc_wdcdev.nchannels (2 on the 370, 1 on the 366).
 */
void
hpt_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	int cable;
	u_int32_t before, after;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/* Cable-select register: per-channel CBLID bits tested below. */
	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* Remember the pre-existing (BIOS) value for the trace. */
		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    HPT_IDETIM(chp->channel, drive));

		/* add timing values, setup DMA if needed */
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			/*
			 * Clamp to UDMA2 when the CBLID bit for this channel
			 * is set — presumably a 40-conductor cable, which
			 * can't carry the faster modes; confirm vs datasheet.
			 */
			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
			    drvp->UDMA_mode > 2)
				drvp->UDMA_mode = 2;
			after = (sc->sc_wdcdev.nchannels == 2) ?
			    hpt370_udma[drvp->UDMA_mode] :
			    hpt366_udma[drvp->UDMA_mode];
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & DRIVE_DMA) {
			/*
			 * use Multiword DMA.
			 * Timings will be used for both PIO and DMA, so adjust
			 * DMA mode if needed
			 */
			if (drvp->PIO_mode >= 3 &&
			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
				drvp->DMA_mode = drvp->PIO_mode - 2;
			}
			after = (sc->sc_wdcdev.nchannels == 2) ?
			    hpt370_dma[drvp->DMA_mode] :
			    hpt366_dma[drvp->DMA_mode];
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			after = (sc->sc_wdcdev.nchannels == 2) ?
			    hpt370_pio[drvp->PIO_mode] :
			    hpt366_pio[drvp->PIO_mode];
		}
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    HPT_IDETIM(chp->channel, drive), after);
		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
		    "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
		    after, before), DEBUG_PROBE);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}
3133
3134 int
3135 hpt_pci_intr(arg)
3136 void *arg;
3137 {
3138 struct pciide_softc *sc = arg;
3139 struct pciide_channel *cp;
3140 struct channel_softc *wdc_cp;
3141 int rv = 0;
3142 int dmastat, i, crv;
3143
3144 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3145 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3146 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3147 if((dmastat & IDEDMA_CTL_INTR) == 0)
3148 continue;
3149 cp = &sc->pciide_channels[i];
3150 wdc_cp = &cp->wdc_channel;
3151 crv = wdcintr(wdc_cp);
3152 if (crv == 0) {
3153 printf("%s:%d: bogus intr\n",
3154 sc->sc_wdcdev.sc_dev.dv_xname, i);
3155 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3156 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3157 } else
3158 rv = 1;
3159 }
3160 return rv;
3161 }
3162
3163
/*
 * Macros to test product: the Promise 202xx parts share one
 * programming model but differ in capabilities, so the attach and
 * setup code keys off these product-id tests.
 */
/* PDC_IS_262: Ultra/66 and newer (UDMA > 2 capable parts). */
#define PDC_IS_262(sc) \
	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
/* PDC_IS_265: the Ultra/100 family only (UDMA_cap set to 5 below). */
#define PDC_IS_265(sc) \
	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3172
/*
 * Attach glue for the Promise PDC202xx family: read the controller
 * state register, force IDE (non-RAID) operation, program failsafe
 * timings and the system control register, then map and set up each
 * enabled channel.
 */
void
pdc202xx_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface, st, mode;
	bus_size_t cmdsize, ctlsize;

	st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
	WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
	    DEBUG_PROBE);
	if (pciide_chipen(sc, pa) == 0)
		return;

	/* turn off RAID mode */
	st &= ~PDC2xx_STATE_IDERAID;

	/*
	 * can't rely on the PCI_CLASS_REG content if the chip was in raid
	 * mode. We have to fake interface
	 */
	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
	if (st & PDC2xx_STATE_NATIVE)
		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	/* UDMA ceiling depends on the exact product (see PDC_IS_* macros). */
	if (PDC_IS_265(sc))
		sc->sc_wdcdev.UDMA_cap = 5;
	else if (PDC_IS_262(sc))
		sc->sc_wdcdev.UDMA_cap = 4;
	else
		sc->sc_wdcdev.UDMA_cap = 2;
	sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* setup failsafe defaults */
	mode = 0;
	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
		    "initial timings 0x%x, now 0x%x\n", channel,
		    pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
		    DEBUG_PROBE);
		/* Drive 0 additionally gets the IORDYp bit. */
		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
		    mode | PDC2xx_TIM_IORDYp);
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
		    "initial timings 0x%x, now 0x%x\n", channel,
		    pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
		    mode);
	}

	/* Program the system control register, per-product. */
	mode = PDC2xx_SCR_DMA;
	if (PDC_IS_265(sc)) {
		/* the BIOS set it up this way */
		mode = PDC2xx_SCR_SET_GEN(mode, 0x3);
		mode |= 0x80000000;
	} else if (PDC_IS_262(sc)) {
		mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
	} else {
		/* the BIOS set it up this way */
		mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
	}
	mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
	mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
	WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
	    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
	    DEBUG_PROBE);
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);

	/* controller initial state register is OK even without BIOS */
	/* Set DMA mode to IDE DMA compatibility */
	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
	    DEBUG_PROBE);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
	    mode | 0x1);
	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
	    mode | 0x1);

	/* Map and set up each channel that isn't disabled in STATE. */
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((st & (PDC_IS_262(sc) ?
		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* The Ultra/100 family needs its own interrupt handler. */
		if (PDC_IS_265(sc))
			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
			    pdc20265_pci_intr);
		else
			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
			    pdc202xx_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp))
			st &= ~(PDC_IS_262(sc) ?
			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
		pciide_map_compat_intr(pa, cp, channel, interface);
		pdc202xx_setup_channel(&cp->wdc_channel);
	}
	WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
	    DEBUG_PROBE);
	/* Write back the (possibly updated) state register. */
	pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
	return;
}
3304
3305 void
3306 pdc202xx_setup_channel(chp)
3307 struct channel_softc *chp;
3308 {
3309 struct ata_drive_datas *drvp;
3310 int drive;
3311 pcireg_t mode, st;
3312 u_int32_t idedma_ctl, scr, atapi;
3313 struct pciide_channel *cp = (struct pciide_channel*)chp;
3314 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3315 int channel = chp->channel;
3316
3317 /* setup DMA if needed */
3318 pciide_channel_dma_setup(cp);
3319
3320 idedma_ctl = 0;
3321 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3322 sc->sc_wdcdev.sc_dev.dv_xname,
3323 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3324 DEBUG_PROBE);
3325
3326 /* Per channel settings */
3327 if (PDC_IS_262(sc)) {
3328 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3329 PDC262_U66);
3330 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3331 /* Trimm UDMA mode */
3332 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3333 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3334 chp->ch_drive[0].UDMA_mode <= 2) ||
3335 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3336 chp->ch_drive[1].UDMA_mode <= 2)) {
3337 if (chp->ch_drive[0].UDMA_mode > 2)
3338 chp->ch_drive[0].UDMA_mode = 2;
3339 if (chp->ch_drive[1].UDMA_mode > 2)
3340 chp->ch_drive[1].UDMA_mode = 2;
3341 }
3342 /* Set U66 if needed */
3343 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3344 chp->ch_drive[0].UDMA_mode > 2) ||
3345 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3346 chp->ch_drive[1].UDMA_mode > 2))
3347 scr |= PDC262_U66_EN(channel);
3348 else
3349 scr &= ~PDC262_U66_EN(channel);
3350 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3351 PDC262_U66, scr);
3352 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3353 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3354 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3355 PDC262_ATAPI(channel))), DEBUG_PROBE);
3356 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3357 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3358 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3359 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3360 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3361 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3362 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3363 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3364 atapi = 0;
3365 else
3366 atapi = PDC262_ATAPI_UDMA;
3367 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3368 PDC262_ATAPI(channel), atapi);
3369 }
3370 }
3371 for (drive = 0; drive < 2; drive++) {
3372 drvp = &chp->ch_drive[drive];
3373 /* If no drive, skip */
3374 if ((drvp->drive_flags & DRIVE) == 0)
3375 continue;
3376 mode = 0;
3377 if (drvp->drive_flags & DRIVE_UDMA) {
3378 /* use Ultra/DMA */
3379 drvp->drive_flags &= ~DRIVE_DMA;
3380 mode = PDC2xx_TIM_SET_MB(mode,
3381 pdc2xx_udma_mb[drvp->UDMA_mode]);
3382 mode = PDC2xx_TIM_SET_MC(mode,
3383 pdc2xx_udma_mc[drvp->UDMA_mode]);
3384 drvp->drive_flags &= ~DRIVE_DMA;
3385 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3386 } else if (drvp->drive_flags & DRIVE_DMA) {
3387 mode = PDC2xx_TIM_SET_MB(mode,
3388 pdc2xx_dma_mb[drvp->DMA_mode]);
3389 mode = PDC2xx_TIM_SET_MC(mode,
3390 pdc2xx_dma_mc[drvp->DMA_mode]);
3391 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3392 } else {
3393 mode = PDC2xx_TIM_SET_MB(mode,
3394 pdc2xx_dma_mb[0]);
3395 mode = PDC2xx_TIM_SET_MC(mode,
3396 pdc2xx_dma_mc[0]);
3397 }
3398 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3399 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3400 if (drvp->drive_flags & DRIVE_ATA)
3401 mode |= PDC2xx_TIM_PRE;
3402 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3403 if (drvp->PIO_mode >= 3) {
3404 mode |= PDC2xx_TIM_IORDY;
3405 if (drive == 0)
3406 mode |= PDC2xx_TIM_IORDYp;
3407 }
3408 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3409 "timings 0x%x\n",
3410 sc->sc_wdcdev.sc_dev.dv_xname,
3411 chp->channel, drive, mode), DEBUG_PROBE);
3412 pci_conf_write(sc->sc_pc, sc->sc_tag,
3413 PDC2xx_TIM(chp->channel, drive), mode);
3414 }
3415 if (idedma_ctl != 0) {
3416 /* Add software bits in status register */
3417 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3418 IDEDMA_CTL, idedma_ctl);
3419 }
3420 pciide_print_modes(cp);
3421 }
3422
3423 int
3424 pdc202xx_pci_intr(arg)
3425 void *arg;
3426 {
3427 struct pciide_softc *sc = arg;
3428 struct pciide_channel *cp;
3429 struct channel_softc *wdc_cp;
3430 int i, rv, crv;
3431 u_int32_t scr;
3432
3433 rv = 0;
3434 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3435 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3436 cp = &sc->pciide_channels[i];
3437 wdc_cp = &cp->wdc_channel;
3438 /* If a compat channel skip. */
3439 if (cp->compat)
3440 continue;
3441 if (scr & PDC2xx_SCR_INT(i)) {
3442 crv = wdcintr(wdc_cp);
3443 if (crv == 0)
3444 printf("%s:%d: bogus intr (reg 0x%x)\n",
3445 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3446 else
3447 rv = 1;
3448 }
3449 }
3450 return rv;
3451 }
3452
3453 int
3454 pdc20265_pci_intr(arg)
3455 void *arg;
3456 {
3457 struct pciide_softc *sc = arg;
3458 struct pciide_channel *cp;
3459 struct channel_softc *wdc_cp;
3460 int i, rv, crv;
3461 u_int32_t dmastat;
3462
3463 rv = 0;
3464 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3465 cp = &sc->pciide_channels[i];
3466 wdc_cp = &cp->wdc_channel;
3467 /* If a compat channel skip. */
3468 if (cp->compat)
3469 continue;
3470 /*
3471 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously,
3472 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
3473 * So use it instead (requires 2 reg reads instead of 1,
3474 * but we can't do it another way).
3475 */
3476 dmastat = bus_space_read_1(sc->sc_dma_iot,
3477 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3478 if((dmastat & IDEDMA_CTL_INTR) == 0)
3479 continue;
3480 crv = wdcintr(wdc_cp);
3481 if (crv == 0)
3482 printf("%s:%d: bogus intr\n",
3483 sc->sc_wdcdev.sc_dev.dv_xname, i);
3484 else
3485 rv = 1;
3486 }
3487 return rv;
3488 }
3489
/*
 * Attach glue for the OPTi controllers: map bus-master DMA registers,
 * advertise capabilities, and map/set up each channel that the
 * INIT_CONTROL register reports as enabled.
 */
void
opti_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	u_int8_t init_ctrl;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = opti_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* INIT_CONTROL carries the "second channel disabled" strap bit. */
	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
	    OPTI_REG_INIT_CONTROL);

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (channel == 1 &&
		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		/* Compat interrupt mapping may also fail; re-check. */
		if (cp->hw_ok == 0)
			continue;
		opti_setup_channel(&cp->wdc_channel);
	}
}
3546
3547 void
3548 opti_setup_channel(chp)
3549 struct channel_softc *chp;
3550 {
3551 struct ata_drive_datas *drvp;
3552 struct pciide_channel *cp = (struct pciide_channel*)chp;
3553 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3554 int drive, spd;
3555 int mode[2];
3556 u_int8_t rv, mr;
3557
3558 /*
3559 * The `Delay' and `Address Setup Time' fields of the
3560 * Miscellaneous Register are always zero initially.
3561 */
3562 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3563 mr &= ~(OPTI_MISC_DELAY_MASK |
3564 OPTI_MISC_ADDR_SETUP_MASK |
3565 OPTI_MISC_INDEX_MASK);
3566
3567 /* Prime the control register before setting timing values */
3568 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3569
3570 /* Determine the clockrate of the PCIbus the chip is attached to */
3571 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3572 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3573
3574 /* setup DMA if needed */
3575 pciide_channel_dma_setup(cp);
3576
3577 for (drive = 0; drive < 2; drive++) {
3578 drvp = &chp->ch_drive[drive];
3579 /* If no drive, skip */
3580 if ((drvp->drive_flags & DRIVE) == 0) {
3581 mode[drive] = -1;
3582 continue;
3583 }
3584
3585 if ((drvp->drive_flags & DRIVE_DMA)) {
3586 /*
3587 * Timings will be used for both PIO and DMA,
3588 * so adjust DMA mode if needed
3589 */
3590 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3591 drvp->PIO_mode = drvp->DMA_mode + 2;
3592 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3593 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3594 drvp->PIO_mode - 2 : 0;
3595 if (drvp->DMA_mode == 0)
3596 drvp->PIO_mode = 0;
3597
3598 mode[drive] = drvp->DMA_mode + 5;
3599 } else
3600 mode[drive] = drvp->PIO_mode;
3601
3602 if (drive && mode[0] >= 0 &&
3603 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3604 /*
3605 * Can't have two drives using different values
3606 * for `Address Setup Time'.
3607 * Slow down the faster drive to compensate.
3608 */
3609 int d = (opti_tim_as[spd][mode[0]] >
3610 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3611
3612 mode[d] = mode[1-d];
3613 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3614 chp->ch_drive[d].DMA_mode = 0;
3615 chp->ch_drive[d].drive_flags &= DRIVE_DMA;
3616 }
3617 }
3618
3619 for (drive = 0; drive < 2; drive++) {
3620 int m;
3621 if ((m = mode[drive]) < 0)
3622 continue;
3623
3624 /* Set the Address Setup Time and select appropriate index */
3625 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3626 rv |= OPTI_MISC_INDEX(drive);
3627 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3628
3629 /* Set the pulse width and recovery timing parameters */
3630 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3631 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3632 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3633 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3634
3635 /* Set the Enhanced Mode register appropriately */
3636 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3637 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3638 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3639 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3640 }
3641
3642 /* Finally, enable the timings */
3643 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3644
3645 pciide_print_modes(cp);
3646 }
3647