1 /* $NetBSD: pciide.c,v 1.62 2000/06/06 17:48:12 soren Exp $ */
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
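/*
 * Example (illustrative, not from the original source): patching
 * wdcdebug_pciide_mask to (DEBUG_PROBE | DEBUG_DMA), e.g. from ddb or a
 * debugger before autoconfiguration runs, makes the WDCDEBUG_PRINT()
 * calls below emit the probe- and DMA-related messages.
 */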
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119
120 #include <dev/pci/cy82c693var.h>
121
122 /* inlines for reading/writing 8-bit PCI registers */
123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
124 int));
125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
126 int, u_int8_t));
127
128 static __inline u_int8_t
129 pciide_pci_read(pc, pa, reg)
130 pci_chipset_tag_t pc;
131 pcitag_t pa;
132 int reg;
133 {
134
135 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
136 ((reg & 0x03) * 8) & 0xff);
137 }
138
139 static __inline void
140 pciide_pci_write(pc, pa, reg, val)
141 pci_chipset_tag_t pc;
142 pcitag_t pa;
143 int reg;
144 u_int8_t val;
145 {
146 pcireg_t pcival;
147
148 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
149 pcival &= ~(0xff << ((reg & 0x03) * 8));
150 pcival |= (val << ((reg & 0x03) * 8));
151 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
152 }
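/*
 * Worked example (illustrative only): reading 8-bit register 0x41 with
 * pciide_pci_read() turns into a 32-bit read of config dword 0x40; the
 * result is shifted right by (0x41 & 0x03) * 8 = 8 bits and masked with
 * 0xff.  pciide_pci_write() to 0x41 likewise does a read-modify-write of
 * dword 0x40, replacing only byte lane 1.
 */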
153
154 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
155
156 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157 void piix_setup_channel __P((struct channel_softc*));
158 void piix3_4_setup_channel __P((struct channel_softc*));
159 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
160 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
161 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162
163 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164 void amd756_setup_channel __P((struct channel_softc*));
165
166 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167 void apollo_setup_channel __P((struct channel_softc*));
168
169 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
170 void cmd0643_6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_6_setup_channel __P((struct channel_softc*));
172 void cmd_channel_map __P((struct pci_attach_args *,
173 struct pciide_softc *, int));
174 int cmd_pci_intr __P((void *));
175
176 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
177 void cy693_setup_channel __P((struct channel_softc*));
178
179 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
180 void sis_setup_channel __P((struct channel_softc*));
181
182 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void acer_setup_channel __P((struct channel_softc*));
184 int acer_pci_intr __P((void *));
185
186 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
187 void pdc202xx_setup_channel __P((struct channel_softc*));
188 int pdc202xx_pci_intr __P((void *));
189
190 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
191 void opti_setup_channel __P((struct channel_softc*));
192
193 void pciide_channel_dma_setup __P((struct pciide_channel *));
194 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
195 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
196 void pciide_dma_start __P((void*, int, int));
197 int pciide_dma_finish __P((void*, int, int, int));
198 void pciide_print_modes __P((struct pciide_channel *));
199
200 struct pciide_product_desc {
201 u_int32_t ide_product;
202 int ide_flags;
203 const char *ide_name;
204 /* map and setup chip, probe drives */
205 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
206 };
207
208 /* Flags for ide_flags */
209 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
210
211 /* Default product description for devices not known to this driver */
212 const struct pciide_product_desc default_product_desc = {
213 0,
214 0,
215 "Generic PCI IDE controller",
216 default_chip_map,
217 };
218
219 const struct pciide_product_desc pciide_intel_products[] = {
220 { PCI_PRODUCT_INTEL_82092AA,
221 0,
222 "Intel 82092AA IDE controller",
223 default_chip_map,
224 },
225 { PCI_PRODUCT_INTEL_82371FB_IDE,
226 0,
227 "Intel 82371FB IDE controller (PIIX)",
228 piix_chip_map,
229 },
230 { PCI_PRODUCT_INTEL_82371SB_IDE,
231 0,
232 "Intel 82371SB IDE Interface (PIIX3)",
233 piix_chip_map,
234 },
235 { PCI_PRODUCT_INTEL_82371AB_IDE,
236 0,
237 "Intel 82371AB IDE controller (PIIX4)",
238 piix_chip_map,
239 },
240 { PCI_PRODUCT_INTEL_82801AA_IDE,
241 0,
242 "Intel 82801AA IDE Controller (ICH)",
243 piix_chip_map,
244 },
245 { PCI_PRODUCT_INTEL_82801AB_IDE,
246 0,
247 "Intel 82801AB IDE Controller (ICH0)",
248 piix_chip_map,
249 },
250 { 0,
251 0,
252 NULL,
253 }
254 };
255
256 const struct pciide_product_desc pciide_amd_products[] = {
257 { PCI_PRODUCT_AMD_PBC756_IDE,
258 0,
259 "Advanced Micro Devices AMD756 IDE Controller",
260 amd756_chip_map
261 },
262 { 0,
263 0,
264 NULL,
265 }
266 };
267
268 const struct pciide_product_desc pciide_cmd_products[] = {
269 { PCI_PRODUCT_CMDTECH_640,
270 0,
271 "CMD Technology PCI0640",
272 cmd_chip_map
273 },
274 { PCI_PRODUCT_CMDTECH_643,
275 0,
276 "CMD Technology PCI0643",
277 cmd0643_6_chip_map,
278 },
279 { PCI_PRODUCT_CMDTECH_646,
280 0,
281 "CMD Technology PCI0646",
282 cmd0643_6_chip_map,
283 },
284 { 0,
285 0,
286 NULL,
287 }
288 };
289
290 const struct pciide_product_desc pciide_via_products[] = {
291 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
292 0,
293 "VIA Tech VT82C586 IDE Controller",
294 apollo_chip_map,
295 },
296 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
297 0,
298 "VIA Tech VT82C586A IDE Controller",
299 apollo_chip_map,
300 },
301 { 0,
302 0,
303 NULL,
304 }
305 };
306
307 const struct pciide_product_desc pciide_cypress_products[] = {
308 { PCI_PRODUCT_CONTAQ_82C693,
309 0,
310 "Cypress CY82C693 IDE Controller",
311 cy693_chip_map,
312 },
313 { 0,
314 0,
315 NULL,
316 }
317 };
318
319 const struct pciide_product_desc pciide_sis_products[] = {
320 { PCI_PRODUCT_SIS_5597_IDE,
321 0,
322 "Silicon Integrated System 5597/5598 IDE controller",
323 sis_chip_map,
324 },
325 { 0,
326 0,
327 NULL,
328 }
329 };
330
331 const struct pciide_product_desc pciide_acer_products[] = {
332 { PCI_PRODUCT_ALI_M5229,
333 0,
334 "Acer Labs M5229 UDMA IDE Controller",
335 acer_chip_map,
336 },
337 { 0,
338 0,
339 NULL,
340 }
341 };
342
343 const struct pciide_product_desc pciide_promise_products[] = {
344 { PCI_PRODUCT_PROMISE_ULTRA33,
345 IDE_PCI_CLASS_OVERRIDE,
346 "Promise Ultra33/ATA Bus Master IDE Accelerator",
347 pdc202xx_chip_map,
348 },
349 { PCI_PRODUCT_PROMISE_ULTRA66,
350 IDE_PCI_CLASS_OVERRIDE,
351 "Promise Ultra66/ATA Bus Master IDE Accelerator",
352 pdc202xx_chip_map,
353 },
354 { 0,
355 0,
356 NULL,
357 }
358 };
359
360 const struct pciide_product_desc pciide_opti_products[] = {
361 { PCI_PRODUCT_OPTI_82C621,
362 0,
363 "OPTi 82c621 PCI IDE controller",
364 opti_chip_map,
365 },
366 { PCI_PRODUCT_OPTI_82C568,
367 0,
368 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
369 opti_chip_map,
370 },
371 { PCI_PRODUCT_OPTI_82D568,
372 0,
373 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
374 opti_chip_map,
375 },
376 { 0,
377 0,
378 NULL,
379 }
380 };
381
382 struct pciide_vendor_desc {
383 u_int32_t ide_vendor;
384 const struct pciide_product_desc *ide_products;
385 };
386
387 const struct pciide_vendor_desc pciide_vendors[] = {
388 { PCI_VENDOR_INTEL, pciide_intel_products },
389 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
390 { PCI_VENDOR_VIATECH, pciide_via_products },
391 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
392 { PCI_VENDOR_SIS, pciide_sis_products },
393 { PCI_VENDOR_ALI, pciide_acer_products },
394 { PCI_VENDOR_PROMISE, pciide_promise_products },
395 { PCI_VENDOR_AMD, pciide_amd_products },
396 { PCI_VENDOR_OPTI, pciide_opti_products },
397 { 0, NULL }
398 };
399
400 /* options passed via the 'flags' config keyword */
401 #define PCIIDE_OPTIONS_DMA 0x01
402
403 int pciide_match __P((struct device *, struct cfdata *, void *));
404 void pciide_attach __P((struct device *, struct device *, void *));
405
406 struct cfattach pciide_ca = {
407 sizeof(struct pciide_softc), pciide_match, pciide_attach
408 };
409 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
410 int pciide_mapregs_compat __P(( struct pci_attach_args *,
411 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
412 int pciide_mapregs_native __P((struct pci_attach_args *,
413 struct pciide_channel *, bus_size_t *, bus_size_t *,
414 int (*pci_intr) __P((void *))));
415 void pciide_mapreg_dma __P((struct pciide_softc *,
416 struct pci_attach_args *));
417 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
418 void pciide_mapchan __P((struct pci_attach_args *,
419 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
420 int (*pci_intr) __P((void *))));
421 int pciide_chan_candisable __P((struct pciide_channel *));
422 void pciide_map_compat_intr __P(( struct pci_attach_args *,
423 struct pciide_channel *, int, int));
424 int pciide_print __P((void *, const char *pnp));
425 int pciide_compat_intr __P((void *));
426 int pciide_pci_intr __P((void *));
427 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
428
429 const struct pciide_product_desc *
430 pciide_lookup_product(id)
431 u_int32_t id;
432 {
433 const struct pciide_product_desc *pp;
434 const struct pciide_vendor_desc *vp;
435
436 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
437 if (PCI_VENDOR(id) == vp->ide_vendor)
438 break;
439
440 if ((pp = vp->ide_products) == NULL)
441 return NULL;
442
443 for (; pp->ide_name != NULL; pp++)
444 if (PCI_PRODUCT(id) == pp->ide_product)
445 break;
446
447 if (pp->ide_name == NULL)
448 return NULL;
449 return pp;
450 }
451
452 int
453 pciide_match(parent, match, aux)
454 struct device *parent;
455 struct cfdata *match;
456 void *aux;
457 {
458 struct pci_attach_args *pa = aux;
459 const struct pciide_product_desc *pp;
460
461 /*
462 * Check the class register to see that it's a PCI IDE controller.
463 * If it is, we assume that we can deal with it; it _should_
464 * work in a standardized way...
465 */
466 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
467 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
468 return (1);
469 }
470
471 /*
472 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
473 * controllers. Let's see if we can deal with them anyway.
474 */
475 pp = pciide_lookup_product(pa->pa_id);
476 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
477 return (1);
478 }
479
480 return (0);
481 }
482
483 void
484 pciide_attach(parent, self, aux)
485 struct device *parent, *self;
486 void *aux;
487 {
488 struct pci_attach_args *pa = aux;
489 pci_chipset_tag_t pc = pa->pa_pc;
490 pcitag_t tag = pa->pa_tag;
491 struct pciide_softc *sc = (struct pciide_softc *)self;
492 pcireg_t csr;
493 char devinfo[256];
494 const char *displaydev;
495
496 sc->sc_pp = pciide_lookup_product(pa->pa_id);
497 if (sc->sc_pp == NULL) {
498 sc->sc_pp = &default_product_desc;
499 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
500 displaydev = devinfo;
501 } else
502 displaydev = sc->sc_pp->ide_name;
503
504 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
505
506 sc->sc_pc = pa->pa_pc;
507 sc->sc_tag = pa->pa_tag;
508 #ifdef WDCDEBUG
509 if (wdcdebug_pciide_mask & DEBUG_PROBE)
510 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
511 #endif
512
513 sc->sc_pp->chip_map(sc, pa);
514
515 if (sc->sc_dma_ok) {
516 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
517 csr |= PCI_COMMAND_MASTER_ENABLE;
518 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
519 }
520 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
521 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
522 }
523
524 /* tell whether the chip is enabled or not */
525 int
526 pciide_chipen(sc, pa)
527 struct pciide_softc *sc;
528 struct pci_attach_args *pa;
529 {
530 pcireg_t csr;
531 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
532 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
533 PCI_COMMAND_STATUS_REG);
534 printf("%s: device disabled (at %s)\n",
535 sc->sc_wdcdev.sc_dev.dv_xname,
536 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
537 "device" : "bridge");
538 return 0;
539 }
540 return 1;
541 }
542
543 int
544 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
545 struct pci_attach_args *pa;
546 struct pciide_channel *cp;
547 int compatchan;
548 bus_size_t *cmdsizep, *ctlsizep;
549 {
550 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
551 struct channel_softc *wdc_cp = &cp->wdc_channel;
552
553 cp->compat = 1;
554 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
555 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
556
557 wdc_cp->cmd_iot = pa->pa_iot;
558 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
559 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
560 printf("%s: couldn't map %s channel cmd regs\n",
561 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
562 return (0);
563 }
564
565 wdc_cp->ctl_iot = pa->pa_iot;
566 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
567 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
568 printf("%s: couldn't map %s channel ctl regs\n",
569 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
570 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
571 PCIIDE_COMPAT_CMD_SIZE);
572 return (0);
573 }
574
575 return (1);
576 }
577
578 int
579 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
580 struct pci_attach_args * pa;
581 struct pciide_channel *cp;
582 bus_size_t *cmdsizep, *ctlsizep;
583 int (*pci_intr) __P((void *));
584 {
585 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
586 struct channel_softc *wdc_cp = &cp->wdc_channel;
587 const char *intrstr;
588 pci_intr_handle_t intrhandle;
589
590 cp->compat = 0;
591
592 if (sc->sc_pci_ih == NULL) {
593 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
594 pa->pa_intrline, &intrhandle) != 0) {
595 printf("%s: couldn't map native-PCI interrupt\n",
596 sc->sc_wdcdev.sc_dev.dv_xname);
597 return 0;
598 }
599 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
600 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
601 intrhandle, IPL_BIO, pci_intr, sc);
602 if (sc->sc_pci_ih != NULL) {
603 printf("%s: using %s for native-PCI interrupt\n",
604 sc->sc_wdcdev.sc_dev.dv_xname,
605 intrstr ? intrstr : "unknown interrupt");
606 } else {
607 printf("%s: couldn't establish native-PCI interrupt",
608 sc->sc_wdcdev.sc_dev.dv_xname);
609 if (intrstr != NULL)
610 printf(" at %s", intrstr);
611 printf("\n");
612 return 0;
613 }
614 }
615 cp->ih = sc->sc_pci_ih;
616 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
617 PCI_MAPREG_TYPE_IO, 0,
618 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
619 printf("%s: couldn't map %s channel cmd regs\n",
620 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
621 return 0;
622 }
623
624 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
625 PCI_MAPREG_TYPE_IO, 0,
626 &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) {
627 printf("%s: couldn't map %s channel ctl regs\n",
628 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
629 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
630 return 0;
631 }
632 return (1);
633 }
634
635 void
636 pciide_mapreg_dma(sc, pa)
637 struct pciide_softc *sc;
638 struct pci_attach_args *pa;
639 {
640 /*
641 * Map DMA registers
642 *
643 * Note that sc_dma_ok is the right variable to test to see if
644 * DMA can be done. If the interface doesn't support DMA,
645 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
646 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
647 * non-zero if the interface supports DMA and the registers
648 * could be mapped.
649 *
650 * XXX Note that despite the fact that the Bus Master IDE specs
651 * XXX say that "The bus master IDE function uses 16 bytes of IO
652 * XXX space," some controllers (at least the United
653 * XXX Microelectronics UM8886BF) place it in memory space.
654 * XXX eventually, we should probably read the register and check
655 * XXX which type it is. Either that or 'quirk' certain devices.
656 */
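	/*
	 * A possible shape for that check (sketch only, an assumption, not
	 * implemented here): query the BAR type first and pass it to
	 * pci_mapreg_map(), e.g.
	 *
	 *	pcireg_t maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	 *	    PCIIDE_REG_BUS_MASTER_DMA);
	 *
	 * or simply quirk the affected vendor/product IDs.
	 */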
657 sc->sc_dma_ok = (pci_mapreg_map(pa,
658 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 0,
659 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
660 sc->sc_dmat = pa->pa_dmat;
661 if (sc->sc_dma_ok == 0) {
662 printf(", but unused (couldn't map registers)");
663 } else {
664 sc->sc_wdcdev.dma_arg = sc;
665 sc->sc_wdcdev.dma_init = pciide_dma_init;
666 sc->sc_wdcdev.dma_start = pciide_dma_start;
667 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
668 }
669 }
670 int
671 pciide_compat_intr(arg)
672 void *arg;
673 {
674 struct pciide_channel *cp = arg;
675
676 #ifdef DIAGNOSTIC
677 /* should only be called for a compat channel */
678 if (cp->compat == 0)
679 panic("pciide compat intr called for non-compat chan %p\n", cp);
680 #endif
681 return (wdcintr(&cp->wdc_channel));
682 }
683
684 int
685 pciide_pci_intr(arg)
686 void *arg;
687 {
688 struct pciide_softc *sc = arg;
689 struct pciide_channel *cp;
690 struct channel_softc *wdc_cp;
691 int i, rv, crv;
692
693 rv = 0;
694 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
695 cp = &sc->pciide_channels[i];
696 wdc_cp = &cp->wdc_channel;
697
698 /* If a compat channel, skip. */
699 if (cp->compat)
700 continue;
701 /* if this channel isn't waiting for an intr, skip */
702 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
703 continue;
704
705 crv = wdcintr(wdc_cp);
706 if (crv == 0)
707 ; /* leave rv alone */
708 else if (crv == 1)
709 rv = 1; /* claim the intr */
710 else if (rv == 0) /* crv should be -1 in this case */
711 rv = crv; /* if we've done no better, take it */
712 }
713 return (rv);
714 }
715
716 void
717 pciide_channel_dma_setup(cp)
718 struct pciide_channel *cp;
719 {
720 int drive;
721 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
722 struct ata_drive_datas *drvp;
723
724 for (drive = 0; drive < 2; drive++) {
725 drvp = &cp->wdc_channel.ch_drive[drive];
726 /* If no drive, skip */
727 if ((drvp->drive_flags & DRIVE) == 0)
728 continue;
729 /* setup DMA if needed */
730 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
731 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
732 sc->sc_dma_ok == 0) {
733 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
734 continue;
735 }
736 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
737 != 0) {
738 /* Abort DMA setup */
739 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
740 continue;
741 }
742 }
743 }
744
745 int
746 pciide_dma_table_setup(sc, channel, drive)
747 struct pciide_softc *sc;
748 int channel, drive;
749 {
750 bus_dma_segment_t seg;
751 int error, rseg;
752 const bus_size_t dma_table_size =
753 sizeof(struct idedma_table) * NIDEDMA_TABLES;
754 struct pciide_dma_maps *dma_maps =
755 &sc->pciide_channels[channel].dma_maps[drive];
756
757 /* If table was already allocated, just return */
758 if (dma_maps->dma_table)
759 return 0;
760
761 /* Allocate memory for the DMA tables and map it */
762 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
763 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
764 BUS_DMA_NOWAIT)) != 0) {
765 printf("%s:%d: unable to allocate table DMA for "
766 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
767 channel, drive, error);
768 return error;
769 }
770 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
771 dma_table_size,
772 (caddr_t *)&dma_maps->dma_table,
773 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
774 printf("%s:%d: unable to map table DMA for"
775 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
776 channel, drive, error);
777 return error;
778 }
779 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
780 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
781 seg.ds_addr), DEBUG_PROBE);
782
783 /* Create and load table DMA map for this disk */
784 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
785 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
786 &dma_maps->dmamap_table)) != 0) {
787 printf("%s:%d: unable to create table DMA map for "
788 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
789 channel, drive, error);
790 return error;
791 }
792 if ((error = bus_dmamap_load(sc->sc_dmat,
793 dma_maps->dmamap_table,
794 dma_maps->dma_table,
795 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
796 printf("%s:%d: unable to load table DMA map for "
797 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
798 channel, drive, error);
799 return error;
800 }
801 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
802 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
803 /* Create an xfer DMA map for this drive */
804 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
805 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
806 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
807 &dma_maps->dmamap_xfer)) != 0) {
808 printf("%s:%d: unable to create xfer DMA map for "
809 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
810 channel, drive, error);
811 return error;
812 }
813 return 0;
814 }
815
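/*
 * Illustrative note (not from the original source): pciide_dma_init()
 * below fills the idedma_table allocated above with one physical region
 * descriptor per DMA segment.  For a two-segment transfer the table
 * would look roughly like:
 *
 *	entry 0: base_addr = segment 0 phys addr, byte_count = its length
 *	entry 1: base_addr = segment 1 phys addr,
 *		 byte_count = its length | IDEDMA_BYTE_COUNT_EOT
 *
 * Each segment must stay within a 64KB-aligned window: e.g. a segment
 * starting at 0x1f800 with length 0x1000 would cross 0x20000 and trip
 * the DIAGNOSTIC check.
 */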
816 int
817 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
818 void *v;
819 int channel, drive;
820 void *databuf;
821 size_t datalen;
822 int flags;
823 {
824 struct pciide_softc *sc = v;
825 int error, seg;
826 struct pciide_dma_maps *dma_maps =
827 &sc->pciide_channels[channel].dma_maps[drive];
828
829 error = bus_dmamap_load(sc->sc_dmat,
830 dma_maps->dmamap_xfer,
831 databuf, datalen, NULL, BUS_DMA_NOWAIT);
832 if (error) {
833 printf("%s:%d: unable to load xfer DMA map for"
834 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
835 channel, drive, error);
836 return error;
837 }
838
839 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
840 dma_maps->dmamap_xfer->dm_mapsize,
841 (flags & WDC_DMA_READ) ?
842 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
843
844 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
845 #ifdef DIAGNOSTIC
846 /* A segment must not cross a 64k boundary */
847 {
848 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
849 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
850 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
851 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
852 printf("pciide_dma: segment %d physical addr 0x%lx"
853 " len 0x%lx not properly aligned\n",
854 seg, phys, len);
855 panic("pciide_dma: buf align");
856 }
857 }
858 #endif
859 dma_maps->dma_table[seg].base_addr =
860 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
861 dma_maps->dma_table[seg].byte_count =
862 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
863 IDEDMA_BYTE_COUNT_MASK);
864 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
865 seg, le32toh(dma_maps->dma_table[seg].byte_count),
866 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
867
868 }
869 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
870 htole32(IDEDMA_BYTE_COUNT_EOT);
871
872 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
873 dma_maps->dmamap_table->dm_mapsize,
874 BUS_DMASYNC_PREWRITE);
875
876 /* Maps are ready; set up the DMA engine registers */
877 #ifdef DIAGNOSTIC
878 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
879 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
880 dma_maps->dmamap_table->dm_segs[0].ds_addr);
881 panic("pciide_dma_init: table align");
882 }
883 #endif
884
885 /* Clear status bits */
886 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
887 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
888 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
889 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
890 /* Write table addr */
891 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
892 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
893 dma_maps->dmamap_table->dm_segs[0].ds_addr);
894 /* set read/write */
895 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
896 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
897 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
898 /* remember flags */
899 dma_maps->dma_flags = flags;
900 return 0;
901 }
902
903 void
904 pciide_dma_start(v, channel, drive)
905 void *v;
906 int channel, drive;
907 {
908 struct pciide_softc *sc = v;
909
910 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
911 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
912 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
913 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
914 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
915 }
916
917 int
918 pciide_dma_finish(v, channel, drive, force)
919 void *v;
920 int channel, drive;
921 int force;
922 {
923 struct pciide_softc *sc = v;
924 u_int8_t status;
925 int error = 0;
926 struct pciide_dma_maps *dma_maps =
927 &sc->pciide_channels[channel].dma_maps[drive];
928
929 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
930 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
931 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
932 DEBUG_XFERS);
933
934 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
935 return WDC_DMAST_NOIRQ;
936
937 /* stop DMA channel */
938 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
939 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
940 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
941 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
942
943 /* Clear status bits */
944 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
945 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
946 status);
947
948 /* Unload the map of the data buffer */
949 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
950 dma_maps->dmamap_xfer->dm_mapsize,
951 (dma_maps->dma_flags & WDC_DMA_READ) ?
952 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
953 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
954
955 if ((status & IDEDMA_CTL_ERR) != 0) {
956 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
957 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
958 error |= WDC_DMAST_ERR;
959 }
960
961 if ((status & IDEDMA_CTL_INTR) == 0) {
962 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
963 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
964 drive, status);
965 error |= WDC_DMAST_NOIRQ;
966 }
967
968 if ((status & IDEDMA_CTL_ACT) != 0) {
969 /* data underrun, may be a valid condition for ATAPI */
970 error |= WDC_DMAST_UNDER;
971 }
972 return error;
973 }
974
975 /* some common code used by several chip_map functions */
976 int
977 pciide_chansetup(sc, channel, interface)
978 struct pciide_softc *sc;
979 int channel;
980 pcireg_t interface;
981 {
982 struct pciide_channel *cp = &sc->pciide_channels[channel];
983 sc->wdc_chanarray[channel] = &cp->wdc_channel;
984 cp->name = PCIIDE_CHANNEL_NAME(channel);
985 cp->wdc_channel.channel = channel;
986 cp->wdc_channel.wdc = &sc->sc_wdcdev;
987 cp->wdc_channel.ch_queue =
988 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
989 if (cp->wdc_channel.ch_queue == NULL) {
990 printf("%s %s channel: "
991 "can't allocate memory for command queue",
992 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
993 return 0;
994 }
995 printf("%s: %s channel %s to %s mode\n",
996 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
997 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
998 "configured" : "wired",
999 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1000 "native-PCI" : "compatibility");
1001 return 1;
1002 }
1003
1004 /* some common code used by several chip_map functions to map a channel */
1005 void
1006 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1007 struct pci_attach_args *pa;
1008 struct pciide_channel *cp;
1009 pcireg_t interface;
1010 bus_size_t *cmdsizep, *ctlsizep;
1011 int (*pci_intr) __P((void *));
1012 {
1013 struct channel_softc *wdc_cp = &cp->wdc_channel;
1014
1015 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1016 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1017 pci_intr);
1018 else
1019 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1020 wdc_cp->channel, cmdsizep, ctlsizep);
1021
1022 if (cp->hw_ok == 0)
1023 return;
1024 wdc_cp->data32iot = wdc_cp->cmd_iot;
1025 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1026 wdcattach(wdc_cp);
1027 }
1028
1029 /*
1030 * Generic code to check whether a channel can be disabled. Returns 1
1031 * if the channel can be disabled, 0 if not
1032 */
1033 int
1034 pciide_chan_candisable(cp)
1035 struct pciide_channel *cp;
1036 {
1037 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1038 struct channel_softc *wdc_cp = &cp->wdc_channel;
1039
1040 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1041 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1042 printf("%s: disabling %s channel (no drives)\n",
1043 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1044 cp->hw_ok = 0;
1045 return 1;
1046 }
1047 return 0;
1048 }
1049
1050 /*
1051 * Generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1052 * Sets hw_ok=0 on failure.
1053 */
1054 void
1055 pciide_map_compat_intr(pa, cp, compatchan, interface)
1056 struct pci_attach_args *pa;
1057 struct pciide_channel *cp;
1058 int compatchan, interface;
1059 {
1060 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1061 struct channel_softc *wdc_cp = &cp->wdc_channel;
1062
1063 if (cp->hw_ok == 0)
1064 return;
1065 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1066 return;
1067
1068 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1069 pa, compatchan, pciide_compat_intr, cp);
1070 if (cp->ih == NULL) {
1071 printf("%s: no compatibility interrupt for use by %s "
1072 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1073 cp->hw_ok = 0;
1074 }
1075 }
1076
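/*
 * Illustrative example of the output produced below (device names are
 * assumed, not taken from the source): for a drive wd0 on channel 0 of
 * pciide0 that negotiated PIO 4 and Ultra-DMA 2, the line would read
 *
 *	wd0(pciide0:0:0): using PIO mode 4, Ultra-DMA mode 2 (using DMA data transfers)
 */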
1077 void
1078 pciide_print_modes(cp)
1079 struct pciide_channel *cp;
1080 {
1081 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1082 int drive;
1083 struct channel_softc *chp;
1084 struct ata_drive_datas *drvp;
1085
1086 chp = &cp->wdc_channel;
1087 for (drive = 0; drive < 2; drive++) {
1088 drvp = &chp->ch_drive[drive];
1089 if ((drvp->drive_flags & DRIVE) == 0)
1090 continue;
1091 printf("%s(%s:%d:%d): using PIO mode %d",
1092 drvp->drv_softc->dv_xname,
1093 sc->sc_wdcdev.sc_dev.dv_xname,
1094 chp->channel, drive, drvp->PIO_mode);
1095 if (drvp->drive_flags & DRIVE_DMA)
1096 printf(", DMA mode %d", drvp->DMA_mode);
1097 if (drvp->drive_flags & DRIVE_UDMA)
1098 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1099 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1100 printf(" (using DMA data transfers)");
1101 printf("\n");
1102 }
1103 }
1104
1105 void
1106 default_chip_map(sc, pa)
1107 struct pciide_softc *sc;
1108 struct pci_attach_args *pa;
1109 {
1110 struct pciide_channel *cp;
1111 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1112 sc->sc_tag, PCI_CLASS_REG));
1113 pcireg_t csr;
1114 int channel, drive;
1115 struct ata_drive_datas *drvp;
1116 u_int8_t idedma_ctl;
1117 bus_size_t cmdsize, ctlsize;
1118 char *failreason;
1119
1120 if (pciide_chipen(sc, pa) == 0)
1121 return;
1122
1123 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1124 printf("%s: bus-master DMA support present",
1125 sc->sc_wdcdev.sc_dev.dv_xname);
1126 if (sc->sc_pp == &default_product_desc &&
1127 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1128 PCIIDE_OPTIONS_DMA) == 0) {
1129 printf(", but unused (no driver support)");
1130 sc->sc_dma_ok = 0;
1131 } else {
1132 pciide_mapreg_dma(sc, pa);
1133 if (sc->sc_dma_ok != 0)
1134 printf(", used without full driver "
1135 "support");
1136 }
1137 } else {
1138 printf("%s: hardware does not support DMA",
1139 sc->sc_wdcdev.sc_dev.dv_xname);
1140 sc->sc_dma_ok = 0;
1141 }
1142 printf("\n");
1143 if (sc->sc_dma_ok)
1144 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
1145 sc->sc_wdcdev.PIO_cap = 0;
1146 sc->sc_wdcdev.DMA_cap = 0;
1147
1148 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1149 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1150 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1151
1152 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1153 cp = &sc->pciide_channels[channel];
1154 if (pciide_chansetup(sc, channel, interface) == 0)
1155 continue;
1156 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1157 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1158 &ctlsize, pciide_pci_intr);
1159 } else {
1160 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1161 channel, &cmdsize, &ctlsize);
1162 }
1163 if (cp->hw_ok == 0)
1164 continue;
1165 /*
1166 * Check to see if something appears to be there.
1167 */
1168 failreason = NULL;
1169 if (!wdcprobe(&cp->wdc_channel)) {
1170 failreason = "not responding; disabled or no drives?";
1171 goto next;
1172 }
1173 /*
1174 * Now, make sure it's actually attributable to this PCI IDE
1175 * channel by trying to access the channel again while the
1176 * PCI IDE controller's I/O space is disabled. (If the
1177 * channel no longer appears to be there, it belongs to
1178 * this controller.) YUCK!
1179 */
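		/*
		 * Decision rule, spelled out (illustrative): responds with
		 * the I/O decode on and not with it off -> the channel is
		 * ours; responds in both cases -> something else decodes
		 * those addresses and the channel is ignored.
		 */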
1180 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1181 PCI_COMMAND_STATUS_REG);
1182 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1183 csr & ~PCI_COMMAND_IO_ENABLE);
1184 if (wdcprobe(&cp->wdc_channel))
1185 failreason = "other hardware responding at addresses";
1186 pci_conf_write(sc->sc_pc, sc->sc_tag,
1187 PCI_COMMAND_STATUS_REG, csr);
1188 next:
1189 if (failreason) {
1190 printf("%s: %s channel ignored (%s)\n",
1191 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1192 failreason);
1193 cp->hw_ok = 0;
1194 bus_space_unmap(cp->wdc_channel.cmd_iot,
1195 cp->wdc_channel.cmd_ioh, cmdsize);
1196 bus_space_unmap(cp->wdc_channel.ctl_iot,
1197 cp->wdc_channel.ctl_ioh, ctlsize);
1198 } else {
1199 pciide_map_compat_intr(pa, cp, channel, interface);
1200 }
1201 if (cp->hw_ok) {
1202 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1203 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1204 wdcattach(&cp->wdc_channel);
1205 }
1206 }
1207
1208 if (sc->sc_dma_ok == 0)
1209 return;
1210
1211 /* Allocate DMA maps */
1212 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1213 idedma_ctl = 0;
1214 cp = &sc->pciide_channels[channel];
1215 for (drive = 0; drive < 2; drive++) {
1216 drvp = &cp->wdc_channel.ch_drive[drive];
1217 /* If no drive, skip */
1218 if ((drvp->drive_flags & DRIVE) == 0)
1219 continue;
1220 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1221 continue;
1222 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1223 /* Abort DMA setup; fall back to PIO for this drive */
1224 printf("%s:%d:%d: can't allocate DMA maps, "
1225 "using PIO transfers\n",
1226 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive);
1227 drvp->drive_flags &= ~DRIVE_DMA;
1228 continue;
1229 }
1230 printf("%s:%d:%d: using DMA data transfers\n",
1231 sc->sc_wdcdev.sc_dev.dv_xname,
1232 channel, drive);
1233 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1234 }
1235 if (idedma_ctl != 0) {
1236 /* Add software bits in status register */
1237 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1238 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1239 idedma_ctl);
1240 }
1241 }
1242 }
1243
1244 void
1245 piix_chip_map(sc, pa)
1246 struct pciide_softc *sc;
1247 struct pci_attach_args *pa;
1248 {
1249 struct pciide_channel *cp;
1250 int channel;
1251 u_int32_t idetim;
1252 bus_size_t cmdsize, ctlsize;
1253
1254 if (pciide_chipen(sc, pa) == 0)
1255 return;
1256
1257 printf("%s: bus-master DMA support present",
1258 sc->sc_wdcdev.sc_dev.dv_xname);
1259 pciide_mapreg_dma(sc, pa);
1260 printf("\n");
1261 if (sc->sc_dma_ok) {
1262 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
1263 switch(sc->sc_pp->ide_product) {
1264 case PCI_PRODUCT_INTEL_82371AB_IDE:
1265 case PCI_PRODUCT_INTEL_82801AA_IDE:
1266 case PCI_PRODUCT_INTEL_82801AB_IDE:
1267 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1268 }
1269 }
1270 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1271 WDC_CAPABILITY_MODE;
1272 sc->sc_wdcdev.PIO_cap = 4;
1273 sc->sc_wdcdev.DMA_cap = 2;
1274 sc->sc_wdcdev.UDMA_cap =
1275 (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) ? 4 : 2;
1276 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1277 sc->sc_wdcdev.set_modes = piix_setup_channel;
1278 else
1279 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1280 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1281 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1282
1283 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1284 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1285 DEBUG_PROBE);
1286 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1287 WDCDEBUG_PRINT((", sidetim=0x%x",
1288 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1289 DEBUG_PROBE);
1290 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1291 WDCDEBUG_PRINT((", udamreg 0x%x",
1292 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1293 DEBUG_PROBE);
1294 }
1295 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1296 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1297 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1298 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1299 DEBUG_PROBE);
1300 }
1301
1302 }
1303 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1304
1305 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1306 cp = &sc->pciide_channels[channel];
1307 /* PIIX is compat-only */
1308 if (pciide_chansetup(sc, channel, 0) == 0)
1309 continue;
1310 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1311 if ((PIIX_IDETIM_READ(idetim, channel) &
1312 PIIX_IDETIM_IDE) == 0) {
1313 printf("%s: %s channel ignored (disabled)\n",
1314 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1315 continue;
1316 }
1317 /* PIIX are compat-only pciide devices */
1318 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1319 if (cp->hw_ok == 0)
1320 continue;
1321 if (pciide_chan_candisable(cp)) {
1322 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1323 channel);
1324 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1325 idetim);
1326 }
1327 pciide_map_compat_intr(pa, cp, channel, 0);
1328 if (cp->hw_ok == 0)
1329 continue;
1330 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1331 }
1332
1333 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1334 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1335 DEBUG_PROBE);
1336 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1337 WDCDEBUG_PRINT((", sidetim=0x%x",
1338 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1339 DEBUG_PROBE);
1340 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1341 WDCDEBUG_PRINT((", udamreg 0x%x",
1342 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1343 DEBUG_PROBE);
1344 }
1345 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1346 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1347 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1348 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1349 DEBUG_PROBE);
1350 }
1351 }
1352 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1353 }
1354
1355 void
1356 piix_setup_channel(chp)
1357 struct channel_softc *chp;
1358 {
1359 u_int8_t mode[2], drive;
1360 u_int32_t oidetim, idetim, idedma_ctl;
1361 struct pciide_channel *cp = (struct pciide_channel*)chp;
1362 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1363 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1364
1365 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1366 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1367 idedma_ctl = 0;
1368
1369 /* set up new idetim: Enable IDE registers decode */
1370 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1371 chp->channel);
1372
1373 /* setup DMA */
1374 pciide_channel_dma_setup(cp);
1375
1376 /*
1377 * Here we have to mess with the drive modes: the PIIX can't use
1378 * different timings for master and slave drives.
1379 * We need to find the best combination.
1380 */
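	/*
	 * Example of the selection below (illustrative, not from the
	 * source): if drive 0 negotiated multiword DMA mode 2 and drive 1
	 * DMA mode 1, both are programmed for DMA mode 1; if only drive 0
	 * does DMA and drive 1's PIO timings don't match, drive 1 is
	 * dropped to PIO mode 0.
	 */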
1381
1382 /* If both drives support DMA, take the lower mode */
1383 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1384 (drvp[1].drive_flags & DRIVE_DMA)) {
1385 mode[0] = mode[1] =
1386 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1387 drvp[0].DMA_mode = mode[0];
1388 drvp[1].DMA_mode = mode[1];
1389 goto ok;
1390 }
1391 /*
1392 * If only one drive supports DMA, use its mode, and
1393 * put the other one in PIO mode 0 if its mode is not compatible
1394 */
1395 if (drvp[0].drive_flags & DRIVE_DMA) {
1396 mode[0] = drvp[0].DMA_mode;
1397 mode[1] = drvp[1].PIO_mode;
1398 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1399 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1400 mode[1] = drvp[1].PIO_mode = 0;
1401 goto ok;
1402 }
1403 if (drvp[1].drive_flags & DRIVE_DMA) {
1404 mode[1] = drvp[1].DMA_mode;
1405 mode[0] = drvp[0].PIO_mode;
1406 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1407 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1408 mode[0] = drvp[0].PIO_mode = 0;
1409 goto ok;
1410 }
1411 /*
1412 * If neither drive uses DMA, take the lower mode, unless
1413 * one of them is below PIO mode 2
1414 */
1415 if (drvp[0].PIO_mode < 2) {
1416 mode[0] = drvp[0].PIO_mode = 0;
1417 mode[1] = drvp[1].PIO_mode;
1418 } else if (drvp[1].PIO_mode < 2) {
1419 mode[1] = drvp[1].PIO_mode = 0;
1420 mode[0] = drvp[0].PIO_mode;
1421 } else {
1422 mode[0] = mode[1] =
1423 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1424 drvp[0].PIO_mode = mode[0];
1425 drvp[1].PIO_mode = mode[1];
1426 }
1427 ok: /* The modes are set up */
1428 for (drive = 0; drive < 2; drive++) {
1429 if (drvp[drive].drive_flags & DRIVE_DMA) {
1430 idetim |= piix_setup_idetim_timings(
1431 mode[drive], 1, chp->channel);
1432 goto end;
1433 }
1434 }
1435 /* If we get here, neither drive uses DMA */
1436 if (mode[0] >= 2)
1437 idetim |= piix_setup_idetim_timings(
1438 mode[0], 0, chp->channel);
1439 else
1440 idetim |= piix_setup_idetim_timings(
1441 mode[1], 0, chp->channel);
1442 end: /*
1443 * The timing mode is now set up in the controller; enable
1444 * it per drive.
1445 */
1446 for (drive = 0; drive < 2; drive++) {
1447 /* If no drive, skip */
1448 if ((drvp[drive].drive_flags & DRIVE) == 0)
1449 continue;
1450 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1451 if (drvp[drive].drive_flags & DRIVE_DMA)
1452 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1453 }
1454 if (idedma_ctl != 0) {
1455 /* Add software bits in status register */
1456 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1457 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1458 idedma_ctl);
1459 }
1460 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1461 pciide_print_modes(cp);
1462 }
1463
1464 void
1465 piix3_4_setup_channel(chp)
1466 struct channel_softc *chp;
1467 {
1468 struct ata_drive_datas *drvp;
1469 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1470 struct pciide_channel *cp = (struct pciide_channel*)chp;
1471 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1472 int drive;
1473 int channel = chp->channel;
1474
1475 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1476 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1477 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1478 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1479 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1480 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1481 PIIX_SIDETIM_RTC_MASK(channel));
1482
1483 idedma_ctl = 0;
1484 /* If channel disabled, no need to go further */
1485 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1486 return;
1487 /* set up new idetim: Enable IDE registers decode */
1488 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1489
1490 /* setup DMA if needed */
1491 pciide_channel_dma_setup(cp);
1492
1493 for (drive = 0; drive < 2; drive++) {
1494 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1495 PIIX_UDMATIM_SET(0x3, channel, drive));
1496 drvp = &chp->ch_drive[drive];
1497 /* If no drive, skip */
1498 if ((drvp->drive_flags & DRIVE) == 0)
1499 continue;
1500 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1501 (drvp->drive_flags & DRIVE_UDMA) == 0))
1502 goto pio;
1503
1504 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1505 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1506 ideconf |= PIIX_CONFIG_PINGPONG;
1507 }
1508 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1509 /* setup Ultra/66 */
1510 if (drvp->UDMA_mode > 2 &&
1511 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1512 drvp->UDMA_mode = 2;
1513 if (drvp->UDMA_mode > 2)
1514 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1515 else
1516 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1517 }
1518 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1519 (drvp->drive_flags & DRIVE_UDMA)) {
1520 /* use Ultra/DMA */
1521 drvp->drive_flags &= ~DRIVE_DMA;
1522 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1523 udmareg |= PIIX_UDMATIM_SET(
1524 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1525 } else {
1526 /* use Multiword DMA */
1527 drvp->drive_flags &= ~DRIVE_UDMA;
1528 if (drive == 0) {
1529 idetim |= piix_setup_idetim_timings(
1530 drvp->DMA_mode, 1, channel);
1531 } else {
1532 sidetim |= piix_setup_sidetim_timings(
1533 drvp->DMA_mode, 1, channel);
1534 idetim = PIIX_IDETIM_SET(idetim,
1535 PIIX_IDETIM_SITRE, channel);
1536 }
1537 }
1538 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1539
1540 pio: /* use PIO mode */
1541 idetim |= piix_setup_idetim_drvs(drvp);
1542 if (drive == 0) {
1543 idetim |= piix_setup_idetim_timings(
1544 drvp->PIO_mode, 0, channel);
1545 } else {
1546 sidetim |= piix_setup_sidetim_timings(
1547 drvp->PIO_mode, 0, channel);
1548 idetim = PIIX_IDETIM_SET(idetim,
1549 PIIX_IDETIM_SITRE, channel);
1550 }
1551 }
1552 if (idedma_ctl != 0) {
1553 /* Add software bits in status register */
1554 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1555 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1556 idedma_ctl);
1557 }
1558 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1559 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1560 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1561 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1562 pciide_print_modes(cp);
1563 }
1564
1565
1566 /* setup ISP and RTC fields, based on mode */
1567 static u_int32_t
1568 piix_setup_idetim_timings(mode, dma, channel)
1569 u_int8_t mode;
1570 u_int8_t dma;
1571 u_int8_t channel;
1572 {
1573
1574 if (dma)
1575 return PIIX_IDETIM_SET(0,
1576 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1577 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1578 channel);
1579 else
1580 return PIIX_IDETIM_SET(0,
1581 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1582 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1583 channel);
1584 }
1585
1586 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1587 static u_int32_t
1588 piix_setup_idetim_drvs(drvp)
1589 struct ata_drive_datas *drvp;
1590 {
1591 u_int32_t ret = 0;
1592 struct channel_softc *chp = drvp->chnl_softc;
1593 u_int8_t channel = chp->channel;
1594 u_int8_t drive = drvp->drive;
1595
1596 /*
1597 * If the drive is using UDMA, the timing setup is independent,
1598 * so just check DMA and PIO here.
1599 */
1600 if (drvp->drive_flags & DRIVE_DMA) {
1601 /* if mode = DMA mode 0, use compatible timings */
1602 if ((drvp->drive_flags & DRIVE_DMA) &&
1603 drvp->DMA_mode == 0) {
1604 drvp->PIO_mode = 0;
1605 return ret;
1606 }
1607 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1608 /*
1609 * If the PIO and DMA timings are the same, use fast timings for PIO
1610 * too; otherwise fall back to compat timings (PIO mode 0).
1611 */
1612 if ((piix_isp_pio[drvp->PIO_mode] !=
1613 piix_isp_dma[drvp->DMA_mode]) ||
1614 (piix_rtc_pio[drvp->PIO_mode] !=
1615 piix_rtc_dma[drvp->DMA_mode]))
1616 drvp->PIO_mode = 0;
1617 /* if PIO mode <= 2, use compat timings for PIO */
1618 if (drvp->PIO_mode <= 2) {
1619 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1620 channel);
1621 return ret;
1622 }
1623 }
1624
1625 /*
1626 * Now setup PIO modes. If mode < 2, use compat timings.
1627 * Else enable fast timings. Enable IORDY and prefetch/post
1628 * if PIO mode >= 3.
1629 */
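	/*
	 * For example (illustrative): a PIO mode 4 drive gets the TIME bit
	 * plus IE (IORDY sampling) and PPE (prefetch/posting); a PIO mode 2
	 * drive gets only TIME; a PIO mode 1 drive gets no bits and runs
	 * with compatible timings.
	 */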
1630
1631 if (drvp->PIO_mode < 2)
1632 return ret;
1633
1634 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1635 if (drvp->PIO_mode >= 3) {
1636 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1637 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1638 }
1639 return ret;
1640 }
1641
1642 /* setup values in SIDETIM registers, based on mode */
1643 static u_int32_t
1644 piix_setup_sidetim_timings(mode, dma, channel)
1645 u_int8_t mode;
1646 u_int8_t dma;
1647 u_int8_t channel;
1648 {
1649 if (dma)
1650 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1651 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1652 else
1653 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1654 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1655 }
1656
1657 void
1658 amd756_chip_map(sc, pa)
1659 struct pciide_softc *sc;
1660 struct pci_attach_args *pa;
1661 {
1662 struct pciide_channel *cp;
1663 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1664 sc->sc_tag, PCI_CLASS_REG));
1665 int channel;
1666 pcireg_t chanenable;
1667 bus_size_t cmdsize, ctlsize;
1668
1669 if (pciide_chipen(sc, pa) == 0)
1670 return;
1671 printf("%s: bus-master DMA support present",
1672 sc->sc_wdcdev.sc_dev.dv_xname);
1673 pciide_mapreg_dma(sc, pa);
1674 printf("\n");
1675 if (sc->sc_dma_ok)
1676 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1677 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1678 WDC_CAPABILITY_MODE;
1679 sc->sc_wdcdev.PIO_cap = 4;
1680 sc->sc_wdcdev.DMA_cap = 2;
1681 sc->sc_wdcdev.UDMA_cap = 4;
1682 sc->sc_wdcdev.set_modes = amd756_setup_channel;
1683 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1684 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1685 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1686
1687 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1688 DEBUG_PROBE);
1689 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1690 cp = &sc->pciide_channels[channel];
1691 if (pciide_chansetup(sc, channel, interface) == 0)
1692 continue;
1693
1694 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1695 printf("%s: %s channel ignored (disabled)\n",
1696 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1697 continue;
1698 }
1699 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1700 pciide_pci_intr);
1701
1702 if (pciide_chan_candisable(cp))
1703 chanenable &= ~AMD756_CHAN_EN(channel);
1704 pciide_map_compat_intr(pa, cp, channel, interface);
1705 if (cp->hw_ok == 0)
1706 continue;
1707
1708 amd756_setup_channel(&cp->wdc_channel);
1709 }
1710 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1711 chanenable);
1712 return;
1713 }
1714
1715 void
1716 amd756_setup_channel(chp)
1717 struct channel_softc *chp;
1718 {
1719 u_int32_t udmatim_reg, datatim_reg;
1720 u_int8_t idedma_ctl;
1721 int mode, drive;
1722 struct ata_drive_datas *drvp;
1723 struct pciide_channel *cp = (struct pciide_channel*)chp;
1724 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1725
1726 idedma_ctl = 0;
1727 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1728 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1729 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1730 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1731
1732 /* setup DMA if needed */
1733 pciide_channel_dma_setup(cp);
1734
1735 for (drive = 0; drive < 2; drive++) {
1736 drvp = &chp->ch_drive[drive];
1737 /* If no drive, skip */
1738 if ((drvp->drive_flags & DRIVE) == 0)
1739 continue;
1740 /* add timing values, setup DMA if needed */
1741 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1742 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1743 mode = drvp->PIO_mode;
1744 goto pio;
1745 }
1746 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1747 (drvp->drive_flags & DRIVE_UDMA)) {
1748 /* use Ultra/DMA */
1749 drvp->drive_flags &= ~DRIVE_DMA;
1750 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1751 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1752 AMD756_UDMA_TIME(chp->channel, drive,
1753 amd756_udma_tim[drvp->UDMA_mode]);
1754 /* can use PIO timings, MW DMA unused */
1755 mode = drvp->PIO_mode;
1756 } else {
1757 /* use Multiword DMA */
1758 drvp->drive_flags &= ~DRIVE_UDMA;
1759 /* mode = min(pio, dma+2) */
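			/*
			 * e.g. PIO 4 with MW DMA 1 gives mode 3, and PIO 4
			 * with MW DMA 2 gives mode 4; presumably a single
			 * value works because MW DMA mode N cycle times are
			 * close to those of PIO mode N+2.
			 */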
1760 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1761 mode = drvp->PIO_mode;
1762 else
1763 mode = drvp->DMA_mode + 2;
1764 }
1765 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1766
1767 pio: /* setup PIO mode */
1768 if (mode <= 2) {
1769 drvp->DMA_mode = 0;
1770 drvp->PIO_mode = 0;
1771 mode = 0;
1772 } else {
1773 drvp->PIO_mode = mode;
1774 drvp->DMA_mode = mode - 2;
1775 }
1776 datatim_reg |=
1777 AMD756_DATATIM_PULSE(chp->channel, drive,
1778 amd756_pio_set[mode]) |
1779 AMD756_DATATIM_RECOV(chp->channel, drive,
1780 amd756_pio_rec[mode]);
1781 }
1782 if (idedma_ctl != 0) {
1783 /* Add software bits in status register */
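		/*
		 * IDEDMA_CTL_DRV_DMA(drive) was set above for each drive
		 * configured for DMA; these appear to be the software-defined
		 * "drive N DMA capable" bits of the bus-master status
		 * register, which is what "software bits" refers to.
		 */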
1784 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1785 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1786 idedma_ctl);
1787 }
1788 pciide_print_modes(cp);
1789 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1790 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1791 }
1792
1793 void
1794 apollo_chip_map(sc, pa)
1795 struct pciide_softc *sc;
1796 struct pci_attach_args *pa;
1797 {
1798 struct pciide_channel *cp;
1799 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1800 sc->sc_tag, PCI_CLASS_REG));
1801 int channel;
1802 u_int32_t ideconf;
1803 bus_size_t cmdsize, ctlsize;
1804
1805 if (pciide_chipen(sc, pa) == 0)
1806 return;
1807 printf("%s: bus-master DMA support present",
1808 sc->sc_wdcdev.sc_dev.dv_xname);
1809 pciide_mapreg_dma(sc, pa);
1810 printf("\n");
1811 if (sc->sc_dma_ok) {
1812 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
1813 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE)
1814 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1815 }
1816 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_MODE;
1817 sc->sc_wdcdev.PIO_cap = 4;
1818 sc->sc_wdcdev.DMA_cap = 2;
1819 sc->sc_wdcdev.UDMA_cap = 2;
1820 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1821 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1822 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1823 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1824
1825 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1826 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1827 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1828 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1829 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1830 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
1831 DEBUG_PROBE);
1832
1833 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1834 cp = &sc->pciide_channels[channel];
1835 if (pciide_chansetup(sc, channel, interface) == 0)
1836 continue;
1837
1838 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
1839 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
1840 printf("%s: %s channel ignored (disabled)\n",
1841 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1842 continue;
1843 }
1844 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1845 pciide_pci_intr);
1846 if (cp->hw_ok == 0)
1847 continue;
1848 if (pciide_chan_candisable(cp)) {
1849 ideconf &= ~APO_IDECONF_EN(channel);
1850 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
1851 ideconf);
1852 }
1853 pciide_map_compat_intr(pa, cp, channel, interface);
1854
1855 if (cp->hw_ok == 0)
1856 continue;
1857 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
1858 }
1859 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1860 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1861 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
1862 }
1863
1864 void
1865 apollo_setup_channel(chp)
1866 struct channel_softc *chp;
1867 {
1868 u_int32_t udmatim_reg, datatim_reg;
1869 u_int8_t idedma_ctl;
1870 int mode, drive;
1871 struct ata_drive_datas *drvp;
1872 struct pciide_channel *cp = (struct pciide_channel*)chp;
1873 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1874
1875 idedma_ctl = 0;
1876 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
1877 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1878 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
1879 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
1880
1881 /* setup DMA if needed */
1882 pciide_channel_dma_setup(cp);
1883
1884 for (drive = 0; drive < 2; drive++) {
1885 drvp = &chp->ch_drive[drive];
1886 /* If no drive, skip */
1887 if ((drvp->drive_flags & DRIVE) == 0)
1888 continue;
1889 /* add timing values, setup DMA if needed */
1890 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1891 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1892 mode = drvp->PIO_mode;
1893 goto pio;
1894 }
1895 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1896 (drvp->drive_flags & DRIVE_UDMA)) {
1897 /* use Ultra/DMA */
1898 drvp->drive_flags &= ~DRIVE_DMA;
1899 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
1900 APO_UDMA_EN_MTH(chp->channel, drive) |
1901 APO_UDMA_TIME(chp->channel, drive,
1902 apollo_udma_tim[drvp->UDMA_mode]);
1903 /* can use PIO timings, MW DMA unused */
1904 mode = drvp->PIO_mode;
1905 } else {
1906 /* use Multiword DMA */
1907 drvp->drive_flags &= ~DRIVE_UDMA;
1908 /* mode = min(pio, dma+2) */
1909 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1910 mode = drvp->PIO_mode;
1911 else
1912 mode = drvp->DMA_mode + 2;
1913 }
1914 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1915
1916 pio: /* setup PIO mode */
1917 if (mode <= 2) {
1918 drvp->DMA_mode = 0;
1919 drvp->PIO_mode = 0;
1920 mode = 0;
1921 } else {
1922 drvp->PIO_mode = mode;
1923 drvp->DMA_mode = mode - 2;
1924 }
1925 datatim_reg |=
1926 APO_DATATIM_PULSE(chp->channel, drive,
1927 apollo_pio_set[mode]) |
1928 APO_DATATIM_RECOV(chp->channel, drive,
1929 apollo_pio_rec[mode]);
1930 }
1931 if (idedma_ctl != 0) {
1932 /* Add software bits in status register */
1933 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1934 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1935 idedma_ctl);
1936 }
1937 pciide_print_modes(cp);
1938 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
1939 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
1940 }
1941
1942 void
1943 cmd_channel_map(pa, sc, channel)
1944 struct pci_attach_args *pa;
1945 struct pciide_softc *sc;
1946 int channel;
1947 {
1948 struct pciide_channel *cp = &sc->pciide_channels[channel];
1949 bus_size_t cmdsize, ctlsize;
1950 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
1951 int interface =
1952 PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1953
1954 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1955 cp->name = PCIIDE_CHANNEL_NAME(channel);
1956 cp->wdc_channel.channel = channel;
1957 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1958
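	/*
	 * Both channels share channel 0's command queue; presumably the two
	 * channels of the CMD 064x cannot run transfers concurrently, so
	 * requests have to be serialized through a single queue.
	 */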
1959 if (channel > 0) {
1960 cp->wdc_channel.ch_queue =
1961 sc->pciide_channels[0].wdc_channel.ch_queue;
1962 } else {
1963 cp->wdc_channel.ch_queue =
1964 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1965 }
1966 if (cp->wdc_channel.ch_queue == NULL) {
1967 printf("%s %s channel: "
1968 		    "can't allocate memory for command queue\n",
1969 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1970 return;
1971 }
1972
1973 printf("%s: %s channel %s to %s mode\n",
1974 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1975 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1976 "configured" : "wired",
1977 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1978 "native-PCI" : "compatibility");
1979
1980 /*
1981 	 * With a CMD PCI064x, if we get here, the first channel is enabled:
1982 	 * there's no way to disable the first channel without disabling
1983 	 * the whole device.
1984 */
1985 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
1986 printf("%s: %s channel ignored (disabled)\n",
1987 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1988 return;
1989 }
1990
1991 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
1992 if (cp->hw_ok == 0)
1993 return;
1994 if (channel == 1) {
1995 if (pciide_chan_candisable(cp)) {
1996 ctrl &= ~CMD_CTRL_2PORT;
1997 pciide_pci_write(pa->pa_pc, pa->pa_tag,
1998 CMD_CTRL, ctrl);
1999 }
2000 }
2001 pciide_map_compat_intr(pa, cp, channel, interface);
2002 }
2003
2004 int
2005 cmd_pci_intr(arg)
2006 void *arg;
2007 {
2008 struct pciide_softc *sc = arg;
2009 struct pciide_channel *cp;
2010 struct channel_softc *wdc_cp;
2011 int i, rv, crv;
2012 u_int32_t priirq, secirq;
2013
2014 rv = 0;
2015 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2016 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
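	/*
	 * The primary channel's interrupt bit apparently lives in CMD_CONF
	 * (CMD_CONF_DRV0_INTR) and the secondary channel's in CMD_ARTTIM23
	 * (CMD_ARTTIM23_IRQ), hence the two reads above.
	 */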
2017 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2018 cp = &sc->pciide_channels[i];
2019 wdc_cp = &cp->wdc_channel;
2020 		/* If a compat channel, skip. */
2021 if (cp->compat)
2022 continue;
2023 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2024 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2025 crv = wdcintr(wdc_cp);
2026 if (crv == 0)
2027 printf("%s:%d: bogus intr\n",
2028 sc->sc_wdcdev.sc_dev.dv_xname, i);
2029 else
2030 rv = 1;
2031 }
2032 }
2033 return rv;
2034 }
2035
2036 void
2037 cmd_chip_map(sc, pa)
2038 struct pciide_softc *sc;
2039 struct pci_attach_args *pa;
2040 {
2041 int channel;
2042
2043 /*
2044 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2045 	 * and of the base address registers can be disabled at
2046 	 * the hardware level. In this case, the device is wired
2047 * in compat mode and its first channel is always enabled,
2048 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2049 * In fact, it seems that the first channel of the CMD PCI0640
2050 * can't be disabled.
2051 */
2052
2053 #ifdef PCIIDE_CMD064x_DISABLE
2054 if (pciide_chipen(sc, pa) == 0)
2055 return;
2056 #endif
2057
2058 printf("%s: hardware does not support DMA\n",
2059 sc->sc_wdcdev.sc_dev.dv_xname);
2060 sc->sc_dma_ok = 0;
2061
2062 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2063 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2064 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
2065
2066 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2067 cmd_channel_map(pa, sc, channel);
2068 }
2069 }
2070
2071 void
2072 cmd0643_6_chip_map(sc, pa)
2073 struct pciide_softc *sc;
2074 struct pci_attach_args *pa;
2075 {
2076 struct pciide_channel *cp;
2077 int channel;
2078
2079 /*
2080 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2081 	 * and of the base address registers can be disabled at
2082 	 * the hardware level. In this case, the device is wired
2083 * in compat mode and its first channel is always enabled,
2084 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2085 * In fact, it seems that the first channel of the CMD PCI0640
2086 * can't be disabled.
2087 */
2088
2089 #ifdef PCIIDE_CMD064x_DISABLE
2090 if (pciide_chipen(sc, pa) == 0)
2091 return;
2092 #endif
2093 printf("%s: bus-master DMA support present",
2094 sc->sc_wdcdev.sc_dev.dv_xname);
2095 pciide_mapreg_dma(sc, pa);
2096 printf("\n");
2097 if (sc->sc_dma_ok)
2098 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2099
2100 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2101 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2102 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2103 WDC_CAPABILITY_MODE;
2104 sc->sc_wdcdev.PIO_cap = 4;
2105 sc->sc_wdcdev.DMA_cap = 2;
2106 sc->sc_wdcdev.set_modes = cmd0643_6_setup_channel;
2107
2108 WDCDEBUG_PRINT(("cmd0643_6_chip_map: old timings reg 0x%x 0x%x\n",
2109 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2110 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2111 DEBUG_PROBE);
2112
2113 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2114 cp = &sc->pciide_channels[channel];
2115 cmd_channel_map(pa, sc, channel);
2116 if (cp->hw_ok == 0)
2117 continue;
2118 cmd0643_6_setup_channel(&cp->wdc_channel);
2119 }
2120 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2121 WDCDEBUG_PRINT(("cmd0643_6_chip_map: timings reg now 0x%x 0x%x\n",
2122 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2123 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2124 DEBUG_PROBE);
2125 }
2126
2127 void
2128 cmd0643_6_setup_channel(chp)
2129 struct channel_softc *chp;
2130 {
2131 struct ata_drive_datas *drvp;
2132 u_int8_t tim;
2133 u_int32_t idedma_ctl;
2134 int drive;
2135 struct pciide_channel *cp = (struct pciide_channel*)chp;
2136 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2137
2138 idedma_ctl = 0;
2139 /* setup DMA if needed */
2140 pciide_channel_dma_setup(cp);
2141
2142 for (drive = 0; drive < 2; drive++) {
2143 drvp = &chp->ch_drive[drive];
2144 /* If no drive, skip */
2145 if ((drvp->drive_flags & DRIVE) == 0)
2146 continue;
2147 /* add timing values, setup DMA if needed */
2148 tim = cmd0643_6_data_tim_pio[drvp->PIO_mode];
2149 if (drvp->drive_flags & DRIVE_DMA) {
2150 /*
2151 * use Multiword DMA.
2152 * Timings will be used for both PIO and DMA, so adjust
2153 * DMA mode if needed
2154 */
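			/* e.g. PIO 3 with MW DMA 2 is trimmed to MW DMA 1 */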
2155 if (drvp->PIO_mode >= 3 &&
2156 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2157 drvp->DMA_mode = drvp->PIO_mode - 2;
2158 }
2159 tim = cmd0643_6_data_tim_dma[drvp->DMA_mode];
2160 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2161 }
2162 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2163 CMD_DATA_TIM(chp->channel, drive), tim);
2164 }
2165 if (idedma_ctl != 0) {
2166 /* Add software bits in status register */
2167 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2168 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2169 idedma_ctl);
2170 }
2171 pciide_print_modes(cp);
2172 }
2173
2174 void
2175 cy693_chip_map(sc, pa)
2176 struct pciide_softc *sc;
2177 struct pci_attach_args *pa;
2178 {
2179 struct pciide_channel *cp;
2180 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
2181 sc->sc_tag, PCI_CLASS_REG));
2182 bus_size_t cmdsize, ctlsize;
2183
2184 if (pciide_chipen(sc, pa) == 0)
2185 return;
2186 /*
2187 	 * This chip has 2 PCI IDE functions, one for primary and one for
2188 	 * secondary, so we need to call pciide_mapregs_compat() with
2189 	 * the real channel.
2190 */
2191 if (pa->pa_function == 1) {
2192 sc->sc_cy_compatchan = 0;
2193 } else if (pa->pa_function == 2) {
2194 sc->sc_cy_compatchan = 1;
2195 } else {
2196 printf("%s: unexpected PCI function %d\n",
2197 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2198 return;
2199 }
2200 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2201 printf("%s: bus-master DMA support present",
2202 sc->sc_wdcdev.sc_dev.dv_xname);
2203 pciide_mapreg_dma(sc, pa);
2204 } else {
2205 printf("%s: hardware does not support DMA",
2206 sc->sc_wdcdev.sc_dev.dv_xname);
2207 sc->sc_dma_ok = 0;
2208 }
2209 printf("\n");
2210
2211 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2212 if (sc->sc_cy_handle == NULL) {
2213 printf("%s: unable to map hyperCache control registers\n",
2214 sc->sc_wdcdev.sc_dev.dv_xname);
2215 sc->sc_dma_ok = 0;
2216 }
2217
2218 if (sc->sc_dma_ok)
2219 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2220 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2221 WDC_CAPABILITY_MODE;
2222 sc->sc_wdcdev.PIO_cap = 4;
2223 sc->sc_wdcdev.DMA_cap = 2;
2224 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2225
2226 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2227 sc->sc_wdcdev.nchannels = 1;
2228
2229 /* Only one channel for this chip; if we are here it's enabled */
2230 cp = &sc->pciide_channels[0];
2231 sc->wdc_chanarray[0] = &cp->wdc_channel;
2232 cp->name = PCIIDE_CHANNEL_NAME(0);
2233 cp->wdc_channel.channel = 0;
2234 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2235 cp->wdc_channel.ch_queue =
2236 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2237 if (cp->wdc_channel.ch_queue == NULL) {
2238 printf("%s primary channel: "
2239 		    "can't allocate memory for command queue\n",
2240 sc->sc_wdcdev.sc_dev.dv_xname);
2241 return;
2242 }
2243 printf("%s: primary channel %s to ",
2244 sc->sc_wdcdev.sc_dev.dv_xname,
2245 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2246 "configured" : "wired");
2247 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2248 printf("native-PCI");
2249 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2250 pciide_pci_intr);
2251 } else {
2252 printf("compatibility");
2253 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2254 &cmdsize, &ctlsize);
2255 }
2256 printf(" mode\n");
2257 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2258 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2259 wdcattach(&cp->wdc_channel);
2260 if (pciide_chan_candisable(cp)) {
2261 pci_conf_write(sc->sc_pc, sc->sc_tag,
2262 PCI_COMMAND_STATUS_REG, 0);
2263 }
2264 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2265 if (cp->hw_ok == 0)
2266 return;
2267 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2268 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2269 cy693_setup_channel(&cp->wdc_channel);
2270 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2271 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2272 }
2273
2274 void
2275 cy693_setup_channel(chp)
2276 struct channel_softc *chp;
2277 {
2278 struct ata_drive_datas *drvp;
2279 int drive;
2280 u_int32_t cy_cmd_ctrl;
2281 u_int32_t idedma_ctl;
2282 struct pciide_channel *cp = (struct pciide_channel*)chp;
2283 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2284 int dma_mode = -1;
2285
2286 cy_cmd_ctrl = idedma_ctl = 0;
2287
2288 /* setup DMA if needed */
2289 pciide_channel_dma_setup(cp);
2290
2291 for (drive = 0; drive < 2; drive++) {
2292 drvp = &chp->ch_drive[drive];
2293 /* If no drive, skip */
2294 if ((drvp->drive_flags & DRIVE) == 0)
2295 continue;
2296 /* add timing values, setup DMA if needed */
2297 if (drvp->drive_flags & DRIVE_DMA) {
2298 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2299 /* use Multiword DMA */
2300 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2301 dma_mode = drvp->DMA_mode;
2302 }
2303 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2304 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2305 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2306 CY_CMD_CTRL_IOW_REC_OFF(drive));
2307 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2308 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2309 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2310 CY_CMD_CTRL_IOR_REC_OFF(drive));
2311 }
2312 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
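	/*
	 * The cy82c693 apparently has a single DMA timing setting per
	 * channel, so both drives are forced to the slowest DMA mode
	 * negotiated above before it is written out below.
	 */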
2313 chp->ch_drive[0].DMA_mode = dma_mode;
2314 chp->ch_drive[1].DMA_mode = dma_mode;
2315
2316 if (dma_mode == -1)
2317 dma_mode = 0;
2318
2319 if (sc->sc_cy_handle != NULL) {
2320 /* Note: `multiple' is implied. */
2321 cy82c693_write(sc->sc_cy_handle,
2322 (sc->sc_cy_compatchan == 0) ?
2323 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2324 }
2325
2326 pciide_print_modes(cp);
2327
2328 if (idedma_ctl != 0) {
2329 /* Add software bits in status register */
2330 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2331 IDEDMA_CTL, idedma_ctl);
2332 }
2333 }
2334
2335 void
2336 sis_chip_map(sc, pa)
2337 struct pciide_softc *sc;
2338 struct pci_attach_args *pa;
2339 {
2340 struct pciide_channel *cp;
2341 int channel;
2342 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2343 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
2344 sc->sc_tag, PCI_CLASS_REG));
2345 pcireg_t rev = PCI_REVISION(pci_conf_read(sc->sc_pc,
2346 sc->sc_tag, PCI_CLASS_REG));
2347 bus_size_t cmdsize, ctlsize;
2348
2349 if (pciide_chipen(sc, pa) == 0)
2350 return;
2351 printf("%s: bus-master DMA support present",
2352 sc->sc_wdcdev.sc_dev.dv_xname);
2353 pciide_mapreg_dma(sc, pa);
2354 printf("\n");
2355 if (sc->sc_dma_ok) {
2356 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2357 if (rev >= 0xd0)
2358 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2359 }
2360
2361 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2362 WDC_CAPABILITY_MODE;
2363 sc->sc_wdcdev.PIO_cap = 4;
2364 sc->sc_wdcdev.DMA_cap = 2;
2365 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2366 sc->sc_wdcdev.UDMA_cap = 2;
2367 sc->sc_wdcdev.set_modes = sis_setup_channel;
2368
2369 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2370 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2371
2372 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2373 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2374 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2375
2376 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2377 cp = &sc->pciide_channels[channel];
2378 if (pciide_chansetup(sc, channel, interface) == 0)
2379 continue;
2380 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2381 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2382 printf("%s: %s channel ignored (disabled)\n",
2383 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2384 continue;
2385 }
2386 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2387 pciide_pci_intr);
2388 if (cp->hw_ok == 0)
2389 continue;
2390 if (pciide_chan_candisable(cp)) {
2391 if (channel == 0)
2392 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2393 else
2394 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2395 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2396 sis_ctr0);
2397 }
2398 pciide_map_compat_intr(pa, cp, channel, interface);
2399 if (cp->hw_ok == 0)
2400 continue;
2401 sis_setup_channel(&cp->wdc_channel);
2402 }
2403 }
2404
2405 void
2406 sis_setup_channel(chp)
2407 struct channel_softc *chp;
2408 {
2409 struct ata_drive_datas *drvp;
2410 int drive;
2411 u_int32_t sis_tim;
2412 u_int32_t idedma_ctl;
2413 struct pciide_channel *cp = (struct pciide_channel*)chp;
2414 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2415
2416 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2417 "channel %d 0x%x\n", chp->channel,
2418 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2419 DEBUG_PROBE);
2420 sis_tim = 0;
2421 idedma_ctl = 0;
2422 /* setup DMA if needed */
2423 pciide_channel_dma_setup(cp);
2424
2425 for (drive = 0; drive < 2; drive++) {
2426 drvp = &chp->ch_drive[drive];
2427 /* If no drive, skip */
2428 if ((drvp->drive_flags & DRIVE) == 0)
2429 continue;
2430 /* add timing values, setup DMA if needed */
2431 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2432 (drvp->drive_flags & DRIVE_UDMA) == 0)
2433 goto pio;
2434
2435 if (drvp->drive_flags & DRIVE_UDMA) {
2436 /* use Ultra/DMA */
2437 drvp->drive_flags &= ~DRIVE_DMA;
2438 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2439 SIS_TIM_UDMA_TIME_OFF(drive);
2440 sis_tim |= SIS_TIM_UDMA_EN(drive);
2441 } else {
2442 /*
2443 * use Multiword DMA
2444 * Timings will be used for both PIO and DMA,
2445 * so adjust DMA mode if needed
2446 */
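			/* e.g. PIO 4 with MW DMA 1 ends up as PIO 3 / MW DMA 1 */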
2447 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2448 drvp->PIO_mode = drvp->DMA_mode + 2;
2449 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2450 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2451 drvp->PIO_mode - 2 : 0;
2452 if (drvp->DMA_mode == 0)
2453 drvp->PIO_mode = 0;
2454 }
2455 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2456 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2457 SIS_TIM_ACT_OFF(drive);
2458 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2459 SIS_TIM_REC_OFF(drive);
2460 }
2461 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2462 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2463 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2464 if (idedma_ctl != 0) {
2465 /* Add software bits in status register */
2466 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2467 IDEDMA_CTL, idedma_ctl);
2468 }
2469 pciide_print_modes(cp);
2470 }
2471
2472 void
2473 acer_chip_map(sc, pa)
2474 struct pciide_softc *sc;
2475 struct pci_attach_args *pa;
2476 {
2477 struct pciide_channel *cp;
2478 int channel;
2479 pcireg_t cr, interface;
2480 bus_size_t cmdsize, ctlsize;
2481
2482 if (pciide_chipen(sc, pa) == 0)
2483 return;
2484 printf("%s: bus-master DMA support present",
2485 sc->sc_wdcdev.sc_dev.dv_xname);
2486 pciide_mapreg_dma(sc, pa);
2487 printf("\n");
2488 if (sc->sc_dma_ok)
2489 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2490
2491 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2492 WDC_CAPABILITY_MODE;
2493
2494 sc->sc_wdcdev.PIO_cap = 4;
2495 sc->sc_wdcdev.DMA_cap = 2;
2496 sc->sc_wdcdev.UDMA_cap = 2;
2497 sc->sc_wdcdev.set_modes = acer_setup_channel;
2498 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2499 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2500
2501 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2502 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2503 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2504
2505 /* Enable "microsoft register bits" R/W. */
2506 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2507 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2508 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2509 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2510 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2511 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2512 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2513 ~ACER_CHANSTATUSREGS_RO);
2514 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2515 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2516 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2517 /* Don't use cr, re-read the real register content instead */
2518 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2519 PCI_CLASS_REG));
2520
2521 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2522 cp = &sc->pciide_channels[channel];
2523 if (pciide_chansetup(sc, channel, interface) == 0)
2524 continue;
2525 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2526 printf("%s: %s channel ignored (disabled)\n",
2527 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2528 continue;
2529 }
2530 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2531 acer_pci_intr);
2532 if (cp->hw_ok == 0)
2533 continue;
2534 if (pciide_chan_candisable(cp)) {
2535 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2536 pci_conf_write(sc->sc_pc, sc->sc_tag,
2537 PCI_CLASS_REG, cr);
2538 }
2539 pciide_map_compat_intr(pa, cp, channel, interface);
2540 acer_setup_channel(&cp->wdc_channel);
2541 }
2542 }
2543
2544 void
2545 acer_setup_channel(chp)
2546 struct channel_softc *chp;
2547 {
2548 struct ata_drive_datas *drvp;
2549 int drive;
2550 u_int32_t acer_fifo_udma;
2551 u_int32_t idedma_ctl;
2552 struct pciide_channel *cp = (struct pciide_channel*)chp;
2553 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2554
2555 idedma_ctl = 0;
2556 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2557 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2558 acer_fifo_udma), DEBUG_PROBE);
2559 /* setup DMA if needed */
2560 pciide_channel_dma_setup(cp);
2561
2562 for (drive = 0; drive < 2; drive++) {
2563 drvp = &chp->ch_drive[drive];
2564 /* If no drive, skip */
2565 if ((drvp->drive_flags & DRIVE) == 0)
2566 continue;
2567 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2568 "channel %d drive %d 0x%x\n", chp->channel, drive,
2569 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2570 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2571 /* clear FIFO/DMA mode */
2572 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2573 ACER_UDMA_EN(chp->channel, drive) |
2574 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2575
2576 /* add timing values, setup DMA if needed */
2577 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2578 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2579 acer_fifo_udma |=
2580 ACER_FTH_OPL(chp->channel, drive, 0x1);
2581 goto pio;
2582 }
2583
2584 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2585 if (drvp->drive_flags & DRIVE_UDMA) {
2586 /* use Ultra/DMA */
2587 drvp->drive_flags &= ~DRIVE_DMA;
2588 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2589 acer_fifo_udma |=
2590 ACER_UDMA_TIM(chp->channel, drive,
2591 acer_udma[drvp->UDMA_mode]);
2592 } else {
2593 /*
2594 * use Multiword DMA
2595 * Timings will be used for both PIO and DMA,
2596 * so adjust DMA mode if needed
2597 */
2598 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2599 drvp->PIO_mode = drvp->DMA_mode + 2;
2600 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2601 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2602 drvp->PIO_mode - 2 : 0;
2603 if (drvp->DMA_mode == 0)
2604 drvp->PIO_mode = 0;
2605 }
2606 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2607 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2608 ACER_IDETIM(chp->channel, drive),
2609 acer_pio[drvp->PIO_mode]);
2610 }
2611 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2612 acer_fifo_udma), DEBUG_PROBE);
2613 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2614 if (idedma_ctl != 0) {
2615 /* Add software bits in status register */
2616 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2617 IDEDMA_CTL, idedma_ctl);
2618 }
2619 pciide_print_modes(cp);
2620 }
2621
2622 int
2623 acer_pci_intr(arg)
2624 void *arg;
2625 {
2626 struct pciide_softc *sc = arg;
2627 struct pciide_channel *cp;
2628 struct channel_softc *wdc_cp;
2629 int i, rv, crv;
2630 u_int32_t chids;
2631
2632 rv = 0;
2633 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2634 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2635 cp = &sc->pciide_channels[i];
2636 wdc_cp = &cp->wdc_channel;
2637 		/* If a compat channel, skip. */
2638 if (cp->compat)
2639 continue;
2640 if (chids & ACER_CHIDS_INT(i)) {
2641 crv = wdcintr(wdc_cp);
2642 if (crv == 0)
2643 printf("%s:%d: bogus intr\n",
2644 sc->sc_wdcdev.sc_dev.dv_xname, i);
2645 else
2646 rv = 1;
2647 }
2648 }
2649 return rv;
2650 }
2651
2652 /* A macro to test whether the chip is a Promise Ultra/66 (PDC20262) */
2653 #define PDC_IS_262(sc) ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66)
2654
2655 void
2656 pdc202xx_chip_map(sc, pa)
2657 struct pciide_softc *sc;
2658 struct pci_attach_args *pa;
2659 {
2660 struct pciide_channel *cp;
2661 int channel;
2662 pcireg_t interface, st, mode;
2663 bus_size_t cmdsize, ctlsize;
2664
2665 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2666 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
2667 DEBUG_PROBE);
2668 if (pciide_chipen(sc, pa) == 0)
2669 return;
2670
2671 /* turn off RAID mode */
2672 st &= ~PDC2xx_STATE_IDERAID;
2673
2674 /*
2675 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
2676 	 * RAID mode; we have to fake the interface value.
2677 */
2678 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
2679 if (st & PDC2xx_STATE_NATIVE)
2680 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2681
2682 printf("%s: bus-master DMA support present",
2683 sc->sc_wdcdev.sc_dev.dv_xname);
2684 pciide_mapreg_dma(sc, pa);
2685 printf("\n");
2686 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2687 WDC_CAPABILITY_MODE;
2688 if (sc->sc_dma_ok)
2689 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2690 sc->sc_wdcdev.PIO_cap = 4;
2691 sc->sc_wdcdev.DMA_cap = 2;
2692 if (PDC_IS_262(sc))
2693 sc->sc_wdcdev.UDMA_cap = 4;
2694 else
2695 sc->sc_wdcdev.UDMA_cap = 2;
2696 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
2697 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2698 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2699
2700 /* setup failsafe defaults */
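	/*
	 * Program conservative PIO 0 / MW DMA 0 timings into every drive's
	 * timing register before probing; presumably this keeps the channels
	 * usable even if the BIOS (or a previous RAID-mode setup) left the
	 * registers in an odd state.  Drive 0 additionally gets
	 * PDC2xx_TIM_IORDYp below.
	 */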
2701 mode = 0;
2702 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
2703 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
2704 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
2705 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
2706 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2707 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
2708 "initial timings 0x%x, now 0x%x\n", channel,
2709 pci_conf_read(sc->sc_pc, sc->sc_tag,
2710 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
2711 DEBUG_PROBE);
2712 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
2713 mode | PDC2xx_TIM_IORDYp);
2714 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
2715 "initial timings 0x%x, now 0x%x\n", channel,
2716 pci_conf_read(sc->sc_pc, sc->sc_tag,
2717 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
2718 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
2719 mode);
2720 }
2721
2722 mode = PDC2xx_SCR_DMA;
2723 if (PDC_IS_262(sc)) {
2724 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
2725 } else {
2726 /* the BIOS set it up this way */
2727 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
2728 }
2729 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
2730 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
2731 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
2732 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
2733 DEBUG_PROBE);
2734 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
2735
2736 /* controller initial state register is OK even without BIOS */
2737 /* Set DMA mode to IDE DMA compatibility */
2738 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
2739 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
2740 DEBUG_PROBE);
2741 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
2742 mode | 0x1);
2743 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
2744 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
2745 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
2746 mode | 0x1);
2747
2748 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2749 cp = &sc->pciide_channels[channel];
2750 if (pciide_chansetup(sc, channel, interface) == 0)
2751 continue;
2752 if ((st & (PDC_IS_262(sc) ?
2753 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
2754 printf("%s: %s channel ignored (disabled)\n",
2755 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2756 continue;
2757 }
2758 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2759 pdc202xx_pci_intr);
2760 if (cp->hw_ok == 0)
2761 continue;
2762 if (pciide_chan_candisable(cp))
2763 st &= ~(PDC_IS_262(sc) ?
2764 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
2765 pciide_map_compat_intr(pa, cp, channel, interface);
2766 pdc202xx_setup_channel(&cp->wdc_channel);
2767 }
2768 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
2769 DEBUG_PROBE);
2770 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
2771 return;
2772 }
2773
2774 void
2775 pdc202xx_setup_channel(chp)
2776 struct channel_softc *chp;
2777 {
2778 struct ata_drive_datas *drvp;
2779 int drive;
2780 pcireg_t mode, st;
2781 u_int32_t idedma_ctl, scr, atapi;
2782 struct pciide_channel *cp = (struct pciide_channel*)chp;
2783 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2784 int channel = chp->channel;
2785
2786 /* setup DMA if needed */
2787 pciide_channel_dma_setup(cp);
2788
2789 idedma_ctl = 0;
2790
2791 /* Per channel settings */
2792 if (PDC_IS_262(sc)) {
2793 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2794 PDC262_U66);
2795 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2796 		/* Trim UDMA mode */
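		/*
		 * Ultra/DMA modes above 2 apparently need the 66 MHz clock
		 * and an 80-conductor cable, and the U66 clock select is per
		 * channel; so if the cable-detect bit is clear, or either
		 * drive only negotiated UDMA 2 or less, both drives are
		 * capped at UDMA 2 here.
		 */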
2797 if ((st & PDC262_STATE_80P(channel)) == 0 ||
2798 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
2799 chp->ch_drive[0].UDMA_mode <= 2) ||
2800 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
2801 chp->ch_drive[1].UDMA_mode <= 2)) {
2802 if (chp->ch_drive[0].UDMA_mode > 2)
2803 chp->ch_drive[0].UDMA_mode = 2;
2804 if (chp->ch_drive[1].UDMA_mode > 2)
2805 chp->ch_drive[1].UDMA_mode = 2;
2806 }
2807 /* Set U66 if needed */
2808 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
2809 chp->ch_drive[0].UDMA_mode > 2) ||
2810 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
2811 chp->ch_drive[1].UDMA_mode > 2))
2812 scr |= PDC262_U66_EN(channel);
2813 else
2814 scr &= ~PDC262_U66_EN(channel);
2815 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2816 PDC262_U66, scr);
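		/*
		 * ATAPI handling (the rationale is not spelled out here):
		 * when an ATAPI device is present and exactly one drive uses
		 * Ultra/DMA while the other uses plain MW DMA, Ultra/DMA is
		 * disabled in the ATAPI control register; otherwise
		 * PDC262_ATAPI_UDMA is set.
		 */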
2817 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
2818 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
2819 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2820 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
2821 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
2822 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
2823 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2824 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
2825 atapi = 0;
2826 else
2827 atapi = PDC262_ATAPI_UDMA;
2828 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
2829 PDC262_ATAPI(channel), atapi);
2830 }
2831 }
2832 for (drive = 0; drive < 2; drive++) {
2833 drvp = &chp->ch_drive[drive];
2834 /* If no drive, skip */
2835 if ((drvp->drive_flags & DRIVE) == 0)
2836 continue;
2837 mode = 0;
2838 if (drvp->drive_flags & DRIVE_UDMA) {
2839 mode = PDC2xx_TIM_SET_MB(mode,
2840 pdc2xx_udma_mb[drvp->UDMA_mode]);
2841 mode = PDC2xx_TIM_SET_MC(mode,
2842 pdc2xx_udma_mc[drvp->UDMA_mode]);
2843 drvp->drive_flags &= ~DRIVE_DMA;
2844 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2845 } else if (drvp->drive_flags & DRIVE_DMA) {
2846 mode = PDC2xx_TIM_SET_MB(mode,
2847 pdc2xx_dma_mb[drvp->DMA_mode]);
2848 mode = PDC2xx_TIM_SET_MC(mode,
2849 pdc2xx_dma_mc[drvp->DMA_mode]);
2850 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2851 } else {
2852 mode = PDC2xx_TIM_SET_MB(mode,
2853 pdc2xx_dma_mb[0]);
2854 mode = PDC2xx_TIM_SET_MC(mode,
2855 pdc2xx_dma_mc[0]);
2856 }
2857 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
2858 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
2859 if (drvp->drive_flags & DRIVE_ATA)
2860 mode |= PDC2xx_TIM_PRE;
2861 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
2862 if (drvp->PIO_mode >= 3) {
2863 mode |= PDC2xx_TIM_IORDY;
2864 if (drive == 0)
2865 mode |= PDC2xx_TIM_IORDYp;
2866 }
2867 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
2868 "timings 0x%x\n",
2869 sc->sc_wdcdev.sc_dev.dv_xname,
2870 chp->channel, drive, mode), DEBUG_PROBE);
2871 pci_conf_write(sc->sc_pc, sc->sc_tag,
2872 PDC2xx_TIM(chp->channel, drive), mode);
2873 }
2874 if (idedma_ctl != 0) {
2875 /* Add software bits in status register */
2876 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2877 IDEDMA_CTL, idedma_ctl);
2878 }
2879 pciide_print_modes(cp);
2880 }
2881
2882 int
2883 pdc202xx_pci_intr(arg)
2884 void *arg;
2885 {
2886 struct pciide_softc *sc = arg;
2887 struct pciide_channel *cp;
2888 struct channel_softc *wdc_cp;
2889 int i, rv, crv;
2890 u_int32_t scr;
2891
2892 rv = 0;
2893 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
2894 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2895 cp = &sc->pciide_channels[i];
2896 wdc_cp = &cp->wdc_channel;
2897 		/* If a compat channel, skip. */
2898 if (cp->compat)
2899 continue;
2900 if (scr & PDC2xx_SCR_INT(i)) {
2901 crv = wdcintr(wdc_cp);
2902 if (crv == 0)
2903 printf("%s:%d: bogus intr\n",
2904 sc->sc_wdcdev.sc_dev.dv_xname, i);
2905 else
2906 rv = 1;
2907 }
2908 }
2909 return rv;
2910 }
2911
2912 void
2913 opti_chip_map(sc, pa)
2914 struct pciide_softc *sc;
2915 struct pci_attach_args *pa;
2916 {
2917 struct pciide_channel *cp;
2918 bus_size_t cmdsize, ctlsize;
2919 pcireg_t interface;
2920 u_int8_t init_ctrl;
2921 int channel;
2922
2923 if (pciide_chipen(sc, pa) == 0)
2924 return;
2925 printf("%s: bus-master DMA support present",
2926 sc->sc_wdcdev.sc_dev.dv_xname);
2927 pciide_mapreg_dma(sc, pa);
2928 printf("\n");
2929
2930 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
2931 sc->sc_wdcdev.PIO_cap = 4;
2932 if (sc->sc_dma_ok) {
2933 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2934 sc->sc_wdcdev.DMA_cap = 2;
2935 }
2936 sc->sc_wdcdev.set_modes = opti_setup_channel;
2937
2938 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2939 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2940
2941 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
2942 OPTI_REG_INIT_CONTROL);
2943
2944 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
2945 sc->sc_tag, PCI_CLASS_REG));
2946
2947 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2948 cp = &sc->pciide_channels[channel];
2949 if (pciide_chansetup(sc, channel, interface) == 0)
2950 continue;
2951 if (channel == 1 &&
2952 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
2953 printf("%s: %s channel ignored (disabled)\n",
2954 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2955 continue;
2956 }
2957 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2958 pciide_pci_intr);
2959 if (cp->hw_ok == 0)
2960 continue;
2961 pciide_map_compat_intr(pa, cp, channel, interface);
2962 if (cp->hw_ok == 0)
2963 continue;
2964 opti_setup_channel(&cp->wdc_channel);
2965 }
2966 }
2967
2968 void
2969 opti_setup_channel(chp)
2970 struct channel_softc *chp;
2971 {
2972 struct ata_drive_datas *drvp;
2973 struct pciide_channel *cp = (struct pciide_channel*)chp;
2974 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2975 int drive;
2976 int mode[2];
2977 u_int8_t rv, mr;
2978
2979 /*
2980 * The `Delay' and `Address Setup Time' fields of the
2981 * Miscellaneous Register are always zero initially.
2982 */
2983 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
2984 mr &= ~(OPTI_MISC_DELAY_MASK |
2985 OPTI_MISC_ADDR_SETUP_MASK |
2986 OPTI_MISC_INDEX_MASK);
2987
2988 /* Prime the control register before setting timing values */
2989 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
2990
2991 /* setup DMA if needed */
2992 pciide_channel_dma_setup(cp);
2993
2994 for (drive = 0; drive < 2; drive++) {
2995 drvp = &chp->ch_drive[drive];
2996 /* If no drive, skip */
2997 if ((drvp->drive_flags & DRIVE) == 0) {
2998 mode[drive] = -1;
2999 continue;
3000 }
3001
3002 if ((drvp->drive_flags & DRIVE_DMA)) {
3003 /*
3004 * Timings will be used for both PIO and DMA,
3005 * so adjust DMA mode if needed
3006 */
3007 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3008 drvp->PIO_mode = drvp->DMA_mode + 2;
3009 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3010 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3011 drvp->PIO_mode - 2 : 0;
3012 if (drvp->DMA_mode == 0)
3013 drvp->PIO_mode = 0;
3014
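			/*
			 * The opti timing tables apparently hold PIO modes
			 * 0-4 at indices 0-4 and MW DMA modes 0-2 at indices
			 * 5-7, hence the +5 offset for DMA below.
			 */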
3015 mode[drive] = drvp->DMA_mode + 5;
3016 } else
3017 mode[drive] = drvp->PIO_mode;
3018
3019 if (drive && mode[0] >= 0 &&
3020 (opti_tim_as[mode[0]] != opti_tim_as[mode[1]])) {
3021 /*
3022 * Can't have two drives using different values
3023 * for `Address Setup Time'.
3024 * Slow down the faster drive to compensate.
3025 */
3026 int d;
3027 d = (opti_tim_as[mode[0]] > opti_tim_as[mode[1]])?0:1;
3028
3029 mode[d] = mode[1-d];
3030 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3031 chp->ch_drive[d].DMA_mode = 0;
3032 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3033 }
3034 }
3035
3036 for (drive = 0; drive < 2; drive++) {
3037 int m;
3038 if ((m = mode[drive]) < 0)
3039 continue;
3040
3041 /* Set the Address Setup Time and select appropriate index */
3042 rv = opti_tim_as[m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3043 rv |= OPTI_MISC_INDEX(drive);
3044 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3045
3046 /* Set the pulse width and recovery timing parameters */
3047 rv = opti_tim_cp[m] << OPTI_PULSE_WIDTH_SHIFT;
3048 rv |= opti_tim_rt[m] << OPTI_RECOVERY_TIME_SHIFT;
3049 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3050 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3051
3052 /* Set the Enhanced Mode register appropriately */
3053 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3054 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3055 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3056 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3057 }
3058
3059 /* Finally, enable the timings */
3060 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3061
3062 pciide_print_modes(cp);
3063 }
3064