1 /* $NetBSD: pciide.c,v 1.66 2000/06/07 20:42:52 scw Exp $ */
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119
120 #include <dev/pci/cy82c693var.h>
121
122 /* inlines for reading/writing 8-bit PCI registers */
123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
124 int));
125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
126 int, u_int8_t));
127
128 static __inline u_int8_t
129 pciide_pci_read(pc, pa, reg)
130 pci_chipset_tag_t pc;
131 pcitag_t pa;
132 int reg;
133 {
134
135 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
136 ((reg & 0x03) * 8) & 0xff);
137 }
138
139 static __inline void
140 pciide_pci_write(pc, pa, reg, val)
141 pci_chipset_tag_t pc;
142 pcitag_t pa;
143 int reg;
144 u_int8_t val;
145 {
146 pcireg_t pcival;
147
148 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
149 pcival &= ~(0xff << ((reg & 0x03) * 8));
150 pcival |= (val << ((reg & 0x03) * 8));
151 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
152 }
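/*
 * Worked example (for illustration): reading the 8-bit register at offset
 * 0x41 becomes a 32-bit configuration read at offset 0x40, shifted right by
 * (0x41 & 0x03) * 8 = 8 bits and masked with 0xff.  The write helper
 * read-modify-writes the containing dword, so the three neighbouring bytes
 * are preserved.
 */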
153
154 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
155
156 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157 void piix_setup_channel __P((struct channel_softc*));
158 void piix3_4_setup_channel __P((struct channel_softc*));
159 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
160 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
161 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162
163 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164 void amd756_setup_channel __P((struct channel_softc*));
165
166 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167 void apollo_setup_channel __P((struct channel_softc*));
168
169 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
170 void cmd0643_6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_6_setup_channel __P((struct channel_softc*));
172 void cmd_channel_map __P((struct pci_attach_args *,
173 struct pciide_softc *, int));
174 int cmd_pci_intr __P((void *));
175
176 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
177 void cy693_setup_channel __P((struct channel_softc*));
178
179 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
180 void sis_setup_channel __P((struct channel_softc*));
181
182 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void acer_setup_channel __P((struct channel_softc*));
184 int acer_pci_intr __P((void *));
185
186 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
187 void pdc202xx_setup_channel __P((struct channel_softc*));
188 int pdc202xx_pci_intr __P((void *));
189
190 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
191 void opti_setup_channel __P((struct channel_softc*));
192
193 void pciide_channel_dma_setup __P((struct pciide_channel *));
194 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
195 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
196 void pciide_dma_start __P((void*, int, int));
197 int pciide_dma_finish __P((void*, int, int, int));
198 void pciide_print_modes __P((struct pciide_channel *));
199
200 struct pciide_product_desc {
201 u_int32_t ide_product;
202 int ide_flags;
203 const char *ide_name;
204 /* map and setup chip, probe drives */
205 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
206 };
207
208 /* Flags for ide_flags */
209 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
210
211 /* Default product description for devices not known to this driver */
212 const struct pciide_product_desc default_product_desc = {
213 0,
214 0,
215 "Generic PCI IDE controller",
216 default_chip_map,
217 };
218
219 const struct pciide_product_desc pciide_intel_products[] = {
220 { PCI_PRODUCT_INTEL_82092AA,
221 0,
222 "Intel 82092AA IDE controller",
223 default_chip_map,
224 },
225 { PCI_PRODUCT_INTEL_82371FB_IDE,
226 0,
227 "Intel 82371FB IDE controller (PIIX)",
228 piix_chip_map,
229 },
230 { PCI_PRODUCT_INTEL_82371SB_IDE,
231 0,
232 "Intel 82371SB IDE Interface (PIIX3)",
233 piix_chip_map,
234 },
235 { PCI_PRODUCT_INTEL_82371AB_IDE,
236 0,
237 "Intel 82371AB IDE controller (PIIX4)",
238 piix_chip_map,
239 },
240 { PCI_PRODUCT_INTEL_82801AA_IDE,
241 0,
242 "Intel 82801AA IDE Controller (ICH)",
243 piix_chip_map,
244 },
245 { PCI_PRODUCT_INTEL_82801AB_IDE,
246 0,
247 "Intel 82801AB IDE Controller (ICH0)",
248 piix_chip_map,
249 },
250 { 0,
251 0,
252 NULL,
253 }
254 };
255
256 const struct pciide_product_desc pciide_amd_products[] = {
257 { PCI_PRODUCT_AMD_PBC756_IDE,
258 0,
259 "Advanced Micro Devices AMD756 IDE Controller",
260 amd756_chip_map
261 },
262 { 0,
263 0,
264 NULL,
265 }
266 };
267
268 const struct pciide_product_desc pciide_cmd_products[] = {
269 { PCI_PRODUCT_CMDTECH_640,
270 0,
271 "CMD Technology PCI0640",
272 cmd_chip_map
273 },
274 { PCI_PRODUCT_CMDTECH_643,
275 0,
276 "CMD Technology PCI0643",
277 cmd0643_6_chip_map,
278 },
279 { PCI_PRODUCT_CMDTECH_646,
280 0,
281 "CMD Technology PCI0646",
282 cmd0643_6_chip_map,
283 },
284 { 0,
285 0,
286 NULL,
287 }
288 };
289
290 const struct pciide_product_desc pciide_via_products[] = {
291 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
292 0,
293 "VIA Tech VT82C586 IDE Controller",
294 apollo_chip_map,
295 },
296 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
297 0,
298 "VIA Tech VT82C586A IDE Controller",
299 apollo_chip_map,
300 },
301 { 0,
302 0,
303 NULL,
304 }
305 };
306
307 const struct pciide_product_desc pciide_cypress_products[] = {
308 { PCI_PRODUCT_CONTAQ_82C693,
309 0,
310 "Cypress 82C693 IDE Controller",
311 cy693_chip_map,
312 },
313 { 0,
314 0,
315 NULL,
316 }
317 };
318
319 const struct pciide_product_desc pciide_sis_products[] = {
320 { PCI_PRODUCT_SIS_5597_IDE,
321 0,
322 "Silicon Integrated System 5597/5598 IDE controller",
323 sis_chip_map,
324 },
325 { 0,
326 0,
327 NULL,
328 }
329 };
330
331 const struct pciide_product_desc pciide_acer_products[] = {
332 { PCI_PRODUCT_ALI_M5229,
333 0,
334 "Acer Labs M5229 UDMA IDE Controller",
335 acer_chip_map,
336 },
337 { 0,
338 0,
339 NULL,
340 }
341 };
342
343 const struct pciide_product_desc pciide_promise_products[] = {
344 { PCI_PRODUCT_PROMISE_ULTRA33,
345 IDE_PCI_CLASS_OVERRIDE,
346 "Promise Ultra33/ATA Bus Master IDE Accelerator",
347 pdc202xx_chip_map,
348 },
349 { PCI_PRODUCT_PROMISE_ULTRA66,
350 IDE_PCI_CLASS_OVERRIDE,
351 "Promise Ultra66/ATA Bus Master IDE Accelerator",
352 pdc202xx_chip_map,
353 },
354 { 0,
355 0,
356 NULL,
357 }
358 };
359
360 const struct pciide_product_desc pciide_opti_products[] = {
361 { PCI_PRODUCT_OPTI_82C621,
362 0,
363 "OPTi 82c621 PCI IDE controller",
364 opti_chip_map,
365 },
366 { PCI_PRODUCT_OPTI_82C568,
367 0,
368 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
369 opti_chip_map,
370 },
371 { PCI_PRODUCT_OPTI_82D568,
372 0,
373 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
374 opti_chip_map,
375 },
376 { 0,
377 0,
378 NULL,
379 }
380 };
381
382 struct pciide_vendor_desc {
383 u_int32_t ide_vendor;
384 const struct pciide_product_desc *ide_products;
385 };
386
387 const struct pciide_vendor_desc pciide_vendors[] = {
388 { PCI_VENDOR_INTEL, pciide_intel_products },
389 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
390 { PCI_VENDOR_VIATECH, pciide_via_products },
391 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
392 { PCI_VENDOR_SIS, pciide_sis_products },
393 { PCI_VENDOR_ALI, pciide_acer_products },
394 { PCI_VENDOR_PROMISE, pciide_promise_products },
395 { PCI_VENDOR_AMD, pciide_amd_products },
396 { PCI_VENDOR_OPTI, pciide_opti_products },
397 { 0, NULL }
398 };
399
400 /* options passed via the 'flags' config keyword */
401 #define PCIIDE_OPTIONS_DMA 0x01
402
403 int pciide_match __P((struct device *, struct cfdata *, void *));
404 void pciide_attach __P((struct device *, struct device *, void *));
405
406 struct cfattach pciide_ca = {
407 sizeof(struct pciide_softc), pciide_match, pciide_attach
408 };
409 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
410 int pciide_mapregs_compat __P(( struct pci_attach_args *,
411 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
412 int pciide_mapregs_native __P((struct pci_attach_args *,
413 struct pciide_channel *, bus_size_t *, bus_size_t *,
414 int (*pci_intr) __P((void *))));
415 void pciide_mapreg_dma __P((struct pciide_softc *,
416 struct pci_attach_args *));
417 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
418 void pciide_mapchan __P((struct pci_attach_args *,
419 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
420 int (*pci_intr) __P((void *))));
421 int pciide_chan_candisable __P((struct pciide_channel *));
422 void pciide_map_compat_intr __P(( struct pci_attach_args *,
423 struct pciide_channel *, int, int));
424 int pciide_print __P((void *, const char *pnp));
425 int pciide_compat_intr __P((void *));
426 int pciide_pci_intr __P((void *));
427 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
428
429 const struct pciide_product_desc *
430 pciide_lookup_product(id)
431 u_int32_t id;
432 {
433 const struct pciide_product_desc *pp;
434 const struct pciide_vendor_desc *vp;
435
436 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
437 if (PCI_VENDOR(id) == vp->ide_vendor)
438 break;
439
440 if ((pp = vp->ide_products) == NULL)
441 return NULL;
442
443 for (; pp->ide_name != NULL; pp++)
444 if (PCI_PRODUCT(id) == pp->ide_product)
445 break;
446
447 if (pp->ide_name == NULL)
448 return NULL;
449 return pp;
450 }
451
452 int
453 pciide_match(parent, match, aux)
454 struct device *parent;
455 struct cfdata *match;
456 void *aux;
457 {
458 struct pci_attach_args *pa = aux;
459 const struct pciide_product_desc *pp;
460
461 /*
462 * Check the class register to see that it's a PCI IDE controller.
463 * If it is, we assume that we can deal with it; it _should_
464 * work in a standardized way...
465 */
466 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
467 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
468 return (1);
469 }
470
471 /*
472 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
473 * controllers. Let's see if we can deal with them anyway.
474 */
475 pp = pciide_lookup_product(pa->pa_id);
476 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
477 return (1);
478 }
479
480 return (0);
481 }
482
483 void
484 pciide_attach(parent, self, aux)
485 struct device *parent, *self;
486 void *aux;
487 {
488 struct pci_attach_args *pa = aux;
489 pci_chipset_tag_t pc = pa->pa_pc;
490 pcitag_t tag = pa->pa_tag;
491 struct pciide_softc *sc = (struct pciide_softc *)self;
492 pcireg_t csr;
493 char devinfo[256];
494 const char *displaydev;
495
496 sc->sc_pp = pciide_lookup_product(pa->pa_id);
497 if (sc->sc_pp == NULL) {
498 sc->sc_pp = &default_product_desc;
499 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
500 displaydev = devinfo;
501 } else
502 displaydev = sc->sc_pp->ide_name;
503
504 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
505
506 sc->sc_pc = pa->pa_pc;
507 sc->sc_tag = pa->pa_tag;
508 #ifdef WDCDEBUG
509 if (wdcdebug_pciide_mask & DEBUG_PROBE)
510 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
511 #endif
512
513 sc->sc_pp->chip_map(sc, pa);
514
515 if (sc->sc_dma_ok) {
516 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
517 csr |= PCI_COMMAND_MASTER_ENABLE;
518 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
519 }
520 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
521 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
522 }
523
524 /* Tell whether the chip is enabled or not */
525 int
526 pciide_chipen(sc, pa)
527 struct pciide_softc *sc;
528 struct pci_attach_args *pa;
529 {
530 pcireg_t csr;
531 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
532 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
533 PCI_COMMAND_STATUS_REG);
534 printf("%s: device disabled (at %s)\n",
535 sc->sc_wdcdev.sc_dev.dv_xname,
536 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
537 "device" : "bridge");
538 return 0;
539 }
540 return 1;
541 }
542
543 int
544 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
545 struct pci_attach_args *pa;
546 struct pciide_channel *cp;
547 int compatchan;
548 bus_size_t *cmdsizep, *ctlsizep;
549 {
550 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
551 struct channel_softc *wdc_cp = &cp->wdc_channel;
552
553 cp->compat = 1;
554 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
555 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
556
557 wdc_cp->cmd_iot = pa->pa_iot;
558 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
559 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
560 printf("%s: couldn't map %s channel cmd regs\n",
561 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
562 return (0);
563 }
564
565 wdc_cp->ctl_iot = pa->pa_iot;
566 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
567 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
568 printf("%s: couldn't map %s channel ctl regs\n",
569 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
570 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
571 PCIIDE_COMPAT_CMD_SIZE);
572 return (0);
573 }
574
575 return (1);
576 }
577
578 int
579 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
580 struct pci_attach_args * pa;
581 struct pciide_channel *cp;
582 bus_size_t *cmdsizep, *ctlsizep;
583 int (*pci_intr) __P((void *));
584 {
585 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
586 struct channel_softc *wdc_cp = &cp->wdc_channel;
587 const char *intrstr;
588 pci_intr_handle_t intrhandle;
589
590 cp->compat = 0;
591
592 if (sc->sc_pci_ih == NULL) {
593 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
594 pa->pa_intrline, &intrhandle) != 0) {
595 printf("%s: couldn't map native-PCI interrupt\n",
596 sc->sc_wdcdev.sc_dev.dv_xname);
597 return 0;
598 }
599 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
600 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
601 intrhandle, IPL_BIO, pci_intr, sc);
602 if (sc->sc_pci_ih != NULL) {
603 printf("%s: using %s for native-PCI interrupt\n",
604 sc->sc_wdcdev.sc_dev.dv_xname,
605 intrstr ? intrstr : "unknown interrupt");
606 } else {
607 printf("%s: couldn't establish native-PCI interrupt",
608 sc->sc_wdcdev.sc_dev.dv_xname);
609 if (intrstr != NULL)
610 printf(" at %s", intrstr);
611 printf("\n");
612 return 0;
613 }
614 }
615 cp->ih = sc->sc_pci_ih;
616 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
617 PCI_MAPREG_TYPE_IO, 0,
618 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
619 printf("%s: couldn't map %s channel cmd regs\n",
620 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
621 return 0;
622 }
623
624 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
625 PCI_MAPREG_TYPE_IO, 0,
626 &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) {
627 printf("%s: couldn't map %s channel ctl regs\n",
628 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
629 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
630 return 0;
631 }
632 return (1);
633 }
634
635 void
636 pciide_mapreg_dma(sc, pa)
637 struct pciide_softc *sc;
638 struct pci_attach_args *pa;
639 {
640 pcireg_t maptype;
641
642 /*
643 * Map DMA registers
644 *
645 * Note that sc_dma_ok is the right variable to test to see if
646 * DMA can be done. If the interface doesn't support DMA,
647 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
648 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
649 * non-zero if the interface supports DMA and the registers
650 * could be mapped.
651 *
652 * XXX Note that despite the fact that the Bus Master IDE specs
653 * XXX say that "The bus master IDE function uses 16 bytes of IO
654 * XXX space," some controllers (at least the United
655 * XXX Microelectronics UM8886BF) place it in memory space.
656 */
657 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
658 PCIIDE_REG_BUS_MASTER_DMA);
659
660 switch (maptype) {
661 case PCI_MAPREG_TYPE_IO:
662 case PCI_MAPREG_MEM_TYPE_32BIT:
663 sc->sc_dma_ok = (pci_mapreg_map(pa,
664 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
665 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
666 sc->sc_dmat = pa->pa_dmat;
667 if (sc->sc_dma_ok == 0) {
668 printf(", but unused (couldn't map registers)");
669 } else {
670 sc->sc_wdcdev.dma_arg = sc;
671 sc->sc_wdcdev.dma_init = pciide_dma_init;
672 sc->sc_wdcdev.dma_start = pciide_dma_start;
673 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
674 }
675 break;
676
677 default:
678 sc->sc_dma_ok = 0;
679 printf(", but unsupported register maptype (0x%x)", maptype);
680 }
681 }
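/*
 * For reference: the 16-byte bus-master block mapped above is laid out as
 * described in the Bus Master IDE specification cited at the top of this
 * file -- a command register, a status register and a 32-bit PRD table
 * pointer per channel, the secondary channel's copies sitting at a fixed
 * offset from the primary's (IDEDMA_CMD, IDEDMA_CTL, IDEDMA_TBL and
 * IDEDMA_SCH_OFFSET in pciidereg.h).
 */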
682
683 int
684 pciide_compat_intr(arg)
685 void *arg;
686 {
687 struct pciide_channel *cp = arg;
688
689 #ifdef DIAGNOSTIC
690 /* should only be called for a compat channel */
691 if (cp->compat == 0)
692 panic("pciide compat intr called for non-compat chan %p\n", cp);
693 #endif
694 return (wdcintr(&cp->wdc_channel));
695 }
696
697 int
698 pciide_pci_intr(arg)
699 void *arg;
700 {
701 struct pciide_softc *sc = arg;
702 struct pciide_channel *cp;
703 struct channel_softc *wdc_cp;
704 int i, rv, crv;
705
706 rv = 0;
707 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
708 cp = &sc->pciide_channels[i];
709 wdc_cp = &cp->wdc_channel;
710
711 /* If a compat channel, skip. */
712 if (cp->compat)
713 continue;
714 /* If this channel is not waiting for an interrupt, skip */
715 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
716 continue;
717
718 crv = wdcintr(wdc_cp);
719 if (crv == 0)
720 ; /* leave rv alone */
721 else if (crv == 1)
722 rv = 1; /* claim the intr */
723 else if (rv == 0) /* crv should be -1 in this case */
724 rv = crv; /* if we've done no better, take it */
725 }
726 return (rv);
727 }
728
729 void
730 pciide_channel_dma_setup(cp)
731 struct pciide_channel *cp;
732 {
733 int drive;
734 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
735 struct ata_drive_datas *drvp;
736
737 for (drive = 0; drive < 2; drive++) {
738 drvp = &cp->wdc_channel.ch_drive[drive];
739 /* If no drive, skip */
740 if ((drvp->drive_flags & DRIVE) == 0)
741 continue;
742 /* setup DMA if needed */
743 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
744 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
745 sc->sc_dma_ok == 0) {
746 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
747 continue;
748 }
749 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
750 != 0) {
751 /* Abort DMA setup */
752 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
753 continue;
754 }
755 }
756 }
757
758 int
759 pciide_dma_table_setup(sc, channel, drive)
760 struct pciide_softc *sc;
761 int channel, drive;
762 {
763 bus_dma_segment_t seg;
764 int error, rseg;
765 const bus_size_t dma_table_size =
766 sizeof(struct idedma_table) * NIDEDMA_TABLES;
767 struct pciide_dma_maps *dma_maps =
768 &sc->pciide_channels[channel].dma_maps[drive];
769
770 /* If table was already allocated, just return */
771 if (dma_maps->dma_table)
772 return 0;
773
774 /* Allocate memory for the DMA tables and map it */
775 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
776 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
777 BUS_DMA_NOWAIT)) != 0) {
778 printf("%s:%d: unable to allocate table DMA for "
779 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
780 channel, drive, error);
781 return error;
782 }
783 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
784 dma_table_size,
785 (caddr_t *)&dma_maps->dma_table,
786 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
787 printf("%s:%d: unable to map table DMA for"
788 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
789 channel, drive, error);
790 return error;
791 }
792 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
793 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
794 seg.ds_addr), DEBUG_PROBE);
795
796 /* Create and load table DMA map for this disk */
797 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
798 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
799 &dma_maps->dmamap_table)) != 0) {
800 printf("%s:%d: unable to create table DMA map for "
801 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
802 channel, drive, error);
803 return error;
804 }
805 if ((error = bus_dmamap_load(sc->sc_dmat,
806 dma_maps->dmamap_table,
807 dma_maps->dma_table,
808 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
809 printf("%s:%d: unable to load table DMA map for "
810 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
811 channel, drive, error);
812 return error;
813 }
814 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
815 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
816 /* Create an xfer DMA map for this drive */
817 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
818 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
819 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
820 &dma_maps->dmamap_xfer)) != 0) {
821 printf("%s:%d: unable to create xfer DMA map for "
822 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
823 channel, drive, error);
824 return error;
825 }
826 return 0;
827 }
828
829 int
830 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
831 void *v;
832 int channel, drive;
833 void *databuf;
834 size_t datalen;
835 int flags;
836 {
837 struct pciide_softc *sc = v;
838 int error, seg;
839 struct pciide_dma_maps *dma_maps =
840 &sc->pciide_channels[channel].dma_maps[drive];
841
842 error = bus_dmamap_load(sc->sc_dmat,
843 dma_maps->dmamap_xfer,
844 databuf, datalen, NULL, BUS_DMA_NOWAIT);
845 if (error) {
846 printf("%s:%d: unable to load xfer DMA map for"
847 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
848 channel, drive, error);
849 return error;
850 }
851
852 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
853 dma_maps->dmamap_xfer->dm_mapsize,
854 (flags & WDC_DMA_READ) ?
855 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
856
857 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
858 #ifdef DIAGNOSTIC
859 /* A segment must not cross a 64k boundary */
860 {
861 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
862 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
863 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
864 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
865 printf("pciide_dma: segment %d physical addr 0x%lx"
866 " len 0x%lx not properly aligned\n",
867 seg, phys, len);
868 panic("pciide_dma: buf align");
869 }
870 }
871 #endif
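/*
 * The diagnostic above enforces a rule from the Bus Master IDE spec cited
 * at the top of this file: the memory region described by a single
 * physical region descriptor must not cross a 64k boundary.  The byte
 * count below is masked with IDEDMA_BYTE_COUNT_MASK because the count
 * field in a descriptor is only 16 bits wide.
 */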
872 dma_maps->dma_table[seg].base_addr =
873 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
874 dma_maps->dma_table[seg].byte_count =
875 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
876 IDEDMA_BYTE_COUNT_MASK);
877 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
878 seg, le32toh(dma_maps->dma_table[seg].byte_count),
879 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
880
881 }
882 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
883 htole32(IDEDMA_BYTE_COUNT_EOT);
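/*
 * Illustrative layout: a transfer that maps to two DMA segments ends up as
 *   dma_table[0] = { base_addr = seg 0 address, byte_count = seg 0 length }
 *   dma_table[1] = { base_addr = seg 1 address, byte_count = seg 1 length
 *                    | IDEDMA_BYTE_COUNT_EOT }
 * i.e. only the last physical region descriptor carries the end-of-table
 * flag that stops the bus-master engine.
 */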
884
885 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
886 dma_maps->dmamap_table->dm_mapsize,
887 BUS_DMASYNC_PREWRITE);
888
889 /* Maps are ready. Start DMA function */
890 #ifdef DIAGNOSTIC
891 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
892 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
893 dma_maps->dmamap_table->dm_segs[0].ds_addr);
894 panic("pciide_dma_init: table align");
895 }
896 #endif
897
898 /* Clear status bits */
899 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
900 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
901 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
902 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
903 /* Write table addr */
904 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
905 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
906 dma_maps->dmamap_table->dm_segs[0].ds_addr);
907 /* set read/write */
908 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
909 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
910 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
911 /* remember flags */
912 dma_maps->dma_flags = flags;
913 return 0;
914 }
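/*
 * Usage sketch: the wdc layer calls these hooks in the order
 * pciide_dma_init() (load the map and program the controller),
 * pciide_dma_start() (set IDEDMA_CMD_START once the drive command has been
 * issued), then pciide_dma_finish() from the interrupt (or timeout) handler
 * to stop the engine, sync/unload the map and decode the status bits.
 */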
915
916 void
917 pciide_dma_start(v, channel, drive)
918 void *v;
919 int channel, drive;
920 {
921 struct pciide_softc *sc = v;
922
923 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
924 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
925 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
926 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
927 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
928 }
929
930 int
931 pciide_dma_finish(v, channel, drive, force)
932 void *v;
933 int channel, drive;
934 int force;
935 {
936 struct pciide_softc *sc = v;
937 u_int8_t status;
938 int error = 0;
939 struct pciide_dma_maps *dma_maps =
940 &sc->pciide_channels[channel].dma_maps[drive];
941
942 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
943 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
944 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
945 DEBUG_XFERS);
946
947 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
948 return WDC_DMAST_NOIRQ;
949
950 /* stop DMA channel */
951 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
952 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
953 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
954 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
955
956 /* Clear status bits */
957 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
958 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
959 status);
960
961 /* Unload the map of the data buffer */
962 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
963 dma_maps->dmamap_xfer->dm_mapsize,
964 (dma_maps->dma_flags & WDC_DMA_READ) ?
965 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
966 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
967
968 if ((status & IDEDMA_CTL_ERR) != 0) {
969 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
970 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
971 error |= WDC_DMAST_ERR;
972 }
973
974 if ((status & IDEDMA_CTL_INTR) == 0) {
975 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
976 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
977 drive, status);
978 error |= WDC_DMAST_NOIRQ;
979 }
980
981 if ((status & IDEDMA_CTL_ACT) != 0) {
982 /* data underrun, may be a valid condition for ATAPI */
983 error |= WDC_DMAST_UNDER;
984 }
985 return error;
986 }
987
988 /* some common code used by several chip_map */
989 int
990 pciide_chansetup(sc, channel, interface)
991 struct pciide_softc *sc;
992 int channel;
993 pcireg_t interface;
994 {
995 struct pciide_channel *cp = &sc->pciide_channels[channel];
996 sc->wdc_chanarray[channel] = &cp->wdc_channel;
997 cp->name = PCIIDE_CHANNEL_NAME(channel);
998 cp->wdc_channel.channel = channel;
999 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1000 cp->wdc_channel.ch_queue =
1001 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1002 if (cp->wdc_channel.ch_queue == NULL) {
1003 printf("%s %s channel: "
1004 "can't allocate memory for command queue",
1005 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1006 return 0;
1007 }
1008 printf("%s: %s channel %s to %s mode\n",
1009 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1010 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1011 "configured" : "wired",
1012 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1013 "native-PCI" : "compatibility");
1014 return 1;
1015 }
1016
1017 /* some common code used by several chip channel_map */
1018 void
1019 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1020 struct pci_attach_args *pa;
1021 struct pciide_channel *cp;
1022 pcireg_t interface;
1023 bus_size_t *cmdsizep, *ctlsizep;
1024 int (*pci_intr) __P((void *));
1025 {
1026 struct channel_softc *wdc_cp = &cp->wdc_channel;
1027
1028 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1029 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1030 pci_intr);
1031 else
1032 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1033 wdc_cp->channel, cmdsizep, ctlsizep);
1034
1035 if (cp->hw_ok == 0)
1036 return;
1037 wdc_cp->data32iot = wdc_cp->cmd_iot;
1038 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1039 wdcattach(wdc_cp);
1040 }
1041
1042 /*
1043 * Generic code to check whether a channel can be disabled. Returns 1
1044 * if the channel can be disabled, 0 if not.
1045 */
1046 int
1047 pciide_chan_candisable(cp)
1048 struct pciide_channel *cp;
1049 {
1050 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1051 struct channel_softc *wdc_cp = &cp->wdc_channel;
1052
1053 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1054 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1055 printf("%s: disabling %s channel (no drives)\n",
1056 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1057 cp->hw_ok = 0;
1058 return 1;
1059 }
1060 return 0;
1061 }
1062
1063 /*
1064 * Generic code to map the compat interrupt if hw_ok=1 and it is a compat
1065 * channel. Sets hw_ok=0 on failure.
1066 */
1067 void
1068 pciide_map_compat_intr(pa, cp, compatchan, interface)
1069 struct pci_attach_args *pa;
1070 struct pciide_channel *cp;
1071 int compatchan, interface;
1072 {
1073 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1074 struct channel_softc *wdc_cp = &cp->wdc_channel;
1075
1076 if (cp->hw_ok == 0)
1077 return;
1078 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1079 return;
1080
1081 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1082 pa, compatchan, pciide_compat_intr, cp);
1083 if (cp->ih == NULL) {
1084 printf("%s: no compatibility interrupt for use by %s "
1085 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1086 cp->hw_ok = 0;
1087 }
1088 }
1089
1090 void
1091 pciide_print_modes(cp)
1092 struct pciide_channel *cp;
1093 {
1094 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1095 int drive;
1096 struct channel_softc *chp;
1097 struct ata_drive_datas *drvp;
1098
1099 chp = &cp->wdc_channel;
1100 for (drive = 0; drive < 2; drive++) {
1101 drvp = &chp->ch_drive[drive];
1102 if ((drvp->drive_flags & DRIVE) == 0)
1103 continue;
1104 printf("%s(%s:%d:%d): using PIO mode %d",
1105 drvp->drv_softc->dv_xname,
1106 sc->sc_wdcdev.sc_dev.dv_xname,
1107 chp->channel, drive, drvp->PIO_mode);
1108 if (drvp->drive_flags & DRIVE_DMA)
1109 printf(", DMA mode %d", drvp->DMA_mode);
1110 if (drvp->drive_flags & DRIVE_UDMA)
1111 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1112 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1113 printf(" (using DMA data transfers)");
1114 printf("\n");
1115 }
1116 }
1117
1118 void
1119 default_chip_map(sc, pa)
1120 struct pciide_softc *sc;
1121 struct pci_attach_args *pa;
1122 {
1123 struct pciide_channel *cp;
1124 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1125 sc->sc_tag, PCI_CLASS_REG));
1126 pcireg_t csr;
1127 int channel, drive;
1128 struct ata_drive_datas *drvp;
1129 u_int8_t idedma_ctl;
1130 bus_size_t cmdsize, ctlsize;
1131 char *failreason;
1132
1133 if (pciide_chipen(sc, pa) == 0)
1134 return;
1135
1136 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1137 printf("%s: bus-master DMA support present",
1138 sc->sc_wdcdev.sc_dev.dv_xname);
1139 if (sc->sc_pp == &default_product_desc &&
1140 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1141 PCIIDE_OPTIONS_DMA) == 0) {
1142 printf(", but unused (no driver support)");
1143 sc->sc_dma_ok = 0;
1144 } else {
1145 pciide_mapreg_dma(sc, pa);
1146 if (sc->sc_dma_ok != 0)
1147 printf(", used without full driver "
1148 "support");
1149 }
1150 } else {
1151 printf("%s: hardware does not support DMA",
1152 sc->sc_wdcdev.sc_dev.dv_xname);
1153 sc->sc_dma_ok = 0;
1154 }
1155 printf("\n");
1156 if (sc->sc_dma_ok)
1157 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
1158 sc->sc_wdcdev.PIO_cap = 0;
1159 sc->sc_wdcdev.DMA_cap = 0;
1160
1161 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1162 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1163 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1164
1165 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1166 cp = &sc->pciide_channels[channel];
1167 if (pciide_chansetup(sc, channel, interface) == 0)
1168 continue;
1169 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1170 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1171 &ctlsize, pciide_pci_intr);
1172 } else {
1173 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1174 channel, &cmdsize, &ctlsize);
1175 }
1176 if (cp->hw_ok == 0)
1177 continue;
1178 /*
1179 * Check to see if something appears to be there.
1180 */
1181 failreason = NULL;
1182 if (!wdcprobe(&cp->wdc_channel)) {
1183 failreason = "not responding; disabled or no drives?";
1184 goto next;
1185 }
1186 /*
1187 * Now, make sure it's actually attributable to this PCI IDE
1188 * channel by trying to access the channel again while the
1189 * PCI IDE controller's I/O space is disabled. (If the
1190 * channel no longer appears to be there, it belongs to
1191 * this controller.) YUCK!
1192 */
1193 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1194 PCI_COMMAND_STATUS_REG);
1195 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1196 csr & ~PCI_COMMAND_IO_ENABLE);
1197 if (wdcprobe(&cp->wdc_channel))
1198 failreason = "other hardware responding at addresses";
1199 pci_conf_write(sc->sc_pc, sc->sc_tag,
1200 PCI_COMMAND_STATUS_REG, csr);
1201 next:
1202 if (failreason) {
1203 printf("%s: %s channel ignored (%s)\n",
1204 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1205 failreason);
1206 cp->hw_ok = 0;
1207 bus_space_unmap(cp->wdc_channel.cmd_iot,
1208 cp->wdc_channel.cmd_ioh, cmdsize);
1209 bus_space_unmap(cp->wdc_channel.ctl_iot,
1210 cp->wdc_channel.ctl_ioh, ctlsize);
1211 } else {
1212 pciide_map_compat_intr(pa, cp, channel, interface);
1213 }
1214 if (cp->hw_ok) {
1215 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1216 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1217 wdcattach(&cp->wdc_channel);
1218 }
1219 }
1220
1221 if (sc->sc_dma_ok == 0)
1222 return;
1223
1224 /* Allocate DMA maps */
1225 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1226 idedma_ctl = 0;
1227 cp = &sc->pciide_channels[channel];
1228 for (drive = 0; drive < 2; drive++) {
1229 drvp = &cp->wdc_channel.ch_drive[drive];
1230 /* If no drive, skip */
1231 if ((drvp->drive_flags & DRIVE) == 0)
1232 continue;
1233 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1234 continue;
1235 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1236 /* Abort DMA setup */
1237 printf("%s:%d:%d: can't allocate DMA maps, "
1238 "using PIO transfers\n",
1239 sc->sc_wdcdev.sc_dev.dv_xname,
1240 channel, drive);
1241 drvp->drive_flags &= ~DRIVE_DMA;
1242 }
1243 printf("%s:%d:%d: using DMA data transfers\n",
1244 sc->sc_wdcdev.sc_dev.dv_xname,
1245 channel, drive);
1246 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1247 }
1248 if (idedma_ctl != 0) {
1249 /* Add software bits in status register */
1250 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1251 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1252 idedma_ctl);
1253 }
1254 }
1255 }
1256
1257 void
1258 piix_chip_map(sc, pa)
1259 struct pciide_softc *sc;
1260 struct pci_attach_args *pa;
1261 {
1262 struct pciide_channel *cp;
1263 int channel;
1264 u_int32_t idetim;
1265 bus_size_t cmdsize, ctlsize;
1266
1267 if (pciide_chipen(sc, pa) == 0)
1268 return;
1269
1270 printf("%s: bus-master DMA support present",
1271 sc->sc_wdcdev.sc_dev.dv_xname);
1272 pciide_mapreg_dma(sc, pa);
1273 printf("\n");
1274 if (sc->sc_dma_ok) {
1275 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
1276 switch(sc->sc_pp->ide_product) {
1277 case PCI_PRODUCT_INTEL_82371AB_IDE:
1278 case PCI_PRODUCT_INTEL_82801AA_IDE:
1279 case PCI_PRODUCT_INTEL_82801AB_IDE:
1280 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1281 }
1282 }
1283 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1284 WDC_CAPABILITY_MODE;
1285 sc->sc_wdcdev.PIO_cap = 4;
1286 sc->sc_wdcdev.DMA_cap = 2;
1287 sc->sc_wdcdev.UDMA_cap =
1288 (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) ? 4 : 2;
1289 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1290 sc->sc_wdcdev.set_modes = piix_setup_channel;
1291 else
1292 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1293 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1294 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1295
1296 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1297 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1298 DEBUG_PROBE);
1299 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1300 WDCDEBUG_PRINT((", sidetim=0x%x",
1301 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1302 DEBUG_PROBE);
1303 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1304 WDCDEBUG_PRINT((", udamreg 0x%x",
1305 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1306 DEBUG_PROBE);
1307 }
1308 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1309 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1310 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1311 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1312 DEBUG_PROBE);
1313 }
1314
1315 }
1316 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1317
1318 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1319 cp = &sc->pciide_channels[channel];
1320 /* PIIX is compat-only */
1321 if (pciide_chansetup(sc, channel, 0) == 0)
1322 continue;
1323 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1324 if ((PIIX_IDETIM_READ(idetim, channel) &
1325 PIIX_IDETIM_IDE) == 0) {
1326 printf("%s: %s channel ignored (disabled)\n",
1327 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1328 continue;
1329 }
1330 /* PIIX controllers are compat-only pciide devices */
1331 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1332 if (cp->hw_ok == 0)
1333 continue;
1334 if (pciide_chan_candisable(cp)) {
1335 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1336 channel);
1337 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1338 idetim);
1339 }
1340 pciide_map_compat_intr(pa, cp, channel, 0);
1341 if (cp->hw_ok == 0)
1342 continue;
1343 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1344 }
1345
1346 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1347 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1348 DEBUG_PROBE);
1349 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1350 WDCDEBUG_PRINT((", sidetim=0x%x",
1351 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1352 DEBUG_PROBE);
1353 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1354 WDCDEBUG_PRINT((", udamreg 0x%x",
1355 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1356 DEBUG_PROBE);
1357 }
1358 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1359 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1360 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1361 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1362 DEBUG_PROBE);
1363 }
1364 }
1365 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1366 }
1367
1368 void
1369 piix_setup_channel(chp)
1370 struct channel_softc *chp;
1371 {
1372 u_int8_t mode[2], drive;
1373 u_int32_t oidetim, idetim, idedma_ctl;
1374 struct pciide_channel *cp = (struct pciide_channel*)chp;
1375 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1376 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1377
1378 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1379 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1380 idedma_ctl = 0;
1381
1382 /* set up new idetim: Enable IDE registers decode */
1383 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1384 chp->channel);
1385
1386 /* setup DMA */
1387 pciide_channel_dma_setup(cp);
1388
1389 /*
1390 * Here we have to mess with the drives' modes: the PIIX can't have
1391 * different timings for master and slave drives.
1392 * We need to find the best combination.
1393 */
1394
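/*
 * For example (illustrative): with a DMA-capable master and a PIO-only
 * slave, the code below keeps the master's DMA mode and keeps the slave's
 * PIO mode only if that PIO mode happens to use the same ISP/RTC timings
 * as the master's DMA mode; otherwise the slave is dropped to PIO mode 0.
 */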
1395 /* If both drives support DMA, take the lower mode */
1396 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1397 (drvp[1].drive_flags & DRIVE_DMA)) {
1398 mode[0] = mode[1] =
1399 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1400 drvp[0].DMA_mode = mode[0];
1401 drvp[1].DMA_mode = mode[1];
1402 goto ok;
1403 }
1404 /*
1405 * If only one drive supports DMA, use its mode, and
1406 * put the other one in PIO mode 0 if its mode is not compatible
1407 */
1408 if (drvp[0].drive_flags & DRIVE_DMA) {
1409 mode[0] = drvp[0].DMA_mode;
1410 mode[1] = drvp[1].PIO_mode;
1411 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1412 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1413 mode[1] = drvp[1].PIO_mode = 0;
1414 goto ok;
1415 }
1416 if (drvp[1].drive_flags & DRIVE_DMA) {
1417 mode[1] = drvp[1].DMA_mode;
1418 mode[0] = drvp[0].PIO_mode;
1419 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1420 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1421 mode[0] = drvp[0].PIO_mode = 0;
1422 goto ok;
1423 }
1424 /*
1425 * If neither drive uses DMA, take the lower mode, unless
1426 * one of them is below PIO mode 2
1427 */
1428 if (drvp[0].PIO_mode < 2) {
1429 mode[0] = drvp[0].PIO_mode = 0;
1430 mode[1] = drvp[1].PIO_mode;
1431 } else if (drvp[1].PIO_mode < 2) {
1432 mode[1] = drvp[1].PIO_mode = 0;
1433 mode[0] = drvp[0].PIO_mode;
1434 } else {
1435 mode[0] = mode[1] =
1436 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1437 drvp[0].PIO_mode = mode[0];
1438 drvp[1].PIO_mode = mode[1];
1439 }
1440 ok: /* The modes are set up */
1441 for (drive = 0; drive < 2; drive++) {
1442 if (drvp[drive].drive_flags & DRIVE_DMA) {
1443 idetim |= piix_setup_idetim_timings(
1444 mode[drive], 1, chp->channel);
1445 goto end;
1446 }
1447 }
1448 /* If we get here, neither drive uses DMA */
1449 if (mode[0] >= 2)
1450 idetim |= piix_setup_idetim_timings(
1451 mode[0], 0, chp->channel);
1452 else
1453 idetim |= piix_setup_idetim_timings(
1454 mode[1], 0, chp->channel);
1455 end: /*
1456 * timing mode is now set up in the controller. Enable
1457 * it per-drive
1458 */
1459 for (drive = 0; drive < 2; drive++) {
1460 /* If no drive, skip */
1461 if ((drvp[drive].drive_flags & DRIVE) == 0)
1462 continue;
1463 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1464 if (drvp[drive].drive_flags & DRIVE_DMA)
1465 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1466 }
1467 if (idedma_ctl != 0) {
1468 /* Add software bits in status register */
1469 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1470 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1471 idedma_ctl);
1472 }
1473 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1474 pciide_print_modes(cp);
1475 }
1476
1477 void
1478 piix3_4_setup_channel(chp)
1479 struct channel_softc *chp;
1480 {
1481 struct ata_drive_datas *drvp;
1482 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1483 struct pciide_channel *cp = (struct pciide_channel*)chp;
1484 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1485 int drive;
1486 int channel = chp->channel;
1487
1488 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1489 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1490 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1491 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1492 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1493 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1494 PIIX_SIDETIM_RTC_MASK(channel));
1495
1496 idedma_ctl = 0;
1497 /* If channel disabled, no need to go further */
1498 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1499 return;
1500 /* set up new idetim: Enable IDE registers decode */
1501 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1502
1503 /* setup DMA if needed */
1504 pciide_channel_dma_setup(cp);
1505
1506 for (drive = 0; drive < 2; drive++) {
1507 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1508 PIIX_UDMATIM_SET(0x3, channel, drive));
1509 drvp = &chp->ch_drive[drive];
1510 /* If no drive, skip */
1511 if ((drvp->drive_flags & DRIVE) == 0)
1512 continue;
1513 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1514 (drvp->drive_flags & DRIVE_UDMA) == 0))
1515 goto pio;
1516
1517 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1518 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1519 ideconf |= PIIX_CONFIG_PINGPONG;
1520 }
1521 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1522 /* setup Ultra/66 */
1523 if (drvp->UDMA_mode > 2 &&
1524 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1525 drvp->UDMA_mode = 2;
1526 if (drvp->UDMA_mode > 2)
1527 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1528 else
1529 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1530 }
1531 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1532 (drvp->drive_flags & DRIVE_UDMA)) {
1533 /* use Ultra/DMA */
1534 drvp->drive_flags &= ~DRIVE_DMA;
1535 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1536 udmareg |= PIIX_UDMATIM_SET(
1537 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1538 } else {
1539 /* use Multiword DMA */
1540 drvp->drive_flags &= ~DRIVE_UDMA;
1541 if (drive == 0) {
1542 idetim |= piix_setup_idetim_timings(
1543 drvp->DMA_mode, 1, channel);
1544 } else {
1545 sidetim |= piix_setup_sidetim_timings(
1546 drvp->DMA_mode, 1, channel);
1547 idetim = PIIX_IDETIM_SET(idetim,
1548 PIIX_IDETIM_SITRE, channel);
1549 }
1550 }
1551 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1552
1553 pio: /* use PIO mode */
1554 idetim |= piix_setup_idetim_drvs(drvp);
1555 if (drive == 0) {
1556 idetim |= piix_setup_idetim_timings(
1557 drvp->PIO_mode, 0, channel);
1558 } else {
1559 sidetim |= piix_setup_sidetim_timings(
1560 drvp->PIO_mode, 0, channel);
1561 idetim = PIIX_IDETIM_SET(idetim,
1562 PIIX_IDETIM_SITRE, channel);
1563 }
1564 }
1565 if (idedma_ctl != 0) {
1566 /* Add software bits in status register */
1567 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1568 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1569 idedma_ctl);
1570 }
1571 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1572 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1573 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1574 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1575 pciide_print_modes(cp);
1576 }
1577
1578
1579 /* setup ISP and RTC fields, based on mode */
1580 static u_int32_t
1581 piix_setup_idetim_timings(mode, dma, channel)
1582 u_int8_t mode;
1583 u_int8_t dma;
1584 u_int8_t channel;
1585 {
1586
1587 if (dma)
1588 return PIIX_IDETIM_SET(0,
1589 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1590 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1591 channel);
1592 else
1593 return PIIX_IDETIM_SET(0,
1594 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1595 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1596 channel);
1597 }
1598
1599 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1600 static u_int32_t
1601 piix_setup_idetim_drvs(drvp)
1602 struct ata_drive_datas *drvp;
1603 {
1604 u_int32_t ret = 0;
1605 struct channel_softc *chp = drvp->chnl_softc;
1606 u_int8_t channel = chp->channel;
1607 u_int8_t drive = drvp->drive;
1608
1609 /*
1610 * If the drive is using UDMA, the timing setups are independent,
1611 * so just check DMA and PIO here.
1612 */
1613 if (drvp->drive_flags & DRIVE_DMA) {
1614 /* if mode = DMA mode 0, use compatible timings */
1615 if ((drvp->drive_flags & DRIVE_DMA) &&
1616 drvp->DMA_mode == 0) {
1617 drvp->PIO_mode = 0;
1618 return ret;
1619 }
1620 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1621 /*
1622 * If PIO and DMA timings are the same, use fast timings for PIO
1623 * too, else use compat timings.
1624 */
1625 if ((piix_isp_pio[drvp->PIO_mode] !=
1626 piix_isp_dma[drvp->DMA_mode]) ||
1627 (piix_rtc_pio[drvp->PIO_mode] !=
1628 piix_rtc_dma[drvp->DMA_mode]))
1629 drvp->PIO_mode = 0;
1630 /* if PIO mode <= 2, use compat timings for PIO */
1631 if (drvp->PIO_mode <= 2) {
1632 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1633 channel);
1634 return ret;
1635 }
1636 }
1637
1638 /*
1639 * Now set up PIO modes. If mode < 2, use compat timings.
1640 * Else enable fast timings. Enable IORDY and prefetch/post
1641 * if PIO mode >= 3.
1642 */
1643
1644 if (drvp->PIO_mode < 2)
1645 return ret;
1646
1647 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1648 if (drvp->PIO_mode >= 3) {
1649 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1650 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1651 }
1652 return ret;
1653 }
1654
1655 /* setup values in SIDETIM registers, based on mode */
1656 static u_int32_t
1657 piix_setup_sidetim_timings(mode, dma, channel)
1658 u_int8_t mode;
1659 u_int8_t dma;
1660 u_int8_t channel;
1661 {
1662 if (dma)
1663 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1664 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1665 else
1666 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1667 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1668 }
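/*
 * Note: the SIDETIM register (and the SITRE bit that enables it) only
 * exists on the PIIX3/PIIX4-class parts handled by piix3_4_setup_channel();
 * the original PIIX handled by piix_setup_channel() has a single timing
 * field per channel, which is why that function has to merge the master
 * and slave modes instead.
 */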
1669
1670 void
1671 amd756_chip_map(sc, pa)
1672 struct pciide_softc *sc;
1673 struct pci_attach_args *pa;
1674 {
1675 struct pciide_channel *cp;
1676 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1677 sc->sc_tag, PCI_CLASS_REG));
1678 int channel;
1679 pcireg_t chanenable;
1680 bus_size_t cmdsize, ctlsize;
1681
1682 if (pciide_chipen(sc, pa) == 0)
1683 return;
1684 printf("%s: bus-master DMA support present",
1685 sc->sc_wdcdev.sc_dev.dv_xname);
1686 pciide_mapreg_dma(sc, pa);
1687 printf("\n");
1688 if (sc->sc_dma_ok)
1689 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1690 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1691 WDC_CAPABILITY_MODE;
1692 sc->sc_wdcdev.PIO_cap = 4;
1693 sc->sc_wdcdev.DMA_cap = 2;
1694 sc->sc_wdcdev.UDMA_cap = 4;
1695 sc->sc_wdcdev.set_modes = amd756_setup_channel;
1696 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1697 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1698 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1699
1700 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1701 DEBUG_PROBE);
1702 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1703 cp = &sc->pciide_channels[channel];
1704 if (pciide_chansetup(sc, channel, interface) == 0)
1705 continue;
1706
1707 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1708 printf("%s: %s channel ignored (disabled)\n",
1709 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1710 continue;
1711 }
1712 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1713 pciide_pci_intr);
1714
1715 if (pciide_chan_candisable(cp))
1716 chanenable &= ~AMD756_CHAN_EN(channel);
1717 pciide_map_compat_intr(pa, cp, channel, interface);
1718 if (cp->hw_ok == 0)
1719 continue;
1720
1721 amd756_setup_channel(&cp->wdc_channel);
1722 }
1723 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1724 chanenable);
1725 return;
1726 }
1727
1728 void
1729 amd756_setup_channel(chp)
1730 struct channel_softc *chp;
1731 {
1732 u_int32_t udmatim_reg, datatim_reg;
1733 u_int8_t idedma_ctl;
1734 int mode, drive;
1735 struct ata_drive_datas *drvp;
1736 struct pciide_channel *cp = (struct pciide_channel*)chp;
1737 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1738
1739 idedma_ctl = 0;
1740 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1741 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1742 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1743 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1744
1745 /* setup DMA if needed */
1746 pciide_channel_dma_setup(cp);
1747
1748 for (drive = 0; drive < 2; drive++) {
1749 drvp = &chp->ch_drive[drive];
1750 /* If no drive, skip */
1751 if ((drvp->drive_flags & DRIVE) == 0)
1752 continue;
1753 /* add timing values, setup DMA if needed */
1754 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1755 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1756 mode = drvp->PIO_mode;
1757 goto pio;
1758 }
1759 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1760 (drvp->drive_flags & DRIVE_UDMA)) {
1761 /* use Ultra/DMA */
1762 drvp->drive_flags &= ~DRIVE_DMA;
1763 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1764 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1765 AMD756_UDMA_TIME(chp->channel, drive,
1766 amd756_udma_tim[drvp->UDMA_mode]);
1767 /* can use PIO timings, MW DMA unused */
1768 mode = drvp->PIO_mode;
1769 } else {
1770 /* use Multiword DMA */
1771 drvp->drive_flags &= ~DRIVE_UDMA;
1772 /* mode = min(pio, dma+2) */
1773 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1774 mode = drvp->PIO_mode;
1775 else
1776 mode = drvp->DMA_mode + 2;
1777 }
1778 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1779
1780 pio: /* setup PIO mode */
1781 if (mode <= 2) {
1782 drvp->DMA_mode = 0;
1783 drvp->PIO_mode = 0;
1784 mode = 0;
1785 } else {
1786 drvp->PIO_mode = mode;
1787 drvp->DMA_mode = mode - 2;
1788 }
1789 datatim_reg |=
1790 AMD756_DATATIM_PULSE(chp->channel, drive,
1791 amd756_pio_set[mode]) |
1792 AMD756_DATATIM_RECOV(chp->channel, drive,
1793 amd756_pio_rec[mode]);
1794 }
1795 if (idedma_ctl != 0) {
1796 /* Add software bits in status register */
1797 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1798 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1799 idedma_ctl);
1800 }
1801 pciide_print_modes(cp);
1802 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1803 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1804 }
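
/*
 * Illustrative only, kept out of the build: the Multiword DMA branch
 * above (and the identical one in apollo_setup_channel() below) shares a
 * single set of timing values between PIO and DMA, so it works with one
 * mode index: mode = min(PIO_mode, DMA_mode + 2), from which it derives
 * PIO_mode = mode and DMA_mode = mode - 2, collapsing anything at or
 * below mode 2 to the most conservative table entry.  A standalone
 * restatement of that arithmetic:
 */
#if 0
static void
shared_timing_mode_sketch(int pio, int dma, int *new_pio, int *new_dma)
{
	int mode;

	/* MW DMA mode n is driven with roughly the PIO mode n + 2 timing */
	mode = (pio <= dma + 2) ? pio : dma + 2;
	if (mode <= 2) {
		/* collapse to the slowest (mode 0) timing entry */
		*new_pio = 0;
		*new_dma = 0;
	} else {
		*new_pio = mode;
		*new_dma = mode - 2;
	}
}
#endif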
1805
1806 void
1807 apollo_chip_map(sc, pa)
1808 struct pciide_softc *sc;
1809 struct pci_attach_args *pa;
1810 {
1811 struct pciide_channel *cp;
1812 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1813 sc->sc_tag, PCI_CLASS_REG));
1814 int channel;
1815 u_int32_t ideconf;
1816 bus_size_t cmdsize, ctlsize;
1817
1818 if (pciide_chipen(sc, pa) == 0)
1819 return;
1820 printf("%s: bus-master DMA support present",
1821 sc->sc_wdcdev.sc_dev.dv_xname);
1822 pciide_mapreg_dma(sc, pa);
1823 printf("\n");
1824 if (sc->sc_dma_ok) {
1825 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
1826 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE)
1827 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1828 }
1829 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_MODE;
1830 sc->sc_wdcdev.PIO_cap = 4;
1831 sc->sc_wdcdev.DMA_cap = 2;
1832 sc->sc_wdcdev.UDMA_cap = 2;
1833 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1834 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1835 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1836 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1837
1838 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1839 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1840 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1841 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1842 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1843 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
1844 DEBUG_PROBE);
1845
1846 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1847 cp = &sc->pciide_channels[channel];
1848 if (pciide_chansetup(sc, channel, interface) == 0)
1849 continue;
1850
1851 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
1852 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
1853 printf("%s: %s channel ignored (disabled)\n",
1854 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1855 continue;
1856 }
1857 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1858 pciide_pci_intr);
1859 if (cp->hw_ok == 0)
1860 continue;
1861 if (pciide_chan_candisable(cp)) {
1862 ideconf &= ~APO_IDECONF_EN(channel);
1863 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
1864 ideconf);
1865 }
1866 pciide_map_compat_intr(pa, cp, channel, interface);
1867
1868 if (cp->hw_ok == 0)
1869 continue;
1870 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
1871 }
1872 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1873 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1874 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
1875 }
1876
1877 void
1878 apollo_setup_channel(chp)
1879 struct channel_softc *chp;
1880 {
1881 u_int32_t udmatim_reg, datatim_reg;
1882 u_int8_t idedma_ctl;
1883 int mode, drive;
1884 struct ata_drive_datas *drvp;
1885 struct pciide_channel *cp = (struct pciide_channel*)chp;
1886 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1887
1888 idedma_ctl = 0;
1889 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
1890 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1891 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
1892 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
1893
1894 /* setup DMA if needed */
1895 pciide_channel_dma_setup(cp);
1896
1897 for (drive = 0; drive < 2; drive++) {
1898 drvp = &chp->ch_drive[drive];
1899 /* If no drive, skip */
1900 if ((drvp->drive_flags & DRIVE) == 0)
1901 continue;
1902 /* add timing values, setup DMA if needed */
1903 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1904 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1905 mode = drvp->PIO_mode;
1906 goto pio;
1907 }
1908 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1909 (drvp->drive_flags & DRIVE_UDMA)) {
1910 /* use Ultra/DMA */
1911 drvp->drive_flags &= ~DRIVE_DMA;
1912 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
1913 APO_UDMA_EN_MTH(chp->channel, drive) |
1914 APO_UDMA_TIME(chp->channel, drive,
1915 apollo_udma_tim[drvp->UDMA_mode]);
1916 /* can use PIO timings, MW DMA unused */
1917 mode = drvp->PIO_mode;
1918 } else {
1919 /* use Multiword DMA */
1920 drvp->drive_flags &= ~DRIVE_UDMA;
1921 /* mode = min(pio, dma+2) */
1922 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1923 mode = drvp->PIO_mode;
1924 else
1925 mode = drvp->DMA_mode + 2;
1926 }
1927 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1928
1929 pio: /* setup PIO mode */
1930 if (mode <= 2) {
1931 drvp->DMA_mode = 0;
1932 drvp->PIO_mode = 0;
1933 mode = 0;
1934 } else {
1935 drvp->PIO_mode = mode;
1936 drvp->DMA_mode = mode - 2;
1937 }
1938 datatim_reg |=
1939 APO_DATATIM_PULSE(chp->channel, drive,
1940 apollo_pio_set[mode]) |
1941 APO_DATATIM_RECOV(chp->channel, drive,
1942 apollo_pio_rec[mode]);
1943 }
1944 if (idedma_ctl != 0) {
1945 /* Add software bits in status register */
1946 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1947 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1948 idedma_ctl);
1949 }
1950 pciide_print_modes(cp);
1951 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
1952 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
1953 }
1954
1955 void
1956 cmd_channel_map(pa, sc, channel)
1957 struct pci_attach_args *pa;
1958 struct pciide_softc *sc;
1959 int channel;
1960 {
1961 struct pciide_channel *cp = &sc->pciide_channels[channel];
1962 bus_size_t cmdsize, ctlsize;
1963 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
1964 int interface =
1965 PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1966
1967 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1968 cp->name = PCIIDE_CHANNEL_NAME(channel);
1969 cp->wdc_channel.channel = channel;
1970 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1971
1972 if (channel > 0) {
1973 cp->wdc_channel.ch_queue =
1974 sc->pciide_channels[0].wdc_channel.ch_queue;
1975 } else {
1976 cp->wdc_channel.ch_queue =
1977 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1978 }
1979 if (cp->wdc_channel.ch_queue == NULL) {
1980 printf("%s %s channel: "
1981 "can't allocate memory for command queue\n",
1982 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1983 return;
1984 }
1985
1986 printf("%s: %s channel %s to %s mode\n",
1987 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1988 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1989 "configured" : "wired",
1990 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1991 "native-PCI" : "compatibility");
1992
1993 /*
1994 * With a CMD PCI064x, if we get here, the first channel is enabled:
1995 * there's no way to disable the first channel without disabling
1996 * the whole device
1997 */
1998 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
1999 printf("%s: %s channel ignored (disabled)\n",
2000 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2001 return;
2002 }
2003
2004 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2005 if (cp->hw_ok == 0)
2006 return;
2007 if (channel == 1) {
2008 if (pciide_chan_candisable(cp)) {
2009 ctrl &= ~CMD_CTRL_2PORT;
2010 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2011 CMD_CTRL, ctrl);
2012 }
2013 }
2014 pciide_map_compat_intr(pa, cp, channel, interface);
2015 }
2016
2017 int
2018 cmd_pci_intr(arg)
2019 void *arg;
2020 {
2021 struct pciide_softc *sc = arg;
2022 struct pciide_channel *cp;
2023 struct channel_softc *wdc_cp;
2024 int i, rv, crv;
2025 u_int32_t priirq, secirq;
2026
2027 rv = 0;
2028 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2029 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2030 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2031 cp = &sc->pciide_channels[i];
2032 wdc_cp = &cp->wdc_channel;
2033 /* If a compat channel, skip. */
2034 if (cp->compat)
2035 continue;
2036 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2037 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2038 crv = wdcintr(wdc_cp);
2039 if (crv == 0)
2040 printf("%s:%d: bogus intr\n",
2041 sc->sc_wdcdev.sc_dev.dv_xname, i);
2042 else
2043 rv = 1;
2044 }
2045 }
2046 return rv;
2047 }
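
/*
 * Illustrative only, kept out of the build: cmd_pci_intr() above,
 * acer_pci_intr() and pdc202xx_pci_intr() below all follow the same
 * shared-interrupt pattern: read the chip-specific "channel interrupt
 * pending" indication, hand only the flagged native channels to
 * wdcintr(), and return whether anything was claimed so a shared PCI
 * interrupt line keeps working.  In this skeleton read_intr_pending()
 * is a hypothetical stand-in for the per-chip status read.
 */
#if 0
static int
pciide_shared_intr_sketch(struct pciide_softc *sc)
{
	struct pciide_channel *cp;
	int i, rv = 0;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		if (cp->compat)			/* compat channels have their own vector */
			continue;
		if (!read_intr_pending(sc, i))	/* hypothetical helper */
			continue;
		if (wdcintr(&cp->wdc_channel))
			rv = 1;			/* interrupt claimed */
	}
	return rv;
}
#endif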
2048
2049 void
2050 cmd_chip_map(sc, pa)
2051 struct pciide_softc *sc;
2052 struct pci_attach_args *pa;
2053 {
2054 int channel;
2055
2056 /*
2057 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2058 * and the base address registers can be disabled at
2059 * the hardware level. In this case, the device is wired
2060 * in compat mode and its first channel is always enabled,
2061 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2062 * In fact, it seems that the first channel of the CMD PCI0640
2063 * can't be disabled.
2064 */
2065
2066 #ifdef PCIIDE_CMD064x_DISABLE
2067 if (pciide_chipen(sc, pa) == 0)
2068 return;
2069 #endif
2070
2071 printf("%s: hardware does not support DMA\n",
2072 sc->sc_wdcdev.sc_dev.dv_xname);
2073 sc->sc_dma_ok = 0;
2074
2075 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2076 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2077 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
2078
2079 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2080 cmd_channel_map(pa, sc, channel);
2081 }
2082 }
2083
2084 void
2085 cmd0643_6_chip_map(sc, pa)
2086 struct pciide_softc *sc;
2087 struct pci_attach_args *pa;
2088 {
2089 struct pciide_channel *cp;
2090 int channel;
2091
2092 /*
2093 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2094 * and the base address registers can be disabled at
2095 * the hardware level. In this case, the device is wired
2096 * in compat mode and its first channel is always enabled,
2097 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2098 * In fact, it seems that the first channel of the CMD PCI0640
2099 * can't be disabled.
2100 */
2101
2102 #ifdef PCIIDE_CMD064x_DISABLE
2103 if (pciide_chipen(sc, pa) == 0)
2104 return;
2105 #endif
2106 printf("%s: bus-master DMA support present",
2107 sc->sc_wdcdev.sc_dev.dv_xname);
2108 pciide_mapreg_dma(sc, pa);
2109 printf("\n");
2110 if (sc->sc_dma_ok)
2111 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2112
2113 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2114 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2115 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2116 WDC_CAPABILITY_MODE;
2117 sc->sc_wdcdev.PIO_cap = 4;
2118 sc->sc_wdcdev.DMA_cap = 2;
2119 sc->sc_wdcdev.set_modes = cmd0643_6_setup_channel;
2120
2121 WDCDEBUG_PRINT(("cmd0643_6_chip_map: old timings reg 0x%x 0x%x\n",
2122 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2123 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2124 DEBUG_PROBE);
2125
2126 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2127 cp = &sc->pciide_channels[channel];
2128 cmd_channel_map(pa, sc, channel);
2129 if (cp->hw_ok == 0)
2130 continue;
2131 cmd0643_6_setup_channel(&cp->wdc_channel);
2132 }
2133 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2134 WDCDEBUG_PRINT(("cmd0643_6_chip_map: timings reg now 0x%x 0x%x\n",
2135 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2136 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2137 DEBUG_PROBE);
2138 }
2139
2140 void
2141 cmd0643_6_setup_channel(chp)
2142 struct channel_softc *chp;
2143 {
2144 struct ata_drive_datas *drvp;
2145 u_int8_t tim;
2146 u_int32_t idedma_ctl;
2147 int drive;
2148 struct pciide_channel *cp = (struct pciide_channel*)chp;
2149 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2150
2151 idedma_ctl = 0;
2152 /* setup DMA if needed */
2153 pciide_channel_dma_setup(cp);
2154
2155 for (drive = 0; drive < 2; drive++) {
2156 drvp = &chp->ch_drive[drive];
2157 /* If no drive, skip */
2158 if ((drvp->drive_flags & DRIVE) == 0)
2159 continue;
2160 /* add timing values, setup DMA if needed */
2161 tim = cmd0643_6_data_tim_pio[drvp->PIO_mode];
2162 if (drvp->drive_flags & DRIVE_DMA) {
2163 /*
2164 * use Multiword DMA.
2165 * Timings will be used for both PIO and DMA, so adjust
2166 * DMA mode if needed
2167 */
2168 if (drvp->PIO_mode >= 3 &&
2169 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2170 drvp->DMA_mode = drvp->PIO_mode - 2;
2171 }
2172 tim = cmd0643_6_data_tim_dma[drvp->DMA_mode];
2173 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2174 }
2175 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2176 CMD_DATA_TIM(chp->channel, drive), tim);
2177 }
2178 if (idedma_ctl != 0) {
2179 /* Add software bits in status register */
2180 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2181 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2182 idedma_ctl);
2183 }
2184 pciide_print_modes(cp);
2185 }
2186
2187 void
2188 cy693_chip_map(sc, pa)
2189 struct pciide_softc *sc;
2190 struct pci_attach_args *pa;
2191 {
2192 struct pciide_channel *cp;
2193 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
2194 sc->sc_tag, PCI_CLASS_REG));
2195 bus_size_t cmdsize, ctlsize;
2196
2197 if (pciide_chipen(sc, pa) == 0)
2198 return;
2199 /*
2200 * This chip has two PCI IDE functions, one for the primary and one
2201 * for the secondary channel, so we need to call
2202 * pciide_mapregs_compat() with the real channel number.
2203 */
2204 if (pa->pa_function == 1) {
2205 sc->sc_cy_compatchan = 0;
2206 } else if (pa->pa_function == 2) {
2207 sc->sc_cy_compatchan = 1;
2208 } else {
2209 printf("%s: unexpected PCI function %d\n",
2210 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2211 return;
2212 }
2213 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2214 printf("%s: bus-master DMA support present",
2215 sc->sc_wdcdev.sc_dev.dv_xname);
2216 pciide_mapreg_dma(sc, pa);
2217 } else {
2218 printf("%s: hardware does not support DMA",
2219 sc->sc_wdcdev.sc_dev.dv_xname);
2220 sc->sc_dma_ok = 0;
2221 }
2222 printf("\n");
2223
2224 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2225 if (sc->sc_cy_handle == NULL) {
2226 printf("%s: unable to map hyperCache control registers\n",
2227 sc->sc_wdcdev.sc_dev.dv_xname);
2228 sc->sc_dma_ok = 0;
2229 }
2230
2231 if (sc->sc_dma_ok)
2232 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2233 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2234 WDC_CAPABILITY_MODE;
2235 sc->sc_wdcdev.PIO_cap = 4;
2236 sc->sc_wdcdev.DMA_cap = 2;
2237 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2238
2239 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2240 sc->sc_wdcdev.nchannels = 1;
2241
2242 /* Only one channel for this chip; if we are here it's enabled */
2243 cp = &sc->pciide_channels[0];
2244 sc->wdc_chanarray[0] = &cp->wdc_channel;
2245 cp->name = PCIIDE_CHANNEL_NAME(0);
2246 cp->wdc_channel.channel = 0;
2247 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2248 cp->wdc_channel.ch_queue =
2249 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2250 if (cp->wdc_channel.ch_queue == NULL) {
2251 printf("%s primary channel: "
2252 "can't allocate memory for command queue\n",
2253 sc->sc_wdcdev.sc_dev.dv_xname);
2254 return;
2255 }
2256 printf("%s: primary channel %s to ",
2257 sc->sc_wdcdev.sc_dev.dv_xname,
2258 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2259 "configured" : "wired");
2260 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2261 printf("native-PCI");
2262 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2263 pciide_pci_intr);
2264 } else {
2265 printf("compatibility");
2266 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2267 &cmdsize, &ctlsize);
2268 }
2269 printf(" mode\n");
2270 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2271 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2272 wdcattach(&cp->wdc_channel);
2273 if (pciide_chan_candisable(cp)) {
2274 pci_conf_write(sc->sc_pc, sc->sc_tag,
2275 PCI_COMMAND_STATUS_REG, 0);
2276 }
2277 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2278 if (cp->hw_ok == 0)
2279 return;
2280 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2281 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2282 cy693_setup_channel(&cp->wdc_channel);
2283 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2284 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2285 }
2286
2287 void
2288 cy693_setup_channel(chp)
2289 struct channel_softc *chp;
2290 {
2291 struct ata_drive_datas *drvp;
2292 int drive;
2293 u_int32_t cy_cmd_ctrl;
2294 u_int32_t idedma_ctl;
2295 struct pciide_channel *cp = (struct pciide_channel*)chp;
2296 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2297 int dma_mode = -1;
2298
2299 cy_cmd_ctrl = idedma_ctl = 0;
2300
2301 /* setup DMA if needed */
2302 pciide_channel_dma_setup(cp);
2303
2304 for (drive = 0; drive < 2; drive++) {
2305 drvp = &chp->ch_drive[drive];
2306 /* If no drive, skip */
2307 if ((drvp->drive_flags & DRIVE) == 0)
2308 continue;
2309 /* add timing values, setup DMA if needed */
2310 if (drvp->drive_flags & DRIVE_DMA) {
2311 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2312 /* use Multiword DMA */
2313 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2314 dma_mode = drvp->DMA_mode;
2315 }
2316 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2317 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2318 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2319 CY_CMD_CTRL_IOW_REC_OFF(drive));
2320 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2321 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2322 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2323 CY_CMD_CTRL_IOR_REC_OFF(drive));
2324 }
2325 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2326 chp->ch_drive[0].DMA_mode = dma_mode;
2327 chp->ch_drive[1].DMA_mode = dma_mode;
2328
2329 if (dma_mode == -1)
2330 dma_mode = 0;
2331
2332 if (sc->sc_cy_handle != NULL) {
2333 /* Note: `multiple' is implied. */
2334 cy82c693_write(sc->sc_cy_handle,
2335 (sc->sc_cy_compatchan == 0) ?
2336 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2337 }
2338
2339 pciide_print_modes(cp);
2340
2341 if (idedma_ctl != 0) {
2342 /* Add software bits in status register */
2343 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2344 IDEDMA_CTL, idedma_ctl);
2345 }
2346 }
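
/*
 * Illustrative only, kept out of the build: the cy82c693 exposes a
 * single DMA timing selection per channel (written above through
 * cy82c693_write()), so the loop picks the lowest Multiword DMA mode
 * requested by either drive and assigns it back to both.  A standalone
 * restatement of that selection:
 */
#if 0
static int
cy693_shared_dma_mode_sketch(int d0_dma, int d0_mode, int d1_dma, int d1_mode)
{
	int dma_mode = -1;	/* -1: no drive on this channel uses DMA */

	if (d0_dma && (dma_mode == -1 || dma_mode > d0_mode))
		dma_mode = d0_mode;
	if (d1_dma && (dma_mode == -1 || dma_mode > d1_mode))
		dma_mode = d1_mode;
	return dma_mode;	/* the caller maps -1 to mode 0 for the chip */
}
#endif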
2347
2348 void
2349 sis_chip_map(sc, pa)
2350 struct pciide_softc *sc;
2351 struct pci_attach_args *pa;
2352 {
2353 struct pciide_channel *cp;
2354 int channel;
2355 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2356 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
2357 sc->sc_tag, PCI_CLASS_REG));
2358 pcireg_t rev = PCI_REVISION(pci_conf_read(sc->sc_pc,
2359 sc->sc_tag, PCI_CLASS_REG));
2360 bus_size_t cmdsize, ctlsize;
2361
2362 if (pciide_chipen(sc, pa) == 0)
2363 return;
2364 printf("%s: bus-master DMA support present",
2365 sc->sc_wdcdev.sc_dev.dv_xname);
2366 pciide_mapreg_dma(sc, pa);
2367 printf("\n");
2368 if (sc->sc_dma_ok) {
2369 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2370 if (rev >= 0xd0)
2371 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2372 }
2373
2374 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2375 WDC_CAPABILITY_MODE;
2376 sc->sc_wdcdev.PIO_cap = 4;
2377 sc->sc_wdcdev.DMA_cap = 2;
2378 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2379 sc->sc_wdcdev.UDMA_cap = 2;
2380 sc->sc_wdcdev.set_modes = sis_setup_channel;
2381
2382 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2383 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2384
2385 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2386 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2387 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2388
2389 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2390 cp = &sc->pciide_channels[channel];
2391 if (pciide_chansetup(sc, channel, interface) == 0)
2392 continue;
2393 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2394 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2395 printf("%s: %s channel ignored (disabled)\n",
2396 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2397 continue;
2398 }
2399 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2400 pciide_pci_intr);
2401 if (cp->hw_ok == 0)
2402 continue;
2403 if (pciide_chan_candisable(cp)) {
2404 if (channel == 0)
2405 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2406 else
2407 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2408 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2409 sis_ctr0);
2410 }
2411 pciide_map_compat_intr(pa, cp, channel, interface);
2412 if (cp->hw_ok == 0)
2413 continue;
2414 sis_setup_channel(&cp->wdc_channel);
2415 }
2416 }
2417
2418 void
2419 sis_setup_channel(chp)
2420 struct channel_softc *chp;
2421 {
2422 struct ata_drive_datas *drvp;
2423 int drive;
2424 u_int32_t sis_tim;
2425 u_int32_t idedma_ctl;
2426 struct pciide_channel *cp = (struct pciide_channel*)chp;
2427 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2428
2429 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2430 "channel %d 0x%x\n", chp->channel,
2431 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2432 DEBUG_PROBE);
2433 sis_tim = 0;
2434 idedma_ctl = 0;
2435 /* setup DMA if needed */
2436 pciide_channel_dma_setup(cp);
2437
2438 for (drive = 0; drive < 2; drive++) {
2439 drvp = &chp->ch_drive[drive];
2440 /* If no drive, skip */
2441 if ((drvp->drive_flags & DRIVE) == 0)
2442 continue;
2443 /* add timing values, setup DMA if needed */
2444 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2445 (drvp->drive_flags & DRIVE_UDMA) == 0)
2446 goto pio;
2447
2448 if (drvp->drive_flags & DRIVE_UDMA) {
2449 /* use Ultra/DMA */
2450 drvp->drive_flags &= ~DRIVE_DMA;
2451 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2452 SIS_TIM_UDMA_TIME_OFF(drive);
2453 sis_tim |= SIS_TIM_UDMA_EN(drive);
2454 } else {
2455 /*
2456 * use Multiword DMA
2457 * Timings will be used for both PIO and DMA,
2458 * so adjust DMA mode if needed
2459 */
2460 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2461 drvp->PIO_mode = drvp->DMA_mode + 2;
2462 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2463 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2464 drvp->PIO_mode - 2 : 0;
2465 if (drvp->DMA_mode == 0)
2466 drvp->PIO_mode = 0;
2467 }
2468 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2469 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2470 SIS_TIM_ACT_OFF(drive);
2471 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2472 SIS_TIM_REC_OFF(drive);
2473 }
2474 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2475 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2476 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2477 if (idedma_ctl != 0) {
2478 /* Add software bits in status register */
2479 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2480 IDEDMA_CTL, idedma_ctl);
2481 }
2482 pciide_print_modes(cp);
2483 }
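
/*
 * Illustrative only, kept out of the build: when one timing setting is
 * shared between PIO and Multiword DMA (here, and in acer_setup_channel()
 * and opti_setup_channel() below), the requested modes are reconciled in
 * both directions before the registers are programmed.  A standalone
 * restatement of that adjustment:
 */
#if 0
static void
reconcile_pio_dma_sketch(int *pio, int *dma)
{
	/* PIO may not be faster than the DMA timing allows */
	if (*pio > *dma + 2)
		*pio = *dma + 2;
	/* nor may DMA be faster than the shared PIO timing allows */
	if (*dma + 2 > *pio)
		*dma = (*pio > 2) ? *pio - 2 : 0;
	/* if we end up at MW DMA 0, use the most conservative PIO as well */
	if (*dma == 0)
		*pio = 0;
}
#endif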
2484
2485 void
2486 acer_chip_map(sc, pa)
2487 struct pciide_softc *sc;
2488 struct pci_attach_args *pa;
2489 {
2490 struct pciide_channel *cp;
2491 int channel;
2492 pcireg_t cr, interface;
2493 bus_size_t cmdsize, ctlsize;
2494
2495 if (pciide_chipen(sc, pa) == 0)
2496 return;
2497 printf("%s: bus-master DMA support present",
2498 sc->sc_wdcdev.sc_dev.dv_xname);
2499 pciide_mapreg_dma(sc, pa);
2500 printf("\n");
2501 if (sc->sc_dma_ok)
2502 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2503
2504 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2505 WDC_CAPABILITY_MODE;
2506
2507 sc->sc_wdcdev.PIO_cap = 4;
2508 sc->sc_wdcdev.DMA_cap = 2;
2509 sc->sc_wdcdev.UDMA_cap = 2;
2510 sc->sc_wdcdev.set_modes = acer_setup_channel;
2511 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2512 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2513
2514 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2515 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2516 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2517
2518 /* Enable "microsoft register bits" R/W. */
2519 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2520 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2521 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2522 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2523 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2524 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2525 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2526 ~ACER_CHANSTATUSREGS_RO);
2527 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2528 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2529 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2530 /* Don't use cr, re-read the real register content instead */
2531 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2532 PCI_CLASS_REG));
2533
2534 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2535 cp = &sc->pciide_channels[channel];
2536 if (pciide_chansetup(sc, channel, interface) == 0)
2537 continue;
2538 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2539 printf("%s: %s channel ignored (disabled)\n",
2540 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2541 continue;
2542 }
2543 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2544 acer_pci_intr);
2545 if (cp->hw_ok == 0)
2546 continue;
2547 if (pciide_chan_candisable(cp)) {
2548 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2549 pci_conf_write(sc->sc_pc, sc->sc_tag,
2550 PCI_CLASS_REG, cr);
2551 }
2552 pciide_map_compat_intr(pa, cp, channel, interface);
2553 acer_setup_channel(&cp->wdc_channel);
2554 }
2555 }
2556
2557 void
2558 acer_setup_channel(chp)
2559 struct channel_softc *chp;
2560 {
2561 struct ata_drive_datas *drvp;
2562 int drive;
2563 u_int32_t acer_fifo_udma;
2564 u_int32_t idedma_ctl;
2565 struct pciide_channel *cp = (struct pciide_channel*)chp;
2566 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2567
2568 idedma_ctl = 0;
2569 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2570 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2571 acer_fifo_udma), DEBUG_PROBE);
2572 /* setup DMA if needed */
2573 pciide_channel_dma_setup(cp);
2574
2575 for (drive = 0; drive < 2; drive++) {
2576 drvp = &chp->ch_drive[drive];
2577 /* If no drive, skip */
2578 if ((drvp->drive_flags & DRIVE) == 0)
2579 continue;
2580 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2581 "channel %d drive %d 0x%x\n", chp->channel, drive,
2582 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2583 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2584 /* clear FIFO/DMA mode */
2585 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2586 ACER_UDMA_EN(chp->channel, drive) |
2587 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2588
2589 /* add timing values, setup DMA if needed */
2590 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2591 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2592 acer_fifo_udma |=
2593 ACER_FTH_OPL(chp->channel, drive, 0x1);
2594 goto pio;
2595 }
2596
2597 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2598 if (drvp->drive_flags & DRIVE_UDMA) {
2599 /* use Ultra/DMA */
2600 drvp->drive_flags &= ~DRIVE_DMA;
2601 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2602 acer_fifo_udma |=
2603 ACER_UDMA_TIM(chp->channel, drive,
2604 acer_udma[drvp->UDMA_mode]);
2605 } else {
2606 /*
2607 * use Multiword DMA
2608 * Timings will be used for both PIO and DMA,
2609 * so adjust DMA mode if needed
2610 */
2611 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2612 drvp->PIO_mode = drvp->DMA_mode + 2;
2613 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2614 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2615 drvp->PIO_mode - 2 : 0;
2616 if (drvp->DMA_mode == 0)
2617 drvp->PIO_mode = 0;
2618 }
2619 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2620 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2621 ACER_IDETIM(chp->channel, drive),
2622 acer_pio[drvp->PIO_mode]);
2623 }
2624 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2625 acer_fifo_udma), DEBUG_PROBE);
2626 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2627 if (idedma_ctl != 0) {
2628 /* Add software bits in status register */
2629 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2630 IDEDMA_CTL, idedma_ctl);
2631 }
2632 pciide_print_modes(cp);
2633 }
2634
2635 int
2636 acer_pci_intr(arg)
2637 void *arg;
2638 {
2639 struct pciide_softc *sc = arg;
2640 struct pciide_channel *cp;
2641 struct channel_softc *wdc_cp;
2642 int i, rv, crv;
2643 u_int32_t chids;
2644
2645 rv = 0;
2646 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2647 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2648 cp = &sc->pciide_channels[i];
2649 wdc_cp = &cp->wdc_channel;
2650 /* If a compat channel, skip. */
2651 if (cp->compat)
2652 continue;
2653 if (chids & ACER_CHIDS_INT(i)) {
2654 crv = wdcintr(wdc_cp);
2655 if (crv == 0)
2656 printf("%s:%d: bogus intr\n",
2657 sc->sc_wdcdev.sc_dev.dv_xname, i);
2658 else
2659 rv = 1;
2660 }
2661 }
2662 return rv;
2663 }
2664
2665 /* A macro to test the controller product ID */
2666 #define PDC_IS_262(sc) ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66)
2667
2668 void
2669 pdc202xx_chip_map(sc, pa)
2670 struct pciide_softc *sc;
2671 struct pci_attach_args *pa;
2672 {
2673 struct pciide_channel *cp;
2674 int channel;
2675 pcireg_t interface, st, mode;
2676 bus_size_t cmdsize, ctlsize;
2677
2678 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2679 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
2680 DEBUG_PROBE);
2681 if (pciide_chipen(sc, pa) == 0)
2682 return;
2683
2684 /* turn off RAID mode */
2685 st &= ~PDC2xx_STATE_IDERAID;
2686
2687 /*
2688 * We can't rely on the PCI_CLASS_REG content if the chip was in
2689 * RAID mode; we have to fake the interface bits.
2690 */
2691 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
2692 if (st & PDC2xx_STATE_NATIVE)
2693 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2694
2695 printf("%s: bus-master DMA support present",
2696 sc->sc_wdcdev.sc_dev.dv_xname);
2697 pciide_mapreg_dma(sc, pa);
2698 printf("\n");
2699 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2700 WDC_CAPABILITY_MODE;
2701 if (sc->sc_dma_ok)
2702 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2703 sc->sc_wdcdev.PIO_cap = 4;
2704 sc->sc_wdcdev.DMA_cap = 2;
2705 if (PDC_IS_262(sc))
2706 sc->sc_wdcdev.UDMA_cap = 4;
2707 else
2708 sc->sc_wdcdev.UDMA_cap = 2;
2709 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
2710 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2711 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2712
2713 /* setup failsafe defaults */
2714 mode = 0;
2715 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
2716 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
2717 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
2718 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
2719 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2720 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
2721 "initial timings 0x%x, now 0x%x\n", channel,
2722 pci_conf_read(sc->sc_pc, sc->sc_tag,
2723 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
2724 DEBUG_PROBE);
2725 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
2726 mode | PDC2xx_TIM_IORDYp);
2727 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
2728 "initial timings 0x%x, now 0x%x\n", channel,
2729 pci_conf_read(sc->sc_pc, sc->sc_tag,
2730 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
2731 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
2732 mode);
2733 }
2734
2735 mode = PDC2xx_SCR_DMA;
2736 if (PDC_IS_262(sc)) {
2737 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
2738 } else {
2739 /* the BIOS set it up this way */
2740 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
2741 }
2742 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
2743 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
2744 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
2745 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
2746 DEBUG_PROBE);
2747 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
2748
2749 /* controller initial state register is OK even without BIOS */
2750 /* Set DMA mode to IDE DMA compatibility */
2751 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
2752 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
2753 DEBUG_PROBE);
2754 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
2755 mode | 0x1);
2756 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
2757 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
2758 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
2759 mode | 0x1);
2760
2761 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2762 cp = &sc->pciide_channels[channel];
2763 if (pciide_chansetup(sc, channel, interface) == 0)
2764 continue;
2765 if ((st & (PDC_IS_262(sc) ?
2766 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
2767 printf("%s: %s channel ignored (disabled)\n",
2768 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2769 continue;
2770 }
2771 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2772 pdc202xx_pci_intr);
2773 if (cp->hw_ok == 0)
2774 continue;
2775 if (pciide_chan_candisable(cp))
2776 st &= ~(PDC_IS_262(sc) ?
2777 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
2778 pciide_map_compat_intr(pa, cp, channel, interface);
2779 pdc202xx_setup_channel(&cp->wdc_channel);
2780 }
2781 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
2782 DEBUG_PROBE);
2783 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
2784 return;
2785 }
2786
2787 void
2788 pdc202xx_setup_channel(chp)
2789 struct channel_softc *chp;
2790 {
2791 struct ata_drive_datas *drvp;
2792 int drive;
2793 pcireg_t mode, st;
2794 u_int32_t idedma_ctl, scr, atapi;
2795 struct pciide_channel *cp = (struct pciide_channel*)chp;
2796 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2797 int channel = chp->channel;
2798
2799 /* setup DMA if needed */
2800 pciide_channel_dma_setup(cp);
2801
2802 idedma_ctl = 0;
2803
2804 /* Per channel settings */
2805 if (PDC_IS_262(sc)) {
2806 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2807 PDC262_U66);
2808 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2809 /* Trim UDMA mode */
2810 if ((st & PDC262_STATE_80P(channel)) == 0 ||
2811 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
2812 chp->ch_drive[0].UDMA_mode <= 2) ||
2813 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
2814 chp->ch_drive[1].UDMA_mode <= 2)) {
2815 if (chp->ch_drive[0].UDMA_mode > 2)
2816 chp->ch_drive[0].UDMA_mode = 2;
2817 if (chp->ch_drive[1].UDMA_mode > 2)
2818 chp->ch_drive[1].UDMA_mode = 2;
2819 }
2820 /* Set U66 if needed */
2821 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
2822 chp->ch_drive[0].UDMA_mode > 2) ||
2823 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
2824 chp->ch_drive[1].UDMA_mode > 2))
2825 scr |= PDC262_U66_EN(channel);
2826 else
2827 scr &= ~PDC262_U66_EN(channel);
2828 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2829 PDC262_U66, scr);
2830 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
2831 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
2832 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2833 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
2834 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
2835 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
2836 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2837 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
2838 atapi = 0;
2839 else
2840 atapi = PDC262_ATAPI_UDMA;
2841 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
2842 PDC262_ATAPI(channel), atapi);
2843 }
2844 }
2845 for (drive = 0; drive < 2; drive++) {
2846 drvp = &chp->ch_drive[drive];
2847 /* If no drive, skip */
2848 if ((drvp->drive_flags & DRIVE) == 0)
2849 continue;
2850 mode = 0;
2851 if (drvp->drive_flags & DRIVE_UDMA) {
2852 mode = PDC2xx_TIM_SET_MB(mode,
2853 pdc2xx_udma_mb[drvp->UDMA_mode]);
2854 mode = PDC2xx_TIM_SET_MC(mode,
2855 pdc2xx_udma_mc[drvp->UDMA_mode]);
2856 drvp->drive_flags &= ~DRIVE_DMA;
2857 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2858 } else if (drvp->drive_flags & DRIVE_DMA) {
2859 mode = PDC2xx_TIM_SET_MB(mode,
2860 pdc2xx_dma_mb[drvp->DMA_mode]);
2861 mode = PDC2xx_TIM_SET_MC(mode,
2862 pdc2xx_dma_mc[drvp->DMA_mode]);
2863 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2864 } else {
2865 mode = PDC2xx_TIM_SET_MB(mode,
2866 pdc2xx_dma_mb[0]);
2867 mode = PDC2xx_TIM_SET_MC(mode,
2868 pdc2xx_dma_mc[0]);
2869 }
2870 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
2871 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
2872 if (drvp->drive_flags & DRIVE_ATA)
2873 mode |= PDC2xx_TIM_PRE;
2874 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
2875 if (drvp->PIO_mode >= 3) {
2876 mode |= PDC2xx_TIM_IORDY;
2877 if (drive == 0)
2878 mode |= PDC2xx_TIM_IORDYp;
2879 }
2880 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
2881 "timings 0x%x\n",
2882 sc->sc_wdcdev.sc_dev.dv_xname,
2883 chp->channel, drive, mode), DEBUG_PROBE);
2884 pci_conf_write(sc->sc_pc, sc->sc_tag,
2885 PDC2xx_TIM(chp->channel, drive), mode);
2886 }
2887 if (idedma_ctl != 0) {
2888 /* Add software bits in status register */
2889 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2890 IDEDMA_CTL, idedma_ctl);
2891 }
2892 pciide_print_modes(cp);
2893 }
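
/*
 * Illustrative only, kept out of the build: the PDC20262-specific block
 * above makes two related per-channel decisions: clamp both drives to
 * UDMA mode 2 when no 80-conductor cable is reported by
 * PDC262_STATE_80P() (or when a drive is already limited to mode 2), and
 * enable the Ultra/66 clock via PDC262_U66_EN() only when a drive
 * actually runs UDMA above mode 2.  This sketch assumes both drives use
 * UDMA, so the DRIVE_UDMA checks of the real code are omitted.
 */
#if 0
static int	/* returns nonzero when U66 should be enabled */
pdc262_u66_decision_sketch(int cable80, int *udma0, int *udma1)
{
	if (!cable80 || *udma0 <= 2 || *udma1 <= 2) {
		/* stay at UDMA 2 or below */
		if (*udma0 > 2)
			*udma0 = 2;
		if (*udma1 > 2)
			*udma1 = 2;
	}
	return (*udma0 > 2 || *udma1 > 2);
}
#endif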
2894
2895 int
2896 pdc202xx_pci_intr(arg)
2897 void *arg;
2898 {
2899 struct pciide_softc *sc = arg;
2900 struct pciide_channel *cp;
2901 struct channel_softc *wdc_cp;
2902 int i, rv, crv;
2903 u_int32_t scr;
2904
2905 rv = 0;
2906 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
2907 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2908 cp = &sc->pciide_channels[i];
2909 wdc_cp = &cp->wdc_channel;
2910 /* If a compat channel, skip. */
2911 if (cp->compat)
2912 continue;
2913 if (scr & PDC2xx_SCR_INT(i)) {
2914 crv = wdcintr(wdc_cp);
2915 if (crv == 0)
2916 printf("%s:%d: bogus intr\n",
2917 sc->sc_wdcdev.sc_dev.dv_xname, i);
2918 else
2919 rv = 1;
2920 }
2921 }
2922 return rv;
2923 }
2924
2925 void
2926 opti_chip_map(sc, pa)
2927 struct pciide_softc *sc;
2928 struct pci_attach_args *pa;
2929 {
2930 struct pciide_channel *cp;
2931 bus_size_t cmdsize, ctlsize;
2932 pcireg_t interface;
2933 u_int8_t init_ctrl;
2934 int channel;
2935
2936 if (pciide_chipen(sc, pa) == 0)
2937 return;
2938 printf("%s: bus-master DMA support present",
2939 sc->sc_wdcdev.sc_dev.dv_xname);
2940 pciide_mapreg_dma(sc, pa);
2941 printf("\n");
2942
2943 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2944 WDC_CAPABILITY_MODE;
2945 sc->sc_wdcdev.PIO_cap = 4;
2946 if (sc->sc_dma_ok) {
2947 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2948 sc->sc_wdcdev.DMA_cap = 2;
2949 }
2950 sc->sc_wdcdev.set_modes = opti_setup_channel;
2951
2952 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2953 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2954
2955 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
2956 OPTI_REG_INIT_CONTROL);
2957
2958 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
2959 sc->sc_tag, PCI_CLASS_REG));
2960
2961 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2962 cp = &sc->pciide_channels[channel];
2963 if (pciide_chansetup(sc, channel, interface) == 0)
2964 continue;
2965 if (channel == 1 &&
2966 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
2967 printf("%s: %s channel ignored (disabled)\n",
2968 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2969 continue;
2970 }
2971 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2972 pciide_pci_intr);
2973 if (cp->hw_ok == 0)
2974 continue;
2975 pciide_map_compat_intr(pa, cp, channel, interface);
2976 if (cp->hw_ok == 0)
2977 continue;
2978 opti_setup_channel(&cp->wdc_channel);
2979 }
2980 }
2981
2982 void
2983 opti_setup_channel(chp)
2984 struct channel_softc *chp;
2985 {
2986 struct ata_drive_datas *drvp;
2987 struct pciide_channel *cp = (struct pciide_channel*)chp;
2988 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2989 int drive, spd;
2990 int mode[2];
2991 u_int8_t rv, mr;
2992
2993 /*
2994 * The `Delay' and `Address Setup Time' fields of the
2995 * Miscellaneous Register are always zero initially.
2996 */
2997 mr = opti_read_config(chp, OPTI_REG_MISC) &
2998 ~(OPTI_MISC_DELAY_MASK |
2999 OPTI_MISC_ADDR_SETUP_MASK |
3000 OPTI_MISC_INDEX_MASK);
3001
3002 /* Prime the control register before setting timing values */
3003 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3004
3005 /* Determine the clock rate of the PCI bus the chip is attached to */
3006 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3007 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3008
3009 /* setup DMA if needed */
3010 pciide_channel_dma_setup(cp);
3011
3012 for (drive = 0; drive < 2; drive++) {
3013 drvp = &chp->ch_drive[drive];
3014 /* If no drive, skip */
3015 if ((drvp->drive_flags & DRIVE) == 0) {
3016 mode[drive] = -1;
3017 continue;
3018 }
3019
3020 if ((drvp->drive_flags & DRIVE_DMA)) {
3021 /*
3022 * Timings will be used for both PIO and DMA,
3023 * so adjust DMA mode if needed
3024 */
3025 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3026 drvp->PIO_mode = drvp->DMA_mode + 2;
3027 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3028 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3029 drvp->PIO_mode - 2 : 0;
3030 if (drvp->DMA_mode == 0)
3031 drvp->PIO_mode = 0;
3032
3033 mode[drive] = drvp->DMA_mode + 5;
3034 } else
3035 mode[drive] = drvp->PIO_mode;
3036
3037 if (drive && mode[0] >= 0 &&
3038 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3039 /*
3040 * Can't have two drives using different values
3041 * for `Address Setup Time'.
3042 * Slow down the faster drive to compensate.
3043 */
3044 int d = (opti_tim_as[spd][mode[0]] >
3045 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3046
3047 mode[d] = mode[1-d];
3048 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3049 chp->ch_drive[d].DMA_mode = 0;
3050 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3051 }
3052 }
3053
3054 for (drive = 0; drive < 2; drive++) {
3055 int m;
3056 if ((m = mode[drive]) < 0)
3057 continue;
3058
3059 /* Set the Address Setup Time and select appropriate index */
3060 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3061 rv |= OPTI_MISC_INDEX(drive);
3062 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3063
3064 /* Set the pulse width and recovery timing parameters */
3065 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3066 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3067 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3068 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3069
3070 /* Set the Enhanced Mode register appropriately */
3071 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3072 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3073 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3074 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3075 }
3076
3077 /* Finally, enable the timings */
3078 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3079
3080 pciide_print_modes(cp);
3081 }
3082