/* $NetBSD: pciide.c,v 1.59 2000/05/27 17:18:41 scw Exp $ */
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119
120 /* inlines for reading/writing 8-bit PCI registers */
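/*
 * A worked example of the byte-lane arithmetic below (a sketch of what the
 * inlines do, not new behaviour): reading 8-bit register 0x41 fetches the
 * 32-bit dword at config offset 0x40 and returns bits 15:8; the write
 * routine merges the new byte back into that dword before issuing the
 * 32-bit config write.
 */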
121 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
122 int));
123 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
124 int, u_int8_t));
125
126 static __inline u_int8_t
127 pciide_pci_read(pc, pa, reg)
128 pci_chipset_tag_t pc;
129 pcitag_t pa;
130 int reg;
131 {
132
133 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
134 ((reg & 0x03) * 8) & 0xff);
135 }
136
137 static __inline void
138 pciide_pci_write(pc, pa, reg, val)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 u_int8_t val;
143 {
144 pcireg_t pcival;
145
146 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
147 pcival &= ~(0xff << ((reg & 0x03) * 8));
148 pcival |= (val << ((reg & 0x03) * 8));
149 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
150 }
151
152 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
153
154 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
155 void piix_setup_channel __P((struct channel_softc*));
156 void piix3_4_setup_channel __P((struct channel_softc*));
157 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
158 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
159 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
160
161 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
162 void amd756_setup_channel __P((struct channel_softc*));
163
164 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
165 void apollo_setup_channel __P((struct channel_softc*));
166
167 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void cmd0643_6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
169 void cmd0643_6_setup_channel __P((struct channel_softc*));
170 void cmd_channel_map __P((struct pci_attach_args *,
171 struct pciide_softc *, int));
172 int cmd_pci_intr __P((void *));
173
174 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cy693_setup_channel __P((struct channel_softc*));
176
177 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void sis_setup_channel __P((struct channel_softc*));
179
180 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void acer_setup_channel __P((struct channel_softc*));
182 int acer_pci_intr __P((void *));
183
184 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void pdc202xx_setup_channel __P((struct channel_softc*));
186 int pdc202xx_pci_intr __P((void *));
187
188 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void opti_setup_channel __P((struct channel_softc*));
190
191 void pciide_channel_dma_setup __P((struct pciide_channel *));
192 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
193 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
194 void pciide_dma_start __P((void*, int, int));
195 int pciide_dma_finish __P((void*, int, int, int));
196 void pciide_print_modes __P((struct pciide_channel *));
197
198 struct pciide_product_desc {
199 u_int32_t ide_product;
200 int ide_flags;
201 const char *ide_name;
202 /* map and setup chip, probe drives */
203 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
204 };
205
206 /* Flags for ide_flags */
207 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
208
/* Default product description for devices not specifically known to this driver */
210 const struct pciide_product_desc default_product_desc = {
211 0,
212 0,
213 "Generic PCI IDE controller",
214 default_chip_map,
215 };
216
217 const struct pciide_product_desc pciide_intel_products[] = {
218 { PCI_PRODUCT_INTEL_82092AA,
219 0,
220 "Intel 82092AA IDE controller",
221 default_chip_map,
222 },
223 { PCI_PRODUCT_INTEL_82371FB_IDE,
224 0,
225 "Intel 82371FB IDE controller (PIIX)",
226 piix_chip_map,
227 },
228 { PCI_PRODUCT_INTEL_82371SB_IDE,
229 0,
230 "Intel 82371SB IDE Interface (PIIX3)",
231 piix_chip_map,
232 },
233 { PCI_PRODUCT_INTEL_82371AB_IDE,
234 0,
235 "Intel 82371AB IDE controller (PIIX4)",
236 piix_chip_map,
237 },
238 { PCI_PRODUCT_INTEL_82801AA_IDE,
239 0,
240 "Intel 82801AA IDE Controller (ICH)",
241 piix_chip_map,
242 },
243 { PCI_PRODUCT_INTEL_82801AB_IDE,
244 0,
245 "Intel 82801AB IDE Controller (ICH0)",
246 piix_chip_map,
247 },
248 { 0,
249 0,
250 NULL,
251 }
252 };
253
254 const struct pciide_product_desc pciide_amd_products[] = {
255 { PCI_PRODUCT_AMD_PBC756_IDE,
256 0,
257 "Advanced Micro Devices AMD756 IDE Controller",
258 amd756_chip_map
259 },
260 { 0,
261 0,
262 NULL,
263 }
264 };
265
266 const struct pciide_product_desc pciide_cmd_products[] = {
267 { PCI_PRODUCT_CMDTECH_640,
268 0,
269 "CMD Technology PCI0640",
270 cmd_chip_map
271 },
272 { PCI_PRODUCT_CMDTECH_643,
273 0,
274 "CMD Technology PCI0643",
275 cmd0643_6_chip_map,
276 },
277 { PCI_PRODUCT_CMDTECH_646,
278 0,
279 "CMD Technology PCI0646",
280 cmd0643_6_chip_map,
281 },
282 { 0,
283 0,
284 NULL,
285 }
286 };
287
288 const struct pciide_product_desc pciide_via_products[] = {
289 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
290 0,
291 "VIA Technologies VT82C586 (Apollo VP) IDE Controller",
292 apollo_chip_map,
293 },
294 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
295 0,
296 "VIA Technologies VT82C586A IDE Controller",
297 apollo_chip_map,
298 },
299 { 0,
300 0,
301 NULL,
302 }
303 };
304
305 const struct pciide_product_desc pciide_cypress_products[] = {
306 { PCI_PRODUCT_CONTAQ_82C693,
307 0,
308 "Contaq Microsystems CY82C693 IDE Controller",
309 cy693_chip_map,
310 },
311 { 0,
312 0,
313 NULL,
314 }
315 };
316
317 const struct pciide_product_desc pciide_sis_products[] = {
318 { PCI_PRODUCT_SIS_5597_IDE,
319 0,
320 "Silicon Integrated System 5597/5598 IDE controller",
321 sis_chip_map,
322 },
323 { 0,
324 0,
325 NULL,
326 }
327 };
328
329 const struct pciide_product_desc pciide_acer_products[] = {
330 { PCI_PRODUCT_ALI_M5229,
331 0,
332 "Acer Labs M5229 UDMA IDE Controller",
333 acer_chip_map,
334 },
335 { 0,
336 0,
337 NULL,
338 }
339 };
340
341 const struct pciide_product_desc pciide_promise_products[] = {
342 { PCI_PRODUCT_PROMISE_ULTRA33,
343 IDE_PCI_CLASS_OVERRIDE,
344 "Promise Ultra33/ATA Bus Master IDE Accelerator",
345 pdc202xx_chip_map,
346 },
347 { PCI_PRODUCT_PROMISE_ULTRA66,
348 IDE_PCI_CLASS_OVERRIDE,
349 "Promise Ultra66/ATA Bus Master IDE Accelerator",
350 pdc202xx_chip_map,
351 },
352 { 0,
353 0,
354 NULL,
355 }
356 };
357
358 const struct pciide_product_desc pciide_opti_products[] = {
359 { PCI_PRODUCT_OPTI_82C621,
360 0,
361 "OPTi 82c621 PCI IDE controller",
362 opti_chip_map,
363 },
364 { PCI_PRODUCT_OPTI_82C568,
365 0,
366 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
367 opti_chip_map,
368 },
369 { PCI_PRODUCT_OPTI_82D568,
370 0,
371 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
372 opti_chip_map,
373 },
374 { 0,
375 0,
376 NULL,
377 }
378 };
379
380 struct pciide_vendor_desc {
381 u_int32_t ide_vendor;
382 const struct pciide_product_desc *ide_products;
383 };
384
385 const struct pciide_vendor_desc pciide_vendors[] = {
386 { PCI_VENDOR_INTEL, pciide_intel_products },
387 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
388 { PCI_VENDOR_VIATECH, pciide_via_products },
389 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
390 { PCI_VENDOR_SIS, pciide_sis_products },
391 { PCI_VENDOR_ALI, pciide_acer_products },
392 { PCI_VENDOR_PROMISE, pciide_promise_products },
393 { PCI_VENDOR_AMD, pciide_amd_products },
394 { PCI_VENDOR_OPTI, pciide_opti_products },
395 { 0, NULL }
396 };
397
398 /* options passed via the 'flags' config keyword */
399 #define PCIIDE_OPTIONS_DMA 0x01
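/*
 * Hypothetical kernel config fragment showing how this flag would typically
 * be set (sketch only; exact syntax depends on the port's config file):
 *
 *	pciide* at pci? dev ? function ? flags 0x0001
 *
 * The flag is picked up below through dv_cfdata->cf_flags.
 */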
400
401 int pciide_match __P((struct device *, struct cfdata *, void *));
402 void pciide_attach __P((struct device *, struct device *, void *));
403
404 struct cfattach pciide_ca = {
405 sizeof(struct pciide_softc), pciide_match, pciide_attach
406 };
407 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
408 int pciide_mapregs_compat __P(( struct pci_attach_args *,
409 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
410 int pciide_mapregs_native __P((struct pci_attach_args *,
411 struct pciide_channel *, bus_size_t *, bus_size_t *,
412 int (*pci_intr) __P((void *))));
413 void pciide_mapreg_dma __P((struct pciide_softc *,
414 struct pci_attach_args *));
415 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
416 void pciide_mapchan __P((struct pci_attach_args *,
417 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
418 int (*pci_intr) __P((void *))));
419 int pciiide_chan_candisable __P((struct pciide_channel *));
420 void pciide_map_compat_intr __P(( struct pci_attach_args *,
421 struct pciide_channel *, int, int));
422 int pciide_print __P((void *, const char *pnp));
423 int pciide_compat_intr __P((void *));
424 int pciide_pci_intr __P((void *));
425 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
426
427 const struct pciide_product_desc *
428 pciide_lookup_product(id)
429 u_int32_t id;
430 {
431 const struct pciide_product_desc *pp;
432 const struct pciide_vendor_desc *vp;
433
434 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
435 if (PCI_VENDOR(id) == vp->ide_vendor)
436 break;
437
438 if ((pp = vp->ide_products) == NULL)
439 return NULL;
440
441 for (; pp->ide_name != NULL; pp++)
442 if (PCI_PRODUCT(id) == pp->ide_product)
443 break;
444
445 if (pp->ide_name == NULL)
446 return NULL;
447 return pp;
448 }
449
450 int
451 pciide_match(parent, match, aux)
452 struct device *parent;
453 struct cfdata *match;
454 void *aux;
455 {
456 struct pci_attach_args *pa = aux;
457 const struct pciide_product_desc *pp;
458
459 /*
460 * Check the ID register to see that it's a PCI IDE controller.
461 * If it is, we assume that we can deal with it; it _should_
462 * work in a standardized way...
463 */
464 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
465 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
466 return (1);
467 }
468
469 /*
* Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
* controllers. Let's see if we can deal with them anyway.
472 */
473 pp = pciide_lookup_product(pa->pa_id);
474 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
475 return (1);
476 }
477
478 return (0);
479 }
480
481 void
482 pciide_attach(parent, self, aux)
483 struct device *parent, *self;
484 void *aux;
485 {
486 struct pci_attach_args *pa = aux;
487 pci_chipset_tag_t pc = pa->pa_pc;
488 pcitag_t tag = pa->pa_tag;
489 struct pciide_softc *sc = (struct pciide_softc *)self;
490 pcireg_t csr;
491 char devinfo[256];
492 const char *displaydev;
493
494 sc->sc_pp = pciide_lookup_product(pa->pa_id);
495 if (sc->sc_pp == NULL) {
496 sc->sc_pp = &default_product_desc;
497 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
498 displaydev = devinfo;
499 } else
500 displaydev = sc->sc_pp->ide_name;
501
502 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
503
504 sc->sc_pc = pa->pa_pc;
505 sc->sc_tag = pa->pa_tag;
506 #ifdef WDCDEBUG
507 if (wdcdebug_pciide_mask & DEBUG_PROBE)
508 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
509 #endif
510
511 sc->sc_pp->chip_map(sc, pa);
512
513 if (sc->sc_dma_ok) {
514 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
515 csr |= PCI_COMMAND_MASTER_ENABLE;
516 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
517 }
518 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
519 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
520 }
521
/* tell whether the chip is enabled or not */
523 int
524 pciide_chipen(sc, pa)
525 struct pciide_softc *sc;
526 struct pci_attach_args *pa;
527 {
528 pcireg_t csr;
529 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
530 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
531 PCI_COMMAND_STATUS_REG);
532 printf("%s: device disabled (at %s)\n",
533 sc->sc_wdcdev.sc_dev.dv_xname,
534 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
535 "device" : "bridge");
536 return 0;
537 }
538 return 1;
539 }
540
541 int
542 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
543 struct pci_attach_args *pa;
544 struct pciide_channel *cp;
545 int compatchan;
546 bus_size_t *cmdsizep, *ctlsizep;
547 {
548 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
549 struct channel_softc *wdc_cp = &cp->wdc_channel;
550
551 cp->compat = 1;
552 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
553 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
554
555 wdc_cp->cmd_iot = pa->pa_iot;
556 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
557 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
558 printf("%s: couldn't map %s channel cmd regs\n",
559 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
560 return (0);
561 }
562
563 wdc_cp->ctl_iot = pa->pa_iot;
564 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
565 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
566 printf("%s: couldn't map %s channel ctl regs\n",
567 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
568 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
569 PCIIDE_COMPAT_CMD_SIZE);
570 return (0);
571 }
572
573 return (1);
574 }
575
576 int
577 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
578 struct pci_attach_args * pa;
579 struct pciide_channel *cp;
580 bus_size_t *cmdsizep, *ctlsizep;
581 int (*pci_intr) __P((void *));
582 {
583 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
584 struct channel_softc *wdc_cp = &cp->wdc_channel;
585 const char *intrstr;
586 pci_intr_handle_t intrhandle;
587
588 cp->compat = 0;
589
590 if (sc->sc_pci_ih == NULL) {
591 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
592 pa->pa_intrline, &intrhandle) != 0) {
593 printf("%s: couldn't map native-PCI interrupt\n",
594 sc->sc_wdcdev.sc_dev.dv_xname);
595 return 0;
596 }
597 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
598 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
599 intrhandle, IPL_BIO, pci_intr, sc);
600 if (sc->sc_pci_ih != NULL) {
601 printf("%s: using %s for native-PCI interrupt\n",
602 sc->sc_wdcdev.sc_dev.dv_xname,
603 intrstr ? intrstr : "unknown interrupt");
604 } else {
605 printf("%s: couldn't establish native-PCI interrupt",
606 sc->sc_wdcdev.sc_dev.dv_xname);
607 if (intrstr != NULL)
608 printf(" at %s", intrstr);
609 printf("\n");
610 return 0;
611 }
612 }
613 cp->ih = sc->sc_pci_ih;
614 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
615 PCI_MAPREG_TYPE_IO, 0,
616 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
617 printf("%s: couldn't map %s channel cmd regs\n",
618 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
619 return 0;
620 }
621
622 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
623 PCI_MAPREG_TYPE_IO, 0,
624 &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) {
625 printf("%s: couldn't map %s channel ctl regs\n",
626 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
627 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
628 return 0;
629 }
630 return (1);
631 }
632
633 void
634 pciide_mapreg_dma(sc, pa)
635 struct pciide_softc *sc;
636 struct pci_attach_args *pa;
637 {
638 /*
639 * Map DMA registers
640 *
641 * Note that sc_dma_ok is the right variable to test to see if
642 * DMA can be done. If the interface doesn't support DMA,
643 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
644 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
645 * non-zero if the interface supports DMA and the registers
646 * could be mapped.
647 *
648 * XXX Note that despite the fact that the Bus Master IDE specs
649 * XXX say that "The bus master IDE function uses 16 bytes of IO
650 * XXX space," some controllers (at least the United
651 * XXX Microelectronics UM8886BF) place it in memory space.
652 * XXX eventually, we should probably read the register and check
653 * XXX which type it is. Either that or 'quirk' certain devices.
654 */
655 sc->sc_dma_ok = (pci_mapreg_map(pa,
656 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 0,
657 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
658 sc->sc_dmat = pa->pa_dmat;
659 if (sc->sc_dma_ok == 0) {
660 printf(", but unused (couldn't map registers)");
661 } else {
662 sc->sc_wdcdev.dma_arg = sc;
663 sc->sc_wdcdev.dma_init = pciide_dma_init;
664 sc->sc_wdcdev.dma_start = pciide_dma_start;
665 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
666 }
667 }
668 int
669 pciide_compat_intr(arg)
670 void *arg;
671 {
672 struct pciide_channel *cp = arg;
673
674 #ifdef DIAGNOSTIC
675 /* should only be called for a compat channel */
676 if (cp->compat == 0)
677 panic("pciide compat intr called for non-compat chan %p\n", cp);
678 #endif
679 return (wdcintr(&cp->wdc_channel));
680 }
681
682 int
683 pciide_pci_intr(arg)
684 void *arg;
685 {
686 struct pciide_softc *sc = arg;
687 struct pciide_channel *cp;
688 struct channel_softc *wdc_cp;
689 int i, rv, crv;
690
691 rv = 0;
692 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
693 cp = &sc->pciide_channels[i];
694 wdc_cp = &cp->wdc_channel;
695
/* If it's a compat channel, skip. */
697 if (cp->compat)
698 continue;
/* if this channel isn't waiting for an intr, skip */
700 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
701 continue;
702
703 crv = wdcintr(wdc_cp);
704 if (crv == 0)
705 ; /* leave rv alone */
706 else if (crv == 1)
707 rv = 1; /* claim the intr */
708 else if (rv == 0) /* crv should be -1 in this case */
709 rv = crv; /* if we've done no better, take it */
710 }
711 return (rv);
712 }
713
714 void
715 pciide_channel_dma_setup(cp)
716 struct pciide_channel *cp;
717 {
718 int drive;
719 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
720 struct ata_drive_datas *drvp;
721
722 for (drive = 0; drive < 2; drive++) {
723 drvp = &cp->wdc_channel.ch_drive[drive];
724 /* If no drive, skip */
725 if ((drvp->drive_flags & DRIVE) == 0)
726 continue;
727 /* setup DMA if needed */
728 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
729 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
730 sc->sc_dma_ok == 0) {
731 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
732 continue;
733 }
734 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
735 != 0) {
736 /* Abort DMA setup */
737 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
738 continue;
739 }
740 }
741 }
742
743 int
744 pciide_dma_table_setup(sc, channel, drive)
745 struct pciide_softc *sc;
746 int channel, drive;
747 {
748 bus_dma_segment_t seg;
749 int error, rseg;
750 const bus_size_t dma_table_size =
751 sizeof(struct idedma_table) * NIDEDMA_TABLES;
752 struct pciide_dma_maps *dma_maps =
753 &sc->pciide_channels[channel].dma_maps[drive];
754
755 /* If table was already allocated, just return */
756 if (dma_maps->dma_table)
757 return 0;
758
759 /* Allocate memory for the DMA tables and map it */
760 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
761 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
762 BUS_DMA_NOWAIT)) != 0) {
763 printf("%s:%d: unable to allocate table DMA for "
764 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
765 channel, drive, error);
766 return error;
767 }
768 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
769 dma_table_size,
770 (caddr_t *)&dma_maps->dma_table,
771 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
printf("%s:%d: unable to map table DMA for "
773 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
774 channel, drive, error);
775 return error;
776 }
777 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
778 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
779 seg.ds_addr), DEBUG_PROBE);
780
781 /* Create and load table DMA map for this disk */
782 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
783 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
784 &dma_maps->dmamap_table)) != 0) {
785 printf("%s:%d: unable to create table DMA map for "
786 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
787 channel, drive, error);
788 return error;
789 }
790 if ((error = bus_dmamap_load(sc->sc_dmat,
791 dma_maps->dmamap_table,
792 dma_maps->dma_table,
793 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
794 printf("%s:%d: unable to load table DMA map for "
795 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
796 channel, drive, error);
797 return error;
798 }
799 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
800 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
801 /* Create a xfer DMA map for this drive */
802 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
803 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
804 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
805 &dma_maps->dmamap_xfer)) != 0) {
806 printf("%s:%d: unable to create xfer DMA map for "
807 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
808 channel, drive, error);
809 return error;
810 }
811 return 0;
812 }
813
814 int
815 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
816 void *v;
817 int channel, drive;
818 void *databuf;
819 size_t datalen;
820 int flags;
821 {
822 struct pciide_softc *sc = v;
823 int error, seg;
824 struct pciide_dma_maps *dma_maps =
825 &sc->pciide_channels[channel].dma_maps[drive];
826
827 error = bus_dmamap_load(sc->sc_dmat,
828 dma_maps->dmamap_xfer,
829 databuf, datalen, NULL, BUS_DMA_NOWAIT);
830 if (error) {
printf("%s:%d: unable to load xfer DMA map for "
832 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
833 channel, drive, error);
834 return error;
835 }
836
837 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
838 dma_maps->dmamap_xfer->dm_mapsize,
839 (flags & WDC_DMA_READ) ?
840 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
841
842 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
843 #ifdef DIAGNOSTIC
844 /* A segment must not cross a 64k boundary */
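/*
 * Example of the check (assuming IDEDMA_BYTE_COUNT_MASK covers a 64KB
 * window): a segment at physical 0x0000f800 with length 0x1000 ends at
 * 0x000107ff, so its first and last bytes fall in different 64KB windows
 * and the panic below fires.
 */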
845 {
846 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
847 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
848 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
849 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
850 printf("pciide_dma: segment %d physical addr 0x%lx"
851 " len 0x%lx not properly aligned\n",
852 seg, phys, len);
853 panic("pciide_dma: buf align");
854 }
855 }
856 #endif
857 dma_maps->dma_table[seg].base_addr =
858 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
859 dma_maps->dma_table[seg].byte_count =
860 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
861 IDEDMA_BYTE_COUNT_MASK);
862 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
863 seg, le32toh(dma_maps->dma_table[seg].byte_count),
864 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
865
866 }
867 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
868 htole32(IDEDMA_BYTE_COUNT_EOT);
869
870 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
871 dma_maps->dmamap_table->dm_mapsize,
872 BUS_DMASYNC_PREWRITE);
873
874 /* Maps are ready. Start DMA function */
875 #ifdef DIAGNOSTIC
876 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
877 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
878 dma_maps->dmamap_table->dm_segs[0].ds_addr);
879 panic("pciide_dma_init: table align");
880 }
881 #endif
882
883 /* Clear status bits */
884 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
885 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
886 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
887 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
888 /* Write table addr */
889 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
890 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
891 dma_maps->dmamap_table->dm_segs[0].ds_addr);
892 /* set read/write */
893 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
894 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
895 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
896 /* remember flags */
897 dma_maps->dma_flags = flags;
898 return 0;
899 }
900
901 void
902 pciide_dma_start(v, channel, drive)
903 void *v;
904 int channel, drive;
905 {
906 struct pciide_softc *sc = v;
907
908 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
909 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
910 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
911 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
912 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
913 }
914
915 int
916 pciide_dma_finish(v, channel, drive, force)
917 void *v;
918 int channel, drive;
919 int force;
920 {
921 struct pciide_softc *sc = v;
922 u_int8_t status;
923 int error = 0;
924 struct pciide_dma_maps *dma_maps =
925 &sc->pciide_channels[channel].dma_maps[drive];
926
927 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
928 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
929 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
930 DEBUG_XFERS);
931
932 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
933 return WDC_DMAST_NOIRQ;
934
935 /* stop DMA channel */
936 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
937 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
938 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
939 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
940
941 /* Clear status bits */
942 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
943 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
944 status);
945
946 /* Unload the map of the data buffer */
947 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
948 dma_maps->dmamap_xfer->dm_mapsize,
949 (dma_maps->dma_flags & WDC_DMA_READ) ?
950 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
951 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
952
953 if ((status & IDEDMA_CTL_ERR) != 0) {
954 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
955 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
956 error |= WDC_DMAST_ERR;
957 }
958
959 if ((status & IDEDMA_CTL_INTR) == 0) {
960 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
961 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
962 drive, status);
963 error |= WDC_DMAST_NOIRQ;
964 }
965
966 if ((status & IDEDMA_CTL_ACT) != 0) {
967 /* data underrun, may be a valid condition for ATAPI */
968 error |= WDC_DMAST_UNDER;
969 }
970 return error;
971 }
972
973 /* some common code used by several chip_map */
974 int
975 pciide_chansetup(sc, channel, interface)
976 struct pciide_softc *sc;
977 int channel;
978 pcireg_t interface;
979 {
980 struct pciide_channel *cp = &sc->pciide_channels[channel];
981 sc->wdc_chanarray[channel] = &cp->wdc_channel;
982 cp->name = PCIIDE_CHANNEL_NAME(channel);
983 cp->wdc_channel.channel = channel;
984 cp->wdc_channel.wdc = &sc->sc_wdcdev;
985 cp->wdc_channel.ch_queue =
986 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
987 if (cp->wdc_channel.ch_queue == NULL) {
printf("%s: %s channel: "
"can't allocate memory for command queue\n",
990 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
991 return 0;
992 }
993 printf("%s: %s channel %s to %s mode\n",
994 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
995 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
996 "configured" : "wired",
997 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
998 "native-PCI" : "compatibility");
999 return 1;
1000 }
1001
1002 /* some common code used by several chip channel_map */
1003 void
1004 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1005 struct pci_attach_args *pa;
1006 struct pciide_channel *cp;
1007 pcireg_t interface;
1008 bus_size_t *cmdsizep, *ctlsizep;
1009 int (*pci_intr) __P((void *));
1010 {
1011 struct channel_softc *wdc_cp = &cp->wdc_channel;
1012
1013 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1014 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1015 pci_intr);
1016 else
1017 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1018 wdc_cp->channel, cmdsizep, ctlsizep);
1019
1020 if (cp->hw_ok == 0)
1021 return;
1022 wdc_cp->data32iot = wdc_cp->cmd_iot;
1023 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1024 wdcattach(wdc_cp);
1025 }
1026
1027 /*
* Generic code to determine whether a channel can be disabled. Returns 1
* if the channel can be disabled, 0 if not.
1030 */
1031 int
1032 pciiide_chan_candisable(cp)
1033 struct pciide_channel *cp;
1034 {
1035 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1036 struct channel_softc *wdc_cp = &cp->wdc_channel;
1037
1038 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1039 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1040 printf("%s: disabling %s channel (no drives)\n",
1041 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1042 cp->hw_ok = 0;
1043 return 1;
1044 }
1045 return 0;
1046 }
1047
1048 /*
1049 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1050 * Set hw_ok=0 on failure
1051 */
1052 void
1053 pciide_map_compat_intr(pa, cp, compatchan, interface)
1054 struct pci_attach_args *pa;
1055 struct pciide_channel *cp;
1056 int compatchan, interface;
1057 {
1058 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1059 struct channel_softc *wdc_cp = &cp->wdc_channel;
1060
1061 if (cp->hw_ok == 0)
1062 return;
1063 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1064 return;
1065
1066 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1067 pa, compatchan, pciide_compat_intr, cp);
1068 if (cp->ih == NULL) {
1069 printf("%s: no compatibility interrupt for use by %s "
1070 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1071 cp->hw_ok = 0;
1072 }
1073 }
1074
1075 void
1076 pciide_print_modes(cp)
1077 struct pciide_channel *cp;
1078 {
1079 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1080 int drive;
1081 struct channel_softc *chp;
1082 struct ata_drive_datas *drvp;
1083
1084 chp = &cp->wdc_channel;
1085 for (drive = 0; drive < 2; drive++) {
1086 drvp = &chp->ch_drive[drive];
1087 if ((drvp->drive_flags & DRIVE) == 0)
1088 continue;
1089 printf("%s(%s:%d:%d): using PIO mode %d",
1090 drvp->drv_softc->dv_xname,
1091 sc->sc_wdcdev.sc_dev.dv_xname,
1092 chp->channel, drive, drvp->PIO_mode);
1093 if (drvp->drive_flags & DRIVE_DMA)
1094 printf(", DMA mode %d", drvp->DMA_mode);
1095 if (drvp->drive_flags & DRIVE_UDMA)
1096 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1097 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1098 printf(" (using DMA data transfers)");
1099 printf("\n");
1100 }
1101 }
1102
1103 void
1104 default_chip_map(sc, pa)
1105 struct pciide_softc *sc;
1106 struct pci_attach_args *pa;
1107 {
1108 struct pciide_channel *cp;
1109 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1110 sc->sc_tag, PCI_CLASS_REG));
1111 pcireg_t csr;
1112 int channel, drive;
1113 struct ata_drive_datas *drvp;
1114 u_int8_t idedma_ctl;
1115 bus_size_t cmdsize, ctlsize;
1116 char *failreason;
1117
1118 if (pciide_chipen(sc, pa) == 0)
1119 return;
1120
1121 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1122 printf("%s: bus-master DMA support present",
1123 sc->sc_wdcdev.sc_dev.dv_xname);
1124 if (sc->sc_pp == &default_product_desc &&
1125 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1126 PCIIDE_OPTIONS_DMA) == 0) {
1127 printf(", but unused (no driver support)");
1128 sc->sc_dma_ok = 0;
1129 } else {
1130 pciide_mapreg_dma(sc, pa);
1131 if (sc->sc_dma_ok != 0)
1132 printf(", used without full driver "
1133 "support");
1134 }
1135 } else {
1136 printf("%s: hardware does not support DMA",
1137 sc->sc_wdcdev.sc_dev.dv_xname);
1138 sc->sc_dma_ok = 0;
1139 }
1140 printf("\n");
1141 if (sc->sc_dma_ok)
1142 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
1143 sc->sc_wdcdev.PIO_cap = 0;
1144 sc->sc_wdcdev.DMA_cap = 0;
1145
1146 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1147 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1148 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1149
1150 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1151 cp = &sc->pciide_channels[channel];
1152 if (pciide_chansetup(sc, channel, interface) == 0)
1153 continue;
1154 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1155 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1156 &ctlsize, pciide_pci_intr);
1157 } else {
1158 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1159 channel, &cmdsize, &ctlsize);
1160 }
1161 if (cp->hw_ok == 0)
1162 continue;
1163 /*
1164 * Check to see if something appears to be there.
1165 */
1166 failreason = NULL;
1167 if (!wdcprobe(&cp->wdc_channel)) {
1168 failreason = "not responding; disabled or no drives?";
1169 goto next;
1170 }
1171 /*
1172 * Now, make sure it's actually attributable to this PCI IDE
1173 * channel by trying to access the channel again while the
1174 * PCI IDE controller's I/O space is disabled. (If the
1175 * channel no longer appears to be there, it belongs to
1176 * this controller.) YUCK!
1177 */
1178 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1179 PCI_COMMAND_STATUS_REG);
1180 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1181 csr & ~PCI_COMMAND_IO_ENABLE);
1182 if (wdcprobe(&cp->wdc_channel))
1183 failreason = "other hardware responding at addresses";
1184 pci_conf_write(sc->sc_pc, sc->sc_tag,
1185 PCI_COMMAND_STATUS_REG, csr);
1186 next:
1187 if (failreason) {
1188 printf("%s: %s channel ignored (%s)\n",
1189 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1190 failreason);
1191 cp->hw_ok = 0;
1192 bus_space_unmap(cp->wdc_channel.cmd_iot,
1193 cp->wdc_channel.cmd_ioh, cmdsize);
1194 bus_space_unmap(cp->wdc_channel.ctl_iot,
1195 cp->wdc_channel.ctl_ioh, ctlsize);
1196 } else {
1197 pciide_map_compat_intr(pa, cp, channel, interface);
1198 }
1199 if (cp->hw_ok) {
1200 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1201 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1202 wdcattach(&cp->wdc_channel);
1203 }
1204 }
1205
1206 if (sc->sc_dma_ok == 0)
1207 return;
1208
1209 /* Allocate DMA maps */
1210 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1211 idedma_ctl = 0;
1212 cp = &sc->pciide_channels[channel];
1213 for (drive = 0; drive < 2; drive++) {
1214 drvp = &cp->wdc_channel.ch_drive[drive];
1215 /* If no drive, skip */
1216 if ((drvp->drive_flags & DRIVE) == 0)
1217 continue;
1218 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1219 continue;
1220 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1221 /* Abort DMA setup */
1222 printf("%s:%d:%d: can't allocate DMA maps, "
1223 "using PIO transfers\n",
1224 sc->sc_wdcdev.sc_dev.dv_xname,
1225 channel, drive);
drvp->drive_flags &= ~DRIVE_DMA;
continue;
}
1228 printf("%s:%d:%d: using DMA data transfers\n",
1229 sc->sc_wdcdev.sc_dev.dv_xname,
1230 channel, drive);
1231 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1232 }
1233 if (idedma_ctl != 0) {
1234 /* Add software bits in status register */
1235 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1236 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1237 idedma_ctl);
1238 }
1239 }
1240 }
1241
1242 void
1243 piix_chip_map(sc, pa)
1244 struct pciide_softc *sc;
1245 struct pci_attach_args *pa;
1246 {
1247 struct pciide_channel *cp;
1248 int channel;
1249 u_int32_t idetim;
1250 bus_size_t cmdsize, ctlsize;
1251
1252 if (pciide_chipen(sc, pa) == 0)
1253 return;
1254
1255 printf("%s: bus-master DMA support present",
1256 sc->sc_wdcdev.sc_dev.dv_xname);
1257 pciide_mapreg_dma(sc, pa);
1258 printf("\n");
1259 if (sc->sc_dma_ok) {
1260 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
1261 switch(sc->sc_pp->ide_product) {
1262 case PCI_PRODUCT_INTEL_82371AB_IDE:
1263 case PCI_PRODUCT_INTEL_82801AA_IDE:
1264 case PCI_PRODUCT_INTEL_82801AB_IDE:
1265 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1266 }
1267 }
1268 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1269 WDC_CAPABILITY_MODE;
1270 sc->sc_wdcdev.PIO_cap = 4;
1271 sc->sc_wdcdev.DMA_cap = 2;
1272 sc->sc_wdcdev.UDMA_cap =
1273 (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) ? 4 : 2;
1274 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1275 sc->sc_wdcdev.set_modes = piix_setup_channel;
1276 else
1277 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1278 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1279 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1280
1281 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1282 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1283 DEBUG_PROBE);
1284 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1285 WDCDEBUG_PRINT((", sidetim=0x%x",
1286 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1287 DEBUG_PROBE);
1288 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
WDCDEBUG_PRINT((", udmareg 0x%x",
1290 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1291 DEBUG_PROBE);
1292 }
1293 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1294 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1295 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1296 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1297 DEBUG_PROBE);
1298 }
1299
1300 }
1301 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1302
1303 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1304 cp = &sc->pciide_channels[channel];
1305 /* PIIX is compat-only */
1306 if (pciide_chansetup(sc, channel, 0) == 0)
1307 continue;
1308 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1309 if ((PIIX_IDETIM_READ(idetim, channel) &
1310 PIIX_IDETIM_IDE) == 0) {
1311 printf("%s: %s channel ignored (disabled)\n",
1312 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1313 continue;
1314 }
1315 /* PIIX are compat-only pciide devices */
1316 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1317 if (cp->hw_ok == 0)
1318 continue;
1319 if (pciiide_chan_candisable(cp)) {
1320 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1321 channel);
1322 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1323 idetim);
1324 }
1325 pciide_map_compat_intr(pa, cp, channel, 0);
1326 if (cp->hw_ok == 0)
1327 continue;
1328 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1329 }
1330
1331 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1332 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1333 DEBUG_PROBE);
1334 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1335 WDCDEBUG_PRINT((", sidetim=0x%x",
1336 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1337 DEBUG_PROBE);
1338 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
WDCDEBUG_PRINT((", udmareg 0x%x",
1340 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1341 DEBUG_PROBE);
1342 }
1343 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1344 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1345 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1346 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1347 DEBUG_PROBE);
1348 }
1349 }
1350 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1351 }
1352
1353 void
1354 piix_setup_channel(chp)
1355 struct channel_softc *chp;
1356 {
1357 u_int8_t mode[2], drive;
1358 u_int32_t oidetim, idetim, idedma_ctl;
1359 struct pciide_channel *cp = (struct pciide_channel*)chp;
1360 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1361 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1362
1363 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1364 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1365 idedma_ctl = 0;
1366
1367 /* set up new idetim: Enable IDE registers decode */
1368 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1369 chp->channel);
1370
1371 /* setup DMA */
1372 pciide_channel_dma_setup(cp);
1373
1374 /*
* Here we have to juggle the drives' modes: the PIIX can't use
* different timings for the master and slave drives, so we need
* to find the best combination.
1378 */
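/*
 * For example (only illustrating the selection below): a master at
 * multiword DMA mode 2 paired with a slave at DMA mode 1 gets both
 * drives programmed for DMA mode 1.
 */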
1379
/* If both drives support DMA, take the lower mode */
1381 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1382 (drvp[1].drive_flags & DRIVE_DMA)) {
1383 mode[0] = mode[1] =
1384 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1385 drvp[0].DMA_mode = mode[0];
1386 drvp[1].DMA_mode = mode[1];
1387 goto ok;
1388 }
1389 /*
* If only one drive supports DMA, use its mode, and put the
* other one in PIO mode 0 if its timings are not compatible.
1392 */
1393 if (drvp[0].drive_flags & DRIVE_DMA) {
1394 mode[0] = drvp[0].DMA_mode;
1395 mode[1] = drvp[1].PIO_mode;
1396 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1397 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1398 mode[1] = drvp[1].PIO_mode = 0;
1399 goto ok;
1400 }
1401 if (drvp[1].drive_flags & DRIVE_DMA) {
1402 mode[1] = drvp[1].DMA_mode;
1403 mode[0] = drvp[0].PIO_mode;
1404 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1405 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1406 mode[0] = drvp[0].PIO_mode = 0;
1407 goto ok;
1408 }
1409 /*
* If neither drive is using DMA, take the lower mode, unless
* one of them is below PIO mode 2.
1412 */
1413 if (drvp[0].PIO_mode < 2) {
1414 mode[0] = drvp[0].PIO_mode = 0;
1415 mode[1] = drvp[1].PIO_mode;
1416 } else if (drvp[1].PIO_mode < 2) {
1417 mode[1] = drvp[1].PIO_mode = 0;
1418 mode[0] = drvp[0].PIO_mode;
1419 } else {
1420 mode[0] = mode[1] =
1421 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1422 drvp[0].PIO_mode = mode[0];
1423 drvp[1].PIO_mode = mode[1];
1424 }
1425 ok: /* The modes are setup */
1426 for (drive = 0; drive < 2; drive++) {
1427 if (drvp[drive].drive_flags & DRIVE_DMA) {
1428 idetim |= piix_setup_idetim_timings(
1429 mode[drive], 1, chp->channel);
1430 goto end;
1431 }
1432 }
/* If we get here, neither drive is using DMA */
1434 if (mode[0] >= 2)
1435 idetim |= piix_setup_idetim_timings(
1436 mode[0], 0, chp->channel);
1437 else
1438 idetim |= piix_setup_idetim_timings(
1439 mode[1], 0, chp->channel);
1440 end: /*
1441 * timing mode is now set up in the controller. Enable
1442 * it per-drive
1443 */
1444 for (drive = 0; drive < 2; drive++) {
1445 /* If no drive, skip */
1446 if ((drvp[drive].drive_flags & DRIVE) == 0)
1447 continue;
1448 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1449 if (drvp[drive].drive_flags & DRIVE_DMA)
1450 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1451 }
1452 if (idedma_ctl != 0) {
1453 /* Add software bits in status register */
1454 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1455 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1456 idedma_ctl);
1457 }
1458 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1459 pciide_print_modes(cp);
1460 }
1461
1462 void
1463 piix3_4_setup_channel(chp)
1464 struct channel_softc *chp;
1465 {
1466 struct ata_drive_datas *drvp;
1467 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1468 struct pciide_channel *cp = (struct pciide_channel*)chp;
1469 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1470 int drive;
1471 int channel = chp->channel;
1472
1473 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1474 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1475 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1476 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1477 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1478 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1479 PIIX_SIDETIM_RTC_MASK(channel));
1480
1481 idedma_ctl = 0;
1482 /* If channel disabled, no need to go further */
1483 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1484 return;
1485 /* set up new idetim: Enable IDE registers decode */
1486 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1487
1488 /* setup DMA if needed */
1489 pciide_channel_dma_setup(cp);
1490
1491 for (drive = 0; drive < 2; drive++) {
1492 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1493 PIIX_UDMATIM_SET(0x3, channel, drive));
1494 drvp = &chp->ch_drive[drive];
1495 /* If no drive, skip */
1496 if ((drvp->drive_flags & DRIVE) == 0)
1497 continue;
1498 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1499 (drvp->drive_flags & DRIVE_UDMA) == 0))
1500 goto pio;
1501
1502 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1503 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1504 ideconf |= PIIX_CONFIG_PINGPONG;
1505 }
1506 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1507 /* setup Ultra/66 */
1508 if (drvp->UDMA_mode > 2 &&
1509 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1510 drvp->UDMA_mode = 2;
1511 if (drvp->UDMA_mode > 2)
1512 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1513 else
1514 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1515 }
1516 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1517 (drvp->drive_flags & DRIVE_UDMA)) {
1518 /* use Ultra/DMA */
1519 drvp->drive_flags &= ~DRIVE_DMA;
1520 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
1521 udmareg |= PIIX_UDMATIM_SET(
1522 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1523 } else {
1524 /* use Multiword DMA */
1525 drvp->drive_flags &= ~DRIVE_UDMA;
1526 if (drive == 0) {
1527 idetim |= piix_setup_idetim_timings(
1528 drvp->DMA_mode, 1, channel);
1529 } else {
1530 sidetim |= piix_setup_sidetim_timings(
1531 drvp->DMA_mode, 1, channel);
idetim = PIIX_IDETIM_SET(idetim,
1533 PIIX_IDETIM_SITRE, channel);
1534 }
1535 }
1536 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1537
1538 pio: /* use PIO mode */
1539 idetim |= piix_setup_idetim_drvs(drvp);
1540 if (drive == 0) {
1541 idetim |= piix_setup_idetim_timings(
1542 drvp->PIO_mode, 0, channel);
1543 } else {
1544 sidetim |= piix_setup_sidetim_timings(
1545 drvp->PIO_mode, 0, channel);
idetim = PIIX_IDETIM_SET(idetim,
1547 PIIX_IDETIM_SITRE, channel);
1548 }
1549 }
1550 if (idedma_ctl != 0) {
1551 /* Add software bits in status register */
1552 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1553 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1554 idedma_ctl);
1555 }
1556 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1557 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1558 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1559 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1560 pciide_print_modes(cp);
1561 }
1562
1563
1564 /* setup ISP and RTC fields, based on mode */
1565 static u_int32_t
1566 piix_setup_idetim_timings(mode, dma, channel)
1567 u_int8_t mode;
1568 u_int8_t dma;
1569 u_int8_t channel;
1570 {
1571
1572 if (dma)
1573 return PIIX_IDETIM_SET(0,
1574 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1575 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1576 channel);
1577 else
1578 return PIIX_IDETIM_SET(0,
1579 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1580 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1581 channel);
1582 }
1583
1584 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1585 static u_int32_t
1586 piix_setup_idetim_drvs(drvp)
1587 struct ata_drive_datas *drvp;
1588 {
1589 u_int32_t ret = 0;
1590 struct channel_softc *chp = drvp->chnl_softc;
1591 u_int8_t channel = chp->channel;
1592 u_int8_t drive = drvp->drive;
1593
1594 /*
* If the drive is using UDMA, the timing setups are independent,
* so just check DMA and PIO here.
1597 */
1598 if (drvp->drive_flags & DRIVE_DMA) {
1599 /* if mode = DMA mode 0, use compatible timings */
1600 if ((drvp->drive_flags & DRIVE_DMA) &&
1601 drvp->DMA_mode == 0) {
1602 drvp->PIO_mode = 0;
1603 return ret;
1604 }
1605 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1606 /*
* If the PIO and DMA timings are the same, use fast timings for
* PIO too, else fall back to compat timings.
1609 */
1610 if ((piix_isp_pio[drvp->PIO_mode] !=
1611 piix_isp_dma[drvp->DMA_mode]) ||
1612 (piix_rtc_pio[drvp->PIO_mode] !=
1613 piix_rtc_dma[drvp->DMA_mode]))
1614 drvp->PIO_mode = 0;
1615 /* if PIO mode <= 2, use compat timings for PIO */
1616 if (drvp->PIO_mode <= 2) {
1617 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1618 channel);
1619 return ret;
1620 }
1621 }
1622
1623 /*
1624 * Now setup PIO modes. If mode < 2, use compat timings.
1625 * Else enable fast timings. Enable IORDY and prefetch/post
1626 * if PIO mode >= 3.
1627 */
1628
1629 if (drvp->PIO_mode < 2)
1630 return ret;
1631
1632 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1633 if (drvp->PIO_mode >= 3) {
1634 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1635 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1636 }
1637 return ret;
1638 }
1639
1640 /* setup values in SIDETIM registers, based on mode */
1641 static u_int32_t
1642 piix_setup_sidetim_timings(mode, dma, channel)
1643 u_int8_t mode;
1644 u_int8_t dma;
1645 u_int8_t channel;
1646 {
1647 if (dma)
1648 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1649 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1650 else
1651 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1652 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1653 }
1654
1655 void
1656 amd756_chip_map(sc, pa)
1657 struct pciide_softc *sc;
1658 struct pci_attach_args *pa;
1659 {
1660 struct pciide_channel *cp;
1661 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1662 sc->sc_tag, PCI_CLASS_REG));
1663 int channel;
1664 pcireg_t chanenable;
1665 bus_size_t cmdsize, ctlsize;
1666
1667 if (pciide_chipen(sc, pa) == 0)
1668 return;
1669 printf("%s: bus-master DMA support present",
1670 sc->sc_wdcdev.sc_dev.dv_xname);
1671 pciide_mapreg_dma(sc, pa);
1672 printf("\n");
1673 if (sc->sc_dma_ok)
1674 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1675 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1676 WDC_CAPABILITY_MODE;
1677 sc->sc_wdcdev.PIO_cap = 4;
1678 sc->sc_wdcdev.DMA_cap = 2;
1679 sc->sc_wdcdev.UDMA_cap = 4;
1680 sc->sc_wdcdev.set_modes = amd756_setup_channel;
1681 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1682 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1683 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1684
1685 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1686 DEBUG_PROBE);
1687 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1688 cp = &sc->pciide_channels[channel];
1689 if (pciide_chansetup(sc, channel, interface) == 0)
1690 continue;
1691
1692 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1693 printf("%s: %s channel ignored (disabled)\n",
1694 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1695 continue;
1696 }
1697 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1698 pciide_pci_intr);
1699
1700 if (pciiide_chan_candisable(cp))
1701 chanenable &= ~AMD756_CHAN_EN(channel);
1702 pciide_map_compat_intr(pa, cp, channel, interface);
1703 if (cp->hw_ok == 0)
1704 continue;
1705
1706 amd756_setup_channel(&cp->wdc_channel);
1707 }
1708 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1709 chanenable);
1710 return;
1711 }
1712
1713 void
1714 amd756_setup_channel(chp)
1715 struct channel_softc *chp;
1716 {
1717 u_int32_t udmatim_reg, datatim_reg;
1718 u_int8_t idedma_ctl;
1719 int mode, drive;
1720 struct ata_drive_datas *drvp;
1721 struct pciide_channel *cp = (struct pciide_channel*)chp;
1722 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1723
1724 idedma_ctl = 0;
1725 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1726 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1727 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1728 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1729
1730 /* setup DMA if needed */
1731 pciide_channel_dma_setup(cp);
1732
1733 for (drive = 0; drive < 2; drive++) {
1734 drvp = &chp->ch_drive[drive];
1735 /* If no drive, skip */
1736 if ((drvp->drive_flags & DRIVE) == 0)
1737 continue;
1738 /* add timing values, setup DMA if needed */
1739 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1740 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1741 mode = drvp->PIO_mode;
1742 goto pio;
1743 }
1744 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1745 (drvp->drive_flags & DRIVE_UDMA)) {
1746 /* use Ultra/DMA */
1747 drvp->drive_flags &= ~DRIVE_DMA;
1748 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1749 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1750 AMD756_UDMA_TIME(chp->channel, drive,
1751 amd756_udma_tim[drvp->UDMA_mode]);
1752 /* can use PIO timings, MW DMA unused */
1753 mode = drvp->PIO_mode;
1754 } else {
1755 /* use Multiword DMA */
1756 drvp->drive_flags &= ~DRIVE_UDMA;
1757 /* mode = min(pio, dma+2) */
1758 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1759 mode = drvp->PIO_mode;
1760 else
1761 mode = drvp->DMA_mode + 2;
1762 }
1763 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1764
1765 pio: /* setup PIO mode */
1766 if (mode <= 2) {
1767 drvp->DMA_mode = 0;
1768 drvp->PIO_mode = 0;
1769 mode = 0;
1770 } else {
1771 drvp->PIO_mode = mode;
1772 drvp->DMA_mode = mode - 2;
1773 }
1774 datatim_reg |=
1775 AMD756_DATATIM_PULSE(chp->channel, drive,
1776 amd756_pio_set[mode]) |
1777 AMD756_DATATIM_RECOV(chp->channel, drive,
1778 amd756_pio_rec[mode]);
1779 }
1780 if (idedma_ctl != 0) {
1781 /* Add software bits in status register */
1782 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1783 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1784 idedma_ctl);
1785 }
1786 pciide_print_modes(cp);
1787 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1788 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1789 }
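
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the AMD756 and the VIA Apollo below appear to program one set of
 * data-port timings that serves both PIO and multiword DMA, hence the
 * "mode = min(pio, dma+2)" selection above and the fallback to the
 * slowest timings when the result is mode 2 or less.  For example:
 *
 *	PIO 4 + MW DMA 2 -> mode = min(4, 2+2) = 4, nothing changes
 *	PIO 4 + MW DMA 1 -> mode = min(4, 1+2) = 3, PIO is lowered to 3
 *	PIO 2, no DMA    -> mode <= 2, drive falls back to PIO 0 timings
 */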
1790
1791 void
1792 apollo_chip_map(sc, pa)
1793 struct pciide_softc *sc;
1794 struct pci_attach_args *pa;
1795 {
1796 struct pciide_channel *cp;
1797 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
1798 sc->sc_tag, PCI_CLASS_REG));
1799 int channel;
1800 u_int32_t ideconf;
1801 bus_size_t cmdsize, ctlsize;
1802
1803 if (pciide_chipen(sc, pa) == 0)
1804 return;
1805 printf("%s: bus-master DMA support present",
1806 sc->sc_wdcdev.sc_dev.dv_xname);
1807 pciide_mapreg_dma(sc, pa);
1808 printf("\n");
1809 if (sc->sc_dma_ok) {
1810 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
1811 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE)
1812 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1813 }
1814 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_MODE;
1815 sc->sc_wdcdev.PIO_cap = 4;
1816 sc->sc_wdcdev.DMA_cap = 2;
1817 sc->sc_wdcdev.UDMA_cap = 2;
1818 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1819 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1820 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1821 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1822
1823 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1824 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1825 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1826 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1827 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1828 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
1829 DEBUG_PROBE);
1830
1831 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1832 cp = &sc->pciide_channels[channel];
1833 if (pciide_chansetup(sc, channel, interface) == 0)
1834 continue;
1835
1836 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
1837 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
1838 printf("%s: %s channel ignored (disabled)\n",
1839 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1840 continue;
1841 }
1842 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1843 pciide_pci_intr);
1844 if (cp->hw_ok == 0)
1845 continue;
1846 if (pciiide_chan_candisable(cp)) {
1847 ideconf &= ~APO_IDECONF_EN(channel);
1848 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
1849 ideconf);
1850 }
1851 pciide_map_compat_intr(pa, cp, channel, interface);
1852
1853 if (cp->hw_ok == 0)
1854 continue;
1855 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
1856 }
1857 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1858 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1859 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
1860 }
1861
1862 void
1863 apollo_setup_channel(chp)
1864 struct channel_softc *chp;
1865 {
1866 u_int32_t udmatim_reg, datatim_reg;
1867 u_int8_t idedma_ctl;
1868 int mode, drive;
1869 struct ata_drive_datas *drvp;
1870 struct pciide_channel *cp = (struct pciide_channel*)chp;
1871 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1872
1873 idedma_ctl = 0;
1874 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
1875 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1876 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
1877 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
1878
1879 /* setup DMA if needed */
1880 pciide_channel_dma_setup(cp);
1881
1882 for (drive = 0; drive < 2; drive++) {
1883 drvp = &chp->ch_drive[drive];
1884 /* If no drive, skip */
1885 if ((drvp->drive_flags & DRIVE) == 0)
1886 continue;
1887 /* add timing values, setup DMA if needed */
1888 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1889 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1890 mode = drvp->PIO_mode;
1891 goto pio;
1892 }
1893 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1894 (drvp->drive_flags & DRIVE_UDMA)) {
1895 /* use Ultra/DMA */
1896 drvp->drive_flags &= ~DRIVE_DMA;
1897 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
1898 APO_UDMA_EN_MTH(chp->channel, drive) |
1899 APO_UDMA_TIME(chp->channel, drive,
1900 apollo_udma_tim[drvp->UDMA_mode]);
1901 /* can use PIO timings, MW DMA unused */
1902 mode = drvp->PIO_mode;
1903 } else {
1904 /* use Multiword DMA */
1905 drvp->drive_flags &= ~DRIVE_UDMA;
1906 /* mode = min(pio, dma+2) */
1907 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1908 mode = drvp->PIO_mode;
1909 else
1910 mode = drvp->DMA_mode + 2;
1911 }
1912 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1913
1914 pio: /* setup PIO mode */
1915 if (mode <= 2) {
1916 drvp->DMA_mode = 0;
1917 drvp->PIO_mode = 0;
1918 mode = 0;
1919 } else {
1920 drvp->PIO_mode = mode;
1921 drvp->DMA_mode = mode - 2;
1922 }
1923 datatim_reg |=
1924 APO_DATATIM_PULSE(chp->channel, drive,
1925 apollo_pio_set[mode]) |
1926 APO_DATATIM_RECOV(chp->channel, drive,
1927 apollo_pio_rec[mode]);
1928 }
1929 if (idedma_ctl != 0) {
1930 /* Add software bits in status register */
1931 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1932 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1933 idedma_ctl);
1934 }
1935 pciide_print_modes(cp);
1936 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
1937 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
1938 }
1939
1940 void
1941 cmd_channel_map(pa, sc, channel)
1942 struct pci_attach_args *pa;
1943 struct pciide_softc *sc;
1944 int channel;
1945 {
1946 struct pciide_channel *cp = &sc->pciide_channels[channel];
1947 bus_size_t cmdsize, ctlsize;
1948 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
1949 int interface =
1950 PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1951
1952 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1953 cp->name = PCIIDE_CHANNEL_NAME(channel);
1954 cp->wdc_channel.channel = channel;
1955 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1956
1957 if (channel > 0) {
1958 cp->wdc_channel.ch_queue =
1959 sc->pciide_channels[0].wdc_channel.ch_queue;
1960 } else {
1961 cp->wdc_channel.ch_queue =
1962 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1963 }
1964 if (cp->wdc_channel.ch_queue == NULL) {
1965 printf("%s %s channel: "
1966 		    "can't allocate memory for command queue\n",
1967 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1968 return;
1969 }
1970
1971 printf("%s: %s channel %s to %s mode\n",
1972 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1973 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1974 "configured" : "wired",
1975 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1976 "native-PCI" : "compatibility");
1977
1978 /*
1979 	 * with a CMD PCI064x, if we get here, the first channel is enabled:
1980 * there's no way to disable the first channel without disabling
1981 * the whole device
1982 */
1983 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
1984 printf("%s: %s channel ignored (disabled)\n",
1985 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1986 return;
1987 }
1988
1989 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
1990 if (cp->hw_ok == 0)
1991 return;
1992 if (channel == 1) {
1993 if (pciiide_chan_candisable(cp)) {
1994 ctrl &= ~CMD_CTRL_2PORT;
1995 pciide_pci_write(pa->pa_pc, pa->pa_tag,
1996 CMD_CTRL, ctrl);
1997 }
1998 }
1999 pciide_map_compat_intr(pa, cp, channel, interface);
2000 }
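
/*
 * Editor's note (design aside, not from the original source): both
 * channels of the CMD064x share the channel 0 command queue (see the
 * channel > 0 case above).  Sharing one channel_queue serializes
 * command issue across the two channels, the usual approach for
 * controllers whose channels cannot safely run transfers at the same
 * time.
 */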
2001
2002 int
2003 cmd_pci_intr(arg)
2004 void *arg;
2005 {
2006 struct pciide_softc *sc = arg;
2007 struct pciide_channel *cp;
2008 struct channel_softc *wdc_cp;
2009 int i, rv, crv;
2010 u_int32_t priirq, secirq;
2011
2012 rv = 0;
2013 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2014 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2015 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2016 cp = &sc->pciide_channels[i];
2017 wdc_cp = &cp->wdc_channel;
2018 		/* If a compat channel, skip. */
2019 if (cp->compat)
2020 continue;
2021 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2022 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2023 crv = wdcintr(wdc_cp);
2024 if (crv == 0)
2025 printf("%s:%d: bogus intr\n",
2026 sc->sc_wdcdev.sc_dev.dv_xname, i);
2027 else
2028 rv = 1;
2029 }
2030 }
2031 return rv;
2032 }
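
/*
 * Editor's note (illustrative, not from the original source):
 * cmd_pci_intr() demultiplexes the shared PCI interrupt by reading
 * per-channel status before calling wdcintr(): CMD_CONF_DRV0_INTR for
 * the primary channel and CMD_ARTTIM23_IRQ for the secondary.
 * Channels attached in compatibility mode are skipped here because
 * their interrupts arrive through the mapping established by
 * pciide_map_compat_intr().
 */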
2033
2034 void
2035 cmd_chip_map(sc, pa)
2036 struct pciide_softc *sc;
2037 struct pci_attach_args *pa;
2038 {
2039 int channel;
2040
2041 /*
2042 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2043 	 * and the base address registers can be disabled at
2044 	 * the hardware level. In this case, the device is wired
2045 * in compat mode and its first channel is always enabled,
2046 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2047 * In fact, it seems that the first channel of the CMD PCI0640
2048 * can't be disabled.
2049 */
2050
2051 #ifdef PCIIDE_CMD064x_DISABLE
2052 if (pciide_chipen(sc, pa) == 0)
2053 return;
2054 #endif
2055
2056 printf("%s: hardware does not support DMA\n",
2057 sc->sc_wdcdev.sc_dev.dv_xname);
2058 sc->sc_dma_ok = 0;
2059
2060 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2061 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2062 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
2063
2064 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2065 cmd_channel_map(pa, sc, channel);
2066 }
2067 }
2068
2069 void
2070 cmd0643_6_chip_map(sc, pa)
2071 struct pciide_softc *sc;
2072 struct pci_attach_args *pa;
2073 {
2074 struct pciide_channel *cp;
2075 int channel;
2076
2077 /*
2078 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2079 	 * and the base address registers can be disabled at
2080 	 * the hardware level. In this case, the device is wired
2081 * in compat mode and its first channel is always enabled,
2082 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2083 * In fact, it seems that the first channel of the CMD PCI0640
2084 * can't be disabled.
2085 */
2086
2087 #ifdef PCIIDE_CMD064x_DISABLE
2088 if (pciide_chipen(sc, pa) == 0)
2089 return;
2090 #endif
2091 printf("%s: bus-master DMA support present",
2092 sc->sc_wdcdev.sc_dev.dv_xname);
2093 pciide_mapreg_dma(sc, pa);
2094 printf("\n");
2095 if (sc->sc_dma_ok)
2096 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2097
2098 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2099 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2100 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2101 WDC_CAPABILITY_MODE;
2102 sc->sc_wdcdev.PIO_cap = 4;
2103 sc->sc_wdcdev.DMA_cap = 2;
2104 sc->sc_wdcdev.set_modes = cmd0643_6_setup_channel;
2105
2106 WDCDEBUG_PRINT(("cmd0643_6_chip_map: old timings reg 0x%x 0x%x\n",
2107 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2108 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2109 DEBUG_PROBE);
2110
2111 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2112 cp = &sc->pciide_channels[channel];
2113 cmd_channel_map(pa, sc, channel);
2114 if (cp->hw_ok == 0)
2115 continue;
2116 cmd0643_6_setup_channel(&cp->wdc_channel);
2117 }
2118 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2119 WDCDEBUG_PRINT(("cmd0643_6_chip_map: timings reg now 0x%x 0x%x\n",
2120 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2121 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2122 DEBUG_PROBE);
2123 }
2124
2125 void
2126 cmd0643_6_setup_channel(chp)
2127 struct channel_softc *chp;
2128 {
2129 struct ata_drive_datas *drvp;
2130 u_int8_t tim;
2131 u_int32_t idedma_ctl;
2132 int drive;
2133 struct pciide_channel *cp = (struct pciide_channel*)chp;
2134 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2135
2136 idedma_ctl = 0;
2137 /* setup DMA if needed */
2138 pciide_channel_dma_setup(cp);
2139
2140 for (drive = 0; drive < 2; drive++) {
2141 drvp = &chp->ch_drive[drive];
2142 /* If no drive, skip */
2143 if ((drvp->drive_flags & DRIVE) == 0)
2144 continue;
2145 /* add timing values, setup DMA if needed */
2146 tim = cmd0643_6_data_tim_pio[drvp->PIO_mode];
2147 if (drvp->drive_flags & DRIVE_DMA) {
2148 /*
2149 * use Multiword DMA.
2150 * Timings will be used for both PIO and DMA, so adjust
2151 * DMA mode if needed
2152 */
2153 if (drvp->PIO_mode >= 3 &&
2154 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2155 drvp->DMA_mode = drvp->PIO_mode - 2;
2156 }
2157 tim = cmd0643_6_data_tim_dma[drvp->DMA_mode];
2158 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2159 }
2160 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2161 CMD_DATA_TIM(chp->channel, drive), tim);
2162 }
2163 if (idedma_ctl != 0) {
2164 /* Add software bits in status register */
2165 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2166 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2167 idedma_ctl);
2168 }
2169 pciide_print_modes(cp);
2170 }
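
/*
 * Editor's note (illustrative, not from the original source): the
 * CMD0643/0646 keep a single data timing value per drive, so with a
 * PIO mode of 3 or more the multiword DMA mode is lowered whenever
 * DMA_mode + 2 would exceed PIO_mode:
 *
 *	PIO 4 + MW DMA 2 -> 2+2 <= 4, DMA_mode stays 2
 *	PIO 3 + MW DMA 2 -> 2+2 >  3, DMA_mode lowered to 3-2 = 1
 */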
2171
2172 void
2173 cy693_chip_map(sc, pa)
2174 struct pciide_softc *sc;
2175 struct pci_attach_args *pa;
2176 {
2177 struct pciide_channel *cp;
2178 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
2179 sc->sc_tag, PCI_CLASS_REG));
2180 int compatchan;
2181 bus_size_t cmdsize, ctlsize;
2182
2183 if (pciide_chipen(sc, pa) == 0)
2184 return;
2185 /*
2186 * this chip has 2 PCI IDE functions, one for primary and one for
2187 * secondary. So we need to call pciide_mapregs_compat() with
2188 * the real channel
2189 */
2190 if (pa->pa_function == 1) {
2191 compatchan = 0;
2192 } else if (pa->pa_function == 2) {
2193 compatchan = 1;
2194 } else {
2195 printf("%s: unexpected PCI function %d\n",
2196 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2197 return;
2198 }
2199 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2200 printf("%s: bus-master DMA support present",
2201 sc->sc_wdcdev.sc_dev.dv_xname);
2202 pciide_mapreg_dma(sc, pa);
2203 } else {
2204 printf("%s: hardware does not support DMA",
2205 sc->sc_wdcdev.sc_dev.dv_xname);
2206 sc->sc_dma_ok = 0;
2207 }
2208 printf("\n");
2209
2210 if (sc->sc_dma_ok)
2211 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2212 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2213 WDC_CAPABILITY_MODE;
2214 sc->sc_wdcdev.PIO_cap = 4;
2215 sc->sc_wdcdev.DMA_cap = 2;
2216 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2217
2218 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2219 sc->sc_wdcdev.nchannels = 1;
2220
2221 /* Only one channel for this chip; if we are here it's enabled */
2222 cp = &sc->pciide_channels[0];
2223 sc->wdc_chanarray[0] = &cp->wdc_channel;
2224 cp->name = PCIIDE_CHANNEL_NAME(0);
2225 cp->wdc_channel.channel = 0;
2226 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2227 cp->wdc_channel.ch_queue =
2228 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2229 if (cp->wdc_channel.ch_queue == NULL) {
2230 printf("%s primary channel: "
2231 		    "can't allocate memory for command queue\n",
2232 sc->sc_wdcdev.sc_dev.dv_xname);
2233 return;
2234 }
2235 printf("%s: primary channel %s to ",
2236 sc->sc_wdcdev.sc_dev.dv_xname,
2237 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2238 "configured" : "wired");
2239 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2240 printf("native-PCI");
2241 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2242 pciide_pci_intr);
2243 } else {
2244 printf("compatibility");
2245 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
2246 &cmdsize, &ctlsize);
2247 }
2248 printf(" mode\n");
2249 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2250 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2251 wdcattach(&cp->wdc_channel);
2252 if (pciiide_chan_candisable(cp)) {
2253 pci_conf_write(sc->sc_pc, sc->sc_tag,
2254 PCI_COMMAND_STATUS_REG, 0);
2255 }
2256 pciide_map_compat_intr(pa, cp, compatchan, interface);
2257 if (cp->hw_ok == 0)
2258 return;
2259 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2260 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2261 cy693_setup_channel(&cp->wdc_channel);
2262 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2263 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2264 }
2265
2266 void
2267 cy693_setup_channel(chp)
2268 struct channel_softc *chp;
2269 {
2270 struct ata_drive_datas *drvp;
2271 int drive;
2272 u_int32_t cy_cmd_ctrl;
2273 u_int32_t idedma_ctl;
2274 struct pciide_channel *cp = (struct pciide_channel*)chp;
2275 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2276 int dma_mode = -1;
2277
2278 cy_cmd_ctrl = idedma_ctl = 0;
2279
2280 /* setup DMA if needed */
2281 pciide_channel_dma_setup(cp);
2282
2283 for (drive = 0; drive < 2; drive++) {
2284 drvp = &chp->ch_drive[drive];
2285 /* If no drive, skip */
2286 if ((drvp->drive_flags & DRIVE) == 0)
2287 continue;
2288 /* add timing values, setup DMA if needed */
2289 if (drvp->drive_flags & DRIVE_DMA) {
2290 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2291 /* use Multiword DMA */
2292 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2293 dma_mode = drvp->DMA_mode;
2294 }
2295 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2296 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2297 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2298 CY_CMD_CTRL_IOW_REC_OFF(drive));
2299 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2300 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2301 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2302 CY_CMD_CTRL_IOR_REC_OFF(drive));
2303 }
2304 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2305 chp->ch_drive[0].DMA_mode = dma_mode;
2306 chp->ch_drive[1].DMA_mode = dma_mode;
2307 pciide_print_modes(cp);
2308 if (idedma_ctl != 0) {
2309 /* Add software bits in status register */
2310 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2311 IDEDMA_CTL, idedma_ctl);
2312 }
2313 }
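
/*
 * Editor's note (illustrative, not from the original source):
 * cy693_setup_channel() applies one DMA mode to both drives: dma_mode
 * tracks the lowest multiword DMA mode requested on the channel and is
 * written back to both ch_drive entries, presumably because the chip
 * cannot program DMA timing per drive.  PIO pulse/recovery values, by
 * contrast, are set per drive in CY_CMD_CTRL.
 */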
2314
2315 void
2316 sis_chip_map(sc, pa)
2317 struct pciide_softc *sc;
2318 struct pci_attach_args *pa;
2319 {
2320 struct pciide_channel *cp;
2321 int channel;
2322 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2323 pcireg_t interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
2324 sc->sc_tag, PCI_CLASS_REG));
2325 pcireg_t rev = PCI_REVISION(pci_conf_read(sc->sc_pc,
2326 sc->sc_tag, PCI_CLASS_REG));
2327 bus_size_t cmdsize, ctlsize;
2328
2329 if (pciide_chipen(sc, pa) == 0)
2330 return;
2331 printf("%s: bus-master DMA support present",
2332 sc->sc_wdcdev.sc_dev.dv_xname);
2333 pciide_mapreg_dma(sc, pa);
2334 printf("\n");
2335 if (sc->sc_dma_ok) {
2336 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2337 if (rev >= 0xd0)
2338 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2339 }
2340
2341 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2342 WDC_CAPABILITY_MODE;
2343 sc->sc_wdcdev.PIO_cap = 4;
2344 sc->sc_wdcdev.DMA_cap = 2;
2345 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2346 sc->sc_wdcdev.UDMA_cap = 2;
2347 sc->sc_wdcdev.set_modes = sis_setup_channel;
2348
2349 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2350 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2351
2352 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2353 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2354 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2355
2356 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2357 cp = &sc->pciide_channels[channel];
2358 if (pciide_chansetup(sc, channel, interface) == 0)
2359 continue;
2360 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2361 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2362 printf("%s: %s channel ignored (disabled)\n",
2363 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2364 continue;
2365 }
2366 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2367 pciide_pci_intr);
2368 if (cp->hw_ok == 0)
2369 continue;
2370 if (pciiide_chan_candisable(cp)) {
2371 if (channel == 0)
2372 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2373 else
2374 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2375 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2376 sis_ctr0);
2377 }
2378 pciide_map_compat_intr(pa, cp, channel, interface);
2379 if (cp->hw_ok == 0)
2380 continue;
2381 sis_setup_channel(&cp->wdc_channel);
2382 }
2383 }
2384
2385 void
2386 sis_setup_channel(chp)
2387 struct channel_softc *chp;
2388 {
2389 struct ata_drive_datas *drvp;
2390 int drive;
2391 u_int32_t sis_tim;
2392 u_int32_t idedma_ctl;
2393 struct pciide_channel *cp = (struct pciide_channel*)chp;
2394 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2395
2396 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2397 "channel %d 0x%x\n", chp->channel,
2398 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2399 DEBUG_PROBE);
2400 sis_tim = 0;
2401 idedma_ctl = 0;
2402 /* setup DMA if needed */
2403 pciide_channel_dma_setup(cp);
2404
2405 for (drive = 0; drive < 2; drive++) {
2406 drvp = &chp->ch_drive[drive];
2407 /* If no drive, skip */
2408 if ((drvp->drive_flags & DRIVE) == 0)
2409 continue;
2410 /* add timing values, setup DMA if needed */
2411 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2412 (drvp->drive_flags & DRIVE_UDMA) == 0)
2413 goto pio;
2414
2415 if (drvp->drive_flags & DRIVE_UDMA) {
2416 /* use Ultra/DMA */
2417 drvp->drive_flags &= ~DRIVE_DMA;
2418 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2419 SIS_TIM_UDMA_TIME_OFF(drive);
2420 sis_tim |= SIS_TIM_UDMA_EN(drive);
2421 } else {
2422 /*
2423 * use Multiword DMA
2424 * Timings will be used for both PIO and DMA,
2425 * so adjust DMA mode if needed
2426 */
2427 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2428 drvp->PIO_mode = drvp->DMA_mode + 2;
2429 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2430 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2431 drvp->PIO_mode - 2 : 0;
2432 if (drvp->DMA_mode == 0)
2433 drvp->PIO_mode = 0;
2434 }
2435 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2436 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2437 SIS_TIM_ACT_OFF(drive);
2438 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2439 SIS_TIM_REC_OFF(drive);
2440 }
2441 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2442 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2443 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2444 if (idedma_ctl != 0) {
2445 /* Add software bits in status register */
2446 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2447 IDEDMA_CTL, idedma_ctl);
2448 }
2449 pciide_print_modes(cp);
2450 }
2451
2452 void
2453 acer_chip_map(sc, pa)
2454 struct pciide_softc *sc;
2455 struct pci_attach_args *pa;
2456 {
2457 struct pciide_channel *cp;
2458 int channel;
2459 pcireg_t cr, interface;
2460 bus_size_t cmdsize, ctlsize;
2461
2462 if (pciide_chipen(sc, pa) == 0)
2463 return;
2464 printf("%s: bus-master DMA support present",
2465 sc->sc_wdcdev.sc_dev.dv_xname);
2466 pciide_mapreg_dma(sc, pa);
2467 printf("\n");
2468 if (sc->sc_dma_ok)
2469 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2470
2471 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2472 WDC_CAPABILITY_MODE;
2473
2474 sc->sc_wdcdev.PIO_cap = 4;
2475 sc->sc_wdcdev.DMA_cap = 2;
2476 sc->sc_wdcdev.UDMA_cap = 2;
2477 sc->sc_wdcdev.set_modes = acer_setup_channel;
2478 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2479 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2480
2481 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2482 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2483 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2484
2485 /* Enable "microsoft register bits" R/W. */
2486 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2487 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2488 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2489 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2490 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2491 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2492 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2493 ~ACER_CHANSTATUSREGS_RO);
2494 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2495 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2496 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2497 /* Don't use cr, re-read the real register content instead */
2498 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2499 PCI_CLASS_REG));
2500
2501 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2502 cp = &sc->pciide_channels[channel];
2503 if (pciide_chansetup(sc, channel, interface) == 0)
2504 continue;
2505 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2506 printf("%s: %s channel ignored (disabled)\n",
2507 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2508 continue;
2509 }
2510 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2511 acer_pci_intr);
2512 if (cp->hw_ok == 0)
2513 continue;
2514 if (pciiide_chan_candisable(cp)) {
2515 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2516 pci_conf_write(sc->sc_pc, sc->sc_tag,
2517 PCI_CLASS_REG, cr);
2518 }
2519 pciide_map_compat_intr(pa, cp, channel, interface);
2520 acer_setup_channel(&cp->wdc_channel);
2521 }
2522 }
2523
2524 void
2525 acer_setup_channel(chp)
2526 struct channel_softc *chp;
2527 {
2528 struct ata_drive_datas *drvp;
2529 int drive;
2530 u_int32_t acer_fifo_udma;
2531 u_int32_t idedma_ctl;
2532 struct pciide_channel *cp = (struct pciide_channel*)chp;
2533 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2534
2535 idedma_ctl = 0;
2536 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2537 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2538 acer_fifo_udma), DEBUG_PROBE);
2539 /* setup DMA if needed */
2540 pciide_channel_dma_setup(cp);
2541
2542 for (drive = 0; drive < 2; drive++) {
2543 drvp = &chp->ch_drive[drive];
2544 /* If no drive, skip */
2545 if ((drvp->drive_flags & DRIVE) == 0)
2546 continue;
2547 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2548 "channel %d drive %d 0x%x\n", chp->channel, drive,
2549 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2550 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2551 /* clear FIFO/DMA mode */
2552 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2553 ACER_UDMA_EN(chp->channel, drive) |
2554 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2555
2556 /* add timing values, setup DMA if needed */
2557 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2558 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2559 acer_fifo_udma |=
2560 ACER_FTH_OPL(chp->channel, drive, 0x1);
2561 goto pio;
2562 }
2563
2564 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2565 if (drvp->drive_flags & DRIVE_UDMA) {
2566 /* use Ultra/DMA */
2567 drvp->drive_flags &= ~DRIVE_DMA;
2568 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2569 acer_fifo_udma |=
2570 ACER_UDMA_TIM(chp->channel, drive,
2571 acer_udma[drvp->UDMA_mode]);
2572 } else {
2573 /*
2574 * use Multiword DMA
2575 * Timings will be used for both PIO and DMA,
2576 * so adjust DMA mode if needed
2577 */
2578 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2579 drvp->PIO_mode = drvp->DMA_mode + 2;
2580 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2581 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2582 drvp->PIO_mode - 2 : 0;
2583 if (drvp->DMA_mode == 0)
2584 drvp->PIO_mode = 0;
2585 }
2586 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2587 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2588 ACER_IDETIM(chp->channel, drive),
2589 acer_pio[drvp->PIO_mode]);
2590 }
2591 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2592 acer_fifo_udma), DEBUG_PROBE);
2593 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2594 if (idedma_ctl != 0) {
2595 /* Add software bits in status register */
2596 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2597 IDEDMA_CTL, idedma_ctl);
2598 }
2599 pciide_print_modes(cp);
2600 }
2601
2602 int
2603 acer_pci_intr(arg)
2604 void *arg;
2605 {
2606 struct pciide_softc *sc = arg;
2607 struct pciide_channel *cp;
2608 struct channel_softc *wdc_cp;
2609 int i, rv, crv;
2610 u_int32_t chids;
2611
2612 rv = 0;
2613 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2614 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2615 cp = &sc->pciide_channels[i];
2616 wdc_cp = &cp->wdc_channel;
2617 		/* If a compat channel, skip. */
2618 if (cp->compat)
2619 continue;
2620 if (chids & ACER_CHIDS_INT(i)) {
2621 crv = wdcintr(wdc_cp);
2622 if (crv == 0)
2623 printf("%s:%d: bogus intr\n",
2624 sc->sc_wdcdev.sc_dev.dv_xname, i);
2625 else
2626 rv = 1;
2627 }
2628 }
2629 return rv;
2630 }
2631
2632 /* A macro to test product */
2633 #define PDC_IS_262(sc) (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66)
2634
2635 void
2636 pdc202xx_chip_map(sc, pa)
2637 struct pciide_softc *sc;
2638 struct pci_attach_args *pa;
2639 {
2640 struct pciide_channel *cp;
2641 int channel;
2642 pcireg_t interface, st, mode;
2643 bus_size_t cmdsize, ctlsize;
2644
2645 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2646 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
2647 DEBUG_PROBE);
2648 if (pciide_chipen(sc, pa) == 0)
2649 return;
2650
2651 /* turn off RAID mode */
2652 st &= ~PDC2xx_STATE_IDERAID;
2653
2654 /*
2655 	 * can't rely on the PCI_CLASS_REG content if the chip was in RAID
2656 	 * mode. We have to fake the interface value.
2657 */
2658 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
2659 if (st & PDC2xx_STATE_NATIVE)
2660 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2661
2662 printf("%s: bus-master DMA support present",
2663 sc->sc_wdcdev.sc_dev.dv_xname);
2664 pciide_mapreg_dma(sc, pa);
2665 printf("\n");
2666 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2667 WDC_CAPABILITY_MODE;
2668 if (sc->sc_dma_ok)
2669 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2670 sc->sc_wdcdev.PIO_cap = 4;
2671 sc->sc_wdcdev.DMA_cap = 2;
2672 if (PDC_IS_262(sc))
2673 sc->sc_wdcdev.UDMA_cap = 4;
2674 else
2675 sc->sc_wdcdev.UDMA_cap = 2;
2676 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
2677 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2678 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2679
2680 /* setup failsafe defaults */
2681 mode = 0;
2682 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
2683 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
2684 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
2685 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
2686 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2687 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
2688 "initial timings 0x%x, now 0x%x\n", channel,
2689 pci_conf_read(sc->sc_pc, sc->sc_tag,
2690 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
2691 DEBUG_PROBE);
2692 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
2693 mode | PDC2xx_TIM_IORDYp);
2694 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
2695 "initial timings 0x%x, now 0x%x\n", channel,
2696 pci_conf_read(sc->sc_pc, sc->sc_tag,
2697 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
2698 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
2699 mode);
2700 }
2701
2702 mode = PDC2xx_SCR_DMA;
2703 if (PDC_IS_262(sc)) {
2704 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
2705 } else {
2706 /* the BIOS set it up this way */
2707 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
2708 }
2709 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
2710 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
2711 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
2712 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
2713 DEBUG_PROBE);
2714 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
2715
2716 /* controller initial state register is OK even without BIOS */
2717 /* Set DMA mode to IDE DMA compatibility */
2718 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
2719 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
2720 DEBUG_PROBE);
2721 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
2722 mode | 0x1);
2723 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
2724 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
2725 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
2726 mode | 0x1);
2727
2728 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2729 cp = &sc->pciide_channels[channel];
2730 if (pciide_chansetup(sc, channel, interface) == 0)
2731 continue;
2732 if ((st & (PDC_IS_262(sc) ?
2733 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
2734 printf("%s: %s channel ignored (disabled)\n",
2735 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2736 continue;
2737 }
2738 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2739 pdc202xx_pci_intr);
2740 if (cp->hw_ok == 0)
2741 continue;
2742 if (pciiide_chan_candisable(cp))
2743 st &= ~(PDC_IS_262(sc) ?
2744 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
2745 pciide_map_compat_intr(pa, cp, channel, interface);
2746 pdc202xx_setup_channel(&cp->wdc_channel);
2747 }
2748 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
2749 DEBUG_PROBE);
2750 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
2751 return;
2752 }
2753
2754 void
2755 pdc202xx_setup_channel(chp)
2756 struct channel_softc *chp;
2757 {
2758 struct ata_drive_datas *drvp;
2759 int drive;
2760 pcireg_t mode, st;
2761 u_int32_t idedma_ctl, scr, atapi;
2762 struct pciide_channel *cp = (struct pciide_channel*)chp;
2763 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2764 int channel = chp->channel;
2765
2766 /* setup DMA if needed */
2767 pciide_channel_dma_setup(cp);
2768
2769 idedma_ctl = 0;
2770
2771 /* Per channel settings */
2772 if (PDC_IS_262(sc)) {
2773 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2774 PDC262_U66);
2775 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2776 		/* Trim UDMA mode */
2777 if ((st & PDC262_STATE_80P(channel)) == 0 ||
2778 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
2779 chp->ch_drive[0].UDMA_mode <= 2) ||
2780 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
2781 chp->ch_drive[1].UDMA_mode <= 2)) {
2782 if (chp->ch_drive[0].UDMA_mode > 2)
2783 chp->ch_drive[0].UDMA_mode = 2;
2784 if (chp->ch_drive[1].UDMA_mode > 2)
2785 chp->ch_drive[1].UDMA_mode = 2;
2786 }
2787 /* Set U66 if needed */
2788 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
2789 chp->ch_drive[0].UDMA_mode > 2) ||
2790 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
2791 chp->ch_drive[1].UDMA_mode > 2))
2792 scr |= PDC262_U66_EN(channel);
2793 else
2794 scr &= ~PDC262_U66_EN(channel);
2795 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2796 PDC262_U66, scr);
2797 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
2798 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
2799 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2800 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
2801 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
2802 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
2803 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2804 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
2805 atapi = 0;
2806 else
2807 atapi = PDC262_ATAPI_UDMA;
2808 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
2809 PDC262_ATAPI(channel), atapi);
2810 }
2811 }
2812 for (drive = 0; drive < 2; drive++) {
2813 drvp = &chp->ch_drive[drive];
2814 /* If no drive, skip */
2815 if ((drvp->drive_flags & DRIVE) == 0)
2816 continue;
2817 mode = 0;
2818 if (drvp->drive_flags & DRIVE_UDMA) {
2819 mode = PDC2xx_TIM_SET_MB(mode,
2820 pdc2xx_udma_mb[drvp->UDMA_mode]);
2821 mode = PDC2xx_TIM_SET_MC(mode,
2822 pdc2xx_udma_mc[drvp->UDMA_mode]);
2823 drvp->drive_flags &= ~DRIVE_DMA;
2824 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2825 } else if (drvp->drive_flags & DRIVE_DMA) {
2826 mode = PDC2xx_TIM_SET_MB(mode,
2827 pdc2xx_dma_mb[drvp->DMA_mode]);
2828 mode = PDC2xx_TIM_SET_MC(mode,
2829 pdc2xx_dma_mc[drvp->DMA_mode]);
2830 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2831 } else {
2832 mode = PDC2xx_TIM_SET_MB(mode,
2833 pdc2xx_dma_mb[0]);
2834 mode = PDC2xx_TIM_SET_MC(mode,
2835 pdc2xx_dma_mc[0]);
2836 }
2837 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
2838 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
2839 if (drvp->drive_flags & DRIVE_ATA)
2840 mode |= PDC2xx_TIM_PRE;
2841 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
2842 if (drvp->PIO_mode >= 3) {
2843 mode |= PDC2xx_TIM_IORDY;
2844 if (drive == 0)
2845 mode |= PDC2xx_TIM_IORDYp;
2846 }
2847 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
2848 "timings 0x%x\n",
2849 sc->sc_wdcdev.sc_dev.dv_xname,
2850 chp->channel, drive, mode), DEBUG_PROBE);
2851 pci_conf_write(sc->sc_pc, sc->sc_tag,
2852 PDC2xx_TIM(chp->channel, drive), mode);
2853 }
2854 if (idedma_ctl != 0) {
2855 /* Add software bits in status register */
2856 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2857 IDEDMA_CTL, idedma_ctl);
2858 }
2859 pciide_print_modes(cp);
2860 }
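
/*
 * Editor's note (illustrative, not from the original source): on the
 * PDC20262 the PDC262_STATE_80P(channel) bit reports an 80-conductor
 * cable.  Ultra/DMA modes above 2 run faster than 33MB/s and need such
 * a cable, so the code above caps both drives at UDMA 2 when the bit
 * is clear (or when one UDMA drive is already limited to mode 2), and
 * only sets PDC262_U66_EN for the channel when a drive actually uses a
 * mode above 2.
 */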
2861
2862 int
2863 pdc202xx_pci_intr(arg)
2864 void *arg;
2865 {
2866 struct pciide_softc *sc = arg;
2867 struct pciide_channel *cp;
2868 struct channel_softc *wdc_cp;
2869 int i, rv, crv;
2870 u_int32_t scr;
2871
2872 rv = 0;
2873 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
2874 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2875 cp = &sc->pciide_channels[i];
2876 wdc_cp = &cp->wdc_channel;
2877 		/* If a compat channel, skip. */
2878 if (cp->compat)
2879 continue;
2880 if (scr & PDC2xx_SCR_INT(i)) {
2881 crv = wdcintr(wdc_cp);
2882 if (crv == 0)
2883 printf("%s:%d: bogus intr\n",
2884 sc->sc_wdcdev.sc_dev.dv_xname, i);
2885 else
2886 rv = 1;
2887 }
2888 }
2889 return rv;
2890 }
2891
2892 void
2893 opti_chip_map(sc, pa)
2894 struct pciide_softc *sc;
2895 struct pci_attach_args *pa;
2896 {
2897 struct pciide_channel *cp;
2898 bus_size_t cmdsize, ctlsize;
2899 pcireg_t interface;
2900 u_int8_t init_ctrl;
2901 int channel;
2902
2903 if (pciide_chipen(sc, pa) == 0)
2904 return;
2905 printf("%s: bus-master DMA support present",
2906 sc->sc_wdcdev.sc_dev.dv_xname);
2907 pciide_mapreg_dma(sc, pa);
2908 printf("\n");
2909
2910 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
2911 sc->sc_wdcdev.PIO_cap = 4;
2912 if (sc->sc_dma_ok) {
2913 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2914 sc->sc_wdcdev.DMA_cap = 2;
2915 }
2916 sc->sc_wdcdev.set_modes = opti_setup_channel;
2917
2918 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2919 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2920
2921 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
2922 OPTI_REG_INIT_CONTROL);
2923
2924 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
2925 sc->sc_tag, PCI_CLASS_REG));
2926
2927 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2928 cp = &sc->pciide_channels[channel];
2929 if (pciide_chansetup(sc, channel, interface) == 0)
2930 continue;
2931 if (channel == 1 &&
2932 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
2933 printf("%s: %s channel ignored (disabled)\n",
2934 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2935 continue;
2936 }
2937 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2938 pciide_pci_intr);
2939 if (cp->hw_ok == 0)
2940 continue;
2941 pciide_map_compat_intr(pa, cp, channel, interface);
2942 if (cp->hw_ok == 0)
2943 continue;
2944 opti_setup_channel(&cp->wdc_channel);
2945 }
2946 }
2947
2948 void
2949 opti_setup_channel(chp)
2950 struct channel_softc *chp;
2951 {
2952 struct ata_drive_datas *drvp;
2953 struct pciide_channel *cp = (struct pciide_channel*)chp;
2954 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2955 int drive;
2956 int mode[2];
2957 u_int8_t rv, mr;
2958
2959 /*
2960 * The `Delay' and `Address Setup Time' fields of the
2961 * Miscellaneous Register are always zero initially.
2962 */
2963 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
2964 mr &= ~(OPTI_MISC_DELAY_MASK |
2965 OPTI_MISC_ADDR_SETUP_MASK |
2966 OPTI_MISC_INDEX_MASK);
2967
2968 /* Prime the control register before setting timing values */
2969 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
2970
2971 /* setup DMA if needed */
2972 pciide_channel_dma_setup(cp);
2973
2974 for (drive = 0; drive < 2; drive++) {
2975 drvp = &chp->ch_drive[drive];
2976 /* If no drive, skip */
2977 if ((drvp->drive_flags & DRIVE) == 0) {
2978 mode[drive] = -1;
2979 continue;
2980 }
2981
2982 if ((drvp->drive_flags & DRIVE_DMA)) {
2983 /*
2984 * Timings will be used for both PIO and DMA,
2985 * so adjust DMA mode if needed
2986 */
2987 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2988 drvp->PIO_mode = drvp->DMA_mode + 2;
2989 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2990 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2991 drvp->PIO_mode - 2 : 0;
2992 if (drvp->DMA_mode == 0)
2993 drvp->PIO_mode = 0;
2994
2995 mode[drive] = drvp->DMA_mode + 5;
2996 } else
2997 mode[drive] = drvp->PIO_mode;
2998
2999 if (drive && mode[0] >= 0 &&
3000 (opti_tim_as[mode[0]] != opti_tim_as[mode[1]])) {
3001 /*
3002 * Can't have two drives using different values
3003 * for `Address Setup Time'.
3004 * Slow down the faster drive to compensate.
3005 */
3006 int d;
3007 d = (opti_tim_as[mode[0]] > opti_tim_as[mode[1]])?0:1;
3008
3009 mode[d] = mode[1-d];
3010 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3011 chp->ch_drive[d].DMA_mode = 0;
3012 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3013 }
3014 }
3015
3016 for (drive = 0; drive < 2; drive++) {
3017 int m;
3018 if ((m = mode[drive]) < 0)
3019 continue;
3020
3021 /* Set the Address Setup Time and select appropriate index */
3022 rv = opti_tim_as[m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3023 rv |= OPTI_MISC_INDEX(drive);
3024 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3025
3026 /* Set the pulse width and recovery timing parameters */
3027 rv = opti_tim_cp[m] << OPTI_PULSE_WIDTH_SHIFT;
3028 rv |= opti_tim_rt[m] << OPTI_RECOVERY_TIME_SHIFT;
3029 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3030 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3031
3032 /* Set the Enhanced Mode register appropriately */
3033 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3034 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3035 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3036 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3037 }
3038
3039 /* Finally, enable the timings */
3040 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3041
3042 pciide_print_modes(cp);
3043 }
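
/*
 * Editor's note (illustrative, not from the original source): per the
 * in-code comment above, the two drives on an OPTi channel cannot use
 * different `Address Setup Time' values, so when the selected modes
 * disagree on opti_tim_as[] the code gives both drives the same mode
 * (slowing the faster one down) and clears that drive's DMA mode
 * before programming the per-drive timing registers.
 */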
3044