1 /* $NetBSD: pciide.c,v 1.85 2000/08/09 13:23:07 drochner Exp $ */
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
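/*
 * Setting wdcdebug_pciide_mask to, e.g., (DEBUG_PROBE | DEBUG_DMA) (from
 * ddb or by patching the kernel) enables the corresponding
 * WDCDEBUG_PRINT() diagnostics below.
 */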
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <dev/pci/pcireg.h>
102 #include <dev/pci/pcivar.h>
103 #include <dev/pci/pcidevs.h>
104 #include <dev/pci/pciidereg.h>
105 #include <dev/pci/pciidevar.h>
106 #include <dev/pci/pciide_piix_reg.h>
107 #include <dev/pci/pciide_amd_reg.h>
108 #include <dev/pci/pciide_apollo_reg.h>
109 #include <dev/pci/pciide_cmd_reg.h>
110 #include <dev/pci/pciide_cy693_reg.h>
111 #include <dev/pci/pciide_sis_reg.h>
112 #include <dev/pci/pciide_acer_reg.h>
113 #include <dev/pci/pciide_pdc202xx_reg.h>
114 #include <dev/pci/pciide_opti_reg.h>
115 #include <dev/pci/pciide_hpt_reg.h>
116 #include <dev/pci/cy82c693var.h>
117
118 #include "opt_pciide.h"
119
120 /* inlines for reading/writing 8-bit PCI registers */
121 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
122 int));
123 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
124 int, u_int8_t));
125
126 static __inline u_int8_t
127 pciide_pci_read(pc, pa, reg)
128 pci_chipset_tag_t pc;
129 pcitag_t pa;
130 int reg;
131 {
132
133 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
134 ((reg & 0x03) * 8) & 0xff);
135 }
136
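/*
 * pciide_pci_write does a read-modify-write of the 32-bit configuration
 * dword containing the target byte, since PCI configuration space is only
 * accessed in 32-bit units here.
 */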
137 static __inline void
138 pciide_pci_write(pc, pa, reg, val)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 u_int8_t val;
143 {
144 pcireg_t pcival;
145
146 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
147 pcival &= ~(0xff << ((reg & 0x03) * 8));
148 pcival |= (val << ((reg & 0x03) * 8));
149 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
150 }
151
152 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
153
154 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
155 void piix_setup_channel __P((struct channel_softc*));
156 void piix3_4_setup_channel __P((struct channel_softc*));
157 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
158 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
159 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
160
161 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
162 void amd756_setup_channel __P((struct channel_softc*));
163
164 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
165 void apollo_setup_channel __P((struct channel_softc*));
166
167 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
169 void cmd0643_9_setup_channel __P((struct channel_softc*));
170 void cmd_channel_map __P((struct pci_attach_args *,
171 struct pciide_softc *, int));
172 int cmd_pci_intr __P((void *));
173 void cmd646_9_irqack __P((struct channel_softc *));
174
175 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
176 void cy693_setup_channel __P((struct channel_softc*));
177
178 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void sis_setup_channel __P((struct channel_softc*));
180
181 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void acer_setup_channel __P((struct channel_softc*));
183 int acer_pci_intr __P((void *));
184
185 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void pdc202xx_setup_channel __P((struct channel_softc*));
187 int pdc202xx_pci_intr __P((void *));
188
189 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
190 void opti_setup_channel __P((struct channel_softc*));
191
192 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void hpt_setup_channel __P((struct channel_softc*));
194 int hpt_pci_intr __P((void *));
195
196 void pciide_channel_dma_setup __P((struct pciide_channel *));
197 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
198 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
199 void pciide_dma_start __P((void*, int, int));
200 int pciide_dma_finish __P((void*, int, int, int));
201 void pciide_irqack __P((struct channel_softc *));
202 void pciide_print_modes __P((struct pciide_channel *));
203
204 struct pciide_product_desc {
205 u_int32_t ide_product;
206 int ide_flags;
207 const char *ide_name;
208 /* map and setup chip, probe drives */
209 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
210 };
211
212 /* Flags for ide_flags */
213 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
214
215 /* Default product description for devices not known to this driver */
216 const struct pciide_product_desc default_product_desc = {
217 0,
218 0,
219 "Generic PCI IDE controller",
220 default_chip_map,
221 };
222
223 const struct pciide_product_desc pciide_intel_products[] = {
224 { PCI_PRODUCT_INTEL_82092AA,
225 0,
226 "Intel 82092AA IDE controller",
227 default_chip_map,
228 },
229 { PCI_PRODUCT_INTEL_82371FB_IDE,
230 0,
231 "Intel 82371FB IDE controller (PIIX)",
232 piix_chip_map,
233 },
234 { PCI_PRODUCT_INTEL_82371SB_IDE,
235 0,
236 "Intel 82371SB IDE Interface (PIIX3)",
237 piix_chip_map,
238 },
239 { PCI_PRODUCT_INTEL_82371AB_IDE,
240 0,
241 "Intel 82371AB IDE controller (PIIX4)",
242 piix_chip_map,
243 },
244 { PCI_PRODUCT_INTEL_82440MX_IDE,
245 0,
246 "Intel 82440MX IDE controller",
247 piix_chip_map
248 },
249 { PCI_PRODUCT_INTEL_82801AA_IDE,
250 0,
251 "Intel 82801AA IDE Controller (ICH)",
252 piix_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82801AB_IDE,
255 0,
256 "Intel 82801AB IDE Controller (ICH0)",
257 piix_chip_map,
258 },
259 { 0,
260 0,
261 NULL,
262 }
263 };
264
265 const struct pciide_product_desc pciide_amd_products[] = {
266 { PCI_PRODUCT_AMD_PBC756_IDE,
267 0,
268 "Advanced Micro Devices AMD756 IDE Controller",
269 amd756_chip_map
270 },
271 { 0,
272 0,
273 NULL,
274 }
275 };
276
277 const struct pciide_product_desc pciide_cmd_products[] = {
278 { PCI_PRODUCT_CMDTECH_640,
279 0,
280 "CMD Technology PCI0640",
281 cmd_chip_map
282 },
283 { PCI_PRODUCT_CMDTECH_643,
284 0,
285 "CMD Technology PCI0643",
286 cmd0643_9_chip_map,
287 },
288 { PCI_PRODUCT_CMDTECH_646,
289 0,
290 "CMD Technology PCI0646",
291 cmd0643_9_chip_map,
292 },
293 { PCI_PRODUCT_CMDTECH_648,
294 IDE_PCI_CLASS_OVERRIDE,
295 "CMD Technology PCI0648",
296 cmd0643_9_chip_map,
297 },
298 { PCI_PRODUCT_CMDTECH_649,
299 IDE_PCI_CLASS_OVERRIDE,
300 "CMD Technology PCI0649",
301 cmd0643_9_chip_map,
302 },
303 { 0,
304 0,
305 NULL,
306 }
307 };
308
309 const struct pciide_product_desc pciide_via_products[] = {
310 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
311 0,
312 "VIA Tech VT82C586 IDE Controller",
313 apollo_chip_map,
314 },
315 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
316 0,
317 "VIA Tech VT82C586A IDE Controller",
318 apollo_chip_map,
319 },
320 { 0,
321 0,
322 NULL,
323 }
324 };
325
326 const struct pciide_product_desc pciide_cypress_products[] = {
327 { PCI_PRODUCT_CONTAQ_82C693,
328 0,
329 "Cypress 82C693 IDE Controller",
330 cy693_chip_map,
331 },
332 { 0,
333 0,
334 NULL,
335 }
336 };
337
338 const struct pciide_product_desc pciide_sis_products[] = {
339 { PCI_PRODUCT_SIS_5597_IDE,
340 0,
341 "Silicon Integrated System 5597/5598 IDE controller",
342 sis_chip_map,
343 },
344 { 0,
345 0,
346 NULL,
347 }
348 };
349
350 const struct pciide_product_desc pciide_acer_products[] = {
351 { PCI_PRODUCT_ALI_M5229,
352 0,
353 "Acer Labs M5229 UDMA IDE Controller",
354 acer_chip_map,
355 },
356 { 0,
357 0,
358 NULL,
359 }
360 };
361
362 const struct pciide_product_desc pciide_promise_products[] = {
363 { PCI_PRODUCT_PROMISE_ULTRA33,
364 IDE_PCI_CLASS_OVERRIDE,
365 "Promise Ultra33/ATA Bus Master IDE Accelerator",
366 pdc202xx_chip_map,
367 },
368 { PCI_PRODUCT_PROMISE_ULTRA66,
369 IDE_PCI_CLASS_OVERRIDE,
370 "Promise Ultra66/ATA Bus Master IDE Accelerator",
371 pdc202xx_chip_map,
372 },
373 { PCI_PRODUCT_PROMISE_ULTRA100,
374 IDE_PCI_CLASS_OVERRIDE,
375 "Promise Ultra100/ATA Bus Master IDE Accelerator",
376 pdc202xx_chip_map,
377 },
378 { 0,
379 0,
380 NULL,
381 }
382 };
383
384 const struct pciide_product_desc pciide_opti_products[] = {
385 { PCI_PRODUCT_OPTI_82C621,
386 0,
387 "OPTi 82c621 PCI IDE controller",
388 opti_chip_map,
389 },
390 { PCI_PRODUCT_OPTI_82C568,
391 0,
392 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
393 opti_chip_map,
394 },
395 { PCI_PRODUCT_OPTI_82D568,
396 0,
397 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
398 opti_chip_map,
399 },
400 { 0,
401 0,
402 NULL,
403 }
404 };
405
406 const struct pciide_product_desc pciide_triones_products[] = {
407 { PCI_PRODUCT_TRIONES_HPT366,
408 IDE_PCI_CLASS_OVERRIDE,
409 "Triones/Highpoint HPT366/370 IDE Controller",
410 hpt_chip_map,
411 },
412 { 0,
413 0,
414 NULL,
415 }
416 };
417
418 struct pciide_vendor_desc {
419 u_int32_t ide_vendor;
420 const struct pciide_product_desc *ide_products;
421 };
422
423 const struct pciide_vendor_desc pciide_vendors[] = {
424 { PCI_VENDOR_INTEL, pciide_intel_products },
425 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
426 { PCI_VENDOR_VIATECH, pciide_via_products },
427 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
428 { PCI_VENDOR_SIS, pciide_sis_products },
429 { PCI_VENDOR_ALI, pciide_acer_products },
430 { PCI_VENDOR_PROMISE, pciide_promise_products },
431 { PCI_VENDOR_AMD, pciide_amd_products },
432 { PCI_VENDOR_OPTI, pciide_opti_products },
433 { PCI_VENDOR_TRIONES, pciide_triones_products },
434 { 0, NULL }
435 };
436
437 /* options passed via the 'flags' config keyword */
438 #define PCIIDE_OPTIONS_DMA 0x01
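/*
 * PCIIDE_OPTIONS_DMA makes default_chip_map() try bus-master DMA even on
 * controllers this driver has no chip-specific support for.
 */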
439
440 int pciide_match __P((struct device *, struct cfdata *, void *));
441 void pciide_attach __P((struct device *, struct device *, void *));
442
443 struct cfattach pciide_ca = {
444 sizeof(struct pciide_softc), pciide_match, pciide_attach
445 };
446 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
447 int pciide_mapregs_compat __P(( struct pci_attach_args *,
448 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
449 int pciide_mapregs_native __P((struct pci_attach_args *,
450 struct pciide_channel *, bus_size_t *, bus_size_t *,
451 int (*pci_intr) __P((void *))));
452 void pciide_mapreg_dma __P((struct pciide_softc *,
453 struct pci_attach_args *));
454 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
455 void pciide_mapchan __P((struct pci_attach_args *,
456 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
457 int (*pci_intr) __P((void *))));
458 int pciide_chan_candisable __P((struct pciide_channel *));
459 void pciide_map_compat_intr __P(( struct pci_attach_args *,
460 struct pciide_channel *, int, int));
461 int pciide_print __P((void *, const char *pnp));
462 int pciide_compat_intr __P((void *));
463 int pciide_pci_intr __P((void *));
464 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
465
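/*
 * Look up the product descriptor for a PCI id. Returns NULL if the vendor
 * or the product is not known to this driver.
 */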
466 const struct pciide_product_desc *
467 pciide_lookup_product(id)
468 u_int32_t id;
469 {
470 const struct pciide_product_desc *pp;
471 const struct pciide_vendor_desc *vp;
472
473 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
474 if (PCI_VENDOR(id) == vp->ide_vendor)
475 break;
476
477 if ((pp = vp->ide_products) == NULL)
478 return NULL;
479
480 for (; pp->ide_name != NULL; pp++)
481 if (PCI_PRODUCT(id) == pp->ide_product)
482 break;
483
484 if (pp->ide_name == NULL)
485 return NULL;
486 return pp;
487 }
488
489 int
490 pciide_match(parent, match, aux)
491 struct device *parent;
492 struct cfdata *match;
493 void *aux;
494 {
495 struct pci_attach_args *pa = aux;
496 const struct pciide_product_desc *pp;
497
498 /*
499 * Check the class register to see that it's a PCI IDE controller.
500 * If it is, we assume that we can deal with it; it _should_
501 * work in a standardized way...
502 */
503 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
504 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
505 return (1);
506 }
507
508 /*
509 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
510 * controllers. Let's see if we can deal with them anyway.
511 */
512 pp = pciide_lookup_product(pa->pa_id);
513 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
514 return (1);
515 }
516
517 return (0);
518 }
519
520 void
521 pciide_attach(parent, self, aux)
522 struct device *parent, *self;
523 void *aux;
524 {
525 struct pci_attach_args *pa = aux;
526 pci_chipset_tag_t pc = pa->pa_pc;
527 pcitag_t tag = pa->pa_tag;
528 struct pciide_softc *sc = (struct pciide_softc *)self;
529 pcireg_t csr;
530 char devinfo[256];
531 const char *displaydev;
532
533 sc->sc_pp = pciide_lookup_product(pa->pa_id);
534 if (sc->sc_pp == NULL) {
535 sc->sc_pp = &default_product_desc;
536 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
537 displaydev = devinfo;
538 } else
539 displaydev = sc->sc_pp->ide_name;
540
541 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
542
543 sc->sc_pc = pa->pa_pc;
544 sc->sc_tag = pa->pa_tag;
545 #ifdef WDCDEBUG
546 if (wdcdebug_pciide_mask & DEBUG_PROBE)
547 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
548 #endif
549 sc->sc_pp->chip_map(sc, pa);
550
551 if (sc->sc_dma_ok) {
552 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
553 csr |= PCI_COMMAND_MASTER_ENABLE;
554 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
555 }
556 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
557 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
558 }
559
560 /* tell whether the chip is enabled or not */
561 int
562 pciide_chipen(sc, pa)
563 struct pciide_softc *sc;
564 struct pci_attach_args *pa;
565 {
566 pcireg_t csr;
567 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
568 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
569 PCI_COMMAND_STATUS_REG);
570 printf("%s: device disabled (at %s)\n",
571 sc->sc_wdcdev.sc_dev.dv_xname,
572 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
573 "device" : "bridge");
574 return 0;
575 }
576 return 1;
577 }
578
579 int
580 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
581 struct pci_attach_args *pa;
582 struct pciide_channel *cp;
583 int compatchan;
584 bus_size_t *cmdsizep, *ctlsizep;
585 {
586 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
587 struct channel_softc *wdc_cp = &cp->wdc_channel;
588
589 cp->compat = 1;
590 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
591 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
592
593 wdc_cp->cmd_iot = pa->pa_iot;
594 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
595 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
596 printf("%s: couldn't map %s channel cmd regs\n",
597 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
598 return (0);
599 }
600
601 wdc_cp->ctl_iot = pa->pa_iot;
602 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
603 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
604 printf("%s: couldn't map %s channel ctl regs\n",
605 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
606 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
607 PCIIDE_COMPAT_CMD_SIZE);
608 return (0);
609 }
610
611 return (1);
612 }
613
614 int
615 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
616 struct pci_attach_args * pa;
617 struct pciide_channel *cp;
618 bus_size_t *cmdsizep, *ctlsizep;
619 int (*pci_intr) __P((void *));
620 {
621 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
622 struct channel_softc *wdc_cp = &cp->wdc_channel;
623 const char *intrstr;
624 pci_intr_handle_t intrhandle;
625
626 cp->compat = 0;
627
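/*
 * The channels of a native-mode controller share a single PCI interrupt,
 * so establish it only once and reuse sc_pci_ih afterwards.
 */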
628 if (sc->sc_pci_ih == NULL) {
629 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
630 pa->pa_intrline, &intrhandle) != 0) {
631 printf("%s: couldn't map native-PCI interrupt\n",
632 sc->sc_wdcdev.sc_dev.dv_xname);
633 return 0;
634 }
635 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
636 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
637 intrhandle, IPL_BIO, pci_intr, sc);
638 if (sc->sc_pci_ih != NULL) {
639 printf("%s: using %s for native-PCI interrupt\n",
640 sc->sc_wdcdev.sc_dev.dv_xname,
641 intrstr ? intrstr : "unknown interrupt");
642 } else {
643 printf("%s: couldn't establish native-PCI interrupt",
644 sc->sc_wdcdev.sc_dev.dv_xname);
645 if (intrstr != NULL)
646 printf(" at %s", intrstr);
647 printf("\n");
648 return 0;
649 }
650 }
651 cp->ih = sc->sc_pci_ih;
652 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
653 PCI_MAPREG_TYPE_IO, 0,
654 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
655 printf("%s: couldn't map %s channel cmd regs\n",
656 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
657 return 0;
658 }
659
660 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
661 PCI_MAPREG_TYPE_IO, 0,
662 &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) {
663 printf("%s: couldn't map %s channel ctl regs\n",
664 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
665 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
666 return 0;
667 }
668 return (1);
669 }
670
671 void
672 pciide_mapreg_dma(sc, pa)
673 struct pciide_softc *sc;
674 struct pci_attach_args *pa;
675 {
676 pcireg_t maptype;
677
678 /*
679 * Map DMA registers
680 *
681 * Note that sc_dma_ok is the right variable to test to see if
682 * DMA can be done. If the interface doesn't support DMA,
683 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
684 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
685 * non-zero if the interface supports DMA and the registers
686 * could be mapped.
687 *
688 * XXX Note that despite the fact that the Bus Master IDE specs
689 * XXX say that "The bus master IDE function uses 16 bytes of IO
690 * XXX space," some controllers (at least the United
691 * XXX Microelectronics UM8886BF) place it in memory space.
692 */
693 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
694 PCIIDE_REG_BUS_MASTER_DMA);
695
696 switch (maptype) {
697 case PCI_MAPREG_TYPE_IO:
698 case PCI_MAPREG_MEM_TYPE_32BIT:
699 sc->sc_dma_ok = (pci_mapreg_map(pa,
700 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
701 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
702 sc->sc_dmat = pa->pa_dmat;
703 if (sc->sc_dma_ok == 0) {
704 printf(", but unused (couldn't map registers)");
705 } else {
706 sc->sc_wdcdev.dma_arg = sc;
707 sc->sc_wdcdev.dma_init = pciide_dma_init;
708 sc->sc_wdcdev.dma_start = pciide_dma_start;
709 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
710 }
711 break;
712
713 default:
714 sc->sc_dma_ok = 0;
715 printf(", but unsupported register maptype (0x%x)", maptype);
716 }
717 }
718
719 int
720 pciide_compat_intr(arg)
721 void *arg;
722 {
723 struct pciide_channel *cp = arg;
724
725 #ifdef DIAGNOSTIC
726 /* should only be called for a compat channel */
727 if (cp->compat == 0)
728 panic("pciide compat intr called for non-compat chan %p\n", cp);
729 #endif
730 return (wdcintr(&cp->wdc_channel));
731 }
732
733 int
734 pciide_pci_intr(arg)
735 void *arg;
736 {
737 struct pciide_softc *sc = arg;
738 struct pciide_channel *cp;
739 struct channel_softc *wdc_cp;
740 int i, rv, crv;
741
742 rv = 0;
743 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
744 cp = &sc->pciide_channels[i];
745 wdc_cp = &cp->wdc_channel;
746
747 /* If this is a compat channel, skip it. */
748 if (cp->compat)
749 continue;
750 /* if this channel isn't waiting for an intr, skip it */
751 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
752 continue;
753
754 crv = wdcintr(wdc_cp);
755 if (crv == 0)
756 ; /* leave rv alone */
757 else if (crv == 1)
758 rv = 1; /* claim the intr */
759 else if (rv == 0) /* crv should be -1 in this case */
760 rv = crv; /* if we've done no better, take it */
761 }
762 return (rv);
763 }
764
765 void
766 pciide_channel_dma_setup(cp)
767 struct pciide_channel *cp;
768 {
769 int drive;
770 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
771 struct ata_drive_datas *drvp;
772
773 for (drive = 0; drive < 2; drive++) {
774 drvp = &cp->wdc_channel.ch_drive[drive];
775 /* If no drive, skip */
776 if ((drvp->drive_flags & DRIVE) == 0)
777 continue;
778 /* setup DMA if needed */
779 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
780 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
781 sc->sc_dma_ok == 0) {
782 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
783 continue;
784 }
785 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
786 != 0) {
787 /* Abort DMA setup */
788 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
789 continue;
790 }
791 }
792 }
793
794 int
795 pciide_dma_table_setup(sc, channel, drive)
796 struct pciide_softc *sc;
797 int channel, drive;
798 {
799 bus_dma_segment_t seg;
800 int error, rseg;
801 const bus_size_t dma_table_size =
802 sizeof(struct idedma_table) * NIDEDMA_TABLES;
803 struct pciide_dma_maps *dma_maps =
804 &sc->pciide_channels[channel].dma_maps[drive];
805
806 /* If table was already allocated, just return */
807 if (dma_maps->dma_table)
808 return 0;
809
810 /* Allocate memory for the DMA tables and map it */
811 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
812 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
813 BUS_DMA_NOWAIT)) != 0) {
814 printf("%s:%d: unable to allocate table DMA for "
815 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
816 channel, drive, error);
817 return error;
818 }
819 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
820 dma_table_size,
821 (caddr_t *)&dma_maps->dma_table,
822 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
823 printf("%s:%d: unable to map table DMA for "
824 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
825 channel, drive, error);
826 return error;
827 }
828 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
829 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
830 seg.ds_addr), DEBUG_PROBE);
831
832 /* Create and load table DMA map for this disk */
833 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
834 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
835 &dma_maps->dmamap_table)) != 0) {
836 printf("%s:%d: unable to create table DMA map for "
837 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
838 channel, drive, error);
839 return error;
840 }
841 if ((error = bus_dmamap_load(sc->sc_dmat,
842 dma_maps->dmamap_table,
843 dma_maps->dma_table,
844 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
845 printf("%s:%d: unable to load table DMA map for "
846 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
847 channel, drive, error);
848 return error;
849 }
850 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
851 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
852 /* Create an xfer DMA map for this drive */
853 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
854 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
855 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
856 &dma_maps->dmamap_xfer)) != 0) {
857 printf("%s:%d: unable to create xfer DMA map for "
858 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
859 channel, drive, error);
860 return error;
861 }
862 return 0;
863 }
864
865 int
866 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
867 void *v;
868 int channel, drive;
869 void *databuf;
870 size_t datalen;
871 int flags;
872 {
873 struct pciide_softc *sc = v;
874 int error, seg;
875 struct pciide_dma_maps *dma_maps =
876 &sc->pciide_channels[channel].dma_maps[drive];
877
878 error = bus_dmamap_load(sc->sc_dmat,
879 dma_maps->dmamap_xfer,
880 databuf, datalen, NULL, BUS_DMA_NOWAIT);
881 if (error) {
882 printf("%s:%d: unable to load xfer DMA map for "
883 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
884 channel, drive, error);
885 return error;
886 }
887
888 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
889 dma_maps->dmamap_xfer->dm_mapsize,
890 (flags & WDC_DMA_READ) ?
891 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
892
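/*
 * Build the bus-master IDE physical region descriptor (PRD) table, one
 * entry per DMA segment of this transfer.
 */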
893 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
894 #ifdef DIAGNOSTIC
895 /* A segment must not cross a 64k boundary */
896 {
897 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
898 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
899 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
900 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
901 printf("pciide_dma: segment %d physical addr 0x%lx"
902 " len 0x%lx not properly aligned\n",
903 seg, phys, len);
904 panic("pciide_dma: buf align");
905 }
906 }
907 #endif
908 dma_maps->dma_table[seg].base_addr =
909 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
910 dma_maps->dma_table[seg].byte_count =
911 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
912 IDEDMA_BYTE_COUNT_MASK);
913 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
914 seg, le32toh(dma_maps->dma_table[seg].byte_count),
915 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
916
917 }
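/* Mark the last descriptor as end-of-table so the controller stops there. */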
918 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
919 htole32(IDEDMA_BYTE_COUNT_EOT);
920
921 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
922 dma_maps->dmamap_table->dm_mapsize,
923 BUS_DMASYNC_PREWRITE);
924
925 /* Maps are ready. Program the controller's DMA registers. */
926 #ifdef DIAGNOSTIC
927 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
928 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
929 dma_maps->dmamap_table->dm_segs[0].ds_addr);
930 panic("pciide_dma_init: table align");
931 }
932 #endif
933
934 /* Clear status bits */
935 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
936 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
937 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
938 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
939 /* Write table addr */
940 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
941 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
942 dma_maps->dmamap_table->dm_segs[0].ds_addr);
943 /* set read/write */
944 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
945 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
946 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
947 /* remember flags */
948 dma_maps->dma_flags = flags;
949 return 0;
950 }
951
952 void
953 pciide_dma_start(v, channel, drive)
954 void *v;
955 int channel, drive;
956 {
957 struct pciide_softc *sc = v;
958
959 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
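/* Set the start bit in the bus-master command register to begin the transfer. */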
960 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
961 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
962 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
963 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
964 }
965
966 int
967 pciide_dma_finish(v, channel, drive, force)
968 void *v;
969 int channel, drive;
970 int force;
971 {
972 struct pciide_softc *sc = v;
973 u_int8_t status;
974 int error = 0;
975 struct pciide_dma_maps *dma_maps =
976 &sc->pciide_channels[channel].dma_maps[drive];
977
978 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
979 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
980 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
981 DEBUG_XFERS);
982
983 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
984 return WDC_DMAST_NOIRQ;
985
986 /* stop DMA channel */
987 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
988 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
989 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
990 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
991
992 /* Unload the map of the data buffer */
993 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
994 dma_maps->dmamap_xfer->dm_mapsize,
995 (dma_maps->dma_flags & WDC_DMA_READ) ?
996 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
997 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
998
999 if ((status & IDEDMA_CTL_ERR) != 0) {
1000 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1001 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1002 error |= WDC_DMAST_ERR;
1003 }
1004
1005 if ((status & IDEDMA_CTL_INTR) == 0) {
1006 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1007 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1008 drive, status);
1009 error |= WDC_DMAST_NOIRQ;
1010 }
1011
1012 if ((status & IDEDMA_CTL_ACT) != 0) {
1013 /* data underrun, may be a valid condition for ATAPI */
1014 error |= WDC_DMAST_UNDER;
1015 }
1016 return error;
1017 }
1018
1019 void
1020 pciide_irqack(chp)
1021 struct channel_softc *chp;
1022 {
1023 struct pciide_channel *cp = (struct pciide_channel*)chp;
1024 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1025
1026 /* clear status bits in IDE DMA registers */
1027 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1028 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1029 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1030 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1031 }
1032
1033 /* some common code used by several chip_map */
1034 int
1035 pciide_chansetup(sc, channel, interface)
1036 struct pciide_softc *sc;
1037 int channel;
1038 pcireg_t interface;
1039 {
1040 struct pciide_channel *cp = &sc->pciide_channels[channel];
1041 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1042 cp->name = PCIIDE_CHANNEL_NAME(channel);
1043 cp->wdc_channel.channel = channel;
1044 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1045 cp->wdc_channel.ch_queue =
1046 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1047 if (cp->wdc_channel.ch_queue == NULL) {
1048 printf("%s %s channel: "
1049 "can't allocate memory for command queue\n",
1050 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1051 return 0;
1052 }
1053 printf("%s: %s channel %s to %s mode\n",
1054 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1055 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1056 "configured" : "wired",
1057 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1058 "native-PCI" : "compatibility");
1059 return 1;
1060 }
1061
1062 /* some common code used by several chip_map functions to map a channel */
1063 void
1064 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1065 struct pci_attach_args *pa;
1066 struct pciide_channel *cp;
1067 pcireg_t interface;
1068 bus_size_t *cmdsizep, *ctlsizep;
1069 int (*pci_intr) __P((void *));
1070 {
1071 struct channel_softc *wdc_cp = &cp->wdc_channel;
1072
1073 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1074 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1075 pci_intr);
1076 else
1077 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1078 wdc_cp->channel, cmdsizep, ctlsizep);
1079
1080 if (cp->hw_ok == 0)
1081 return;
1082 wdc_cp->data32iot = wdc_cp->cmd_iot;
1083 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1084 wdcattach(wdc_cp);
1085 }
1086
1087 /*
1088 * Generic code to determine whether a channel can be disabled. Returns 1
1089 * if the channel can be disabled, 0 if not.
1090 */
1091 int
1092 pciide_chan_candisable(cp)
1093 struct pciide_channel *cp;
1094 {
1095 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1096 struct channel_softc *wdc_cp = &cp->wdc_channel;
1097
1098 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1099 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1100 printf("%s: disabling %s channel (no drives)\n",
1101 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1102 cp->hw_ok = 0;
1103 return 1;
1104 }
1105 return 0;
1106 }
1107
1108 /*
1109 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1110 * Set hw_ok=0 on failure
1111 */
1112 void
1113 pciide_map_compat_intr(pa, cp, compatchan, interface)
1114 struct pci_attach_args *pa;
1115 struct pciide_channel *cp;
1116 int compatchan, interface;
1117 {
1118 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1119 struct channel_softc *wdc_cp = &cp->wdc_channel;
1120
1121 if (cp->hw_ok == 0)
1122 return;
1123 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1124 return;
1125
1126 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1127 pa, compatchan, pciide_compat_intr, cp);
1128 if (cp->ih == NULL) {
1129 printf("%s: no compatibility interrupt for use by %s "
1130 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1131 cp->hw_ok = 0;
1132 }
1133 }
1134
1135 void
1136 pciide_print_modes(cp)
1137 struct pciide_channel *cp;
1138 {
1139 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1140 int drive;
1141 struct channel_softc *chp;
1142 struct ata_drive_datas *drvp;
1143
1144 chp = &cp->wdc_channel;
1145 for (drive = 0; drive < 2; drive++) {
1146 drvp = &chp->ch_drive[drive];
1147 if ((drvp->drive_flags & DRIVE) == 0)
1148 continue;
1149 printf("%s(%s:%d:%d): using PIO mode %d",
1150 drvp->drv_softc->dv_xname,
1151 sc->sc_wdcdev.sc_dev.dv_xname,
1152 chp->channel, drive, drvp->PIO_mode);
1153 if (drvp->drive_flags & DRIVE_DMA)
1154 printf(", DMA mode %d", drvp->DMA_mode);
1155 if (drvp->drive_flags & DRIVE_UDMA)
1156 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1157 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1158 printf(" (using DMA data transfers)");
1159 printf("\n");
1160 }
1161 }
1162
1163 void
1164 default_chip_map(sc, pa)
1165 struct pciide_softc *sc;
1166 struct pci_attach_args *pa;
1167 {
1168 struct pciide_channel *cp;
1169 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1170 pcireg_t csr;
1171 int channel, drive;
1172 struct ata_drive_datas *drvp;
1173 u_int8_t idedma_ctl;
1174 bus_size_t cmdsize, ctlsize;
1175 char *failreason;
1176
1177 if (pciide_chipen(sc, pa) == 0)
1178 return;
1179
1180 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1181 printf("%s: bus-master DMA support present",
1182 sc->sc_wdcdev.sc_dev.dv_xname);
1183 if (sc->sc_pp == &default_product_desc &&
1184 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1185 PCIIDE_OPTIONS_DMA) == 0) {
1186 printf(", but unused (no driver support)");
1187 sc->sc_dma_ok = 0;
1188 } else {
1189 pciide_mapreg_dma(sc, pa);
1190 if (sc->sc_dma_ok != 0)
1191 printf(", used without full driver "
1192 "support");
1193 }
1194 } else {
1195 printf("%s: hardware does not support DMA",
1196 sc->sc_wdcdev.sc_dev.dv_xname);
1197 sc->sc_dma_ok = 0;
1198 }
1199 printf("\n");
1200 if (sc->sc_dma_ok) {
1201 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1202 sc->sc_wdcdev.irqack = pciide_irqack;
1203 }
1204 sc->sc_wdcdev.PIO_cap = 0;
1205 sc->sc_wdcdev.DMA_cap = 0;
1206
1207 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1208 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1209 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1210
1211 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1212 cp = &sc->pciide_channels[channel];
1213 if (pciide_chansetup(sc, channel, interface) == 0)
1214 continue;
1215 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1216 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1217 &ctlsize, pciide_pci_intr);
1218 } else {
1219 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1220 channel, &cmdsize, &ctlsize);
1221 }
1222 if (cp->hw_ok == 0)
1223 continue;
1224 /*
1225 * Check to see if something appears to be there.
1226 */
1227 failreason = NULL;
1228 if (!wdcprobe(&cp->wdc_channel)) {
1229 failreason = "not responding; disabled or no drives?";
1230 goto next;
1231 }
1232 /*
1233 * Now, make sure it's actually attributable to this PCI IDE
1234 * channel by trying to access the channel again while the
1235 * PCI IDE controller's I/O space is disabled. (If the
1236 * channel no longer appears to be there, it belongs to
1237 * this controller.) YUCK!
1238 */
1239 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1240 PCI_COMMAND_STATUS_REG);
1241 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1242 csr & ~PCI_COMMAND_IO_ENABLE);
1243 if (wdcprobe(&cp->wdc_channel))
1244 failreason = "other hardware responding at addresses";
1245 pci_conf_write(sc->sc_pc, sc->sc_tag,
1246 PCI_COMMAND_STATUS_REG, csr);
1247 next:
1248 if (failreason) {
1249 printf("%s: %s channel ignored (%s)\n",
1250 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1251 failreason);
1252 cp->hw_ok = 0;
1253 bus_space_unmap(cp->wdc_channel.cmd_iot,
1254 cp->wdc_channel.cmd_ioh, cmdsize);
1255 bus_space_unmap(cp->wdc_channel.ctl_iot,
1256 cp->wdc_channel.ctl_ioh, ctlsize);
1257 } else {
1258 pciide_map_compat_intr(pa, cp, channel, interface);
1259 }
1260 if (cp->hw_ok) {
1261 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1262 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1263 wdcattach(&cp->wdc_channel);
1264 }
1265 }
1266
1267 if (sc->sc_dma_ok == 0)
1268 return;
1269
1270 /* Allocate DMA maps */
1271 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1272 idedma_ctl = 0;
1273 cp = &sc->pciide_channels[channel];
1274 for (drive = 0; drive < 2; drive++) {
1275 drvp = &cp->wdc_channel.ch_drive[drive];
1276 /* If no drive, skip */
1277 if ((drvp->drive_flags & DRIVE) == 0)
1278 continue;
1279 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1280 continue;
1281 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1282 /* Abort DMA setup */
1283 printf("%s:%d:%d: can't allocate DMA maps, "
1284 "using PIO transfers\n",
1285 sc->sc_wdcdev.sc_dev.dv_xname,
1286 channel, drive);
1287 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1288 }
1289 printf("%s:%d:%d: using DMA data transfers\n",
1290 sc->sc_wdcdev.sc_dev.dv_xname,
1291 channel, drive);
1292 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1293 }
1294 if (idedma_ctl != 0) {
1295 /* Add software bits in status register */
1296 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1297 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1298 idedma_ctl);
1299 }
1300 }
1301 }
1302
1303 void
1304 piix_chip_map(sc, pa)
1305 struct pciide_softc *sc;
1306 struct pci_attach_args *pa;
1307 {
1308 struct pciide_channel *cp;
1309 int channel;
1310 u_int32_t idetim;
1311 bus_size_t cmdsize, ctlsize;
1312
1313 if (pciide_chipen(sc, pa) == 0)
1314 return;
1315
1316 printf("%s: bus-master DMA support present",
1317 sc->sc_wdcdev.sc_dev.dv_xname);
1318 pciide_mapreg_dma(sc, pa);
1319 printf("\n");
1320 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1321 WDC_CAPABILITY_MODE;
1322 if (sc->sc_dma_ok) {
1323 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1324 sc->sc_wdcdev.irqack = pciide_irqack;
1325 switch(sc->sc_pp->ide_product) {
1326 case PCI_PRODUCT_INTEL_82371AB_IDE:
1327 case PCI_PRODUCT_INTEL_82440MX_IDE:
1328 case PCI_PRODUCT_INTEL_82801AA_IDE:
1329 case PCI_PRODUCT_INTEL_82801AB_IDE:
1330 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1331 }
1332 }
1333 sc->sc_wdcdev.PIO_cap = 4;
1334 sc->sc_wdcdev.DMA_cap = 2;
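/* Of these, only the 82801AA (ICH) does Ultra/66 (mode 4); the others top out at Ultra/33. */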
1335 sc->sc_wdcdev.UDMA_cap =
1336 (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) ? 4 : 2;
1337 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1338 sc->sc_wdcdev.set_modes = piix_setup_channel;
1339 else
1340 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1341 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1342 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1343
1344 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1345 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1346 DEBUG_PROBE);
1347 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1348 WDCDEBUG_PRINT((", sidetim=0x%x",
1349 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1350 DEBUG_PROBE);
1351 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1352 WDCDEBUG_PRINT((", udmareg 0x%x",
1353 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1354 DEBUG_PROBE);
1355 }
1356 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1357 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1358 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1359 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1360 DEBUG_PROBE);
1361 }
1362
1363 }
1364 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1365
1366 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1367 cp = &sc->pciide_channels[channel];
1368 /* PIIX is compat-only */
1369 if (pciide_chansetup(sc, channel, 0) == 0)
1370 continue;
1371 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1372 if ((PIIX_IDETIM_READ(idetim, channel) &
1373 PIIX_IDETIM_IDE) == 0) {
1374 printf("%s: %s channel ignored (disabled)\n",
1375 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1376 continue;
1377 }
1378 /* PIIX are compat-only pciide devices */
1379 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1380 if (cp->hw_ok == 0)
1381 continue;
1382 if (pciide_chan_candisable(cp)) {
1383 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1384 channel);
1385 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1386 idetim);
1387 }
1388 pciide_map_compat_intr(pa, cp, channel, 0);
1389 if (cp->hw_ok == 0)
1390 continue;
1391 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1392 }
1393
1394 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1395 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1396 DEBUG_PROBE);
1397 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1398 WDCDEBUG_PRINT((", sidetim=0x%x",
1399 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1400 DEBUG_PROBE);
1401 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1402 WDCDEBUG_PRINT((", udmareg 0x%x",
1403 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1404 DEBUG_PROBE);
1405 }
1406 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1407 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1408 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1409 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1410 DEBUG_PROBE);
1411 }
1412 }
1413 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1414 }
1415
1416 void
1417 piix_setup_channel(chp)
1418 struct channel_softc *chp;
1419 {
1420 u_int8_t mode[2], drive;
1421 u_int32_t oidetim, idetim, idedma_ctl;
1422 struct pciide_channel *cp = (struct pciide_channel*)chp;
1423 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1424 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1425
1426 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1427 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1428 idedma_ctl = 0;
1429
1430 /* set up new idetim: Enable IDE registers decode */
1431 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1432 chp->channel);
1433
1434 /* setup DMA */
1435 pciide_channel_dma_setup(cp);
1436
1437 /*
1438 * Here we have to mess with the drives' modes: the PIIX can't have
1439 * different timings for master and slave drives.
1440 * We need to find the best combination.
1441 */
1442
1443 /* If both drives support DMA, take the lower mode */
1444 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1445 (drvp[1].drive_flags & DRIVE_DMA)) {
1446 mode[0] = mode[1] =
1447 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1448 drvp[0].DMA_mode = mode[0];
1449 drvp[1].DMA_mode = mode[1];
1450 goto ok;
1451 }
1452 /*
1453 * If only one drive supports DMA, use its mode, and
1454 * put the other one in PIO mode 0 if its mode is not compatible
1455 */
1456 if (drvp[0].drive_flags & DRIVE_DMA) {
1457 mode[0] = drvp[0].DMA_mode;
1458 mode[1] = drvp[1].PIO_mode;
1459 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1460 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1461 mode[1] = drvp[1].PIO_mode = 0;
1462 goto ok;
1463 }
1464 if (drvp[1].drive_flags & DRIVE_DMA) {
1465 mode[1] = drvp[1].DMA_mode;
1466 mode[0] = drvp[0].PIO_mode;
1467 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1468 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1469 mode[0] = drvp[0].PIO_mode = 0;
1470 goto ok;
1471 }
1472 /*
1473 * If neither drive is DMA, take the lower mode, unless
1474 * one of them is PIO mode < 2
1475 */
1476 if (drvp[0].PIO_mode < 2) {
1477 mode[0] = drvp[0].PIO_mode = 0;
1478 mode[1] = drvp[1].PIO_mode;
1479 } else if (drvp[1].PIO_mode < 2) {
1480 mode[1] = drvp[1].PIO_mode = 0;
1481 mode[0] = drvp[0].PIO_mode;
1482 } else {
1483 mode[0] = mode[1] =
1484 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1485 drvp[0].PIO_mode = mode[0];
1486 drvp[1].PIO_mode = mode[1];
1487 }
1488 ok: /* The modes are setup */
1489 for (drive = 0; drive < 2; drive++) {
1490 if (drvp[drive].drive_flags & DRIVE_DMA) {
1491 idetim |= piix_setup_idetim_timings(
1492 mode[drive], 1, chp->channel);
1493 goto end;
1494 }
1495 }
1496 /* If we get here, neither drive is using DMA */
1497 if (mode[0] >= 2)
1498 idetim |= piix_setup_idetim_timings(
1499 mode[0], 0, chp->channel);
1500 else
1501 idetim |= piix_setup_idetim_timings(
1502 mode[1], 0, chp->channel);
1503 end: /*
1504 * timing mode is now set up in the controller. Enable
1505 * it per-drive
1506 */
1507 for (drive = 0; drive < 2; drive++) {
1508 /* If no drive, skip */
1509 if ((drvp[drive].drive_flags & DRIVE) == 0)
1510 continue;
1511 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1512 if (drvp[drive].drive_flags & DRIVE_DMA)
1513 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1514 }
1515 if (idedma_ctl != 0) {
1516 /* Add software bits in status register */
1517 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1518 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1519 idedma_ctl);
1520 }
1521 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1522 pciide_print_modes(cp);
1523 }
1524
1525 void
1526 piix3_4_setup_channel(chp)
1527 struct channel_softc *chp;
1528 {
1529 struct ata_drive_datas *drvp;
1530 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1531 struct pciide_channel *cp = (struct pciide_channel*)chp;
1532 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1533 int drive;
1534 int channel = chp->channel;
1535
1536 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1537 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1538 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1539 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1540 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1541 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1542 PIIX_SIDETIM_RTC_MASK(channel));
1543
1544 idedma_ctl = 0;
1545 /* If channel disabled, no need to go further */
1546 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1547 return;
1548 /* set up new idetim: Enable IDE registers decode */
1549 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1550
1551 /* setup DMA if needed */
1552 pciide_channel_dma_setup(cp);
1553
1554 for (drive = 0; drive < 2; drive++) {
1555 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1556 PIIX_UDMATIM_SET(0x3, channel, drive));
1557 drvp = &chp->ch_drive[drive];
1558 /* If no drive, skip */
1559 if ((drvp->drive_flags & DRIVE) == 0)
1560 continue;
1561 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1562 (drvp->drive_flags & DRIVE_UDMA) == 0))
1563 goto pio;
1564
1565 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1566 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1567 ideconf |= PIIX_CONFIG_PINGPONG;
1568 }
1569 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1570 /* setup Ultra/66 */
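/*
 * UDMA modes above 2 need the 80-conductor cable; if the chip does not
 * report one for this drive (PIIX_CONFIG_CR clear, assumed here to be the
 * cable-report bit), fall back to UDMA mode 2.
 */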
1571 if (drvp->UDMA_mode > 2 &&
1572 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1573 drvp->UDMA_mode = 2;
1574 if (drvp->UDMA_mode > 2)
1575 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1576 else
1577 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1578 }
1579 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1580 (drvp->drive_flags & DRIVE_UDMA)) {
1581 /* use Ultra/DMA */
1582 drvp->drive_flags &= ~DRIVE_DMA;
1583 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1584 udmareg |= PIIX_UDMATIM_SET(
1585 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1586 } else {
1587 /* use Multiword DMA */
1588 drvp->drive_flags &= ~DRIVE_UDMA;
1589 if (drive == 0) {
1590 idetim |= piix_setup_idetim_timings(
1591 drvp->DMA_mode, 1, channel);
1592 } else {
1593 sidetim |= piix_setup_sidetim_timings(
1594 drvp->DMA_mode, 1, channel);
1595 idetim = PIIX_IDETIM_SET(idetim,
1596 PIIX_IDETIM_SITRE, channel);
1597 }
1598 }
1599 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1600
1601 pio: /* use PIO mode */
1602 idetim |= piix_setup_idetim_drvs(drvp);
1603 if (drive == 0) {
1604 idetim |= piix_setup_idetim_timings(
1605 drvp->PIO_mode, 0, channel);
1606 } else {
1607 sidetim |= piix_setup_sidetim_timings(
1608 drvp->PIO_mode, 0, channel);
1609 idetim = PIIX_IDETIM_SET(idetim,
1610 PIIX_IDETIM_SITRE, channel);
1611 }
1612 }
1613 if (idedma_ctl != 0) {
1614 /* Add software bits in status register */
1615 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1616 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1617 idedma_ctl);
1618 }
1619 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1620 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1621 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1622 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1623 pciide_print_modes(cp);
1624 }
1625
1626
1627 /* setup ISP and RTC fields, based on mode */
1628 static u_int32_t
1629 piix_setup_idetim_timings(mode, dma, channel)
1630 u_int8_t mode;
1631 u_int8_t dma;
1632 u_int8_t channel;
1633 {
1634
1635 if (dma)
1636 return PIIX_IDETIM_SET(0,
1637 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1638 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1639 channel);
1640 else
1641 return PIIX_IDETIM_SET(0,
1642 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1643 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1644 channel);
1645 }
1646
1647 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1648 static u_int32_t
1649 piix_setup_idetim_drvs(drvp)
1650 struct ata_drive_datas *drvp;
1651 {
1652 u_int32_t ret = 0;
1653 struct channel_softc *chp = drvp->chnl_softc;
1654 u_int8_t channel = chp->channel;
1655 u_int8_t drive = drvp->drive;
1656
1657 /*
1658 * If the drive is using UDMA, the timing setups are independent,
1659 * so just check DMA and PIO here.
1660 */
1661 if (drvp->drive_flags & DRIVE_DMA) {
1662 /* if mode = DMA mode 0, use compatible timings */
1663 if ((drvp->drive_flags & DRIVE_DMA) &&
1664 drvp->DMA_mode == 0) {
1665 drvp->PIO_mode = 0;
1666 return ret;
1667 }
1668 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1669 /*
1670 * If the PIO and DMA timings are the same, use fast timings for PIO
1671 * too; otherwise use compatible timings.
1672 */
1673 if ((piix_isp_pio[drvp->PIO_mode] !=
1674 piix_isp_dma[drvp->DMA_mode]) ||
1675 (piix_rtc_pio[drvp->PIO_mode] !=
1676 piix_rtc_dma[drvp->DMA_mode]))
1677 drvp->PIO_mode = 0;
1678 /* if PIO mode <= 2, use compat timings for PIO */
1679 if (drvp->PIO_mode <= 2) {
1680 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1681 channel);
1682 return ret;
1683 }
1684 }
1685
1686 /*
1687 * Now setup PIO modes. If mode < 2, use compat timings.
1688 * Else enable fast timings. Enable IORDY and prefetch/post
1689 * if PIO mode >= 3.
1690 */
1691
1692 if (drvp->PIO_mode < 2)
1693 return ret;
1694
1695 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1696 if (drvp->PIO_mode >= 3) {
1697 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1698 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1699 }
1700 return ret;
1701 }
1702
1703 /* setup values in SIDETIM registers, based on mode */
1704 static u_int32_t
1705 piix_setup_sidetim_timings(mode, dma, channel)
1706 u_int8_t mode;
1707 u_int8_t dma;
1708 u_int8_t channel;
1709 {
1710 if (dma)
1711 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1712 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1713 else
1714 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1715 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1716 }
1717
1718 void
1719 amd756_chip_map(sc, pa)
1720 struct pciide_softc *sc;
1721 struct pci_attach_args *pa;
1722 {
1723 struct pciide_channel *cp;
1724 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1725 int channel;
1726 pcireg_t chanenable;
1727 bus_size_t cmdsize, ctlsize;
1728
1729 if (pciide_chipen(sc, pa) == 0)
1730 return;
1731 printf("%s: bus-master DMA support present",
1732 sc->sc_wdcdev.sc_dev.dv_xname);
1733 pciide_mapreg_dma(sc, pa);
1734 printf("\n");
1735 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1736 WDC_CAPABILITY_MODE;
1737 if (sc->sc_dma_ok) {
1738 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1739 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1740 sc->sc_wdcdev.irqack = pciide_irqack;
1741 }
1742 sc->sc_wdcdev.PIO_cap = 4;
1743 sc->sc_wdcdev.DMA_cap = 2;
1744 sc->sc_wdcdev.UDMA_cap = 4;
1745 sc->sc_wdcdev.set_modes = amd756_setup_channel;
1746 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1747 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1748 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1749
1750 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1751 DEBUG_PROBE);
1752 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1753 cp = &sc->pciide_channels[channel];
1754 if (pciide_chansetup(sc, channel, interface) == 0)
1755 continue;
1756
1757 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1758 printf("%s: %s channel ignored (disabled)\n",
1759 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1760 continue;
1761 }
1762 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1763 pciide_pci_intr);
1764
1765 if (pciide_chan_candisable(cp))
1766 chanenable &= ~AMD756_CHAN_EN(channel);
1767 pciide_map_compat_intr(pa, cp, channel, interface);
1768 if (cp->hw_ok == 0)
1769 continue;
1770
1771 amd756_setup_channel(&cp->wdc_channel);
1772 }
1773 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1774 chanenable);
1775 return;
1776 }
1777
1778 void
1779 amd756_setup_channel(chp)
1780 struct channel_softc *chp;
1781 {
1782 u_int32_t udmatim_reg, datatim_reg;
1783 u_int8_t idedma_ctl;
1784 int mode, drive;
1785 struct ata_drive_datas *drvp;
1786 struct pciide_channel *cp = (struct pciide_channel*)chp;
1787 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1788 #ifndef PCIIDE_AMD756_ENABLEDMA
1789 int rev = PCI_REVISION(
1790 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1791 #endif
1792
1793 idedma_ctl = 0;
1794 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1795 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1796 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1797 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1798
1799 /* setup DMA if needed */
1800 pciide_channel_dma_setup(cp);
1801
1802 for (drive = 0; drive < 2; drive++) {
1803 drvp = &chp->ch_drive[drive];
1804 /* If no drive, skip */
1805 if ((drvp->drive_flags & DRIVE) == 0)
1806 continue;
1807 /* add timing values, setup DMA if needed */
1808 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1809 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1810 mode = drvp->PIO_mode;
1811 goto pio;
1812 }
1813 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1814 (drvp->drive_flags & DRIVE_UDMA)) {
1815 /* use Ultra/DMA */
1816 drvp->drive_flags &= ~DRIVE_DMA;
1817 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1818 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1819 AMD756_UDMA_TIME(chp->channel, drive,
1820 amd756_udma_tim[drvp->UDMA_mode]);
1821 /* can use PIO timings, MW DMA unused */
1822 mode = drvp->PIO_mode;
1823 } else {
1824 /* use Multiword DMA, but only if revision is OK */
1825 drvp->drive_flags &= ~DRIVE_UDMA;
1826 #ifndef PCIIDE_AMD756_ENABLEDMA
1827 /*
1828			 * The workaround doesn't seem to be necessary with
1829			 * all drives, so it can be disabled by defining
1830			 * PCIIDE_AMD756_ENABLEDMA. The hardware bug causes a
1831			 * hard hang if triggered.
1832 */
1833 if (AMD756_CHIPREV_DISABLEDMA(rev)) {
1834 printf("%s:%d:%d: multi-word DMA disabled due "
1835 "to chip revision\n",
1836 sc->sc_wdcdev.sc_dev.dv_xname,
1837 chp->channel, drive);
1838 mode = drvp->PIO_mode;
1839 drvp->drive_flags &= ~DRIVE_DMA;
1840 goto pio;
1841 }
1842 #endif
1843 /* mode = min(pio, dma+2) */
1844 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1845 mode = drvp->PIO_mode;
1846 else
1847 mode = drvp->DMA_mode + 2;
1848 }
1849 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1850
1851 pio: /* setup PIO mode */
1852 if (mode <= 2) {
1853 drvp->DMA_mode = 0;
1854 drvp->PIO_mode = 0;
1855 mode = 0;
1856 } else {
1857 drvp->PIO_mode = mode;
1858 drvp->DMA_mode = mode - 2;
1859 }
1860 datatim_reg |=
1861 AMD756_DATATIM_PULSE(chp->channel, drive,
1862 amd756_pio_set[mode]) |
1863 AMD756_DATATIM_RECOV(chp->channel, drive,
1864 amd756_pio_rec[mode]);
1865 }
1866 if (idedma_ctl != 0) {
1867 /* Add software bits in status register */
1868 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1869 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1870 idedma_ctl);
1871 }
1872 pciide_print_modes(cp);
1873 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1874 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1875 }
1876
1877 void
1878 apollo_chip_map(sc, pa)
1879 struct pciide_softc *sc;
1880 struct pci_attach_args *pa;
1881 {
1882 struct pciide_channel *cp;
1883 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1884 int channel;
1885 u_int32_t ideconf;
1886 bus_size_t cmdsize, ctlsize;
1887
1888 if (pciide_chipen(sc, pa) == 0)
1889 return;
1890 printf("%s: bus-master DMA support present",
1891 sc->sc_wdcdev.sc_dev.dv_xname);
1892 pciide_mapreg_dma(sc, pa);
1893 printf("\n");
1894 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1895 WDC_CAPABILITY_MODE;
1896 if (sc->sc_dma_ok) {
1897 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1898 sc->sc_wdcdev.irqack = pciide_irqack;
1899 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE)
1900 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1901 }
1902 sc->sc_wdcdev.PIO_cap = 4;
1903 sc->sc_wdcdev.DMA_cap = 2;
1904 sc->sc_wdcdev.UDMA_cap = 2;
1905 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1906 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1907 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1908
1909 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1910 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1911 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1912 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1913 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1914 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
1915 DEBUG_PROBE);
1916
1917 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1918 cp = &sc->pciide_channels[channel];
1919 if (pciide_chansetup(sc, channel, interface) == 0)
1920 continue;
1921
1922 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
1923 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
1924 printf("%s: %s channel ignored (disabled)\n",
1925 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1926 continue;
1927 }
1928 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1929 pciide_pci_intr);
1930 if (cp->hw_ok == 0)
1931 continue;
1932 if (pciide_chan_candisable(cp)) {
1933 ideconf &= ~APO_IDECONF_EN(channel);
1934 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
1935 ideconf);
1936 }
1937 pciide_map_compat_intr(pa, cp, channel, interface);
1938
1939 if (cp->hw_ok == 0)
1940 continue;
1941 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
1942 }
1943 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1944 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1945 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
1946 }
1947
1948 void
1949 apollo_setup_channel(chp)
1950 struct channel_softc *chp;
1951 {
1952 u_int32_t udmatim_reg, datatim_reg;
1953 u_int8_t idedma_ctl;
1954 int mode, drive;
1955 struct ata_drive_datas *drvp;
1956 struct pciide_channel *cp = (struct pciide_channel*)chp;
1957 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1958
1959 idedma_ctl = 0;
1960 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
1961 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1962 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
1963	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
1964
1965 /* setup DMA if needed */
1966 pciide_channel_dma_setup(cp);
1967
1968 for (drive = 0; drive < 2; drive++) {
1969 drvp = &chp->ch_drive[drive];
1970 /* If no drive, skip */
1971 if ((drvp->drive_flags & DRIVE) == 0)
1972 continue;
1973 /* add timing values, setup DMA if needed */
1974 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1975 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1976 mode = drvp->PIO_mode;
1977 goto pio;
1978 }
1979 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1980 (drvp->drive_flags & DRIVE_UDMA)) {
1981 /* use Ultra/DMA */
1982 drvp->drive_flags &= ~DRIVE_DMA;
1983 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
1984 APO_UDMA_EN_MTH(chp->channel, drive) |
1985 APO_UDMA_TIME(chp->channel, drive,
1986 apollo_udma_tim[drvp->UDMA_mode]);
1987 /* can use PIO timings, MW DMA unused */
1988 mode = drvp->PIO_mode;
1989 } else {
1990 /* use Multiword DMA */
1991 drvp->drive_flags &= ~DRIVE_UDMA;
1992 /* mode = min(pio, dma+2) */
1993 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1994 mode = drvp->PIO_mode;
1995 else
1996 mode = drvp->DMA_mode + 2;
1997 }
1998 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1999
2000 pio: /* setup PIO mode */
2001 if (mode <= 2) {
2002 drvp->DMA_mode = 0;
2003 drvp->PIO_mode = 0;
2004 mode = 0;
2005 } else {
2006 drvp->PIO_mode = mode;
2007 drvp->DMA_mode = mode - 2;
2008 }
2009 datatim_reg |=
2010 APO_DATATIM_PULSE(chp->channel, drive,
2011 apollo_pio_set[mode]) |
2012 APO_DATATIM_RECOV(chp->channel, drive,
2013 apollo_pio_rec[mode]);
2014 }
2015 if (idedma_ctl != 0) {
2016 /* Add software bits in status register */
2017 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2018 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2019 idedma_ctl);
2020 }
2021 pciide_print_modes(cp);
2022 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2023 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2024 }
2025
2026 void
2027 cmd_channel_map(pa, sc, channel)
2028 struct pci_attach_args *pa;
2029 struct pciide_softc *sc;
2030 int channel;
2031 {
2032 struct pciide_channel *cp = &sc->pciide_channels[channel];
2033 bus_size_t cmdsize, ctlsize;
2034 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2035 int interface;
2036
2037 /*
2038 * The 0648/0649 can be told to identify as a RAID controller.
2039	 * In this case, we have to fake the interface.
2040 */
2041 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2042 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2043 PCIIDE_INTERFACE_SETTABLE(1);
2044 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2045 CMD_CONF_DSA1)
2046 interface |= PCIIDE_INTERFACE_PCI(0) |
2047 PCIIDE_INTERFACE_PCI(1);
2048 } else {
2049 interface = PCI_INTERFACE(pa->pa_class);
2050 }
2051
2052 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2053 cp->name = PCIIDE_CHANNEL_NAME(channel);
2054 cp->wdc_channel.channel = channel;
2055 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2056
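	/*
	 * Both channels share channel 0's command queue, so commands on
	 * the two channels are serialized (the hardware presumably can't
	 * drive both channels at once).
	 */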
2057 if (channel > 0) {
2058 cp->wdc_channel.ch_queue =
2059 sc->pciide_channels[0].wdc_channel.ch_queue;
2060 } else {
2061 cp->wdc_channel.ch_queue =
2062 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2063 }
2064 if (cp->wdc_channel.ch_queue == NULL) {
2065 printf("%s %s channel: "
2066		    "can't allocate memory for command queue\n",
2067 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2068 return;
2069 }
2070
2071 printf("%s: %s channel %s to %s mode\n",
2072 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2073 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2074 "configured" : "wired",
2075 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2076 "native-PCI" : "compatibility");
2077
2078 /*
2079	 * With a CMD PCI064x, if we get here, the first channel is enabled:
2080 * there's no way to disable the first channel without disabling
2081 * the whole device
2082 */
2083 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2084 printf("%s: %s channel ignored (disabled)\n",
2085 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2086 return;
2087 }
2088
2089 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2090 if (cp->hw_ok == 0)
2091 return;
2092 if (channel == 1) {
2093 if (pciide_chan_candisable(cp)) {
2094 ctrl &= ~CMD_CTRL_2PORT;
2095 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2096 CMD_CTRL, ctrl);
2097 }
2098 }
2099 pciide_map_compat_intr(pa, cp, channel, interface);
2100 }
2101
2102 int
2103 cmd_pci_intr(arg)
2104 void *arg;
2105 {
2106 struct pciide_softc *sc = arg;
2107 struct pciide_channel *cp;
2108 struct channel_softc *wdc_cp;
2109 int i, rv, crv;
2110 u_int32_t priirq, secirq;
2111
2112 rv = 0;
2113 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2114 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2115 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2116 cp = &sc->pciide_channels[i];
2117 wdc_cp = &cp->wdc_channel;
2118 /* If a compat channel skip. */
2119 if (cp->compat)
2120 continue;
2121 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2122 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2123 crv = wdcintr(wdc_cp);
2124 if (crv == 0)
2125 printf("%s:%d: bogus intr\n",
2126 sc->sc_wdcdev.sc_dev.dv_xname, i);
2127 else
2128 rv = 1;
2129 }
2130 }
2131 return rv;
2132 }
2133
2134 void
2135 cmd_chip_map(sc, pa)
2136 struct pciide_softc *sc;
2137 struct pci_attach_args *pa;
2138 {
2139 int channel;
2140
2141 /*
2142 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2143	 * and the base address registers can be disabled at
2144	 * the hardware level. In this case, the device is wired
2145 * in compat mode and its first channel is always enabled,
2146 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2147 * In fact, it seems that the first channel of the CMD PCI0640
2148 * can't be disabled.
2149 */
2150
2151 #ifdef PCIIDE_CMD064x_DISABLE
2152 if (pciide_chipen(sc, pa) == 0)
2153 return;
2154 #endif
2155
2156 printf("%s: hardware does not support DMA\n",
2157 sc->sc_wdcdev.sc_dev.dv_xname);
2158 sc->sc_dma_ok = 0;
2159
2160 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2161 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2162 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2163
2164 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2165 cmd_channel_map(pa, sc, channel);
2166 }
2167 }
2168
2169 void
2170 cmd0643_9_chip_map(sc, pa)
2171 struct pciide_softc *sc;
2172 struct pci_attach_args *pa;
2173 {
2174 struct pciide_channel *cp;
2175 int channel;
2176 int rev = PCI_REVISION(
2177 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2178
2179 /*
2180 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2181	 * and the base address registers can be disabled at
2182	 * the hardware level. In this case, the device is wired
2183 * in compat mode and its first channel is always enabled,
2184 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2185 * In fact, it seems that the first channel of the CMD PCI0640
2186 * can't be disabled.
2187 */
2188
2189 #ifdef PCIIDE_CMD064x_DISABLE
2190 if (pciide_chipen(sc, pa) == 0)
2191 return;
2192 #endif
2193 printf("%s: bus-master DMA support present",
2194 sc->sc_wdcdev.sc_dev.dv_xname);
2195 pciide_mapreg_dma(sc, pa);
2196 printf("\n");
2197 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2198 WDC_CAPABILITY_MODE;
2199 if (sc->sc_dma_ok) {
2200 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2201 switch (sc->sc_pp->ide_product) {
2202 case PCI_PRODUCT_CMDTECH_649:
2203 case PCI_PRODUCT_CMDTECH_648:
2204 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2205 sc->sc_wdcdev.UDMA_cap = 4;
2206 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2207 break;
2208 case PCI_PRODUCT_CMDTECH_646:
2209 if (rev >= CMD0646U2_REV) {
2210 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2211 sc->sc_wdcdev.UDMA_cap = 2;
2212 } else if (rev >= CMD0646U_REV) {
2213 /*
2214 * Linux's driver claims that the 646U is broken
2215 * with UDMA. Only enable it if we know what we're
2216			 * doing.
2217 */
2218 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2219 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2220 sc->sc_wdcdev.UDMA_cap = 2;
2221 #endif
2222			/* explicitly disable UDMA */
2223 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2224 CMD_UDMATIM(0), 0);
2225 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2226 CMD_UDMATIM(1), 0);
2227 }
2228 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2229 break;
2230 default:
2231 sc->sc_wdcdev.irqack = pciide_irqack;
2232 }
2233 }
2234
2235 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2236 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2237 sc->sc_wdcdev.PIO_cap = 4;
2238 sc->sc_wdcdev.DMA_cap = 2;
2239 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2240
2241 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2242 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2243 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2244 DEBUG_PROBE);
2245
2246 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2247 cp = &sc->pciide_channels[channel];
2248 cmd_channel_map(pa, sc, channel);
2249 if (cp->hw_ok == 0)
2250 continue;
2251 cmd0643_9_setup_channel(&cp->wdc_channel);
2252 }
2253 /*
2254 * note - this also makes sure we clear the irq disable and reset
2255 * bits
2256 */
2257 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2258 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2259 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2260 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2261 DEBUG_PROBE);
2262 }
2263
2264 void
2265 cmd0643_9_setup_channel(chp)
2266 struct channel_softc *chp;
2267 {
2268 struct ata_drive_datas *drvp;
2269 u_int8_t tim;
2270 u_int32_t idedma_ctl, udma_reg;
2271 int drive;
2272 struct pciide_channel *cp = (struct pciide_channel*)chp;
2273 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2274
2275 idedma_ctl = 0;
2276 /* setup DMA if needed */
2277 pciide_channel_dma_setup(cp);
2278
2279 for (drive = 0; drive < 2; drive++) {
2280 drvp = &chp->ch_drive[drive];
2281 /* If no drive, skip */
2282 if ((drvp->drive_flags & DRIVE) == 0)
2283 continue;
2284 /* add timing values, setup DMA if needed */
2285 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2286 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2287 if (drvp->drive_flags & DRIVE_UDMA) {
2288 /* UltraDMA on a 646U2, 0648 or 0649 */
2289 udma_reg = pciide_pci_read(sc->sc_pc,
2290 sc->sc_tag, CMD_UDMATIM(chp->channel));
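				/*
				 * UDMA modes above 2 require an 80-wire
				 * cable; fall back to mode 2 when the
				 * CMD_BICSR bit for this channel (cable
				 * detect, presumably) is clear.
				 */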
2291 if (drvp->UDMA_mode > 2 &&
2292 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2293 CMD_BICSR) &
2294 CMD_BICSR_80(chp->channel)) == 0)
2295 drvp->UDMA_mode = 2;
2296 if (drvp->UDMA_mode > 2)
2297 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2298 else if (sc->sc_wdcdev.UDMA_cap > 2)
2299 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2300 udma_reg |= CMD_UDMATIM_UDMA(drive);
2301 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2302 CMD_UDMATIM_TIM_OFF(drive));
2303 udma_reg |=
2304 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2305 CMD_UDMATIM_TIM_OFF(drive));
2306 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2307 CMD_UDMATIM(chp->channel), udma_reg);
2308 } else {
2309 /*
2310 * use Multiword DMA.
2311 * Timings will be used for both PIO and DMA,
2312 * so adjust DMA mode if needed
2313 * if we have a 0646U2/8/9, turn off UDMA
2314 */
2315 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2316 udma_reg = pciide_pci_read(sc->sc_pc,
2317 sc->sc_tag,
2318 CMD_UDMATIM(chp->channel));
2319 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2320 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2321 CMD_UDMATIM(chp->channel),
2322 udma_reg);
2323 }
2324 if (drvp->PIO_mode >= 3 &&
2325 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2326 drvp->DMA_mode = drvp->PIO_mode - 2;
2327 }
2328 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2329 }
2330 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2331 }
2332 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2333 CMD_DATA_TIM(chp->channel, drive), tim);
2334 }
2335 if (idedma_ctl != 0) {
2336 /* Add software bits in status register */
2337 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2338 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2339 idedma_ctl);
2340 }
2341 pciide_print_modes(cp);
2342 }
2343
2344 void
2345 cmd646_9_irqack(chp)
2346 struct channel_softc *chp;
2347 {
2348 u_int32_t priirq, secirq;
2349 struct pciide_channel *cp = (struct pciide_channel*)chp;
2350 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2351
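	/*
	 * Acknowledge the interrupt: writing the register value back
	 * apparently clears the latched interrupt bit for this channel.
	 */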
2352 if (chp->channel == 0) {
2353 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2354 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2355 } else {
2356 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2357 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2358 }
2359 pciide_irqack(chp);
2360 }
2361
2362 void
2363 cy693_chip_map(sc, pa)
2364 struct pciide_softc *sc;
2365 struct pci_attach_args *pa;
2366 {
2367 struct pciide_channel *cp;
2368 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2369 bus_size_t cmdsize, ctlsize;
2370
2371 if (pciide_chipen(sc, pa) == 0)
2372 return;
2373 /*
2374 * this chip has 2 PCI IDE functions, one for primary and one for
2375 * secondary. So we need to call pciide_mapregs_compat() with
2376 * the real channel
2377 */
2378 if (pa->pa_function == 1) {
2379 sc->sc_cy_compatchan = 0;
2380 } else if (pa->pa_function == 2) {
2381 sc->sc_cy_compatchan = 1;
2382 } else {
2383 printf("%s: unexpected PCI function %d\n",
2384 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2385 return;
2386 }
2387 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2388 printf("%s: bus-master DMA support present",
2389 sc->sc_wdcdev.sc_dev.dv_xname);
2390 pciide_mapreg_dma(sc, pa);
2391 } else {
2392 printf("%s: hardware does not support DMA",
2393 sc->sc_wdcdev.sc_dev.dv_xname);
2394 sc->sc_dma_ok = 0;
2395 }
2396 printf("\n");
2397
2398 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2399 if (sc->sc_cy_handle == NULL) {
2400 printf("%s: unable to map hyperCache control registers\n",
2401 sc->sc_wdcdev.sc_dev.dv_xname);
2402 sc->sc_dma_ok = 0;
2403 }
2404
2405 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2406 WDC_CAPABILITY_MODE;
2407 if (sc->sc_dma_ok) {
2408 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2409 sc->sc_wdcdev.irqack = pciide_irqack;
2410 }
2411 sc->sc_wdcdev.PIO_cap = 4;
2412 sc->sc_wdcdev.DMA_cap = 2;
2413 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2414
2415 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2416 sc->sc_wdcdev.nchannels = 1;
2417
2418 /* Only one channel for this chip; if we are here it's enabled */
2419 cp = &sc->pciide_channels[0];
2420 sc->wdc_chanarray[0] = &cp->wdc_channel;
2421 cp->name = PCIIDE_CHANNEL_NAME(0);
2422 cp->wdc_channel.channel = 0;
2423 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2424 cp->wdc_channel.ch_queue =
2425 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2426 if (cp->wdc_channel.ch_queue == NULL) {
2427 printf("%s primary channel: "
2428		    "can't allocate memory for command queue\n",
2429 sc->sc_wdcdev.sc_dev.dv_xname);
2430 return;
2431 }
2432 printf("%s: primary channel %s to ",
2433 sc->sc_wdcdev.sc_dev.dv_xname,
2434 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2435 "configured" : "wired");
2436 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2437 printf("native-PCI");
2438 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2439 pciide_pci_intr);
2440 } else {
2441 printf("compatibility");
2442 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2443 &cmdsize, &ctlsize);
2444 }
2445 printf(" mode\n");
2446 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2447 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2448 wdcattach(&cp->wdc_channel);
2449 if (pciide_chan_candisable(cp)) {
2450 pci_conf_write(sc->sc_pc, sc->sc_tag,
2451 PCI_COMMAND_STATUS_REG, 0);
2452 }
2453 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2454 if (cp->hw_ok == 0)
2455 return;
2456 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2457 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2458 cy693_setup_channel(&cp->wdc_channel);
2459 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2460 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2461 }
2462
2463 void
2464 cy693_setup_channel(chp)
2465 struct channel_softc *chp;
2466 {
2467 struct ata_drive_datas *drvp;
2468 int drive;
2469 u_int32_t cy_cmd_ctrl;
2470 u_int32_t idedma_ctl;
2471 struct pciide_channel *cp = (struct pciide_channel*)chp;
2472 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2473 int dma_mode = -1;
2474
2475 cy_cmd_ctrl = idedma_ctl = 0;
2476
2477 /* setup DMA if needed */
2478 pciide_channel_dma_setup(cp);
2479
2480 for (drive = 0; drive < 2; drive++) {
2481 drvp = &chp->ch_drive[drive];
2482 /* If no drive, skip */
2483 if ((drvp->drive_flags & DRIVE) == 0)
2484 continue;
2485 /* add timing values, setup DMA if needed */
2486 if (drvp->drive_flags & DRIVE_DMA) {
2487 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2488 /* use Multiword DMA */
2489 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2490 dma_mode = drvp->DMA_mode;
2491 }
2492 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2493 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2494 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2495 CY_CMD_CTRL_IOW_REC_OFF(drive));
2496 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2497 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2498 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2499 CY_CMD_CTRL_IOR_REC_OFF(drive));
2500 }
2501 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
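	/* only one DMA timing per channel here: both drives get the slowest mode */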
2502 chp->ch_drive[0].DMA_mode = dma_mode;
2503 chp->ch_drive[1].DMA_mode = dma_mode;
2504
2505 if (dma_mode == -1)
2506 dma_mode = 0;
2507
2508 if (sc->sc_cy_handle != NULL) {
2509 /* Note: `multiple' is implied. */
2510 cy82c693_write(sc->sc_cy_handle,
2511 (sc->sc_cy_compatchan == 0) ?
2512 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2513 }
2514
2515 pciide_print_modes(cp);
2516
2517 if (idedma_ctl != 0) {
2518 /* Add software bits in status register */
2519 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2520 IDEDMA_CTL, idedma_ctl);
2521 }
2522 }
2523
2524 void
2525 sis_chip_map(sc, pa)
2526 struct pciide_softc *sc;
2527 struct pci_attach_args *pa;
2528 {
2529 struct pciide_channel *cp;
2530 int channel;
2531 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2532 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2533 pcireg_t rev = PCI_REVISION(pa->pa_class);
2534 bus_size_t cmdsize, ctlsize;
2535
2536 if (pciide_chipen(sc, pa) == 0)
2537 return;
2538 printf("%s: bus-master DMA support present",
2539 sc->sc_wdcdev.sc_dev.dv_xname);
2540 pciide_mapreg_dma(sc, pa);
2541 printf("\n");
2542 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2543 WDC_CAPABILITY_MODE;
2544 if (sc->sc_dma_ok) {
2545 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2546 sc->sc_wdcdev.irqack = pciide_irqack;
2547 if (rev >= 0xd0)
2548 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2549 }
2550
2551 sc->sc_wdcdev.PIO_cap = 4;
2552 sc->sc_wdcdev.DMA_cap = 2;
2553 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2554 sc->sc_wdcdev.UDMA_cap = 2;
2555 sc->sc_wdcdev.set_modes = sis_setup_channel;
2556
2557 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2558 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2559
2560 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2561 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2562 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2563
2564 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2565 cp = &sc->pciide_channels[channel];
2566 if (pciide_chansetup(sc, channel, interface) == 0)
2567 continue;
2568 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2569 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2570 printf("%s: %s channel ignored (disabled)\n",
2571 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2572 continue;
2573 }
2574 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2575 pciide_pci_intr);
2576 if (cp->hw_ok == 0)
2577 continue;
2578 if (pciide_chan_candisable(cp)) {
2579 if (channel == 0)
2580 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2581 else
2582 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2583 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2584 sis_ctr0);
2585 }
2586 pciide_map_compat_intr(pa, cp, channel, interface);
2587 if (cp->hw_ok == 0)
2588 continue;
2589 sis_setup_channel(&cp->wdc_channel);
2590 }
2591 }
2592
2593 void
2594 sis_setup_channel(chp)
2595 struct channel_softc *chp;
2596 {
2597 struct ata_drive_datas *drvp;
2598 int drive;
2599 u_int32_t sis_tim;
2600 u_int32_t idedma_ctl;
2601 struct pciide_channel *cp = (struct pciide_channel*)chp;
2602 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2603
2604 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2605 "channel %d 0x%x\n", chp->channel,
2606 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2607 DEBUG_PROBE);
2608 sis_tim = 0;
2609 idedma_ctl = 0;
2610 /* setup DMA if needed */
2611 pciide_channel_dma_setup(cp);
2612
2613 for (drive = 0; drive < 2; drive++) {
2614 drvp = &chp->ch_drive[drive];
2615 /* If no drive, skip */
2616 if ((drvp->drive_flags & DRIVE) == 0)
2617 continue;
2618 /* add timing values, setup DMA if needed */
2619 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2620 (drvp->drive_flags & DRIVE_UDMA) == 0)
2621 goto pio;
2622
2623 if (drvp->drive_flags & DRIVE_UDMA) {
2624 /* use Ultra/DMA */
2625 drvp->drive_flags &= ~DRIVE_DMA;
2626 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2627 SIS_TIM_UDMA_TIME_OFF(drive);
2628 sis_tim |= SIS_TIM_UDMA_EN(drive);
2629 } else {
2630 /*
2631 * use Multiword DMA
2632 * Timings will be used for both PIO and DMA,
2633 * so adjust DMA mode if needed
2634 */
2635 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2636 drvp->PIO_mode = drvp->DMA_mode + 2;
2637 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2638 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2639 drvp->PIO_mode - 2 : 0;
2640 if (drvp->DMA_mode == 0)
2641 drvp->PIO_mode = 0;
2642 }
2643 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2644 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2645 SIS_TIM_ACT_OFF(drive);
2646 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2647 SIS_TIM_REC_OFF(drive);
2648 }
2649 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2650 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2651 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2652 if (idedma_ctl != 0) {
2653 /* Add software bits in status register */
2654 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2655 IDEDMA_CTL, idedma_ctl);
2656 }
2657 pciide_print_modes(cp);
2658 }
2659
2660 void
2661 acer_chip_map(sc, pa)
2662 struct pciide_softc *sc;
2663 struct pci_attach_args *pa;
2664 {
2665 struct pciide_channel *cp;
2666 int channel;
2667 pcireg_t cr, interface;
2668 bus_size_t cmdsize, ctlsize;
2669
2670 if (pciide_chipen(sc, pa) == 0)
2671 return;
2672 printf("%s: bus-master DMA support present",
2673 sc->sc_wdcdev.sc_dev.dv_xname);
2674 pciide_mapreg_dma(sc, pa);
2675 printf("\n");
2676 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2677 WDC_CAPABILITY_MODE;
2678 if (sc->sc_dma_ok) {
2679 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2680 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2681 sc->sc_wdcdev.irqack = pciide_irqack;
2682 }
2683
2684 sc->sc_wdcdev.PIO_cap = 4;
2685 sc->sc_wdcdev.DMA_cap = 2;
2686 sc->sc_wdcdev.UDMA_cap = 2;
2687 sc->sc_wdcdev.set_modes = acer_setup_channel;
2688 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2689 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2690
2691 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2692 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2693 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2694
2695 /* Enable "microsoft register bits" R/W. */
2696 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2697 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2698 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2699 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2700 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2701 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2702 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2703 ~ACER_CHANSTATUSREGS_RO);
2704 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2705 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2706 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2707 /* Don't use cr, re-read the real register content instead */
2708 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2709 PCI_CLASS_REG));
2710
2711 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2712 cp = &sc->pciide_channels[channel];
2713 if (pciide_chansetup(sc, channel, interface) == 0)
2714 continue;
2715 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2716 printf("%s: %s channel ignored (disabled)\n",
2717 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2718 continue;
2719 }
2720 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2721 acer_pci_intr);
2722 if (cp->hw_ok == 0)
2723 continue;
2724 if (pciide_chan_candisable(cp)) {
2725 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2726 pci_conf_write(sc->sc_pc, sc->sc_tag,
2727 PCI_CLASS_REG, cr);
2728 }
2729 pciide_map_compat_intr(pa, cp, channel, interface);
2730 acer_setup_channel(&cp->wdc_channel);
2731 }
2732 }
2733
2734 void
2735 acer_setup_channel(chp)
2736 struct channel_softc *chp;
2737 {
2738 struct ata_drive_datas *drvp;
2739 int drive;
2740 u_int32_t acer_fifo_udma;
2741 u_int32_t idedma_ctl;
2742 struct pciide_channel *cp = (struct pciide_channel*)chp;
2743 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2744
2745 idedma_ctl = 0;
2746 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2747 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2748 acer_fifo_udma), DEBUG_PROBE);
2749 /* setup DMA if needed */
2750 pciide_channel_dma_setup(cp);
2751
2752 for (drive = 0; drive < 2; drive++) {
2753 drvp = &chp->ch_drive[drive];
2754 /* If no drive, skip */
2755 if ((drvp->drive_flags & DRIVE) == 0)
2756 continue;
2757 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2758 "channel %d drive %d 0x%x\n", chp->channel, drive,
2759 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2760 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2761 /* clear FIFO/DMA mode */
2762 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2763 ACER_UDMA_EN(chp->channel, drive) |
2764 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2765
2766 /* add timing values, setup DMA if needed */
2767 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2768 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2769 acer_fifo_udma |=
2770 ACER_FTH_OPL(chp->channel, drive, 0x1);
2771 goto pio;
2772 }
2773
2774 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2775 if (drvp->drive_flags & DRIVE_UDMA) {
2776 /* use Ultra/DMA */
2777 drvp->drive_flags &= ~DRIVE_DMA;
2778 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2779 acer_fifo_udma |=
2780 ACER_UDMA_TIM(chp->channel, drive,
2781 acer_udma[drvp->UDMA_mode]);
2782 } else {
2783 /*
2784 * use Multiword DMA
2785 * Timings will be used for both PIO and DMA,
2786 * so adjust DMA mode if needed
2787 */
2788 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2789 drvp->PIO_mode = drvp->DMA_mode + 2;
2790 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2791 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2792 drvp->PIO_mode - 2 : 0;
2793 if (drvp->DMA_mode == 0)
2794 drvp->PIO_mode = 0;
2795 }
2796 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2797 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2798 ACER_IDETIM(chp->channel, drive),
2799 acer_pio[drvp->PIO_mode]);
2800 }
2801 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2802 acer_fifo_udma), DEBUG_PROBE);
2803 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2804 if (idedma_ctl != 0) {
2805 /* Add software bits in status register */
2806 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2807 IDEDMA_CTL, idedma_ctl);
2808 }
2809 pciide_print_modes(cp);
2810 }
2811
2812 int
2813 acer_pci_intr(arg)
2814 void *arg;
2815 {
2816 struct pciide_softc *sc = arg;
2817 struct pciide_channel *cp;
2818 struct channel_softc *wdc_cp;
2819 int i, rv, crv;
2820 u_int32_t chids;
2821
2822 rv = 0;
2823 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2824 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2825 cp = &sc->pciide_channels[i];
2826 wdc_cp = &cp->wdc_channel;
2827 /* If a compat channel skip. */
2828 if (cp->compat)
2829 continue;
2830 if (chids & ACER_CHIDS_INT(i)) {
2831 crv = wdcintr(wdc_cp);
2832 if (crv == 0)
2833 printf("%s:%d: bogus intr\n",
2834 sc->sc_wdcdev.sc_dev.dv_xname, i);
2835 else
2836 rv = 1;
2837 }
2838 }
2839 return rv;
2840 }
2841
2842 void
2843 hpt_chip_map(sc, pa)
2844 struct pciide_softc *sc;
2845 struct pci_attach_args *pa;
2846 {
2847 struct pciide_channel *cp;
2848 int i, compatchan, revision;
2849 pcireg_t interface;
2850 bus_size_t cmdsize, ctlsize;
2851
2852 if (pciide_chipen(sc, pa) == 0)
2853 return;
2854 revision = PCI_REVISION(pa->pa_class);
2855
2856 /*
2857	 * When the chip is in native mode it identifies itself as a
2858	 * 'misc mass storage' device; fake the interface in this case.
2859 */
2860 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
2861 interface = PCI_INTERFACE(pa->pa_class);
2862 } else {
2863 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2864 PCIIDE_INTERFACE_PCI(0);
2865 if (revision == HPT370_REV)
2866 interface |= PCIIDE_INTERFACE_PCI(1);
2867 }
2868
2869 printf("%s: bus-master DMA support present",
2870 sc->sc_wdcdev.sc_dev.dv_xname);
2871 pciide_mapreg_dma(sc, pa);
2872 printf("\n");
2873 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2874 WDC_CAPABILITY_MODE;
2875 if (sc->sc_dma_ok) {
2876 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2877 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2878 sc->sc_wdcdev.irqack = pciide_irqack;
2879 }
2880 sc->sc_wdcdev.PIO_cap = 4;
2881 sc->sc_wdcdev.DMA_cap = 2;
2882 sc->sc_wdcdev.UDMA_cap = 4;
2883
2884 sc->sc_wdcdev.set_modes = hpt_setup_channel;
2885 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2886 if (revision == HPT366_REV) {
2887 /*
2888 * The 366 has 2 PCI IDE functions, one for primary and one
2889 * for secondary. So we need to call pciide_mapregs_compat()
2890 * with the real channel
2891 */
2892 if (pa->pa_function == 0) {
2893 compatchan = 0;
2894 } else if (pa->pa_function == 1) {
2895 compatchan = 1;
2896 } else {
2897 printf("%s: unexpected PCI function %d\n",
2898 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2899 return;
2900 }
2901 sc->sc_wdcdev.nchannels = 1;
2902 } else {
2903 sc->sc_wdcdev.nchannels = 2;
2904 }
2905 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2906 cp = &sc->pciide_channels[i];
2907 if (sc->sc_wdcdev.nchannels > 1) {
2908 compatchan = i;
2909 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
2910 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
2911 printf("%s: %s channel ignored (disabled)\n",
2912 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2913 continue;
2914 }
2915 }
2916 if (pciide_chansetup(sc, i, interface) == 0)
2917 continue;
2918 if (interface & PCIIDE_INTERFACE_PCI(i)) {
2919 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
2920 &ctlsize, hpt_pci_intr);
2921 } else {
2922 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
2923 &cmdsize, &ctlsize);
2924 }
2925 if (cp->hw_ok == 0)
2926 return;
2927 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2928 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2929 wdcattach(&cp->wdc_channel);
2930 hpt_setup_channel(&cp->wdc_channel);
2931 }
2932 if (revision == HPT370_REV) {
2933 /*
2934		 * The HPT370 has a bit to disable interrupts; make sure
2935		 * to clear it.
2936 */
2937 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
2938 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
2939 ~HPT_CSEL_IRQDIS);
2940 }
2941 return;
2942 }
2943
2944
2945 void
2946 hpt_setup_channel(chp)
2947 struct channel_softc *chp;
2948 {
2949 struct ata_drive_datas *drvp;
2950 int drive;
2951 int cable;
2952 u_int32_t before, after;
2953 u_int32_t idedma_ctl;
2954 struct pciide_channel *cp = (struct pciide_channel*)chp;
2955 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2956
2957 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
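	/*
	 * HPT_CSEL reports cable detect: when HPT_CSEL_CBLID is set for
	 * this channel, UDMA is limited to mode 2 below (40-wire cable,
	 * presumably).
	 */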
2958
2959 /* setup DMA if needed */
2960 pciide_channel_dma_setup(cp);
2961
2962 idedma_ctl = 0;
2963
2964 /* Per drive settings */
2965 for (drive = 0; drive < 2; drive++) {
2966 drvp = &chp->ch_drive[drive];
2967 /* If no drive, skip */
2968 if ((drvp->drive_flags & DRIVE) == 0)
2969 continue;
2970 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
2971 HPT_IDETIM(chp->channel, drive));
2972
2973 /* add timing values, setup DMA if needed */
2974 if (drvp->drive_flags & DRIVE_UDMA) {
2975 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
2976 drvp->UDMA_mode > 2)
2977 drvp->UDMA_mode = 2;
2978 after = (sc->sc_wdcdev.nchannels == 2) ?
2979 hpt370_udma[drvp->UDMA_mode] :
2980 hpt366_udma[drvp->UDMA_mode];
2981 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2982 } else if (drvp->drive_flags & DRIVE_DMA) {
2983 /*
2984 * use Multiword DMA.
2985 * Timings will be used for both PIO and DMA, so adjust
2986 * DMA mode if needed
2987 */
2988 if (drvp->PIO_mode >= 3 &&
2989 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2990 drvp->DMA_mode = drvp->PIO_mode - 2;
2991 }
2992 after = (sc->sc_wdcdev.nchannels == 2) ?
2993 hpt370_dma[drvp->DMA_mode] :
2994 hpt366_dma[drvp->DMA_mode];
2995 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2996 } else {
2997 /* PIO only */
2998 after = (sc->sc_wdcdev.nchannels == 2) ?
2999 hpt370_pio[drvp->PIO_mode] :
3000 hpt366_pio[drvp->PIO_mode];
3001 }
3002 pci_conf_write(sc->sc_pc, sc->sc_tag,
3003 HPT_IDETIM(chp->channel, drive), after);
3004 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3005 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3006 after, before), DEBUG_PROBE);
3007 }
3008 if (idedma_ctl != 0) {
3009 /* Add software bits in status register */
3010 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3011 IDEDMA_CTL, idedma_ctl);
3012 }
3013 pciide_print_modes(cp);
3014 }
3015
3016 int
3017 hpt_pci_intr(arg)
3018 void *arg;
3019 {
3020 struct pciide_softc *sc = arg;
3021 struct pciide_channel *cp;
3022 struct channel_softc *wdc_cp;
3023 int rv = 0;
3024 int dmastat, i, crv;
3025
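	/*
	 * The bus-master DMA status register shows which channel
	 * interrupted; the IDEDMA_CTL_INTR bit is cleared by writing it
	 * back, hence the write below for bogus interrupts.
	 */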
3026 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3027 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3028 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3029 if((dmastat & IDEDMA_CTL_INTR) == 0)
3030 continue;
3031 cp = &sc->pciide_channels[i];
3032 wdc_cp = &cp->wdc_channel;
3033 crv = wdcintr(wdc_cp);
3034 if (crv == 0) {
3035 printf("%s:%d: bogus intr\n",
3036 sc->sc_wdcdev.sc_dev.dv_xname, i);
3037 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3038 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3039 } else
3040 rv = 1;
3041 }
3042 return rv;
3043 }
3044
3045
3046 /* A macro to test whether the chip is a PDC20262 (Ultra/66) */
3047 #define PDC_IS_262(sc) ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66)
3048
3049 void
3050 pdc202xx_chip_map(sc, pa)
3051 struct pciide_softc *sc;
3052 struct pci_attach_args *pa;
3053 {
3054 struct pciide_channel *cp;
3055 int channel;
3056 pcireg_t interface, st, mode;
3057 bus_size_t cmdsize, ctlsize;
3058
3059 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3060 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3061 DEBUG_PROBE);
3062 if (pciide_chipen(sc, pa) == 0)
3063 return;
3064
3065 /* turn off RAID mode */
3066 st &= ~PDC2xx_STATE_IDERAID;
3067
3068 /*
3069	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3070	 * RAID mode; we have to fake the interface.
3071 */
3072 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3073 if (st & PDC2xx_STATE_NATIVE)
3074 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3075
3076 printf("%s: bus-master DMA support present",
3077 sc->sc_wdcdev.sc_dev.dv_xname);
3078 pciide_mapreg_dma(sc, pa);
3079 printf("\n");
3080 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3081 WDC_CAPABILITY_MODE;
3082 if (sc->sc_dma_ok) {
3083 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3084 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3085 sc->sc_wdcdev.irqack = pciide_irqack;
3086 }
3087 sc->sc_wdcdev.PIO_cap = 4;
3088 sc->sc_wdcdev.DMA_cap = 2;
3089 if (PDC_IS_262(sc))
3090 sc->sc_wdcdev.UDMA_cap = 4;
3091 else
3092 sc->sc_wdcdev.UDMA_cap = 2;
3093 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3094 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3095 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3096
3097 /* setup failsafe defaults */
3098 mode = 0;
3099 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3100 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3101 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3102 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3103 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3104 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3105 "initial timings 0x%x, now 0x%x\n", channel,
3106 pci_conf_read(sc->sc_pc, sc->sc_tag,
3107 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3108 DEBUG_PROBE);
3109 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3110 mode | PDC2xx_TIM_IORDYp);
3111 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3112 "initial timings 0x%x, now 0x%x\n", channel,
3113 pci_conf_read(sc->sc_pc, sc->sc_tag,
3114 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3115 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3116 mode);
3117 }
3118
3119 mode = PDC2xx_SCR_DMA;
3120 if (PDC_IS_262(sc)) {
3121 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3122 } else {
3123 /* the BIOS set it up this way */
3124 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3125 }
3126 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3127 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3128 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3129 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3130 DEBUG_PROBE);
3131 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3132
3133 /* controller initial state register is OK even without BIOS */
3134 /* Set DMA mode to IDE DMA compatibility */
3135 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3136 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3137 DEBUG_PROBE);
3138 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3139 mode | 0x1);
3140 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3141 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3142 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3143 mode | 0x1);
3144
3145 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3146 cp = &sc->pciide_channels[channel];
3147 if (pciide_chansetup(sc, channel, interface) == 0)
3148 continue;
3149 if ((st & (PDC_IS_262(sc) ?
3150 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3151 printf("%s: %s channel ignored (disabled)\n",
3152 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3153 continue;
3154 }
3155 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3156 pdc202xx_pci_intr);
3157 if (cp->hw_ok == 0)
3158 continue;
3159 if (pciide_chan_candisable(cp))
3160 st &= ~(PDC_IS_262(sc) ?
3161 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3162 pciide_map_compat_intr(pa, cp, channel, interface);
3163 pdc202xx_setup_channel(&cp->wdc_channel);
3164 }
3165 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3166 DEBUG_PROBE);
3167 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3168 return;
3169 }
3170
3171 void
3172 pdc202xx_setup_channel(chp)
3173 struct channel_softc *chp;
3174 {
3175 struct ata_drive_datas *drvp;
3176 int drive;
3177 pcireg_t mode, st;
3178 u_int32_t idedma_ctl, scr, atapi;
3179 struct pciide_channel *cp = (struct pciide_channel*)chp;
3180 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3181 int channel = chp->channel;
3182
3183 /* setup DMA if needed */
3184 pciide_channel_dma_setup(cp);
3185
3186 idedma_ctl = 0;
3187
3188 /* Per channel settings */
3189 if (PDC_IS_262(sc)) {
3190 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3191 PDC262_U66);
3192 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3193		/* Trim UDMA mode */
3194 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3195 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3196 chp->ch_drive[0].UDMA_mode <= 2) ||
3197 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3198 chp->ch_drive[1].UDMA_mode <= 2)) {
3199 if (chp->ch_drive[0].UDMA_mode > 2)
3200 chp->ch_drive[0].UDMA_mode = 2;
3201 if (chp->ch_drive[1].UDMA_mode > 2)
3202 chp->ch_drive[1].UDMA_mode = 2;
3203 }
3204 /* Set U66 if needed */
3205 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3206 chp->ch_drive[0].UDMA_mode > 2) ||
3207 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3208 chp->ch_drive[1].UDMA_mode > 2))
3209 scr |= PDC262_U66_EN(channel);
3210 else
3211 scr &= ~PDC262_U66_EN(channel);
3212 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3213 PDC262_U66, scr);
3214 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3215 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3216 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3217 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3218 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3219 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3220 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3221 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3222 atapi = 0;
3223 else
3224 atapi = PDC262_ATAPI_UDMA;
3225 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3226 PDC262_ATAPI(channel), atapi);
3227 }
3228 }
3229 for (drive = 0; drive < 2; drive++) {
3230 drvp = &chp->ch_drive[drive];
3231 /* If no drive, skip */
3232 if ((drvp->drive_flags & DRIVE) == 0)
3233 continue;
3234 mode = 0;
3235 if (drvp->drive_flags & DRIVE_UDMA) {
3236 mode = PDC2xx_TIM_SET_MB(mode,
3237 pdc2xx_udma_mb[drvp->UDMA_mode]);
3238 mode = PDC2xx_TIM_SET_MC(mode,
3239 pdc2xx_udma_mc[drvp->UDMA_mode]);
3240 drvp->drive_flags &= ~DRIVE_DMA;
3241 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3242 } else if (drvp->drive_flags & DRIVE_DMA) {
3243 mode = PDC2xx_TIM_SET_MB(mode,
3244 pdc2xx_dma_mb[drvp->DMA_mode]);
3245 mode = PDC2xx_TIM_SET_MC(mode,
3246 pdc2xx_dma_mc[drvp->DMA_mode]);
3247 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3248 } else {
3249 mode = PDC2xx_TIM_SET_MB(mode,
3250 pdc2xx_dma_mb[0]);
3251 mode = PDC2xx_TIM_SET_MC(mode,
3252 pdc2xx_dma_mc[0]);
3253 }
3254 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3255 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3256 if (drvp->drive_flags & DRIVE_ATA)
3257 mode |= PDC2xx_TIM_PRE;
3258 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3259 if (drvp->PIO_mode >= 3) {
3260 mode |= PDC2xx_TIM_IORDY;
3261 if (drive == 0)
3262 mode |= PDC2xx_TIM_IORDYp;
3263 }
3264 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3265 "timings 0x%x\n",
3266 sc->sc_wdcdev.sc_dev.dv_xname,
3267 chp->channel, drive, mode), DEBUG_PROBE);
3268 pci_conf_write(sc->sc_pc, sc->sc_tag,
3269 PDC2xx_TIM(chp->channel, drive), mode);
3270 }
3271 if (idedma_ctl != 0) {
3272 /* Add software bits in status register */
3273 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3274 IDEDMA_CTL, idedma_ctl);
3275 }
3276 pciide_print_modes(cp);
3277 }
3278
3279 int
3280 pdc202xx_pci_intr(arg)
3281 void *arg;
3282 {
3283 struct pciide_softc *sc = arg;
3284 struct pciide_channel *cp;
3285 struct channel_softc *wdc_cp;
3286 int i, rv, crv;
3287 u_int32_t scr;
3288
3289 rv = 0;
3290 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3291 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3292 cp = &sc->pciide_channels[i];
3293 wdc_cp = &cp->wdc_channel;
3294 /* If a compat channel skip. */
3295 if (cp->compat)
3296 continue;
3297 if (scr & PDC2xx_SCR_INT(i)) {
3298 crv = wdcintr(wdc_cp);
3299 if (crv == 0)
3300 printf("%s:%d: bogus intr\n",
3301 sc->sc_wdcdev.sc_dev.dv_xname, i);
3302 else
3303 rv = 1;
3304 }
3305 }
3306 return rv;
3307 }
3308
3309 void
3310 opti_chip_map(sc, pa)
3311 struct pciide_softc *sc;
3312 struct pci_attach_args *pa;
3313 {
3314 struct pciide_channel *cp;
3315 bus_size_t cmdsize, ctlsize;
3316 pcireg_t interface;
3317 u_int8_t init_ctrl;
3318 int channel;
3319
3320 if (pciide_chipen(sc, pa) == 0)
3321 return;
3322 printf("%s: bus-master DMA support present",
3323 sc->sc_wdcdev.sc_dev.dv_xname);
3324 pciide_mapreg_dma(sc, pa);
3325 printf("\n");
3326
3327 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3328 WDC_CAPABILITY_MODE;
3329 sc->sc_wdcdev.PIO_cap = 4;
3330 if (sc->sc_dma_ok) {
3331 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3332 sc->sc_wdcdev.irqack = pciide_irqack;
3333 sc->sc_wdcdev.DMA_cap = 2;
3334 }
3335 sc->sc_wdcdev.set_modes = opti_setup_channel;
3336
3337 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3338 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3339
3340 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3341 OPTI_REG_INIT_CONTROL);
3342
3343 interface = PCI_INTERFACE(pa->pa_class);
3344
3345 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3346 cp = &sc->pciide_channels[channel];
3347 if (pciide_chansetup(sc, channel, interface) == 0)
3348 continue;
3349 if (channel == 1 &&
3350 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3351 printf("%s: %s channel ignored (disabled)\n",
3352 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3353 continue;
3354 }
3355 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3356 pciide_pci_intr);
3357 if (cp->hw_ok == 0)
3358 continue;
3359 pciide_map_compat_intr(pa, cp, channel, interface);
3360 if (cp->hw_ok == 0)
3361 continue;
3362 opti_setup_channel(&cp->wdc_channel);
3363 }
3364 }
3365
3366 void
3367 opti_setup_channel(chp)
3368 struct channel_softc *chp;
3369 {
3370 struct ata_drive_datas *drvp;
3371 struct pciide_channel *cp = (struct pciide_channel*)chp;
3372 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3373 int drive, spd;
3374 int mode[2];
3375 u_int8_t rv, mr;
3376
3377 /*
3378 * The `Delay' and `Address Setup Time' fields of the
3379 * Miscellaneous Register are always zero initially.
3380 */
3381 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3382 mr &= ~(OPTI_MISC_DELAY_MASK |
3383 OPTI_MISC_ADDR_SETUP_MASK |
3384 OPTI_MISC_INDEX_MASK);
3385
3386 /* Prime the control register before setting timing values */
3387 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3388
3389	/* Determine the clock rate of the PCI bus the chip is attached to */
3390 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3391 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3392
3393 /* setup DMA if needed */
3394 pciide_channel_dma_setup(cp);
3395
3396 for (drive = 0; drive < 2; drive++) {
3397 drvp = &chp->ch_drive[drive];
3398 /* If no drive, skip */
3399 if ((drvp->drive_flags & DRIVE) == 0) {
3400 mode[drive] = -1;
3401 continue;
3402 }
3403
3404 if ((drvp->drive_flags & DRIVE_DMA)) {
3405 /*
3406 * Timings will be used for both PIO and DMA,
3407 * so adjust DMA mode if needed
3408 */
3409 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3410 drvp->PIO_mode = drvp->DMA_mode + 2;
3411 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3412 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3413 drvp->PIO_mode - 2 : 0;
3414 if (drvp->DMA_mode == 0)
3415 drvp->PIO_mode = 0;
3416
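			/*
			 * DMA mode n indexes the opti_tim_* tables at
			 * n + 5, i.e. after the five PIO entries.
			 */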
3417 mode[drive] = drvp->DMA_mode + 5;
3418 } else
3419 mode[drive] = drvp->PIO_mode;
3420
3421 if (drive && mode[0] >= 0 &&
3422 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3423 /*
3424 * Can't have two drives using different values
3425 * for `Address Setup Time'.
3426 * Slow down the faster drive to compensate.
3427 */
3428 int d = (opti_tim_as[spd][mode[0]] >
3429 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3430
3431 mode[d] = mode[1-d];
3432 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3433 chp->ch_drive[d].DMA_mode = 0;
3434			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3435 }
3436 }
3437
3438 for (drive = 0; drive < 2; drive++) {
3439 int m;
3440 if ((m = mode[drive]) < 0)
3441 continue;
3442
3443 /* Set the Address Setup Time and select appropriate index */
3444 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3445 rv |= OPTI_MISC_INDEX(drive);
3446 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3447
3448 /* Set the pulse width and recovery timing parameters */
3449 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3450 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3451 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3452 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3453
3454 /* Set the Enhanced Mode register appropriately */
3455 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3456 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3457 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3458 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3459 }
3460
3461 /* Finally, enable the timings */
3462 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3463
3464 pciide_print_modes(cp);
3465 }
3466