1 /* $NetBSD: pciide.c,v 1.68.2.20 2001/02/26 21:49:16 he Exp $ */
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
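/*
 * Example: to trace controller probing and DMA table setup, set
 * wdcdebug_pciide_mask = DEBUG_PROBE | DEBUG_DMA (e.g. from ddb, or by
 * patching the kernel image) before the controller attaches.
 */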
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/cy82c693var.h>
121
122 #include "opt_pciide.h"
123
124 /* inlines for reading/writing 8-bit PCI registers */
125 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
126 int));
127 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
128 int, u_int8_t));
129
130 static __inline u_int8_t
131 pciide_pci_read(pc, pa, reg)
132 pci_chipset_tag_t pc;
133 pcitag_t pa;
134 int reg;
135 {
136
137 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
138 ((reg & 0x03) * 8) & 0xff);
139 }
140
141 static __inline void
142 pciide_pci_write(pc, pa, reg, val)
143 pci_chipset_tag_t pc;
144 pcitag_t pa;
145 int reg;
146 u_int8_t val;
147 {
148 pcireg_t pcival;
149
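/*
 * PCI configuration space is only addressable as aligned 32-bit words,
 * so read-modify-write the containing word, replacing just the
 * addressed byte.
 */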
150 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
151 pcival &= ~(0xff << ((reg & 0x03) * 8));
152 pcival |= (val << ((reg & 0x03) * 8));
153 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
154 }
155
156 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157
158 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159 void piix_setup_channel __P((struct channel_softc*));
160 void piix3_4_setup_channel __P((struct channel_softc*));
161 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
163 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164
165 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
166 void amd756_setup_channel __P((struct channel_softc*));
167
168 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
169 void apollo_setup_channel __P((struct channel_softc*));
170
171 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
173 void cmd0643_9_setup_channel __P((struct channel_softc*));
174 void cmd_channel_map __P((struct pci_attach_args *,
175 struct pciide_softc *, int));
176 int cmd_pci_intr __P((void *));
177 void cmd646_9_irqack __P((struct channel_softc *));
178
179 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
180 void cy693_setup_channel __P((struct channel_softc*));
181
182 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void sis_setup_channel __P((struct channel_softc*));
184
185 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void acer_setup_channel __P((struct channel_softc*));
187 int acer_pci_intr __P((void *));
188
189 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
190 void pdc202xx_setup_channel __P((struct channel_softc*));
191 int pdc202xx_pci_intr __P((void *));
192
193 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void opti_setup_channel __P((struct channel_softc*));
195
196 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void hpt_setup_channel __P((struct channel_softc*));
198 int hpt_pci_intr __P((void *));
199
200 void pciide_channel_dma_setup __P((struct pciide_channel *));
201 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
202 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
203 void pciide_dma_start __P((void*, int, int));
204 int pciide_dma_finish __P((void*, int, int, int));
205 void pciide_irqack __P((struct channel_softc *));
206 void pciide_print_modes __P((struct pciide_channel *));
207
208 struct pciide_product_desc {
209 u_int32_t ide_product;
210 int ide_flags;
211 const char *ide_name;
212 /* map and setup chip, probe drives */
213 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
214 };
215
216 /* Flags for ide_flags */
217 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
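/*
 * IDE_PCI_CLASS_OVERRIDE is used below for boards such as the Promise
 * Ultra series and the Triones/Highpoint HPT366, which report a class
 * code other than mass storage/IDE.
 */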
218
219 /* Default product description for devices not known to this driver */
220 const struct pciide_product_desc default_product_desc = {
221 0,
222 0,
223 "Generic PCI IDE controller",
224 default_chip_map,
225 };
226
227 const struct pciide_product_desc pciide_intel_products[] = {
228 { PCI_PRODUCT_INTEL_82092AA,
229 0,
230 "Intel 82092AA IDE controller",
231 default_chip_map,
232 },
233 { PCI_PRODUCT_INTEL_82371FB_IDE,
234 0,
235 "Intel 82371FB IDE controller (PIIX)",
236 piix_chip_map,
237 },
238 { PCI_PRODUCT_INTEL_82371SB_IDE,
239 0,
240 "Intel 82371SB IDE Interface (PIIX3)",
241 piix_chip_map,
242 },
243 { PCI_PRODUCT_INTEL_82371AB_IDE,
244 0,
245 "Intel 82371AB IDE controller (PIIX4)",
246 piix_chip_map,
247 },
248 { PCI_PRODUCT_INTEL_82801AA_IDE,
249 0,
250 "Intel 82801AA IDE Controller (ICH)",
251 piix_chip_map,
252 },
253 { PCI_PRODUCT_INTEL_82801AB_IDE,
254 0,
255 "Intel 82801AB IDE Controller (ICH0)",
256 piix_chip_map,
257 },
258 { PCI_PRODUCT_INTEL_82801BA_IDE,
259 0,
260 "Intel 82801BA IDE Controller (ICH2)",
261 piix_chip_map,
262 },
263 { PCI_PRODUCT_INTEL_82801BAM_IDE,
264 0,
265 "Intel 82801BAM IDE Controller (ICH2)",
266 piix_chip_map,
267 },
268 { 0,
269 0,
270 NULL,
271 }
272 };
273
274 const struct pciide_product_desc pciide_amd_products[] = {
275 { PCI_PRODUCT_AMD_PBC756_IDE,
276 0,
277 "Advanced Micro Devices AMD756 IDE Controller",
278 amd756_chip_map
279 },
280 { 0,
281 0,
282 NULL,
283 }
284 };
285
286 const struct pciide_product_desc pciide_cmd_products[] = {
287 { PCI_PRODUCT_CMDTECH_640,
288 0,
289 "CMD Technology PCI0640",
290 cmd_chip_map
291 },
292 { PCI_PRODUCT_CMDTECH_643,
293 0,
294 "CMD Technology PCI0643",
295 cmd0643_9_chip_map,
296 },
297 { PCI_PRODUCT_CMDTECH_646,
298 0,
299 "CMD Technology PCI0646",
300 cmd0643_9_chip_map,
301 },
302 { PCI_PRODUCT_CMDTECH_648,
303 IDE_PCI_CLASS_OVERRIDE,
304 "CMD Technology PCI0648",
305 cmd0643_9_chip_map,
306 },
307 { PCI_PRODUCT_CMDTECH_649,
308 IDE_PCI_CLASS_OVERRIDE,
309 "CMD Technology PCI0649",
310 cmd0643_9_chip_map,
311 },
312 { 0,
313 0,
314 NULL,
315 }
316 };
317
318 const struct pciide_product_desc pciide_via_products[] = {
319 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
320 0,
321 "VIA Tech VT82C586 IDE Controller",
322 apollo_chip_map,
323 },
324 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
325 0,
326 "VIA Tech VT82C586A IDE Controller",
327 apollo_chip_map,
328 },
329 { 0,
330 0,
331 NULL,
332 }
333 };
334
335 const struct pciide_product_desc pciide_cypress_products[] = {
336 { PCI_PRODUCT_CONTAQ_82C693,
337 0,
338 "Cypress 82C693 IDE Controller",
339 cy693_chip_map,
340 },
341 { 0,
342 0,
343 NULL,
344 }
345 };
346
347 const struct pciide_product_desc pciide_sis_products[] = {
348 { PCI_PRODUCT_SIS_5597_IDE,
349 0,
350 "Silicon Integrated System 5597/5598 IDE controller",
351 sis_chip_map,
352 },
353 { 0,
354 0,
355 NULL,
356 }
357 };
358
359 const struct pciide_product_desc pciide_acer_products[] = {
360 { PCI_PRODUCT_ALI_M5229,
361 0,
362 "Acer Labs M5229 UDMA IDE Controller",
363 acer_chip_map,
364 },
365 { 0,
366 0,
367 NULL,
368 }
369 };
370
371 const struct pciide_product_desc pciide_promise_products[] = {
372 { PCI_PRODUCT_PROMISE_ULTRA33,
373 IDE_PCI_CLASS_OVERRIDE,
374 "Promise Ultra33/ATA Bus Master IDE Accelerator",
375 pdc202xx_chip_map,
376 },
377 { PCI_PRODUCT_PROMISE_ULTRA66,
378 IDE_PCI_CLASS_OVERRIDE,
379 "Promise Ultra66/ATA Bus Master IDE Accelerator",
380 pdc202xx_chip_map,
381 },
382 { PCI_PRODUCT_PROMISE_ULTRA100,
383 IDE_PCI_CLASS_OVERRIDE,
384 "Promise Ultra100/ATA Bus Master IDE Accelerator",
385 pdc202xx_chip_map,
386 },
387 { PCI_PRODUCT_PROMISE_ULTRA100X,
388 IDE_PCI_CLASS_OVERRIDE,
389 "Promise Ultra100/ATA Bus Master IDE Accelerator",
390 pdc202xx_chip_map,
391 },
392 { 0,
393 0,
394 NULL,
395 }
396 };
397
398 const struct pciide_product_desc pciide_opti_products[] = {
399 { PCI_PRODUCT_OPTI_82C621,
400 0,
401 "OPTi 82c621 PCI IDE controller",
402 opti_chip_map,
403 },
404 { PCI_PRODUCT_OPTI_82C568,
405 0,
406 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
407 opti_chip_map,
408 },
409 { PCI_PRODUCT_OPTI_82D568,
410 0,
411 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
412 opti_chip_map,
413 },
414 { 0,
415 0,
416 NULL,
417 }
418 };
419
420 const struct pciide_product_desc pciide_triones_products[] = {
421 { PCI_PRODUCT_TRIONES_HPT366,
422 IDE_PCI_CLASS_OVERRIDE,
423 "Triones/Highpoint HPT366/370 IDE Controller",
424 hpt_chip_map,
425 },
426 { 0,
427 0,
428 NULL,
429 }
430 };
431
432 struct pciide_vendor_desc {
433 u_int32_t ide_vendor;
434 const struct pciide_product_desc *ide_products;
435 };
436
437 const struct pciide_vendor_desc pciide_vendors[] = {
438 { PCI_VENDOR_INTEL, pciide_intel_products },
439 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
440 { PCI_VENDOR_VIATECH, pciide_via_products },
441 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
442 { PCI_VENDOR_SIS, pciide_sis_products },
443 { PCI_VENDOR_ALI, pciide_acer_products },
444 { PCI_VENDOR_PROMISE, pciide_promise_products },
445 { PCI_VENDOR_AMD, pciide_amd_products },
446 { PCI_VENDOR_OPTI, pciide_opti_products },
447 { PCI_VENDOR_TRIONES, pciide_triones_products },
448 { 0, NULL }
449 };
450
451 /* options passed via the 'flags' config keyword */
452 #define PCIIDE_OPTIONS_DMA 0x01
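/*
 * e.g. "pciide* at pci? dev ? function ? flags 0x0001" in the kernel
 * config file enables DMA on an otherwise unknown controller.
 */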
453
454 int pciide_match __P((struct device *, struct cfdata *, void *));
455 void pciide_attach __P((struct device *, struct device *, void *));
456
457 struct cfattach pciide_ca = {
458 sizeof(struct pciide_softc), pciide_match, pciide_attach
459 };
460 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
461 int pciide_mapregs_compat __P(( struct pci_attach_args *,
462 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
463 int pciide_mapregs_native __P((struct pci_attach_args *,
464 struct pciide_channel *, bus_size_t *, bus_size_t *,
465 int (*pci_intr) __P((void *))));
466 void pciide_mapreg_dma __P((struct pciide_softc *,
467 struct pci_attach_args *));
468 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
469 void pciide_mapchan __P((struct pci_attach_args *,
470 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
471 int (*pci_intr) __P((void *))));
472 int pciide_chan_candisable __P((struct pciide_channel *));
473 void pciide_map_compat_intr __P(( struct pci_attach_args *,
474 struct pciide_channel *, int, int));
475 int pciide_print __P((void *, const char *pnp));
476 int pciide_compat_intr __P((void *));
477 int pciide_pci_intr __P((void *));
478 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
479
480 const struct pciide_product_desc *
481 pciide_lookup_product(id)
482 u_int32_t id;
483 {
484 const struct pciide_product_desc *pp;
485 const struct pciide_vendor_desc *vp;
486
487 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
488 if (PCI_VENDOR(id) == vp->ide_vendor)
489 break;
490
491 if ((pp = vp->ide_products) == NULL)
492 return NULL;
493
494 for (; pp->ide_name != NULL; pp++)
495 if (PCI_PRODUCT(id) == pp->ide_product)
496 break;
497
498 if (pp->ide_name == NULL)
499 return NULL;
500 return pp;
501 }
502
503 int
504 pciide_match(parent, match, aux)
505 struct device *parent;
506 struct cfdata *match;
507 void *aux;
508 {
509 struct pci_attach_args *pa = aux;
510 const struct pciide_product_desc *pp;
511
512 /*
513 * Check the class code to see that it's a PCI IDE controller.
514 * If it is, we assume that we can deal with it; it _should_
515 * work in a standardized way...
516 */
517 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
518 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
519 return (1);
520 }
521
522 /*
523 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
524 * controllers. Let's see if we can deal with them anyway.
525 */
526 pp = pciide_lookup_product(pa->pa_id);
527 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
528 return (1);
529 }
530
531 return (0);
532 }
533
534 void
535 pciide_attach(parent, self, aux)
536 struct device *parent, *self;
537 void *aux;
538 {
539 struct pci_attach_args *pa = aux;
540 pci_chipset_tag_t pc = pa->pa_pc;
541 pcitag_t tag = pa->pa_tag;
542 struct pciide_softc *sc = (struct pciide_softc *)self;
543 pcireg_t csr;
544 char devinfo[256];
545 const char *displaydev;
546
547 sc->sc_pp = pciide_lookup_product(pa->pa_id);
548 if (sc->sc_pp == NULL) {
549 sc->sc_pp = &default_product_desc;
550 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
551 displaydev = devinfo;
552 } else
553 displaydev = sc->sc_pp->ide_name;
554
555 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
556
557 sc->sc_pc = pa->pa_pc;
558 sc->sc_tag = pa->pa_tag;
559 #ifdef WDCDEBUG
560 if (wdcdebug_pciide_mask & DEBUG_PROBE)
561 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
562 #endif
563 sc->sc_pp->chip_map(sc, pa);
564
565 if (sc->sc_dma_ok) {
566 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
567 csr |= PCI_COMMAND_MASTER_ENABLE;
568 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
569 }
570 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
571 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
572 }
573
574 /* tell whether the chip is enabled or not */
575 int
576 pciide_chipen(sc, pa)
577 struct pciide_softc *sc;
578 struct pci_attach_args *pa;
579 {
580 pcireg_t csr;
581 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
582 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
583 PCI_COMMAND_STATUS_REG);
584 printf("%s: device disabled (at %s)\n",
585 sc->sc_wdcdev.sc_dev.dv_xname,
586 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
587 "device" : "bridge");
588 return 0;
589 }
590 return 1;
591 }
592
593 int
594 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
595 struct pci_attach_args *pa;
596 struct pciide_channel *cp;
597 int compatchan;
598 bus_size_t *cmdsizep, *ctlsizep;
599 {
600 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
601 struct channel_softc *wdc_cp = &cp->wdc_channel;
602
603 cp->compat = 1;
604 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
605 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
606
607 wdc_cp->cmd_iot = pa->pa_iot;
608 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
609 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
610 printf("%s: couldn't map %s channel cmd regs\n",
611 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
612 return (0);
613 }
614
615 wdc_cp->ctl_iot = pa->pa_iot;
616 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
617 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
618 printf("%s: couldn't map %s channel ctl regs\n",
619 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
620 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
621 PCIIDE_COMPAT_CMD_SIZE);
622 return (0);
623 }
624
625 return (1);
626 }
627
628 int
629 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
630 struct pci_attach_args * pa;
631 struct pciide_channel *cp;
632 bus_size_t *cmdsizep, *ctlsizep;
633 int (*pci_intr) __P((void *));
634 {
635 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
636 struct channel_softc *wdc_cp = &cp->wdc_channel;
637 const char *intrstr;
638 pci_intr_handle_t intrhandle;
639
640 cp->compat = 0;
641
642 if (sc->sc_pci_ih == NULL) {
643 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
644 pa->pa_intrline, &intrhandle) != 0) {
645 printf("%s: couldn't map native-PCI interrupt\n",
646 sc->sc_wdcdev.sc_dev.dv_xname);
647 return 0;
648 }
649 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
650 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
651 intrhandle, IPL_BIO, pci_intr, sc);
652 if (sc->sc_pci_ih != NULL) {
653 printf("%s: using %s for native-PCI interrupt\n",
654 sc->sc_wdcdev.sc_dev.dv_xname,
655 intrstr ? intrstr : "unknown interrupt");
656 } else {
657 printf("%s: couldn't establish native-PCI interrupt",
658 sc->sc_wdcdev.sc_dev.dv_xname);
659 if (intrstr != NULL)
660 printf(" at %s", intrstr);
661 printf("\n");
662 return 0;
663 }
664 }
665 cp->ih = sc->sc_pci_ih;
666 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
667 PCI_MAPREG_TYPE_IO, 0,
668 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
669 printf("%s: couldn't map %s channel cmd regs\n",
670 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
671 return 0;
672 }
673
674 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
675 PCI_MAPREG_TYPE_IO, 0,
676 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
677 printf("%s: couldn't map %s channel ctl regs\n",
678 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
679 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
680 return 0;
681 }
682 /*
683 * In native mode, 4 bytes of I/O space are mapped for the control
684 * register, the control register is at offset 2. Pass the generic
685 * code a handle for only one byte at the right offset.
686 */
687 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
688 &wdc_cp->ctl_ioh) != 0) {
689 printf("%s: unable to subregion %s channel ctl regs\n",
690 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
691 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
692 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
693 return 0;
694 }
695 return (1);
696 }
697
698 void
699 pciide_mapreg_dma(sc, pa)
700 struct pciide_softc *sc;
701 struct pci_attach_args *pa;
702 {
703 pcireg_t maptype;
704
705 /*
706 * Map DMA registers
707 *
708 * Note that sc_dma_ok is the right variable to test to see if
709 * DMA can be done. If the interface doesn't support DMA,
710 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
711 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
712 * non-zero if the interface supports DMA and the registers
713 * could be mapped.
714 *
715 * XXX Note that despite the fact that the Bus Master IDE specs
716 * XXX say that "The bus master IDE function uses 16 bytes of IO
717 * XXX space," some controllers (at least the United
718 * XXX Microelectronics UM8886BF) place it in memory space.
719 */
720 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
721 PCIIDE_REG_BUS_MASTER_DMA);
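	/*
	 * The bus-master block is 16 bytes of register space, 8 bytes per
	 * channel: command, status and PRD table pointer registers.
	 */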
722
723 switch (maptype) {
724 case PCI_MAPREG_TYPE_IO:
725 case PCI_MAPREG_MEM_TYPE_32BIT:
726 sc->sc_dma_ok = (pci_mapreg_map(pa,
727 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
728 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
729 sc->sc_dmat = pa->pa_dmat;
730 if (sc->sc_dma_ok == 0) {
731 printf(", but unused (couldn't map registers)");
732 } else {
733 sc->sc_wdcdev.dma_arg = sc;
734 sc->sc_wdcdev.dma_init = pciide_dma_init;
735 sc->sc_wdcdev.dma_start = pciide_dma_start;
736 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
737 }
738 break;
739
740 default:
741 sc->sc_dma_ok = 0;
742 printf(", but unsupported register maptype (0x%x)", maptype);
743 }
744 }
745
746 int
747 pciide_compat_intr(arg)
748 void *arg;
749 {
750 struct pciide_channel *cp = arg;
751
752 #ifdef DIAGNOSTIC
753 /* should only be called for a compat channel */
754 if (cp->compat == 0)
755 panic("pciide compat intr called for non-compat chan %p\n", cp);
756 #endif
757 return (wdcintr(&cp->wdc_channel));
758 }
759
760 int
761 pciide_pci_intr(arg)
762 void *arg;
763 {
764 struct pciide_softc *sc = arg;
765 struct pciide_channel *cp;
766 struct channel_softc *wdc_cp;
767 int i, rv, crv;
768
769 rv = 0;
770 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
771 cp = &sc->pciide_channels[i];
772 wdc_cp = &cp->wdc_channel;
773
774 /* If a compat channel, skip. */
775 if (cp->compat)
776 continue;
777 /* if this channel not waiting for intr, skip */
778 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
779 continue;
780
781 crv = wdcintr(wdc_cp);
782 if (crv == 0)
783 ; /* leave rv alone */
784 else if (crv == 1)
785 rv = 1; /* claim the intr */
786 else if (rv == 0) /* crv should be -1 in this case */
787 rv = crv; /* if we've done no better, take it */
788 }
789 return (rv);
790 }
791
792 void
793 pciide_channel_dma_setup(cp)
794 struct pciide_channel *cp;
795 {
796 int drive;
797 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
798 struct ata_drive_datas *drvp;
799
800 for (drive = 0; drive < 2; drive++) {
801 drvp = &cp->wdc_channel.ch_drive[drive];
802 /* If no drive, skip */
803 if ((drvp->drive_flags & DRIVE) == 0)
804 continue;
805 /* setup DMA if needed */
806 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
807 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
808 sc->sc_dma_ok == 0) {
809 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
810 continue;
811 }
812 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
813 != 0) {
814 /* Abort DMA setup */
815 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
816 continue;
817 }
818 }
819 }
820
821 int
822 pciide_dma_table_setup(sc, channel, drive)
823 struct pciide_softc *sc;
824 int channel, drive;
825 {
826 bus_dma_segment_t seg;
827 int error, rseg;
828 const bus_size_t dma_table_size =
829 sizeof(struct idedma_table) * NIDEDMA_TABLES;
830 struct pciide_dma_maps *dma_maps =
831 &sc->pciide_channels[channel].dma_maps[drive];
832
833 /* If table was already allocated, just return */
834 if (dma_maps->dma_table)
835 return 0;
836
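	/*
	 * dma_table is the bus-master PRD (physical region descriptor) list
	 * for this drive: one base address/byte count pair per DMA segment,
	 * filled in by pciide_dma_init() for each transfer.
	 */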
837 /* Allocate memory for the DMA tables and map it */
838 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
839 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
840 BUS_DMA_NOWAIT)) != 0) {
841 printf("%s:%d: unable to allocate table DMA for "
842 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
843 channel, drive, error);
844 return error;
845 }
846 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
847 dma_table_size,
848 (caddr_t *)&dma_maps->dma_table,
849 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
850 printf("%s:%d: unable to map table DMA for"
851 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
852 channel, drive, error);
853 return error;
854 }
855 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
856 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
857 seg.ds_addr), DEBUG_PROBE);
858
859 /* Create and load table DMA map for this disk */
860 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
861 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
862 &dma_maps->dmamap_table)) != 0) {
863 printf("%s:%d: unable to create table DMA map for "
864 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
865 channel, drive, error);
866 return error;
867 }
868 if ((error = bus_dmamap_load(sc->sc_dmat,
869 dma_maps->dmamap_table,
870 dma_maps->dma_table,
871 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
872 printf("%s:%d: unable to load table DMA map for "
873 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
874 channel, drive, error);
875 return error;
876 }
877 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
878 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
879 /* Create an xfer DMA map for this drive */
880 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
881 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
882 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
883 &dma_maps->dmamap_xfer)) != 0) {
884 printf("%s:%d: unable to create xfer DMA map for "
885 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
886 channel, drive, error);
887 return error;
888 }
889 return 0;
890 }
891
892 int
893 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
894 void *v;
895 int channel, drive;
896 void *databuf;
897 size_t datalen;
898 int flags;
899 {
900 struct pciide_softc *sc = v;
901 int error, seg;
902 struct pciide_dma_maps *dma_maps =
903 &sc->pciide_channels[channel].dma_maps[drive];
904
905 error = bus_dmamap_load(sc->sc_dmat,
906 dma_maps->dmamap_xfer,
907 databuf, datalen, NULL, BUS_DMA_NOWAIT);
908 if (error) {
909 printf("%s:%d: unable to load xfer DMA map for"
910 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
911 channel, drive, error);
912 return error;
913 }
914
915 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
916 dma_maps->dmamap_xfer->dm_mapsize,
917 (flags & WDC_DMA_READ) ?
918 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
919
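	/*
	 * Build one PRD entry per DMA segment. The bus-master engine expects
	 * the table in little-endian byte order (hence htole32()), and the
	 * last entry must carry the end-of-table flag.
	 */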
920 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
921 #ifdef DIAGNOSTIC
922 /* A segment must not cross a 64k boundary */
923 {
924 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
925 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
926 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
927 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
928 printf("pciide_dma: segment %d physical addr 0x%lx"
929 " len 0x%lx not properly aligned\n",
930 seg, phys, len);
931 panic("pciide_dma: buf align");
932 }
933 }
934 #endif
935 dma_maps->dma_table[seg].base_addr =
936 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
937 dma_maps->dma_table[seg].byte_count =
938 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
939 IDEDMA_BYTE_COUNT_MASK);
940 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
941 seg, le32toh(dma_maps->dma_table[seg].byte_count),
942 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
943
944 }
945 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
946 htole32(IDEDMA_BYTE_COUNT_EOT);
947
948 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
949 dma_maps->dmamap_table->dm_mapsize,
950 BUS_DMASYNC_PREWRITE);
951
952 /* Maps are ready. Start DMA function */
953 #ifdef DIAGNOSTIC
954 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
955 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
956 dma_maps->dmamap_table->dm_segs[0].ds_addr);
957 panic("pciide_dma_init: table align");
958 }
959 #endif
960
961 /* Clear status bits */
962 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
963 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
964 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
965 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
966 /* Write table addr */
967 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
968 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
969 dma_maps->dmamap_table->dm_segs[0].ds_addr);
970 /* set read/write */
971 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
972 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
973 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
974 /* remember flags */
975 dma_maps->dma_flags = flags;
976 return 0;
977 }
978
979 void
980 pciide_dma_start(v, channel, drive)
981 void *v;
982 int channel, drive;
983 {
984 struct pciide_softc *sc = v;
985
986 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
987 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
988 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
989 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
990 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
991 }
992
993 int
994 pciide_dma_finish(v, channel, drive, force)
995 void *v;
996 int channel, drive;
997 int force;
998 {
999 struct pciide_softc *sc = v;
1000 u_int8_t status;
1001 int error = 0;
1002 struct pciide_dma_maps *dma_maps =
1003 &sc->pciide_channels[channel].dma_maps[drive];
1004
1005 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1006 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1007 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1008 DEBUG_XFERS);
1009
1010 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1011 return WDC_DMAST_NOIRQ;
1012
1013 /* stop DMA channel */
1014 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1015 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1016 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1017 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1018
1019 /* Unload the map of the data buffer */
1020 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1021 dma_maps->dmamap_xfer->dm_mapsize,
1022 (dma_maps->dma_flags & WDC_DMA_READ) ?
1023 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1024 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1025
1026 if ((status & IDEDMA_CTL_ERR) != 0) {
1027 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1028 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1029 error |= WDC_DMAST_ERR;
1030 }
1031
1032 if ((status & IDEDMA_CTL_INTR) == 0) {
1033 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1034 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1035 drive, status);
1036 error |= WDC_DMAST_NOIRQ;
1037 }
1038
1039 if ((status & IDEDMA_CTL_ACT) != 0) {
1040 /* data underrun, may be a valid condition for ATAPI */
1041 error |= WDC_DMAST_UNDER;
1042 }
1043 return error;
1044 }
1045
1046 void
1047 pciide_irqack(chp)
1048 struct channel_softc *chp;
1049 {
1050 struct pciide_channel *cp = (struct pciide_channel*)chp;
1051 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1052
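	/*
	 * The interrupt and error bits in the bus-master status register are
	 * write-one-to-clear, so writing back the value just read
	 * acknowledges the interrupt.
	 */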
1053 /* clear status bits in IDE DMA registers */
1054 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1055 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1056 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1057 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1058 }
1059
1060 /* some common code used by several chip_map */
1061 int
1062 pciide_chansetup(sc, channel, interface)
1063 struct pciide_softc *sc;
1064 int channel;
1065 pcireg_t interface;
1066 {
1067 struct pciide_channel *cp = &sc->pciide_channels[channel];
1068 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1069 cp->name = PCIIDE_CHANNEL_NAME(channel);
1070 cp->wdc_channel.channel = channel;
1071 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1072 cp->wdc_channel.ch_queue =
1073 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1074 if (cp->wdc_channel.ch_queue == NULL) {
1075 printf("%s %s channel: "
1076 "can't allocate memory for command queue",
1077 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1078 return 0;
1079 }
1080 printf("%s: %s channel %s to %s mode\n",
1081 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1082 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1083 "configured" : "wired",
1084 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1085 "native-PCI" : "compatibility");
1086 return 1;
1087 }
1088
1089 /* some common code used by several chip channel_map */
1090 void
1091 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1092 struct pci_attach_args *pa;
1093 struct pciide_channel *cp;
1094 pcireg_t interface;
1095 bus_size_t *cmdsizep, *ctlsizep;
1096 int (*pci_intr) __P((void *));
1097 {
1098 struct channel_softc *wdc_cp = &cp->wdc_channel;
1099
1100 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1101 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1102 pci_intr);
1103 else
1104 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1105 wdc_cp->channel, cmdsizep, ctlsizep);
1106
1107 if (cp->hw_ok == 0)
1108 return;
1109 wdc_cp->data32iot = wdc_cp->cmd_iot;
1110 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1111 wdcattach(wdc_cp);
1112 }
1113
1114 /*
1115 * Generic code to check whether a channel can be disabled. Returns 1
1116 * if the channel can be disabled, 0 if not
1117 */
1118 int
1119 pciide_chan_candisable(cp)
1120 struct pciide_channel *cp;
1121 {
1122 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1123 struct channel_softc *wdc_cp = &cp->wdc_channel;
1124
1125 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1126 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1127 printf("%s: disabling %s channel (no drives)\n",
1128 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1129 cp->hw_ok = 0;
1130 return 1;
1131 }
1132 return 0;
1133 }
1134
1135 /*
1136 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1137 * Set hw_ok=0 on failure
1138 */
1139 void
1140 pciide_map_compat_intr(pa, cp, compatchan, interface)
1141 struct pci_attach_args *pa;
1142 struct pciide_channel *cp;
1143 int compatchan, interface;
1144 {
1145 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1146 struct channel_softc *wdc_cp = &cp->wdc_channel;
1147
1148 if (cp->hw_ok == 0)
1149 return;
1150 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1151 return;
1152
1153 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1154 pa, compatchan, pciide_compat_intr, cp);
1155 if (cp->ih == NULL) {
1156 printf("%s: no compatibility interrupt for use by %s "
1157 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1158 cp->hw_ok = 0;
1159 }
1160 }
1161
1162 void
1163 pciide_print_modes(cp)
1164 struct pciide_channel *cp;
1165 {
1166 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1167 int drive;
1168 struct channel_softc *chp;
1169 struct ata_drive_datas *drvp;
1170
1171 chp = &cp->wdc_channel;
1172 for (drive = 0; drive < 2; drive++) {
1173 drvp = &chp->ch_drive[drive];
1174 if ((drvp->drive_flags & DRIVE) == 0)
1175 continue;
1176 printf("%s(%s:%d:%d): using PIO mode %d",
1177 drvp->drv_softc->dv_xname,
1178 sc->sc_wdcdev.sc_dev.dv_xname,
1179 chp->channel, drive, drvp->PIO_mode);
1180 if (drvp->drive_flags & DRIVE_DMA)
1181 printf(", DMA mode %d", drvp->DMA_mode);
1182 if (drvp->drive_flags & DRIVE_UDMA)
1183 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1184 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1185 printf(" (using DMA data transfers)");
1186 printf("\n");
1187 }
1188 }
1189
1190 void
1191 default_chip_map(sc, pa)
1192 struct pciide_softc *sc;
1193 struct pci_attach_args *pa;
1194 {
1195 struct pciide_channel *cp;
1196 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1197 pcireg_t csr;
1198 int channel, drive;
1199 struct ata_drive_datas *drvp;
1200 u_int8_t idedma_ctl;
1201 bus_size_t cmdsize, ctlsize;
1202 char *failreason;
1203
1204 if (pciide_chipen(sc, pa) == 0)
1205 return;
1206
1207 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1208 printf("%s: bus-master DMA support present",
1209 sc->sc_wdcdev.sc_dev.dv_xname);
1210 if (sc->sc_pp == &default_product_desc &&
1211 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1212 PCIIDE_OPTIONS_DMA) == 0) {
1213 printf(", but unused (no driver support)");
1214 sc->sc_dma_ok = 0;
1215 } else {
1216 pciide_mapreg_dma(sc, pa);
1217 if (sc->sc_dma_ok != 0)
1218 printf(", used without full driver "
1219 "support");
1220 }
1221 } else {
1222 printf("%s: hardware does not support DMA",
1223 sc->sc_wdcdev.sc_dev.dv_xname);
1224 sc->sc_dma_ok = 0;
1225 }
1226 printf("\n");
1227 if (sc->sc_dma_ok) {
1228 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1229 sc->sc_wdcdev.irqack = pciide_irqack;
1230 }
1231 sc->sc_wdcdev.PIO_cap = 0;
1232 sc->sc_wdcdev.DMA_cap = 0;
1233
1234 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1235 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1236 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1237
1238 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1239 cp = &sc->pciide_channels[channel];
1240 if (pciide_chansetup(sc, channel, interface) == 0)
1241 continue;
1242 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1243 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1244 &ctlsize, pciide_pci_intr);
1245 } else {
1246 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1247 channel, &cmdsize, &ctlsize);
1248 }
1249 if (cp->hw_ok == 0)
1250 continue;
1251 /*
1252 * Check to see if something appears to be there.
1253 */
1254 failreason = NULL;
1255 if (!wdcprobe(&cp->wdc_channel)) {
1256 failreason = "not responding; disabled or no drives?";
1257 goto next;
1258 }
1259 /*
1260 * Now, make sure it's actually attributable to this PCI IDE
1261 * channel by trying to access the channel again while the
1262 * PCI IDE controller's I/O space is disabled. (If the
1263 * channel no longer appears to be there, it belongs to
1264 * this controller.) YUCK!
1265 */
1266 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1267 PCI_COMMAND_STATUS_REG);
1268 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1269 csr & ~PCI_COMMAND_IO_ENABLE);
1270 if (wdcprobe(&cp->wdc_channel))
1271 failreason = "other hardware responding at addresses";
1272 pci_conf_write(sc->sc_pc, sc->sc_tag,
1273 PCI_COMMAND_STATUS_REG, csr);
1274 next:
1275 if (failreason) {
1276 printf("%s: %s channel ignored (%s)\n",
1277 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1278 failreason);
1279 cp->hw_ok = 0;
1280 bus_space_unmap(cp->wdc_channel.cmd_iot,
1281 cp->wdc_channel.cmd_ioh, cmdsize);
1282 bus_space_unmap(cp->wdc_channel.ctl_iot,
1283 cp->wdc_channel.ctl_ioh, ctlsize);
1284 } else {
1285 pciide_map_compat_intr(pa, cp, channel, interface);
1286 }
1287 if (cp->hw_ok) {
1288 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1289 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1290 wdcattach(&cp->wdc_channel);
1291 }
1292 }
1293
1294 if (sc->sc_dma_ok == 0)
1295 return;
1296
1297 /* Allocate DMA maps */
1298 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1299 idedma_ctl = 0;
1300 cp = &sc->pciide_channels[channel];
1301 for (drive = 0; drive < 2; drive++) {
1302 drvp = &cp->wdc_channel.ch_drive[drive];
1303 /* If no drive, skip */
1304 if ((drvp->drive_flags & DRIVE) == 0)
1305 continue;
1306 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1307 continue;
1308 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1309 /* Abort DMA setup */
1310 printf("%s:%d:%d: can't allocate DMA maps, "
1311 "using PIO transfers\n",
1312 sc->sc_wdcdev.sc_dev.dv_xname,
1313 channel, drive);
1314 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1315 }
1316 printf("%s:%d:%d: using DMA data transfers\n",
1317 sc->sc_wdcdev.sc_dev.dv_xname,
1318 channel, drive);
1319 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1320 }
1321 if (idedma_ctl != 0) {
1322 /* Add software bits in status register */
1323 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1324 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1325 idedma_ctl);
1326 }
1327 }
1328 }
1329
1330 void
1331 piix_chip_map(sc, pa)
1332 struct pciide_softc *sc;
1333 struct pci_attach_args *pa;
1334 {
1335 struct pciide_channel *cp;
1336 int channel;
1337 u_int32_t idetim;
1338 bus_size_t cmdsize, ctlsize;
1339
1340 if (pciide_chipen(sc, pa) == 0)
1341 return;
1342
1343 printf("%s: bus-master DMA support present",
1344 sc->sc_wdcdev.sc_dev.dv_xname);
1345 pciide_mapreg_dma(sc, pa);
1346 printf("\n");
1347 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1348 WDC_CAPABILITY_MODE;
1349 if (sc->sc_dma_ok) {
1350 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1351 sc->sc_wdcdev.irqack = pciide_irqack;
1352 switch(sc->sc_pp->ide_product) {
1353 case PCI_PRODUCT_INTEL_82371AB_IDE:
1354 case PCI_PRODUCT_INTEL_82801AA_IDE:
1355 case PCI_PRODUCT_INTEL_82801AB_IDE:
1356 case PCI_PRODUCT_INTEL_82801BA_IDE:
1357 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1358 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1359 }
1360 }
1361 sc->sc_wdcdev.PIO_cap = 4;
1362 sc->sc_wdcdev.DMA_cap = 2;
1363 switch(sc->sc_pp->ide_product) {
1364 case PCI_PRODUCT_INTEL_82801AA_IDE:
1365 sc->sc_wdcdev.UDMA_cap = 4;
1366 break;
1367 case PCI_PRODUCT_INTEL_82801BA_IDE:
1368 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1369 sc->sc_wdcdev.UDMA_cap = 5;
1370 break;
1371 default:
1372 sc->sc_wdcdev.UDMA_cap = 2;
1373 }
1374 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1375 sc->sc_wdcdev.set_modes = piix_setup_channel;
1376 else
1377 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1378 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1379 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1380
1381 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1382 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1383 DEBUG_PROBE);
1384 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1385 WDCDEBUG_PRINT((", sidetim=0x%x",
1386 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1387 DEBUG_PROBE);
1388 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1389 WDCDEBUG_PRINT((", udamreg 0x%x",
1390 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1391 DEBUG_PROBE);
1392 }
1393 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1394 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1395 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1396 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1397 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1398 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1399 DEBUG_PROBE);
1400 }
1401
1402 }
1403 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1404
1405 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1406 cp = &sc->pciide_channels[channel];
1407 /* PIIX is compat-only */
1408 if (pciide_chansetup(sc, channel, 0) == 0)
1409 continue;
1410 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1411 if ((PIIX_IDETIM_READ(idetim, channel) &
1412 PIIX_IDETIM_IDE) == 0) {
1413 printf("%s: %s channel ignored (disabled)\n",
1414 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1415 continue;
1416 }
1417 /* PIIX controllers are compat-only pciide devices */
1418 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1419 if (cp->hw_ok == 0)
1420 continue;
1421 if (pciide_chan_candisable(cp)) {
1422 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1423 channel);
1424 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1425 idetim);
1426 }
1427 pciide_map_compat_intr(pa, cp, channel, 0);
1428 if (cp->hw_ok == 0)
1429 continue;
1430 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1431 }
1432
1433 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1434 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1435 DEBUG_PROBE);
1436 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1437 WDCDEBUG_PRINT((", sidetim=0x%x",
1438 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1439 DEBUG_PROBE);
1440 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1441 WDCDEBUG_PRINT((", udamreg 0x%x",
1442 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1443 DEBUG_PROBE);
1444 }
1445 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1446 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1447 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1448 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1449 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1450 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1451 DEBUG_PROBE);
1452 }
1453 }
1454 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1455 }
1456
1457 void
1458 piix_setup_channel(chp)
1459 struct channel_softc *chp;
1460 {
1461 u_int8_t mode[2], drive;
1462 u_int32_t oidetim, idetim, idedma_ctl;
1463 struct pciide_channel *cp = (struct pciide_channel*)chp;
1464 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1465 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1466
1467 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1468 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1469 idedma_ctl = 0;
1470
1471 /* set up new idetim: Enable IDE registers decode */
1472 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1473 chp->channel);
1474
1475 /* setup DMA */
1476 pciide_channel_dma_setup(cp);
1477
1478 /*
1479 * Here we have to mess with the drives' modes: the PIIX can't have
1480 * different timings for master and slave drives.
1481 * We need to find the best combination.
1482 */
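	/*
	 * For example, if only drive 0 uses DMA and drive 1's PIO timings
	 * don't match drive 0's DMA timings, drive 1 is dropped back to
	 * PIO mode 0.
	 */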
1483
1484 /* If both drives support DMA, take the lower mode */
1485 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1486 (drvp[1].drive_flags & DRIVE_DMA)) {
1487 mode[0] = mode[1] =
1488 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1489 drvp[0].DMA_mode = mode[0];
1490 drvp[1].DMA_mode = mode[1];
1491 goto ok;
1492 }
1493 /*
1494 * If only one drive supports DMA, use its mode, and
1495 * put the other one in PIO mode 0 if its mode is not compatible
1496 */
1497 if (drvp[0].drive_flags & DRIVE_DMA) {
1498 mode[0] = drvp[0].DMA_mode;
1499 mode[1] = drvp[1].PIO_mode;
1500 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1501 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1502 mode[1] = drvp[1].PIO_mode = 0;
1503 goto ok;
1504 }
1505 if (drvp[1].drive_flags & DRIVE_DMA) {
1506 mode[1] = drvp[1].DMA_mode;
1507 mode[0] = drvp[0].PIO_mode;
1508 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1509 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1510 mode[0] = drvp[0].PIO_mode = 0;
1511 goto ok;
1512 }
1513 /*
1514 * If neither drive is using DMA, take the lower mode, unless
1515 * one of them is below PIO mode 2
1516 */
1517 if (drvp[0].PIO_mode < 2) {
1518 mode[0] = drvp[0].PIO_mode = 0;
1519 mode[1] = drvp[1].PIO_mode;
1520 } else if (drvp[1].PIO_mode < 2) {
1521 mode[1] = drvp[1].PIO_mode = 0;
1522 mode[0] = drvp[0].PIO_mode;
1523 } else {
1524 mode[0] = mode[1] =
1525 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1526 drvp[0].PIO_mode = mode[0];
1527 drvp[1].PIO_mode = mode[1];
1528 }
1529 ok: /* The modes are set up */
1530 for (drive = 0; drive < 2; drive++) {
1531 if (drvp[drive].drive_flags & DRIVE_DMA) {
1532 idetim |= piix_setup_idetim_timings(
1533 mode[drive], 1, chp->channel);
1534 goto end;
1535 }
1536 }
1537 /* If we get here, neither drive is using DMA */
1538 if (mode[0] >= 2)
1539 idetim |= piix_setup_idetim_timings(
1540 mode[0], 0, chp->channel);
1541 else
1542 idetim |= piix_setup_idetim_timings(
1543 mode[1], 0, chp->channel);
1544 end: /*
1545 * The timing mode is now set up in the controller. Enable
1546 * it per drive.
1547 */
1548 for (drive = 0; drive < 2; drive++) {
1549 /* If no drive, skip */
1550 if ((drvp[drive].drive_flags & DRIVE) == 0)
1551 continue;
1552 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1553 if (drvp[drive].drive_flags & DRIVE_DMA)
1554 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1555 }
1556 if (idedma_ctl != 0) {
1557 /* Add software bits in status register */
1558 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1559 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1560 idedma_ctl);
1561 }
1562 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1563 pciide_print_modes(cp);
1564 }
1565
1566 void
1567 piix3_4_setup_channel(chp)
1568 struct channel_softc *chp;
1569 {
1570 struct ata_drive_datas *drvp;
1571 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1572 struct pciide_channel *cp = (struct pciide_channel*)chp;
1573 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1574 int drive;
1575 int channel = chp->channel;
1576
1577 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1578 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1579 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1580 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1581 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1582 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1583 PIIX_SIDETIM_RTC_MASK(channel));
1584
1585 idedma_ctl = 0;
1586 /* If channel disabled, no need to go further */
1587 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1588 return;
1589 /* set up new idetim: Enable IDE registers decode */
1590 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1591
1592 /* setup DMA if needed */
1593 pciide_channel_dma_setup(cp);
1594
1595 for (drive = 0; drive < 2; drive++) {
1596 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1597 PIIX_UDMATIM_SET(0x3, channel, drive));
1598 drvp = &chp->ch_drive[drive];
1599 /* If no drive, skip */
1600 if ((drvp->drive_flags & DRIVE) == 0)
1601 continue;
1602 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1603 (drvp->drive_flags & DRIVE_UDMA) == 0))
1604 goto pio;
1605
1606 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1607 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1608 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1609 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1610 ideconf |= PIIX_CONFIG_PINGPONG;
1611 }
1612 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1613 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1614 /* setup Ultra/100 */
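			/*
			 * PIIX_CONFIG_CR is the 80-conductor cable reporting
			 * bit; without it, cap the drive at UDMA mode 2
			 * (Ultra/33).
			 */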
1615 if (drvp->UDMA_mode > 2 &&
1616 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1617 drvp->UDMA_mode = 2;
1618 if (drvp->UDMA_mode > 4) {
1619 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1620 } else {
1621 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1622 if (drvp->UDMA_mode > 2) {
1623 ideconf |= PIIX_CONFIG_UDMA66(channel,
1624 drive);
1625 } else {
1626 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1627 drive);
1628 }
1629 }
1630 }
1631 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1632 /* setup Ultra/66 */
1633 if (drvp->UDMA_mode > 2 &&
1634 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1635 drvp->UDMA_mode = 2;
1636 if (drvp->UDMA_mode > 2)
1637 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1638 else
1639 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1640 }
1641 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1642 (drvp->drive_flags & DRIVE_UDMA)) {
1643 /* use Ultra/DMA */
1644 drvp->drive_flags &= ~DRIVE_DMA;
1645 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1646 udmareg |= PIIX_UDMATIM_SET(
1647 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1648 } else {
1649 /* use Multiword DMA */
1650 drvp->drive_flags &= ~DRIVE_UDMA;
1651 if (drive == 0) {
1652 idetim |= piix_setup_idetim_timings(
1653 drvp->DMA_mode, 1, channel);
1654 } else {
1655 sidetim |= piix_setup_sidetim_timings(
1656 drvp->DMA_mode, 1, channel);
1657 idetim = PIIX_IDETIM_SET(idetim,
1658 PIIX_IDETIM_SITRE, channel);
1659 }
1660 }
1661 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1662
1663 pio: /* use PIO mode */
1664 idetim |= piix_setup_idetim_drvs(drvp);
1665 if (drive == 0) {
1666 idetim |= piix_setup_idetim_timings(
1667 drvp->PIO_mode, 0, channel);
1668 } else {
1669 sidetim |= piix_setup_sidetim_timings(
1670 drvp->PIO_mode, 0, channel);
1671 idetim = PIIX_IDETIM_SET(idetim,
1672 PIIX_IDETIM_SITRE, channel);
1673 }
1674 }
1675 if (idedma_ctl != 0) {
1676 /* Add software bits in status register */
1677 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1678 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1679 idedma_ctl);
1680 }
1681 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1682 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1683 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1684 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1685 pciide_print_modes(cp);
1686 }
1687
1688
1689 /* setup ISP and RTC fields, based on mode */
1690 static u_int32_t
1691 piix_setup_idetim_timings(mode, dma, channel)
1692 u_int8_t mode;
1693 u_int8_t dma;
1694 u_int8_t channel;
1695 {
1696
1697 if (dma)
1698 return PIIX_IDETIM_SET(0,
1699 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1700 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1701 channel);
1702 else
1703 return PIIX_IDETIM_SET(0,
1704 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1705 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1706 channel);
1707 }
1708
1709 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1710 static u_int32_t
1711 piix_setup_idetim_drvs(drvp)
1712 struct ata_drive_datas *drvp;
1713 {
1714 u_int32_t ret = 0;
1715 struct channel_softc *chp = drvp->chnl_softc;
1716 u_int8_t channel = chp->channel;
1717 u_int8_t drive = drvp->drive;
1718
1719 /*
1720 * If the drive is using UDMA, the timing setups are independent,
1721 * so just check DMA and PIO here.
1722 */
1723 if (drvp->drive_flags & DRIVE_DMA) {
1724 /* if mode = DMA mode 0, use compatible timings */
1725 if ((drvp->drive_flags & DRIVE_DMA) &&
1726 drvp->DMA_mode == 0) {
1727 drvp->PIO_mode = 0;
1728 return ret;
1729 }
1730 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1731 /*
1732 * If the PIO and DMA timings are the same, use fast timings for
1733 * PIO too, else use compat timings.
1734 */
1735 if ((piix_isp_pio[drvp->PIO_mode] !=
1736 piix_isp_dma[drvp->DMA_mode]) ||
1737 (piix_rtc_pio[drvp->PIO_mode] !=
1738 piix_rtc_dma[drvp->DMA_mode]))
1739 drvp->PIO_mode = 0;
1740 /* if PIO mode <= 2, use compat timings for PIO */
1741 if (drvp->PIO_mode <= 2) {
1742 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1743 channel);
1744 return ret;
1745 }
1746 }
1747
1748 /*
1749 * Now setup PIO modes. If mode < 2, use compat timings.
1750 * Else enable fast timings. Enable IORDY and prefetch/post
1751 * if PIO mode >= 3.
1752 */
1753
1754 if (drvp->PIO_mode < 2)
1755 return ret;
1756
1757 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1758 if (drvp->PIO_mode >= 3) {
1759 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1760 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1761 }
1762 return ret;
1763 }
1764
1765 /* setup values in SIDETIM registers, based on mode */
1766 static u_int32_t
1767 piix_setup_sidetim_timings(mode, dma, channel)
1768 u_int8_t mode;
1769 u_int8_t dma;
1770 u_int8_t channel;
1771 {
1772 if (dma)
1773 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1774 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1775 else
1776 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1777 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1778 }
1779
1780 void
1781 amd756_chip_map(sc, pa)
1782 struct pciide_softc *sc;
1783 struct pci_attach_args *pa;
1784 {
1785 struct pciide_channel *cp;
1786 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1787 int channel;
1788 pcireg_t chanenable;
1789 bus_size_t cmdsize, ctlsize;
1790
1791 if (pciide_chipen(sc, pa) == 0)
1792 return;
1793 printf("%s: bus-master DMA support present",
1794 sc->sc_wdcdev.sc_dev.dv_xname);
1795 pciide_mapreg_dma(sc, pa);
1796 printf("\n");
1797 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1798 WDC_CAPABILITY_MODE;
1799 if (sc->sc_dma_ok) {
1800 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1801 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1802 sc->sc_wdcdev.irqack = pciide_irqack;
1803 }
1804 sc->sc_wdcdev.PIO_cap = 4;
1805 sc->sc_wdcdev.DMA_cap = 2;
1806 sc->sc_wdcdev.UDMA_cap = 4;
1807 sc->sc_wdcdev.set_modes = amd756_setup_channel;
1808 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1809 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1810 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1811
1812 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1813 DEBUG_PROBE);
1814 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1815 cp = &sc->pciide_channels[channel];
1816 if (pciide_chansetup(sc, channel, interface) == 0)
1817 continue;
1818
1819 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1820 printf("%s: %s channel ignored (disabled)\n",
1821 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1822 continue;
1823 }
1824 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1825 pciide_pci_intr);
1826
1827 if (pciide_chan_candisable(cp))
1828 chanenable &= ~AMD756_CHAN_EN(channel);
1829 pciide_map_compat_intr(pa, cp, channel, interface);
1830 if (cp->hw_ok == 0)
1831 continue;
1832
1833 amd756_setup_channel(&cp->wdc_channel);
1834 }
1835 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1836 chanenable);
1837 return;
1838 }
1839
1840 void
1841 amd756_setup_channel(chp)
1842 struct channel_softc *chp;
1843 {
1844 u_int32_t udmatim_reg, datatim_reg;
1845 u_int8_t idedma_ctl;
1846 int mode, drive;
1847 struct ata_drive_datas *drvp;
1848 struct pciide_channel *cp = (struct pciide_channel*)chp;
1849 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1850 #ifndef PCIIDE_AMD756_ENABLEDMA
1851 int rev = PCI_REVISION(
1852 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1853 #endif
1854
1855 idedma_ctl = 0;
1856 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1857 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1858 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1859 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1860
1861 /* setup DMA if needed */
1862 pciide_channel_dma_setup(cp);
1863
1864 for (drive = 0; drive < 2; drive++) {
1865 drvp = &chp->ch_drive[drive];
1866 /* If no drive, skip */
1867 if ((drvp->drive_flags & DRIVE) == 0)
1868 continue;
1869 /* add timing values, setup DMA if needed */
1870 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1871 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1872 mode = drvp->PIO_mode;
1873 goto pio;
1874 }
1875 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1876 (drvp->drive_flags & DRIVE_UDMA)) {
1877 /* use Ultra/DMA */
1878 drvp->drive_flags &= ~DRIVE_DMA;
1879 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1880 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1881 AMD756_UDMA_TIME(chp->channel, drive,
1882 amd756_udma_tim[drvp->UDMA_mode]);
1883 /* can use PIO timings, MW DMA unused */
1884 mode = drvp->PIO_mode;
1885 } else {
1886 /* use Multiword DMA, but only if revision is OK */
1887 drvp->drive_flags &= ~DRIVE_UDMA;
1888 #ifndef PCIIDE_AMD756_ENABLEDMA
1889 			/*
1890 			 * The workaround (disabling multi-word DMA on these
1891 			 * chip revisions) doesn't seem necessary with all
1892 			 * drives, so it can be turned off by defining
1893 			 * PCIIDE_AMD756_ENABLEDMA. The chip bug causes a
1894 			 * hard hang if triggered.
1895 			 */
1895 if (AMD756_CHIPREV_DISABLEDMA(rev)) {
1896 printf("%s:%d:%d: multi-word DMA disabled due "
1897 "to chip revision\n",
1898 sc->sc_wdcdev.sc_dev.dv_xname,
1899 chp->channel, drive);
1900 mode = drvp->PIO_mode;
1901 drvp->drive_flags &= ~DRIVE_DMA;
1902 goto pio;
1903 }
1904 #endif
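			/*
			 * A single timing register per drive covers both PIO
			 * and multi-word DMA, so pick one mode that suits
			 * both (DMA mode N pairs with PIO mode N + 2).
			 */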
1905 /* mode = min(pio, dma+2) */
1906 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1907 mode = drvp->PIO_mode;
1908 else
1909 mode = drvp->DMA_mode + 2;
1910 }
1911 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1912
1913 pio: /* setup PIO mode */
1914 if (mode <= 2) {
1915 drvp->DMA_mode = 0;
1916 drvp->PIO_mode = 0;
1917 mode = 0;
1918 } else {
1919 drvp->PIO_mode = mode;
1920 drvp->DMA_mode = mode - 2;
1921 }
1922 datatim_reg |=
1923 AMD756_DATATIM_PULSE(chp->channel, drive,
1924 amd756_pio_set[mode]) |
1925 AMD756_DATATIM_RECOV(chp->channel, drive,
1926 amd756_pio_rec[mode]);
1927 }
1928 if (idedma_ctl != 0) {
1929 /* Add software bits in status register */
1930 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1931 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1932 idedma_ctl);
1933 }
1934 pciide_print_modes(cp);
1935 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1936 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1937 }
1938
1939 void
1940 apollo_chip_map(sc, pa)
1941 struct pciide_softc *sc;
1942 struct pci_attach_args *pa;
1943 {
1944 struct pciide_channel *cp;
1945 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1946 int rev = PCI_REVISION(pa->pa_class);
1947 int channel;
1948 u_int32_t ideconf, udma_conf, old_udma_conf;
1949 bus_size_t cmdsize, ctlsize;
1950
1951 if (pciide_chipen(sc, pa) == 0)
1952 return;
1953 printf("%s: bus-master DMA support present",
1954 sc->sc_wdcdev.sc_dev.dv_xname);
1955 pciide_mapreg_dma(sc, pa);
1956 printf("\n");
1957 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1958 WDC_CAPABILITY_MODE;
1959 if (sc->sc_dma_ok) {
1960 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1961 sc->sc_wdcdev.irqack = pciide_irqack;
1962 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE
1963 && rev >= 6)
1964 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1965 }
1966 sc->sc_wdcdev.PIO_cap = 4;
1967 sc->sc_wdcdev.DMA_cap = 2;
1968 sc->sc_wdcdev.UDMA_cap = 2;
1969 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1970 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1971 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1972
1973 old_udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1974 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1975 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1976 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1977 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1978 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1979 old_udma_conf),
1980 DEBUG_PROBE);
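	/*
	 * Probe for UDMA support: write the UDMA enable/clock bits for
	 * drive 0:0 and read them back. If the bits don't stick the chip
	 * can't do UDMA; the CLK66 bit tells Ultra/66 from Ultra/33.
	 */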
1981 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA,
1982 	    old_udma_conf | (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1983 	    APO_UDMA_EN_MTH(0, 0) |
1984 	    APO_UDMA_CLK66(0)));
1985 udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1986 WDCDEBUG_PRINT(("apollo_chip_map: APO_UDMA now 0x%x\n", udma_conf),
1987 DEBUG_PROBE);
1988 if ((udma_conf & (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1989 APO_UDMA_EN_MTH(0, 0))) ==
1990 (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1991 APO_UDMA_EN_MTH(0, 0))) {
1992 if ((udma_conf & APO_UDMA_CLK66(0)) ==
1993 APO_UDMA_CLK66(0)) {
1994 printf("%s: Ultra/66 capable\n",
1995 sc->sc_wdcdev.sc_dev.dv_xname);
1996 sc->sc_wdcdev.UDMA_cap = 4;
1997 } else {
1998 printf("%s: Ultra/33 capable\n",
1999 sc->sc_wdcdev.sc_dev.dv_xname);
2000 sc->sc_wdcdev.UDMA_cap = 2;
2001 }
2002 } else {
2003 sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_UDMA;
2004 }
2005 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, old_udma_conf);
2006
2007 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2008 cp = &sc->pciide_channels[channel];
2009 if (pciide_chansetup(sc, channel, interface) == 0)
2010 continue;
2011
2012 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2013 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2014 printf("%s: %s channel ignored (disabled)\n",
2015 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2016 continue;
2017 }
2018 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2019 pciide_pci_intr);
2020 if (cp->hw_ok == 0)
2021 continue;
2022 if (pciide_chan_candisable(cp)) {
2023 ideconf &= ~APO_IDECONF_EN(channel);
2024 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2025 ideconf);
2026 }
2027 pciide_map_compat_intr(pa, cp, channel, interface);
2028
2029 if (cp->hw_ok == 0)
2030 continue;
2031 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2032 }
2033 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2034 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2035 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2036 }
2037
2038 void
2039 apollo_setup_channel(chp)
2040 struct channel_softc *chp;
2041 {
2042 u_int32_t udmatim_reg, datatim_reg;
2043 u_int8_t idedma_ctl;
2044 int mode, drive;
2045 struct ata_drive_datas *drvp;
2046 struct pciide_channel *cp = (struct pciide_channel*)chp;
2047 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2048
2049 idedma_ctl = 0;
2050 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2051 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2052 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2053 udmatim_reg &= ~AP0_UDMA_MASK(chp->channel);
2054
2055 /* setup DMA if needed */
2056 pciide_channel_dma_setup(cp);
2057
2058 /*
2059 * We can't mix Ultra/33 and Ultra/66 on the same channel, so
2060 * downgrade to Ultra/33 if needed
2061 */
2062 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2063 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
2064 /* both drives UDMA */
2065 if (chp->ch_drive[0].UDMA_mode > 2 &&
2066 chp->ch_drive[1].UDMA_mode <= 2) {
2067 /* drive 0 Ultra/66, drive 1 Ultra/33 */
2068 chp->ch_drive[0].UDMA_mode = 2;
2069 } else if (chp->ch_drive[1].UDMA_mode > 2 &&
2070 chp->ch_drive[0].UDMA_mode <= 2) {
2071 /* drive 1 Ultra/66, drive 0 Ultra/33 */
2072 chp->ch_drive[1].UDMA_mode = 2;
2073 }
2074 }
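	/*
	 * The 66MHz UDMA clock (APO_UDMA_CLK66) is selected per channel,
	 * so both drives on a channel must run from the same clock.
	 */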
2075
2076 for (drive = 0; drive < 2; drive++) {
2077 drvp = &chp->ch_drive[drive];
2078 /* If no drive, skip */
2079 if ((drvp->drive_flags & DRIVE) == 0)
2080 continue;
2081 /* add timing values, setup DMA if needed */
2082 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2083 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2084 mode = drvp->PIO_mode;
2085 goto pio;
2086 }
2087 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2088 (drvp->drive_flags & DRIVE_UDMA)) {
2089 /* use Ultra/DMA */
2090 drvp->drive_flags &= ~DRIVE_DMA;
2091 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2092 APO_UDMA_EN_MTH(chp->channel, drive) |
2093 APO_UDMA_TIME(chp->channel, drive,
2094 apollo_udma_tim[drvp->UDMA_mode]);
2095 if (drvp->UDMA_mode > 2)
2096 udmatim_reg |=
2097 APO_UDMA_CLK66(chp->channel);
2098 /* can use PIO timings, MW DMA unused */
2099 mode = drvp->PIO_mode;
2100 } else {
2101 /* use Multiword DMA */
2102 drvp->drive_flags &= ~DRIVE_UDMA;
2103 /* mode = min(pio, dma+2) */
2104 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2105 mode = drvp->PIO_mode;
2106 else
2107 mode = drvp->DMA_mode + 2;
2108 }
2109 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2110
2111 pio: /* setup PIO mode */
2112 if (mode <= 2) {
2113 drvp->DMA_mode = 0;
2114 drvp->PIO_mode = 0;
2115 mode = 0;
2116 } else {
2117 drvp->PIO_mode = mode;
2118 drvp->DMA_mode = mode - 2;
2119 }
2120 datatim_reg |=
2121 APO_DATATIM_PULSE(chp->channel, drive,
2122 apollo_pio_set[mode]) |
2123 APO_DATATIM_RECOV(chp->channel, drive,
2124 apollo_pio_rec[mode]);
2125 }
2126 if (idedma_ctl != 0) {
2127 /* Add software bits in status register */
2128 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2129 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2130 idedma_ctl);
2131 }
2132 pciide_print_modes(cp);
2133 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2134 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2135 }
2136
2137 void
2138 cmd_channel_map(pa, sc, channel)
2139 struct pci_attach_args *pa;
2140 struct pciide_softc *sc;
2141 int channel;
2142 {
2143 struct pciide_channel *cp = &sc->pciide_channels[channel];
2144 bus_size_t cmdsize, ctlsize;
2145 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2146 int interface;
2147
2148 	/*
2149 	 * The 0648/0649 can be told to identify as a RAID controller.
2150 	 * In this case, we have to fake the interface value.
2151 	 */
2152 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2153 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2154 PCIIDE_INTERFACE_SETTABLE(1);
2155 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2156 CMD_CONF_DSA1)
2157 interface |= PCIIDE_INTERFACE_PCI(0) |
2158 PCIIDE_INTERFACE_PCI(1);
2159 } else {
2160 interface = PCI_INTERFACE(pa->pa_class);
2161 }
2162
2163 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2164 cp->name = PCIIDE_CHANNEL_NAME(channel);
2165 cp->wdc_channel.channel = channel;
2166 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2167
2168 if (channel > 0) {
2169 cp->wdc_channel.ch_queue =
2170 sc->pciide_channels[0].wdc_channel.ch_queue;
2171 } else {
2172 cp->wdc_channel.ch_queue =
2173 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2174 }
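	/*
	 * Channels other than the first share channel 0's command queue,
	 * so transfers on the two channels are serialized.
	 */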
2175 if (cp->wdc_channel.ch_queue == NULL) {
2176 		printf("%s: %s channel: "
2177 		    "can't allocate memory for command queue\n",
2178 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2179 return;
2180 }
2181
2182 printf("%s: %s channel %s to %s mode\n",
2183 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2184 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2185 "configured" : "wired",
2186 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2187 "native-PCI" : "compatibility");
2188
2189 /*
2190 * with a CMD PCI64x, if we get here, the first channel is enabled:
2191 * there's no way to disable the first channel without disabling
2192 * the whole device
2193 */
2194 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2195 printf("%s: %s channel ignored (disabled)\n",
2196 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2197 return;
2198 }
2199
2200 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2201 if (cp->hw_ok == 0)
2202 return;
2203 if (channel == 1) {
2204 if (pciide_chan_candisable(cp)) {
2205 ctrl &= ~CMD_CTRL_2PORT;
2206 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2207 CMD_CTRL, ctrl);
2208 }
2209 }
2210 pciide_map_compat_intr(pa, cp, channel, interface);
2211 }
2212
2213 int
2214 cmd_pci_intr(arg)
2215 void *arg;
2216 {
2217 struct pciide_softc *sc = arg;
2218 struct pciide_channel *cp;
2219 struct channel_softc *wdc_cp;
2220 int i, rv, crv;
2221 u_int32_t priirq, secirq;
2222
2223 rv = 0;
2224 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2225 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
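	/*
	 * CMD_CONF holds the primary channel's interrupt status and
	 * CMD_ARTTIM23 the secondary's; only call wdcintr() for channels
	 * whose status bit is set.
	 */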
2226 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2227 cp = &sc->pciide_channels[i];
2228 wdc_cp = &cp->wdc_channel;
2229 		/* If it's a compat channel, skip it. */
2230 if (cp->compat)
2231 continue;
2232 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2233 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2234 crv = wdcintr(wdc_cp);
2235 if (crv == 0)
2236 printf("%s:%d: bogus intr\n",
2237 sc->sc_wdcdev.sc_dev.dv_xname, i);
2238 else
2239 rv = 1;
2240 }
2241 }
2242 return rv;
2243 }
2244
2245 void
2246 cmd_chip_map(sc, pa)
2247 struct pciide_softc *sc;
2248 struct pci_attach_args *pa;
2249 {
2250 int channel;
2251
2252 	/*
2253 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE and of the
2254 	 * base address registers can be disabled at the hardware level.
2255 	 * In this case, the device is wired in compat mode and its first
2256 	 * channel is always enabled, but we can't rely on
2257 	 * PCI_COMMAND_IO_ENABLE.
2258 	 * In fact, it seems that the first channel of the CMD PCI0640
2259 	 * can't be disabled.
2260 	 */
2261
2262 #ifdef PCIIDE_CMD064x_DISABLE
2263 if (pciide_chipen(sc, pa) == 0)
2264 return;
2265 #endif
2266
2267 printf("%s: hardware does not support DMA\n",
2268 sc->sc_wdcdev.sc_dev.dv_xname);
2269 sc->sc_dma_ok = 0;
2270
2271 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2272 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2273 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2274
2275 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2276 cmd_channel_map(pa, sc, channel);
2277 }
2278 }
2279
2280 void
2281 cmd0643_9_chip_map(sc, pa)
2282 struct pciide_softc *sc;
2283 struct pci_attach_args *pa;
2284 {
2285 struct pciide_channel *cp;
2286 int channel;
2287 int rev = PCI_REVISION(
2288 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2289
2290 	/*
2291 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE and of the
2292 	 * base address registers can be disabled at the hardware level.
2293 	 * In this case, the device is wired in compat mode and its first
2294 	 * channel is always enabled, but we can't rely on
2295 	 * PCI_COMMAND_IO_ENABLE.
2296 	 * In fact, it seems that the first channel of the CMD PCI0640
2297 	 * can't be disabled.
2298 	 */
2299
2300 #ifdef PCIIDE_CMD064x_DISABLE
2301 if (pciide_chipen(sc, pa) == 0)
2302 return;
2303 #endif
2304 printf("%s: bus-master DMA support present",
2305 sc->sc_wdcdev.sc_dev.dv_xname);
2306 pciide_mapreg_dma(sc, pa);
2307 printf("\n");
2308 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2309 WDC_CAPABILITY_MODE;
2310 if (sc->sc_dma_ok) {
2311 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2312 switch (sc->sc_pp->ide_product) {
2313 case PCI_PRODUCT_CMDTECH_649:
2314 case PCI_PRODUCT_CMDTECH_648:
2315 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2316 sc->sc_wdcdev.UDMA_cap = 4;
2317 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2318 break;
2319 case PCI_PRODUCT_CMDTECH_646:
2320 if (rev >= CMD0646U2_REV) {
2321 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2322 sc->sc_wdcdev.UDMA_cap = 2;
2323 } else if (rev >= CMD0646U_REV) {
2324 /*
2325 * Linux's driver claims that the 646U is broken
2326 * with UDMA. Only enable it if we know what we're
2327 * doing
2328 */
2329 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2330 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2331 sc->sc_wdcdev.UDMA_cap = 2;
2332 #endif
2333 			/* explicitly disable UDMA */
2334 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2335 CMD_UDMATIM(0), 0);
2336 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2337 CMD_UDMATIM(1), 0);
2338 }
2339 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2340 break;
2341 default:
2342 sc->sc_wdcdev.irqack = pciide_irqack;
2343 }
2344 }
2345
2346 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2347 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2348 sc->sc_wdcdev.PIO_cap = 4;
2349 sc->sc_wdcdev.DMA_cap = 2;
2350 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2351
2352 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2353 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2354 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2355 DEBUG_PROBE);
2356
2357 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2358 cp = &sc->pciide_channels[channel];
2359 cmd_channel_map(pa, sc, channel);
2360 if (cp->hw_ok == 0)
2361 continue;
2362 cmd0643_9_setup_channel(&cp->wdc_channel);
2363 }
2364 	/*
2365 	 * Note: this also makes sure we clear the IRQ disable and reset
2366 	 * bits.
2367 	 */
2368 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2369 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2370 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2371 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2372 DEBUG_PROBE);
2373 }
2374
2375 void
2376 cmd0643_9_setup_channel(chp)
2377 struct channel_softc *chp;
2378 {
2379 struct ata_drive_datas *drvp;
2380 u_int8_t tim;
2381 u_int32_t idedma_ctl, udma_reg;
2382 int drive;
2383 struct pciide_channel *cp = (struct pciide_channel*)chp;
2384 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2385
2386 idedma_ctl = 0;
2387 /* setup DMA if needed */
2388 pciide_channel_dma_setup(cp);
2389
2390 for (drive = 0; drive < 2; drive++) {
2391 drvp = &chp->ch_drive[drive];
2392 /* If no drive, skip */
2393 if ((drvp->drive_flags & DRIVE) == 0)
2394 continue;
2395 /* add timing values, setup DMA if needed */
2396 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2397 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2398 if (drvp->drive_flags & DRIVE_UDMA) {
2399 /* UltraDMA on a 646U2, 0648 or 0649 */
2400 drvp->drive_flags &= ~DRIVE_DMA;
2401 udma_reg = pciide_pci_read(sc->sc_pc,
2402 sc->sc_tag, CMD_UDMATIM(chp->channel));
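				/*
				 * Modes above Ultra/33 require the channel's
				 * CMD_BICSR_80 bit (80-wire cable detect);
				 * otherwise cap the drive at UDMA mode 2.
				 */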
2403 if (drvp->UDMA_mode > 2 &&
2404 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2405 CMD_BICSR) &
2406 CMD_BICSR_80(chp->channel)) == 0)
2407 drvp->UDMA_mode = 2;
2408 if (drvp->UDMA_mode > 2)
2409 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2410 else if (sc->sc_wdcdev.UDMA_cap > 2)
2411 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2412 udma_reg |= CMD_UDMATIM_UDMA(drive);
2413 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2414 CMD_UDMATIM_TIM_OFF(drive));
2415 udma_reg |=
2416 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2417 CMD_UDMATIM_TIM_OFF(drive));
2418 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2419 CMD_UDMATIM(chp->channel), udma_reg);
2420 } else {
2421 				/*
2422 				 * Use Multiword DMA.
2423 				 * Timings will be used for both PIO and DMA,
2424 				 * so adjust the DMA mode if needed. If we
2425 				 * have a 0646U2/8/9, turn off UDMA.
2426 				 */
2427 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2428 udma_reg = pciide_pci_read(sc->sc_pc,
2429 sc->sc_tag,
2430 CMD_UDMATIM(chp->channel));
2431 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2432 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2433 CMD_UDMATIM(chp->channel),
2434 udma_reg);
2435 }
2436 if (drvp->PIO_mode >= 3 &&
2437 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2438 drvp->DMA_mode = drvp->PIO_mode - 2;
2439 }
2440 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2441 }
2442 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2443 }
2444 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2445 CMD_DATA_TIM(chp->channel, drive), tim);
2446 }
2447 if (idedma_ctl != 0) {
2448 /* Add software bits in status register */
2449 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2450 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2451 idedma_ctl);
2452 }
2453 pciide_print_modes(cp);
2454 }
2455
2456 void
2457 cmd646_9_irqack(chp)
2458 struct channel_softc *chp;
2459 {
2460 u_int32_t priirq, secirq;
2461 struct pciide_channel *cp = (struct pciide_channel*)chp;
2462 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2463
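	/*
	 * Read the channel's status register and write the value back to
	 * acknowledge that channel's interrupt, then do the generic
	 * bus-master acknowledge.
	 */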
2464 if (chp->channel == 0) {
2465 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2466 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2467 } else {
2468 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2469 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2470 }
2471 pciide_irqack(chp);
2472 }
2473
2474 void
2475 cy693_chip_map(sc, pa)
2476 struct pciide_softc *sc;
2477 struct pci_attach_args *pa;
2478 {
2479 struct pciide_channel *cp;
2480 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2481 bus_size_t cmdsize, ctlsize;
2482
2483 if (pciide_chipen(sc, pa) == 0)
2484 return;
2485 /*
2486 * this chip has 2 PCI IDE functions, one for primary and one for
2487 * secondary. So we need to call pciide_mapregs_compat() with
2488 * the real channel
2489 */
2490 if (pa->pa_function == 1) {
2491 sc->sc_cy_compatchan = 0;
2492 } else if (pa->pa_function == 2) {
2493 sc->sc_cy_compatchan = 1;
2494 } else {
2495 printf("%s: unexpected PCI function %d\n",
2496 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2497 return;
2498 }
2499 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2500 printf("%s: bus-master DMA support present",
2501 sc->sc_wdcdev.sc_dev.dv_xname);
2502 pciide_mapreg_dma(sc, pa);
2503 } else {
2504 printf("%s: hardware does not support DMA",
2505 sc->sc_wdcdev.sc_dev.dv_xname);
2506 sc->sc_dma_ok = 0;
2507 }
2508 printf("\n");
2509
2510 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2511 if (sc->sc_cy_handle == NULL) {
2512 printf("%s: unable to map hyperCache control registers\n",
2513 sc->sc_wdcdev.sc_dev.dv_xname);
2514 sc->sc_dma_ok = 0;
2515 }
2516
2517 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2518 WDC_CAPABILITY_MODE;
2519 if (sc->sc_dma_ok) {
2520 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2521 sc->sc_wdcdev.irqack = pciide_irqack;
2522 }
2523 sc->sc_wdcdev.PIO_cap = 4;
2524 sc->sc_wdcdev.DMA_cap = 2;
2525 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2526
2527 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2528 sc->sc_wdcdev.nchannels = 1;
2529
2530 /* Only one channel for this chip; if we are here it's enabled */
2531 cp = &sc->pciide_channels[0];
2532 sc->wdc_chanarray[0] = &cp->wdc_channel;
2533 cp->name = PCIIDE_CHANNEL_NAME(0);
2534 cp->wdc_channel.channel = 0;
2535 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2536 cp->wdc_channel.ch_queue =
2537 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2538 if (cp->wdc_channel.ch_queue == NULL) {
2539 		printf("%s: primary channel: "
2540 		    "can't allocate memory for command queue\n",
2541 sc->sc_wdcdev.sc_dev.dv_xname);
2542 return;
2543 }
2544 printf("%s: primary channel %s to ",
2545 sc->sc_wdcdev.sc_dev.dv_xname,
2546 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2547 "configured" : "wired");
2548 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2549 printf("native-PCI");
2550 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2551 pciide_pci_intr);
2552 } else {
2553 printf("compatibility");
2554 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2555 &cmdsize, &ctlsize);
2556 }
2557 printf(" mode\n");
2558 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2559 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2560 wdcattach(&cp->wdc_channel);
2561 if (pciide_chan_candisable(cp)) {
2562 pci_conf_write(sc->sc_pc, sc->sc_tag,
2563 PCI_COMMAND_STATUS_REG, 0);
2564 }
2565 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2566 if (cp->hw_ok == 0)
2567 return;
2568 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2569 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2570 cy693_setup_channel(&cp->wdc_channel);
2571 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2572 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2573 }
2574
2575 void
2576 cy693_setup_channel(chp)
2577 struct channel_softc *chp;
2578 {
2579 struct ata_drive_datas *drvp;
2580 int drive;
2581 u_int32_t cy_cmd_ctrl;
2582 u_int32_t idedma_ctl;
2583 struct pciide_channel *cp = (struct pciide_channel*)chp;
2584 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2585 int dma_mode = -1;
2586
2587 cy_cmd_ctrl = idedma_ctl = 0;
2588
2589 /* setup DMA if needed */
2590 pciide_channel_dma_setup(cp);
2591
2592 for (drive = 0; drive < 2; drive++) {
2593 drvp = &chp->ch_drive[drive];
2594 /* If no drive, skip */
2595 if ((drvp->drive_flags & DRIVE) == 0)
2596 continue;
2597 /* add timing values, setup DMA if needed */
2598 if (drvp->drive_flags & DRIVE_DMA) {
2599 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2600 /* use Multiword DMA */
2601 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2602 dma_mode = drvp->DMA_mode;
2603 }
2604 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2605 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2606 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2607 CY_CMD_CTRL_IOW_REC_OFF(drive));
2608 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2609 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2610 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2611 CY_CMD_CTRL_IOR_REC_OFF(drive));
2612 }
2613 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2614 chp->ch_drive[0].DMA_mode = dma_mode;
2615 chp->ch_drive[1].DMA_mode = dma_mode;
2616
2617 if (dma_mode == -1)
2618 dma_mode = 0;
2619
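	/*
	 * There is a single multi-word DMA timing setting per channel, so
	 * program it with the slowest mode negotiated above (0 if neither
	 * drive does DMA).
	 */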
2620 if (sc->sc_cy_handle != NULL) {
2621 /* Note: `multiple' is implied. */
2622 cy82c693_write(sc->sc_cy_handle,
2623 (sc->sc_cy_compatchan == 0) ?
2624 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2625 }
2626
2627 pciide_print_modes(cp);
2628
2629 if (idedma_ctl != 0) {
2630 /* Add software bits in status register */
2631 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2632 IDEDMA_CTL, idedma_ctl);
2633 }
2634 }
2635
2636 void
2637 sis_chip_map(sc, pa)
2638 struct pciide_softc *sc;
2639 struct pci_attach_args *pa;
2640 {
2641 struct pciide_channel *cp;
2642 int channel;
2643 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2644 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2645 pcireg_t rev = PCI_REVISION(pa->pa_class);
2646 bus_size_t cmdsize, ctlsize;
2647
2648 if (pciide_chipen(sc, pa) == 0)
2649 return;
2650 printf("%s: bus-master DMA support present",
2651 sc->sc_wdcdev.sc_dev.dv_xname);
2652 pciide_mapreg_dma(sc, pa);
2653 printf("\n");
2654 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2655 WDC_CAPABILITY_MODE;
2656 if (sc->sc_dma_ok) {
2657 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2658 sc->sc_wdcdev.irqack = pciide_irqack;
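		/* Revisions 0xd0 and later also support Ultra/DMA. */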
2659 if (rev >= 0xd0)
2660 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2661 }
2662
2663 sc->sc_wdcdev.PIO_cap = 4;
2664 sc->sc_wdcdev.DMA_cap = 2;
2665 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2666 sc->sc_wdcdev.UDMA_cap = 2;
2667 sc->sc_wdcdev.set_modes = sis_setup_channel;
2668
2669 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2670 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2671
2672 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2673 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2674 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2675
2676 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2677 cp = &sc->pciide_channels[channel];
2678 if (pciide_chansetup(sc, channel, interface) == 0)
2679 continue;
2680 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2681 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2682 printf("%s: %s channel ignored (disabled)\n",
2683 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2684 continue;
2685 }
2686 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2687 pciide_pci_intr);
2688 if (cp->hw_ok == 0)
2689 continue;
2690 if (pciide_chan_candisable(cp)) {
2691 if (channel == 0)
2692 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2693 else
2694 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2695 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2696 sis_ctr0);
2697 }
2698 pciide_map_compat_intr(pa, cp, channel, interface);
2699 if (cp->hw_ok == 0)
2700 continue;
2701 sis_setup_channel(&cp->wdc_channel);
2702 }
2703 }
2704
2705 void
2706 sis_setup_channel(chp)
2707 struct channel_softc *chp;
2708 {
2709 struct ata_drive_datas *drvp;
2710 int drive;
2711 u_int32_t sis_tim;
2712 u_int32_t idedma_ctl;
2713 struct pciide_channel *cp = (struct pciide_channel*)chp;
2714 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2715
2716 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2717 "channel %d 0x%x\n", chp->channel,
2718 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2719 DEBUG_PROBE);
2720 sis_tim = 0;
2721 idedma_ctl = 0;
2722 /* setup DMA if needed */
2723 pciide_channel_dma_setup(cp);
2724
2725 for (drive = 0; drive < 2; drive++) {
2726 drvp = &chp->ch_drive[drive];
2727 /* If no drive, skip */
2728 if ((drvp->drive_flags & DRIVE) == 0)
2729 continue;
2730 /* add timing values, setup DMA if needed */
2731 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2732 (drvp->drive_flags & DRIVE_UDMA) == 0)
2733 goto pio;
2734
2735 if (drvp->drive_flags & DRIVE_UDMA) {
2736 /* use Ultra/DMA */
2737 drvp->drive_flags &= ~DRIVE_DMA;
2738 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2739 SIS_TIM_UDMA_TIME_OFF(drive);
2740 sis_tim |= SIS_TIM_UDMA_EN(drive);
2741 } else {
2742 /*
2743 * use Multiword DMA
2744 * Timings will be used for both PIO and DMA,
2745 * so adjust DMA mode if needed
2746 */
2747 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2748 drvp->PIO_mode = drvp->DMA_mode + 2;
2749 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2750 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2751 drvp->PIO_mode - 2 : 0;
2752 if (drvp->DMA_mode == 0)
2753 drvp->PIO_mode = 0;
2754 }
2755 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2756 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2757 SIS_TIM_ACT_OFF(drive);
2758 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2759 SIS_TIM_REC_OFF(drive);
2760 }
2761 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2762 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2763 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2764 if (idedma_ctl != 0) {
2765 /* Add software bits in status register */
2766 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2767 IDEDMA_CTL, idedma_ctl);
2768 }
2769 pciide_print_modes(cp);
2770 }
2771
2772 void
2773 acer_chip_map(sc, pa)
2774 struct pciide_softc *sc;
2775 struct pci_attach_args *pa;
2776 {
2777 struct pciide_channel *cp;
2778 int channel;
2779 pcireg_t cr, interface;
2780 bus_size_t cmdsize, ctlsize;
2781
2782 if (pciide_chipen(sc, pa) == 0)
2783 return;
2784 printf("%s: bus-master DMA support present",
2785 sc->sc_wdcdev.sc_dev.dv_xname);
2786 pciide_mapreg_dma(sc, pa);
2787 printf("\n");
2788 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2789 WDC_CAPABILITY_MODE;
2790 if (sc->sc_dma_ok) {
2791 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2792 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2793 sc->sc_wdcdev.irqack = pciide_irqack;
2794 }
2795
2796 sc->sc_wdcdev.PIO_cap = 4;
2797 sc->sc_wdcdev.DMA_cap = 2;
2798 sc->sc_wdcdev.UDMA_cap = 2;
2799 sc->sc_wdcdev.set_modes = acer_setup_channel;
2800 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2801 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2802
2803 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2804 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2805 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2806
2807 /* Enable "microsoft register bits" R/W. */
2808 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2809 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2810 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2811 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2812 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2813 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2814 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2815 ~ACER_CHANSTATUSREGS_RO);
2816 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2817 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2818 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2819 /* Don't use cr, re-read the real register content instead */
2820 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2821 PCI_CLASS_REG));
2822
2823 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2824 cp = &sc->pciide_channels[channel];
2825 if (pciide_chansetup(sc, channel, interface) == 0)
2826 continue;
2827 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2828 printf("%s: %s channel ignored (disabled)\n",
2829 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2830 continue;
2831 }
2832 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2833 acer_pci_intr);
2834 if (cp->hw_ok == 0)
2835 continue;
2836 if (pciide_chan_candisable(cp)) {
2837 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2838 pci_conf_write(sc->sc_pc, sc->sc_tag,
2839 PCI_CLASS_REG, cr);
2840 }
2841 pciide_map_compat_intr(pa, cp, channel, interface);
2842 acer_setup_channel(&cp->wdc_channel);
2843 }
2844 }
2845
2846 void
2847 acer_setup_channel(chp)
2848 struct channel_softc *chp;
2849 {
2850 struct ata_drive_datas *drvp;
2851 int drive;
2852 u_int32_t acer_fifo_udma;
2853 u_int32_t idedma_ctl;
2854 struct pciide_channel *cp = (struct pciide_channel*)chp;
2855 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2856
2857 idedma_ctl = 0;
2858 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2859 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2860 acer_fifo_udma), DEBUG_PROBE);
2861 /* setup DMA if needed */
2862 pciide_channel_dma_setup(cp);
2863
2864 for (drive = 0; drive < 2; drive++) {
2865 drvp = &chp->ch_drive[drive];
2866 /* If no drive, skip */
2867 if ((drvp->drive_flags & DRIVE) == 0)
2868 continue;
2869 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2870 "channel %d drive %d 0x%x\n", chp->channel, drive,
2871 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2872 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2873 /* clear FIFO/DMA mode */
2874 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2875 ACER_UDMA_EN(chp->channel, drive) |
2876 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2877
2878 /* add timing values, setup DMA if needed */
2879 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2880 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2881 acer_fifo_udma |=
2882 ACER_FTH_OPL(chp->channel, drive, 0x1);
2883 goto pio;
2884 }
2885
2886 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2887 if (drvp->drive_flags & DRIVE_UDMA) {
2888 /* use Ultra/DMA */
2889 drvp->drive_flags &= ~DRIVE_DMA;
2890 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2891 acer_fifo_udma |=
2892 ACER_UDMA_TIM(chp->channel, drive,
2893 acer_udma[drvp->UDMA_mode]);
2894 } else {
2895 /*
2896 * use Multiword DMA
2897 * Timings will be used for both PIO and DMA,
2898 * so adjust DMA mode if needed
2899 */
2900 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2901 drvp->PIO_mode = drvp->DMA_mode + 2;
2902 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2903 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2904 drvp->PIO_mode - 2 : 0;
2905 if (drvp->DMA_mode == 0)
2906 drvp->PIO_mode = 0;
2907 }
2908 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2909 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2910 ACER_IDETIM(chp->channel, drive),
2911 acer_pio[drvp->PIO_mode]);
2912 }
2913 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2914 acer_fifo_udma), DEBUG_PROBE);
2915 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2916 if (idedma_ctl != 0) {
2917 /* Add software bits in status register */
2918 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2919 IDEDMA_CTL, idedma_ctl);
2920 }
2921 pciide_print_modes(cp);
2922 }
2923
2924 int
2925 acer_pci_intr(arg)
2926 void *arg;
2927 {
2928 struct pciide_softc *sc = arg;
2929 struct pciide_channel *cp;
2930 struct channel_softc *wdc_cp;
2931 int i, rv, crv;
2932 u_int32_t chids;
2933
2934 rv = 0;
2935 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2936 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2937 cp = &sc->pciide_channels[i];
2938 wdc_cp = &cp->wdc_channel;
2939 		/* If it's a compat channel, skip it. */
2940 if (cp->compat)
2941 continue;
2942 if (chids & ACER_CHIDS_INT(i)) {
2943 crv = wdcintr(wdc_cp);
2944 if (crv == 0)
2945 printf("%s:%d: bogus intr\n",
2946 sc->sc_wdcdev.sc_dev.dv_xname, i);
2947 else
2948 rv = 1;
2949 }
2950 }
2951 return rv;
2952 }
2953
2954 void
2955 hpt_chip_map(sc, pa)
2956 struct pciide_softc *sc;
2957 struct pci_attach_args *pa;
2958 {
2959 struct pciide_channel *cp;
2960 int i, compatchan, revision;
2961 pcireg_t interface;
2962 bus_size_t cmdsize, ctlsize;
2963
2964 if (pciide_chipen(sc, pa) == 0)
2965 return;
2966 revision = PCI_REVISION(pa->pa_class);
2967
2968 	/*
2969 	 * When the chip is in native mode it identifies itself as a
2970 	 * 'misc mass storage' device. Fake the interface value in this case.
2971 	 */
2972 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
2973 interface = PCI_INTERFACE(pa->pa_class);
2974 } else {
2975 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2976 PCIIDE_INTERFACE_PCI(0);
2977 if (revision == HPT370_REV)
2978 interface |= PCIIDE_INTERFACE_PCI(1);
2979 }
2980
2981 printf("%s: bus-master DMA support present",
2982 sc->sc_wdcdev.sc_dev.dv_xname);
2983 pciide_mapreg_dma(sc, pa);
2984 printf("\n");
2985 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2986 WDC_CAPABILITY_MODE;
2987 if (sc->sc_dma_ok) {
2988 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2989 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2990 sc->sc_wdcdev.irqack = pciide_irqack;
2991 }
2992 sc->sc_wdcdev.PIO_cap = 4;
2993 sc->sc_wdcdev.DMA_cap = 2;
2994
2995 sc->sc_wdcdev.set_modes = hpt_setup_channel;
2996 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2997 if (revision == HPT366_REV) {
2998 sc->sc_wdcdev.UDMA_cap = 4;
2999 /*
3000 * The 366 has 2 PCI IDE functions, one for primary and one
3001 * for secondary. So we need to call pciide_mapregs_compat()
3002 * with the real channel
3003 */
3004 if (pa->pa_function == 0) {
3005 compatchan = 0;
3006 } else if (pa->pa_function == 1) {
3007 compatchan = 1;
3008 } else {
3009 printf("%s: unexpected PCI function %d\n",
3010 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3011 return;
3012 }
3013 sc->sc_wdcdev.nchannels = 1;
3014 } else {
3015 sc->sc_wdcdev.nchannels = 2;
3016 sc->sc_wdcdev.UDMA_cap = 5;
3017 }
3018 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3019 cp = &sc->pciide_channels[i];
3020 if (sc->sc_wdcdev.nchannels > 1) {
3021 compatchan = i;
3022 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3023 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3024 printf("%s: %s channel ignored (disabled)\n",
3025 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3026 continue;
3027 }
3028 }
3029 if (pciide_chansetup(sc, i, interface) == 0)
3030 continue;
3031 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3032 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3033 &ctlsize, hpt_pci_intr);
3034 } else {
3035 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3036 &cmdsize, &ctlsize);
3037 }
3038 if (cp->hw_ok == 0)
3039 return;
3040 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3041 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3042 wdcattach(&cp->wdc_channel);
3043 hpt_setup_channel(&cp->wdc_channel);
3044 }
3045 if (revision == HPT370_REV) {
3046 		/*
3047 		 * The HPT370 has a bit to disable interrupts; make sure
3048 		 * to clear it.
3049 		 */
3050 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3051 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3052 ~HPT_CSEL_IRQDIS);
3053 }
3054 return;
3055 }
3056
3057 void
3058 hpt_setup_channel(chp)
3059 struct channel_softc *chp;
3060 {
3061 struct ata_drive_datas *drvp;
3062 int drive;
3063 int cable;
3064 u_int32_t before, after;
3065 u_int32_t idedma_ctl;
3066 struct pciide_channel *cp = (struct pciide_channel*)chp;
3067 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3068
3069 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
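	/*
	 * HPT_CSEL holds the per-channel cable-ID bits used below to cap
	 * UDMA at Ultra/33 when the cable can't support higher modes.
	 */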
3070
3071 /* setup DMA if needed */
3072 pciide_channel_dma_setup(cp);
3073
3074 idedma_ctl = 0;
3075
3076 /* Per drive settings */
3077 for (drive = 0; drive < 2; drive++) {
3078 drvp = &chp->ch_drive[drive];
3079 /* If no drive, skip */
3080 if ((drvp->drive_flags & DRIVE) == 0)
3081 continue;
3082 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3083 HPT_IDETIM(chp->channel, drive));
3084
3085 /* add timing values, setup DMA if needed */
3086 if (drvp->drive_flags & DRIVE_UDMA) {
3087 /* use Ultra/DMA */
3088 drvp->drive_flags &= ~DRIVE_DMA;
3089 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3090 drvp->UDMA_mode > 2)
3091 drvp->UDMA_mode = 2;
3092 after = (sc->sc_wdcdev.nchannels == 2) ?
3093 hpt370_udma[drvp->UDMA_mode] :
3094 hpt366_udma[drvp->UDMA_mode];
3095 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3096 } else if (drvp->drive_flags & DRIVE_DMA) {
3097 /*
3098 * use Multiword DMA.
3099 * Timings will be used for both PIO and DMA, so adjust
3100 * DMA mode if needed
3101 */
3102 if (drvp->PIO_mode >= 3 &&
3103 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3104 drvp->DMA_mode = drvp->PIO_mode - 2;
3105 }
3106 after = (sc->sc_wdcdev.nchannels == 2) ?
3107 hpt370_dma[drvp->DMA_mode] :
3108 hpt366_dma[drvp->DMA_mode];
3109 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3110 } else {
3111 /* PIO only */
3112 after = (sc->sc_wdcdev.nchannels == 2) ?
3113 hpt370_pio[drvp->PIO_mode] :
3114 hpt366_pio[drvp->PIO_mode];
3115 }
3116 pci_conf_write(sc->sc_pc, sc->sc_tag,
3117 HPT_IDETIM(chp->channel, drive), after);
3118 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3119 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3120 after, before), DEBUG_PROBE);
3121 }
3122 if (idedma_ctl != 0) {
3123 /* Add software bits in status register */
3124 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3125 IDEDMA_CTL, idedma_ctl);
3126 }
3127 pciide_print_modes(cp);
3128 }
3129
3130 int
3131 hpt_pci_intr(arg)
3132 void *arg;
3133 {
3134 struct pciide_softc *sc = arg;
3135 struct pciide_channel *cp;
3136 struct channel_softc *wdc_cp;
3137 int rv = 0;
3138 int dmastat, i, crv;
3139
3140 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3141 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3142 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3143 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3144 continue;
3145 cp = &sc->pciide_channels[i];
3146 wdc_cp = &cp->wdc_channel;
3147 crv = wdcintr(wdc_cp);
3148 if (crv == 0) {
3149 printf("%s:%d: bogus intr\n",
3150 sc->sc_wdcdev.sc_dev.dv_xname, i);
3151 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3152 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3153 } else
3154 rv = 1;
3155 }
3156 return rv;
3157 }
3158
3159
3160 /* A macro to test product */
3161 #define PDC_IS_262(sc) \
3162 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3163 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3164 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3165
3166 void
3167 pdc202xx_chip_map(sc, pa)
3168 struct pciide_softc *sc;
3169 struct pci_attach_args *pa;
3170 {
3171 struct pciide_channel *cp;
3172 int channel;
3173 pcireg_t interface, st, mode;
3174 bus_size_t cmdsize, ctlsize;
3175
3176 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3177 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3178 DEBUG_PROBE);
3179 if (pciide_chipen(sc, pa) == 0)
3180 return;
3181
3182 /* turn off RAID mode */
3183 st &= ~PDC2xx_STATE_IDERAID;
3184
3185 	/*
3186 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3187 	 * RAID mode; we have to fake the interface value.
3188 	 */
3189 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3190 if (st & PDC2xx_STATE_NATIVE)
3191 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3192
3193 printf("%s: bus-master DMA support present",
3194 sc->sc_wdcdev.sc_dev.dv_xname);
3195 pciide_mapreg_dma(sc, pa);
3196 printf("\n");
3197 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3198 WDC_CAPABILITY_MODE;
3199 if (sc->sc_dma_ok) {
3200 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3201 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3202 sc->sc_wdcdev.irqack = pciide_irqack;
3203 }
3204 sc->sc_wdcdev.PIO_cap = 4;
3205 sc->sc_wdcdev.DMA_cap = 2;
3206 if (PDC_IS_262(sc))
3207 sc->sc_wdcdev.UDMA_cap = 4;
3208 else
3209 sc->sc_wdcdev.UDMA_cap = 2;
3210 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3211 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3212 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3213
3214 /* setup failsafe defaults */
3215 mode = 0;
3216 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3217 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3218 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3219 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
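	/*
	 * These mode-0 (slowest) values are written to both drives of each
	 * channel as safe initial timings; per-drive tuning happens later
	 * in pdc202xx_setup_channel().
	 */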
3220 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3221 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3222 "initial timings 0x%x, now 0x%x\n", channel,
3223 pci_conf_read(sc->sc_pc, sc->sc_tag,
3224 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3225 DEBUG_PROBE);
3226 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3227 mode | PDC2xx_TIM_IORDYp);
3228 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3229 "initial timings 0x%x, now 0x%x\n", channel,
3230 pci_conf_read(sc->sc_pc, sc->sc_tag,
3231 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3232 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3233 mode);
3234 }
3235
3236 mode = PDC2xx_SCR_DMA;
3237 if (PDC_IS_262(sc)) {
3238 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3239 } else {
3240 /* the BIOS set it up this way */
3241 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3242 }
3243 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3244 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3245 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3246 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3247 DEBUG_PROBE);
3248 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3249
3250 /* controller initial state register is OK even without BIOS */
3251 /* Set DMA mode to IDE DMA compatibility */
3252 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3253 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3254 DEBUG_PROBE);
3255 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3256 mode | 0x1);
3257 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3258 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3259 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3260 mode | 0x1);
3261
3262 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3263 cp = &sc->pciide_channels[channel];
3264 if (pciide_chansetup(sc, channel, interface) == 0)
3265 continue;
3266 if ((st & (PDC_IS_262(sc) ?
3267 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3268 printf("%s: %s channel ignored (disabled)\n",
3269 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3270 continue;
3271 }
3272 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3273 pdc202xx_pci_intr);
3274 if (cp->hw_ok == 0)
3275 continue;
3276 if (pciide_chan_candisable(cp))
3277 st &= ~(PDC_IS_262(sc) ?
3278 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3279 pciide_map_compat_intr(pa, cp, channel, interface);
3280 pdc202xx_setup_channel(&cp->wdc_channel);
3281 }
3282 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3283 DEBUG_PROBE);
3284 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3285 return;
3286 }
3287
3288 void
3289 pdc202xx_setup_channel(chp)
3290 struct channel_softc *chp;
3291 {
3292 struct ata_drive_datas *drvp;
3293 int drive;
3294 pcireg_t mode, st;
3295 u_int32_t idedma_ctl, scr, atapi;
3296 struct pciide_channel *cp = (struct pciide_channel*)chp;
3297 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3298 int channel = chp->channel;
3299
3300 /* setup DMA if needed */
3301 pciide_channel_dma_setup(cp);
3302
3303 idedma_ctl = 0;
3304
3305 /* Per channel settings */
3306 if (PDC_IS_262(sc)) {
3307 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3308 PDC262_U66);
3309 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3310 		/* Trim UDMA mode */
3311 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3312 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3313 chp->ch_drive[0].UDMA_mode <= 2) ||
3314 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3315 chp->ch_drive[1].UDMA_mode <= 2)) {
3316 if (chp->ch_drive[0].UDMA_mode > 2)
3317 chp->ch_drive[0].UDMA_mode = 2;
3318 if (chp->ch_drive[1].UDMA_mode > 2)
3319 chp->ch_drive[1].UDMA_mode = 2;
3320 }
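		/*
		 * The 66MHz UDMA clock (U66) is enabled per channel, so if
		 * either drive has to stay at Ultra/33 both drives are
		 * capped at mode 2 before deciding whether to set U66 below.
		 */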
3321 /* Set U66 if needed */
3322 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3323 chp->ch_drive[0].UDMA_mode > 2) ||
3324 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3325 chp->ch_drive[1].UDMA_mode > 2))
3326 scr |= PDC262_U66_EN(channel);
3327 else
3328 scr &= ~PDC262_U66_EN(channel);
3329 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3330 PDC262_U66, scr);
3331 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3332 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3333 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3334 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3335 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3336 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3337 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3338 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3339 atapi = 0;
3340 else
3341 atapi = PDC262_ATAPI_UDMA;
3342 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3343 PDC262_ATAPI(channel), atapi);
3344 }
3345 }
3346 for (drive = 0; drive < 2; drive++) {
3347 drvp = &chp->ch_drive[drive];
3348 /* If no drive, skip */
3349 if ((drvp->drive_flags & DRIVE) == 0)
3350 continue;
3351 mode = 0;
3352 if (drvp->drive_flags & DRIVE_UDMA) {
3353 /* use Ultra/DMA */
3354 drvp->drive_flags &= ~DRIVE_DMA;
3355 mode = PDC2xx_TIM_SET_MB(mode,
3356 pdc2xx_udma_mb[drvp->UDMA_mode]);
3357 mode = PDC2xx_TIM_SET_MC(mode,
3358 pdc2xx_udma_mc[drvp->UDMA_mode]);
3360 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3361 } else if (drvp->drive_flags & DRIVE_DMA) {
3362 mode = PDC2xx_TIM_SET_MB(mode,
3363 pdc2xx_dma_mb[drvp->DMA_mode]);
3364 mode = PDC2xx_TIM_SET_MC(mode,
3365 pdc2xx_dma_mc[drvp->DMA_mode]);
3366 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3367 } else {
3368 mode = PDC2xx_TIM_SET_MB(mode,
3369 pdc2xx_dma_mb[0]);
3370 mode = PDC2xx_TIM_SET_MC(mode,
3371 pdc2xx_dma_mc[0]);
3372 }
3373 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3374 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3375 if (drvp->drive_flags & DRIVE_ATA)
3376 mode |= PDC2xx_TIM_PRE;
3377 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3378 if (drvp->PIO_mode >= 3) {
3379 mode |= PDC2xx_TIM_IORDY;
3380 if (drive == 0)
3381 mode |= PDC2xx_TIM_IORDYp;
3382 }
3383 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3384 "timings 0x%x\n",
3385 sc->sc_wdcdev.sc_dev.dv_xname,
3386 chp->channel, drive, mode), DEBUG_PROBE);
3387 pci_conf_write(sc->sc_pc, sc->sc_tag,
3388 PDC2xx_TIM(chp->channel, drive), mode);
3389 }
3390 if (idedma_ctl != 0) {
3391 /* Add software bits in status register */
3392 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3393 IDEDMA_CTL, idedma_ctl);
3394 }
3395 pciide_print_modes(cp);
3396 }
3397
3398 int
3399 pdc202xx_pci_intr(arg)
3400 void *arg;
3401 {
3402 struct pciide_softc *sc = arg;
3403 struct pciide_channel *cp;
3404 struct channel_softc *wdc_cp;
3405 int i, rv, crv;
3406 u_int32_t scr;
3407
3408 rv = 0;
3409 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3410 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3411 cp = &sc->pciide_channels[i];
3412 wdc_cp = &cp->wdc_channel;
3413 		/* If it's a compat channel, skip it. */
3414 if (cp->compat)
3415 continue;
3416 if (scr & PDC2xx_SCR_INT(i)) {
3417 crv = wdcintr(wdc_cp);
3418 if (crv == 0)
3419 printf("%s:%d: bogus intr\n",
3420 sc->sc_wdcdev.sc_dev.dv_xname, i);
3421 else
3422 rv = 1;
3423 }
3424 }
3425 return rv;
3426 }
3427
3428 void
3429 opti_chip_map(sc, pa)
3430 struct pciide_softc *sc;
3431 struct pci_attach_args *pa;
3432 {
3433 struct pciide_channel *cp;
3434 bus_size_t cmdsize, ctlsize;
3435 pcireg_t interface;
3436 u_int8_t init_ctrl;
3437 int channel;
3438
3439 if (pciide_chipen(sc, pa) == 0)
3440 return;
3441 printf("%s: bus-master DMA support present",
3442 sc->sc_wdcdev.sc_dev.dv_xname);
3443 pciide_mapreg_dma(sc, pa);
3444 printf("\n");
3445
3446 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3447 WDC_CAPABILITY_MODE;
3448 sc->sc_wdcdev.PIO_cap = 4;
3449 if (sc->sc_dma_ok) {
3450 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3451 sc->sc_wdcdev.irqack = pciide_irqack;
3452 sc->sc_wdcdev.DMA_cap = 2;
3453 }
3454 sc->sc_wdcdev.set_modes = opti_setup_channel;
3455
3456 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3457 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3458
3459 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3460 OPTI_REG_INIT_CONTROL);
3461
3462 interface = PCI_INTERFACE(pa->pa_class);
3463
3464 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3465 cp = &sc->pciide_channels[channel];
3466 if (pciide_chansetup(sc, channel, interface) == 0)
3467 continue;
3468 if (channel == 1 &&
3469 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3470 printf("%s: %s channel ignored (disabled)\n",
3471 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3472 continue;
3473 }
3474 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3475 pciide_pci_intr);
3476 if (cp->hw_ok == 0)
3477 continue;
3478 pciide_map_compat_intr(pa, cp, channel, interface);
3479 if (cp->hw_ok == 0)
3480 continue;
3481 opti_setup_channel(&cp->wdc_channel);
3482 }
3483 }
3484
3485 void
3486 opti_setup_channel(chp)
3487 struct channel_softc *chp;
3488 {
3489 struct ata_drive_datas *drvp;
3490 struct pciide_channel *cp = (struct pciide_channel*)chp;
3491 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3492 int drive, spd;
3493 int mode[2];
3494 u_int8_t rv, mr;
3495
3496 /*
3497 * The `Delay' and `Address Setup Time' fields of the
3498 * Miscellaneous Register are always zero initially.
3499 */
3500 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3501 mr &= ~(OPTI_MISC_DELAY_MASK |
3502 OPTI_MISC_ADDR_SETUP_MASK |
3503 OPTI_MISC_INDEX_MASK);
3504
3505 /* Prime the control register before setting timing values */
3506 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3507
3508 	/* Determine the clock rate of the PCI bus the chip is attached to */
3509 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3510 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3511
3512 /* setup DMA if needed */
3513 pciide_channel_dma_setup(cp);
3514
3515 for (drive = 0; drive < 2; drive++) {
3516 drvp = &chp->ch_drive[drive];
3517 /* If no drive, skip */
3518 if ((drvp->drive_flags & DRIVE) == 0) {
3519 mode[drive] = -1;
3520 continue;
3521 }
3522
3523 if ((drvp->drive_flags & DRIVE_DMA)) {
3524 /*
3525 * Timings will be used for both PIO and DMA,
3526 * so adjust DMA mode if needed
3527 */
3528 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3529 drvp->PIO_mode = drvp->DMA_mode + 2;
3530 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3531 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3532 drvp->PIO_mode - 2 : 0;
3533 if (drvp->DMA_mode == 0)
3534 drvp->PIO_mode = 0;
3535
3536 mode[drive] = drvp->DMA_mode + 5;
3537 } else
3538 mode[drive] = drvp->PIO_mode;
3539
3540 if (drive && mode[0] >= 0 &&
3541 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3542 /*
3543 * Can't have two drives using different values
3544 * for `Address Setup Time'.
3545 * Slow down the faster drive to compensate.
3546 */
3547 int d = (opti_tim_as[spd][mode[0]] >
3548 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3549
3550 mode[d] = mode[1-d];
3551 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3552 chp->ch_drive[d].DMA_mode = 0;
3553 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3554 }
3555 }
3556
3557 for (drive = 0; drive < 2; drive++) {
3558 int m;
3559 if ((m = mode[drive]) < 0)
3560 continue;
3561
3562 /* Set the Address Setup Time and select appropriate index */
3563 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3564 rv |= OPTI_MISC_INDEX(drive);
3565 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3566
3567 /* Set the pulse width and recovery timing parameters */
3568 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3569 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3570 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3571 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3572
3573 /* Set the Enhanced Mode register appropriately */
3574 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3575 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3576 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3577 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3578 }
3579
3580 /* Finally, enable the timings */
3581 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3582
3583 pciide_print_modes(cp);
3584 }
3585