1 /*	$NetBSD: pciide.c,v 1.33.2.5 2000/07/07 17:33:49 he Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
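/*
 * Illustrative usage note (not from the original source): debug output is
 * selected by OR'ing the DEBUG_* bits into wdcdebug_pciide_mask, e.g.
 *
 *	wdcdebug_pciide_mask = DEBUG_PROBE | DEBUG_DMA;
 *
 * set from the debugger or by patching the variable before autoconfiguration;
 * this enables the probe-time configuration dumps and DMA segment listings
 * guarded by WDCDEBUG_PRINT() below.
 */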
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120
121 #if BYTE_ORDER == BIG_ENDIAN
122 #define htole16(x) bswap16((u_int16_t)(x))
123 #define htole32(x) bswap32((u_int32_t)(x))
124 #define htole64(x) bswap64((u_int64_t)(x))
125 #else /* LITTLE_ENDIAN */
126 #define htole16(x) (x)
127 #define htole32(x) (x)
128 #define htole64(x) (x)
129 #endif
130 #define le16toh(x) htole16(x)
131 #define le32toh(x) htole32(x)
132 #define le64toh(x) htole64(x)
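/*
 * Note: the bus-master DMA descriptor table filled in by pciide_dma_init()
 * below is read by the controller in little-endian byte order, so entries
 * are stored with htole32() and converted back with le32toh() only for
 * debug printing.
 */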
133
134 /* inlines for reading/writing 8-bit PCI registers */
135 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
136 int));
137 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
138 int, u_int8_t));
139
140 static __inline u_int8_t
141 pciide_pci_read(pc, pa, reg)
142 pci_chipset_tag_t pc;
143 pcitag_t pa;
144 int reg;
145 {
146
147 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
148 ((reg & 0x03) * 8) & 0xff);
149 }
150
151 static __inline void
152 pciide_pci_write(pc, pa, reg, val)
153 pci_chipset_tag_t pc;
154 pcitag_t pa;
155 int reg;
156 u_int8_t val;
157 {
158 pcireg_t pcival;
159
160 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
161 pcival &= ~(0xff << ((reg & 0x03) * 8));
162 pcival |= (val << ((reg & 0x03) * 8));
163 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
164 }
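/*
 * Worked example (illustrative only): PCI configuration space is accessed
 * 32 bits at a time, so a byte read of register 0x43 with the helper above
 * fetches the dword at offset 0x40 and extracts bits 31:24, i.e.
 *
 *	val = (pci_conf_read(pc, pa, 0x40) >> 24) & 0xff;
 *
 * and a byte write performs the corresponding read-modify-write of that
 * dword, replacing only the selected byte lane.
 */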
165
166 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167
168 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
169 void piix_setup_channel __P((struct channel_softc*));
170 void piix3_4_setup_channel __P((struct channel_softc*));
171 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
172 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
173 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
174
175 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
176 void amd756_setup_channel __P((struct channel_softc*));
177
178 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void apollo_setup_channel __P((struct channel_softc*));
180
181 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void cmd0643_9_setup_channel __P((struct channel_softc*));
184 void cmd_channel_map __P((struct pci_attach_args *,
185 struct pciide_softc *, int));
186 int cmd_pci_intr __P((void *));
187 void cmd648_9_irqack __P((struct channel_softc *));
188
189 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
190 void cy693_setup_channel __P((struct channel_softc*));
191
192 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void sis_setup_channel __P((struct channel_softc*));
194
195 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void acer_setup_channel __P((struct channel_softc*));
197 int acer_pci_intr __P((void *));
198
199 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
200 void pdc202xx_setup_channel __P((struct channel_softc*));
201 int pdc202xx_pci_intr __P((void *));
202
203 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
204 void opti_setup_channel __P((struct channel_softc*));
205
206 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
207 void hpt_setup_channel __P((struct channel_softc*));
208 int hpt_pci_intr __P((void *));
209
210 void pciide_channel_dma_setup __P((struct pciide_channel *));
211 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
212 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
213 void pciide_dma_start __P((void*, int, int));
214 int pciide_dma_finish __P((void*, int, int, int));
215 void pciide_irqack __P((struct channel_softc *));
216 void pciide_print_modes __P((struct pciide_channel *));
217
218 struct pciide_product_desc {
219 u_int32_t ide_product;
220 int ide_flags;
221 const char *ide_name;
222 /* map and setup chip, probe drives */
223 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
224 };
225
226 /* Flags for ide_flags */
227 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
228
229 /* Default product description for devices not specifically known to this driver */
230 const struct pciide_product_desc default_product_desc = {
231 0,
232 0,
233 "Generic PCI IDE controller",
234 default_chip_map,
235 };
236
237 const struct pciide_product_desc pciide_intel_products[] = {
238 { PCI_PRODUCT_INTEL_82092AA,
239 0,
240 "Intel 82092AA IDE controller",
241 default_chip_map,
242 },
243 { PCI_PRODUCT_INTEL_82371FB_IDE,
244 0,
245 "Intel 82371FB IDE controller (PIIX)",
246 piix_chip_map,
247 },
248 { PCI_PRODUCT_INTEL_82371SB_IDE,
249 0,
250 "Intel 82371SB IDE Interface (PIIX3)",
251 piix_chip_map,
252 },
253 { PCI_PRODUCT_INTEL_82371AB_IDE,
254 0,
255 "Intel 82371AB IDE controller (PIIX4)",
256 piix_chip_map,
257 },
258 { PCI_PRODUCT_INTEL_82801AA_IDE,
259 0,
260 "Intel 82801AA IDE Controller (ICH)",
261 piix_chip_map,
262 },
263 { PCI_PRODUCT_INTEL_82801AB_IDE,
264 0,
265 "Intel 82801AB IDE Controller (ICH0)",
266 piix_chip_map,
267 },
268 { 0,
269 0,
270 NULL,
271 }
272 };
273
274 const struct pciide_product_desc pciide_amd_products[] = {
275 { PCI_PRODUCT_AMD_PBC756_IDE,
276 0,
277 "Advanced Micro Devices AMD756 IDE Controller",
278 amd756_chip_map
279 },
280 { 0,
281 0,
282 NULL,
283 }
284 };
285
286 const struct pciide_product_desc pciide_cmd_products[] = {
287 { PCI_PRODUCT_CMDTECH_640,
288 0,
289 "CMD Technology PCI0640",
290 cmd_chip_map
291 },
292 { PCI_PRODUCT_CMDTECH_643,
293 0,
294 "CMD Technology PCI0643",
295 cmd0643_9_chip_map,
296 },
297 { PCI_PRODUCT_CMDTECH_646,
298 0,
299 "CMD Technology PCI0646",
300 cmd0643_9_chip_map,
301 },
302 { PCI_PRODUCT_CMDTECH_648,
303 IDE_PCI_CLASS_OVERRIDE,
304 "CMD Technology PCI0648",
305 cmd0643_9_chip_map,
306 },
307 { PCI_PRODUCT_CMDTECH_649,
308 IDE_PCI_CLASS_OVERRIDE,
309 "CMD Technology PCI0649",
310 cmd0643_9_chip_map,
311 },
312 { 0,
313 0,
314 NULL,
315 }
316 };
317
318 const struct pciide_product_desc pciide_via_products[] = {
319 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
320 0,
321 "VIA Tech VT82C586 IDE Controller",
322 apollo_chip_map,
323 },
324 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
325 0,
326 "VIA Tech VT82C586A IDE Controller",
327 apollo_chip_map,
328 },
329 { 0,
330 0,
331 NULL,
332 }
333 };
334
335 const struct pciide_product_desc pciide_cypress_products[] = {
336 { PCI_PRODUCT_CONTAQ_82C693,
337 0,
338 "Cypress 82C693 IDE Controller",
339 cy693_chip_map,
340 },
341 { 0,
342 0,
343 NULL,
344 }
345 };
346
347 const struct pciide_product_desc pciide_sis_products[] = {
348 { PCI_PRODUCT_SIS_5597_IDE,
349 0,
350 "Silicon Integrated System 5597/5598 IDE controller",
351 sis_chip_map,
352 },
353 { 0,
354 0,
355 NULL,
356 }
357 };
358
359 const struct pciide_product_desc pciide_acer_products[] = {
360 { PCI_PRODUCT_ALI_M5229,
361 0,
362 "Acer Labs M5229 UDMA IDE Controller",
363 acer_chip_map,
364 },
365 { 0,
366 0,
367 NULL,
368 }
369 };
370
371 const struct pciide_product_desc pciide_promise_products[] = {
372 { PCI_PRODUCT_PROMISE_ULTRA33,
373 IDE_PCI_CLASS_OVERRIDE,
374 "Promise Ultra33/ATA Bus Master IDE Accelerator",
375 pdc202xx_chip_map,
376 },
377 { PCI_PRODUCT_PROMISE_ULTRA66,
378 IDE_PCI_CLASS_OVERRIDE,
379 "Promise Ultra66/ATA Bus Master IDE Accelerator",
380 pdc202xx_chip_map,
381 },
382 { 0,
383 0,
384 NULL,
385 }
386 };
387
388 const struct pciide_product_desc pciide_opti_products[] = {
389 { PCI_PRODUCT_OPTI_82C621,
390 0,
391 "OPTi 82c621 PCI IDE controller",
392 opti_chip_map,
393 },
394 { PCI_PRODUCT_OPTI_82C568,
395 0,
396 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
397 opti_chip_map,
398 },
399 { PCI_PRODUCT_OPTI_82D568,
400 0,
401 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
402 opti_chip_map,
403 },
404 { 0,
405 0,
406 NULL,
407 }
408 };
409
410 const struct pciide_product_desc pciide_triones_products[] = {
411 { PCI_PRODUCT_TRIONES_HPT366,
412 IDE_PCI_CLASS_OVERRIDE,
413 "Triones/Highpoint HPT366/370 IDE Controller",
414 hpt_chip_map,
415 },
416 { 0,
417 0,
418 NULL,
419 }
420 };
421
422 struct pciide_vendor_desc {
423 u_int32_t ide_vendor;
424 const struct pciide_product_desc *ide_products;
425 };
426
427 const struct pciide_vendor_desc pciide_vendors[] = {
428 { PCI_VENDOR_INTEL, pciide_intel_products },
429 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
430 { PCI_VENDOR_VIATECH, pciide_via_products },
431 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
432 { PCI_VENDOR_SIS, pciide_sis_products },
433 { PCI_VENDOR_ALI, pciide_acer_products },
434 { PCI_VENDOR_PROMISE, pciide_promise_products },
435 { PCI_VENDOR_AMD, pciide_amd_products },
436 { PCI_VENDOR_OPTI, pciide_opti_products },
437 { PCI_VENDOR_TRIONES, pciide_triones_products },
438 { 0, NULL }
439 };
440
441 /* options passed via the 'flags' config keyword */
442 #define PCIIDE_OPTIONS_DMA 0x01
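/*
 * For example (assuming the usual kernel config(5) syntax), DMA can be
 * enabled on an otherwise unknown controller with a line such as
 *
 *	pciide* at pci? dev ? function ? flags 0x0001
 *
 * which sets PCIIDE_OPTIONS_DMA in cf_flags; this is checked in
 * default_chip_map() below.
 */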
443
444 int pciide_match __P((struct device *, struct cfdata *, void *));
445 void pciide_attach __P((struct device *, struct device *, void *));
446
447 struct cfattach pciide_ca = {
448 sizeof(struct pciide_softc), pciide_match, pciide_attach
449 };
450 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
451 int pciide_mapregs_compat __P(( struct pci_attach_args *,
452 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
453 int pciide_mapregs_native __P((struct pci_attach_args *,
454 struct pciide_channel *, bus_size_t *, bus_size_t *,
455 int (*pci_intr) __P((void *))));
456 void pciide_mapreg_dma __P((struct pciide_softc *,
457 struct pci_attach_args *));
458 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
459 void pciide_mapchan __P((struct pci_attach_args *,
460 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
461 int (*pci_intr) __P((void *))));
462 int pciide_chan_candisable __P((struct pciide_channel *));
463 void pciide_map_compat_intr __P(( struct pci_attach_args *,
464 struct pciide_channel *, int, int));
465 int pciide_print __P((void *, const char *pnp));
466 int pciide_compat_intr __P((void *));
467 int pciide_pci_intr __P((void *));
468 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
469
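/*
 * Look up a product description by PCI ID: first scan pciide_vendors[] for
 * a matching vendor (the table is terminated by a NULL ide_products
 * pointer), then scan that vendor's product table, which is terminated by
 * a NULL ide_name.  Returns NULL if either lookup fails.
 */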
470 const struct pciide_product_desc *
471 pciide_lookup_product(id)
472 u_int32_t id;
473 {
474 const struct pciide_product_desc *pp;
475 const struct pciide_vendor_desc *vp;
476
477 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
478 if (PCI_VENDOR(id) == vp->ide_vendor)
479 break;
480
481 if ((pp = vp->ide_products) == NULL)
482 return NULL;
483
484 for (; pp->ide_name != NULL; pp++)
485 if (PCI_PRODUCT(id) == pp->ide_product)
486 break;
487
488 if (pp->ide_name == NULL)
489 return NULL;
490 return pp;
491 }
492
493 int
494 pciide_match(parent, match, aux)
495 struct device *parent;
496 struct cfdata *match;
497 void *aux;
498 {
499 struct pci_attach_args *pa = aux;
500 const struct pciide_product_desc *pp;
501
502 /*
503 	 * Check the class register to see that it's a PCI IDE controller.
504 * If it is, we assume that we can deal with it; it _should_
505 * work in a standardized way...
506 */
507 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
508 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
509 return (1);
510 }
511
512 /*
513 	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
514 	 * controllers. Let's see if we can deal with them anyway.
515 */
516 pp = pciide_lookup_product(pa->pa_id);
517 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
518 return (1);
519 }
520
521 return (0);
522 }
523
524 void
525 pciide_attach(parent, self, aux)
526 struct device *parent, *self;
527 void *aux;
528 {
529 struct pci_attach_args *pa = aux;
530 pci_chipset_tag_t pc = pa->pa_pc;
531 pcitag_t tag = pa->pa_tag;
532 struct pciide_softc *sc = (struct pciide_softc *)self;
533 pcireg_t csr;
534 char devinfo[256];
535 const char *displaydev;
536
537 sc->sc_pp = pciide_lookup_product(pa->pa_id);
538 if (sc->sc_pp == NULL) {
539 sc->sc_pp = &default_product_desc;
540 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
541 displaydev = devinfo;
542 } else
543 displaydev = sc->sc_pp->ide_name;
544
545 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
546
547 sc->sc_pc = pa->pa_pc;
548 sc->sc_tag = pa->pa_tag;
549 #ifdef WDCDEBUG
550 if (wdcdebug_pciide_mask & DEBUG_PROBE)
551 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
552 #endif
553 sc->sc_pp->chip_map(sc, pa);
554
555 if (sc->sc_dma_ok) {
556 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
557 csr |= PCI_COMMAND_MASTER_ENABLE;
558 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
559 }
560 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
561 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
562 }
563
564 /* tell whether the chip is enabled or not */
565 int
566 pciide_chipen(sc, pa)
567 struct pciide_softc *sc;
568 struct pci_attach_args *pa;
569 {
570 pcireg_t csr;
571 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
572 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
573 PCI_COMMAND_STATUS_REG);
574 printf("%s: device disabled (at %s)\n",
575 sc->sc_wdcdev.sc_dev.dv_xname,
576 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
577 "device" : "bridge");
578 return 0;
579 }
580 return 1;
581 }
582
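/*
 * Map the registers of a compatibility-mode channel.  The
 * PCIIDE_COMPAT_CMD_BASE()/PCIIDE_COMPAT_CTL_BASE() macros (from
 * pciidereg.h) resolve to the legacy ISA addresses, conventionally
 * 0x1f0/0x3f6 for channel 0 and 0x170/0x376 for channel 1.
 */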
583 int
584 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
585 struct pci_attach_args *pa;
586 struct pciide_channel *cp;
587 int compatchan;
588 bus_size_t *cmdsizep, *ctlsizep;
589 {
590 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
591 struct channel_softc *wdc_cp = &cp->wdc_channel;
592
593 cp->compat = 1;
594 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
595 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
596
597 wdc_cp->cmd_iot = pa->pa_iot;
598 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
599 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
600 printf("%s: couldn't map %s channel cmd regs\n",
601 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
602 return (0);
603 }
604
605 wdc_cp->ctl_iot = pa->pa_iot;
606 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
607 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
608 printf("%s: couldn't map %s channel ctl regs\n",
609 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
610 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
611 PCIIDE_COMPAT_CMD_SIZE);
612 return (0);
613 }
614
615 return (1);
616 }
617
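/*
 * Map the registers of a native-PCI channel and hook up the shared PCI
 * interrupt.  PCIIDE_REG_CMD_BASE()/PCIIDE_REG_CTL_BASE() select the
 * channel's command and control base address registers; in the standard
 * PCI IDE programming interface these are conventionally BARs 0/1 for the
 * primary channel and BARs 2/3 for the secondary channel.
 */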
618 int
619 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
620 struct pci_attach_args * pa;
621 struct pciide_channel *cp;
622 bus_size_t *cmdsizep, *ctlsizep;
623 int (*pci_intr) __P((void *));
624 {
625 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
626 struct channel_softc *wdc_cp = &cp->wdc_channel;
627 const char *intrstr;
628 pci_intr_handle_t intrhandle;
629
630 cp->compat = 0;
631
632 if (sc->sc_pci_ih == NULL) {
633 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
634 pa->pa_intrline, &intrhandle) != 0) {
635 printf("%s: couldn't map native-PCI interrupt\n",
636 sc->sc_wdcdev.sc_dev.dv_xname);
637 return 0;
638 }
639 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
640 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
641 intrhandle, IPL_BIO, pci_intr, sc);
642 if (sc->sc_pci_ih != NULL) {
643 printf("%s: using %s for native-PCI interrupt\n",
644 sc->sc_wdcdev.sc_dev.dv_xname,
645 intrstr ? intrstr : "unknown interrupt");
646 } else {
647 printf("%s: couldn't establish native-PCI interrupt",
648 sc->sc_wdcdev.sc_dev.dv_xname);
649 if (intrstr != NULL)
650 printf(" at %s", intrstr);
651 printf("\n");
652 return 0;
653 }
654 }
655 cp->ih = sc->sc_pci_ih;
656 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
657 PCI_MAPREG_TYPE_IO, 0,
658 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
659 printf("%s: couldn't map %s channel cmd regs\n",
660 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
661 return 0;
662 }
663
664 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
665 PCI_MAPREG_TYPE_IO, 0,
666 &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) {
667 printf("%s: couldn't map %s channel ctl regs\n",
668 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
669 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
670 return 0;
671 }
672 return (1);
673 }
674
675 void
676 pciide_mapreg_dma(sc, pa)
677 struct pciide_softc *sc;
678 struct pci_attach_args *pa;
679 {
680 /*
681 * Map DMA registers
682 *
683 * Note that sc_dma_ok is the right variable to test to see if
684 * DMA can be done. If the interface doesn't support DMA,
685 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
686 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
687 * non-zero if the interface supports DMA and the registers
688 * could be mapped.
689 *
690 * XXX Note that despite the fact that the Bus Master IDE specs
691 * XXX say that "The bus master IDE function uses 16 bytes of IO
692 * XXX space," some controllers (at least the United
693 * XXX Microelectronics UM8886BF) place it in memory space.
694 */
695 sc->sc_dma_ok = (pci_mapreg_map(pa,
696 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 0,
697 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
698 sc->sc_dmat = pa->pa_dmat;
699 if (sc->sc_dma_ok == 0) {
700 printf(", but unused (couldn't map registers)");
701 } else {
702 sc->sc_wdcdev.dma_arg = sc;
703 sc->sc_wdcdev.dma_init = pciide_dma_init;
704 sc->sc_wdcdev.dma_start = pciide_dma_start;
705 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
706 }
707 }
708
709 int
710 pciide_compat_intr(arg)
711 void *arg;
712 {
713 struct pciide_channel *cp = arg;
714
715 #ifdef DIAGNOSTIC
716 /* should only be called for a compat channel */
717 if (cp->compat == 0)
718 panic("pciide compat intr called for non-compat chan %p\n", cp);
719 #endif
720 return (wdcintr(&cp->wdc_channel));
721 }
722
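/*
 * Interrupt handler shared by all native-PCI channels of a controller.
 * Channels in compat mode or not waiting for an interrupt are skipped;
 * otherwise wdcintr() is called and its return value (1 = claimed,
 * 0 = not ours, -1 = maybe ours) is merged into the value handed back
 * to the interrupt dispatcher.
 */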
723 int
724 pciide_pci_intr(arg)
725 void *arg;
726 {
727 struct pciide_softc *sc = arg;
728 struct pciide_channel *cp;
729 struct channel_softc *wdc_cp;
730 int i, rv, crv;
731
732 rv = 0;
733 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
734 cp = &sc->pciide_channels[i];
735 wdc_cp = &cp->wdc_channel;
736
737 		/* If a compat channel, skip. */
738 if (cp->compat)
739 continue;
740 /* if this channel not waiting for intr, skip */
741 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
742 continue;
743
744 crv = wdcintr(wdc_cp);
745 if (crv == 0)
746 ; /* leave rv alone */
747 else if (crv == 1)
748 rv = 1; /* claim the intr */
749 else if (rv == 0) /* crv should be -1 in this case */
750 rv = crv; /* if we've done no better, take it */
751 }
752 return (rv);
753 }
754
755 void
756 pciide_channel_dma_setup(cp)
757 struct pciide_channel *cp;
758 {
759 int drive;
760 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
761 struct ata_drive_datas *drvp;
762
763 for (drive = 0; drive < 2; drive++) {
764 drvp = &cp->wdc_channel.ch_drive[drive];
765 /* If no drive, skip */
766 if ((drvp->drive_flags & DRIVE) == 0)
767 continue;
768 /* setup DMA if needed */
769 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
770 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
771 sc->sc_dma_ok == 0) {
772 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
773 continue;
774 }
775 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
776 != 0) {
777 /* Abort DMA setup */
778 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
779 continue;
780 }
781 }
782 }
783
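/*
 * Allocate and map, per drive, the bus-master DMA descriptor table (an
 * array of NIDEDMA_TABLES struct idedma_table entries, each holding a
 * physical base address and a byte count), plus a DMA map for the table
 * itself and one for the data transfers.  This is done once per drive;
 * subsequent calls for the same drive return immediately.
 */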
784 int
785 pciide_dma_table_setup(sc, channel, drive)
786 struct pciide_softc *sc;
787 int channel, drive;
788 {
789 bus_dma_segment_t seg;
790 int error, rseg;
791 const bus_size_t dma_table_size =
792 sizeof(struct idedma_table) * NIDEDMA_TABLES;
793 struct pciide_dma_maps *dma_maps =
794 &sc->pciide_channels[channel].dma_maps[drive];
795
796 /* If table was already allocated, just return */
797 if (dma_maps->dma_table)
798 return 0;
799
800 /* Allocate memory for the DMA tables and map it */
801 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
802 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
803 BUS_DMA_NOWAIT)) != 0) {
804 printf("%s:%d: unable to allocate table DMA for "
805 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
806 channel, drive, error);
807 return error;
808 }
809 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
810 dma_table_size,
811 (caddr_t *)&dma_maps->dma_table,
812 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
813 		printf("%s:%d: unable to map table DMA for "
814 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
815 channel, drive, error);
816 return error;
817 }
818 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
819 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
820 seg.ds_addr), DEBUG_PROBE);
821
822 /* Create and load table DMA map for this disk */
823 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
824 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
825 &dma_maps->dmamap_table)) != 0) {
826 printf("%s:%d: unable to create table DMA map for "
827 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
828 channel, drive, error);
829 return error;
830 }
831 if ((error = bus_dmamap_load(sc->sc_dmat,
832 dma_maps->dmamap_table,
833 dma_maps->dma_table,
834 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
835 printf("%s:%d: unable to load table DMA map for "
836 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
837 channel, drive, error);
838 return error;
839 }
840 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
841 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
842 	/* Create an xfer DMA map for this drive */
843 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
844 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
845 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
846 &dma_maps->dmamap_xfer)) != 0) {
847 printf("%s:%d: unable to create xfer DMA map for "
848 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
849 channel, drive, error);
850 return error;
851 }
852 return 0;
853 }
854
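/*
 * Prepare a DMA transfer: load the data buffer into dmamap_xfer, fill in
 * one descriptor per DMA segment (addresses and counts stored
 * little-endian), flag the last descriptor with IDEDMA_BYTE_COUNT_EOT,
 * then program the channel's bus-master registers with the table address
 * and transfer direction.  The transfer itself is started later by
 * pciide_dma_start().
 */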
855 int
856 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
857 void *v;
858 int channel, drive;
859 void *databuf;
860 size_t datalen;
861 int flags;
862 {
863 struct pciide_softc *sc = v;
864 int error, seg;
865 struct pciide_dma_maps *dma_maps =
866 &sc->pciide_channels[channel].dma_maps[drive];
867
868 error = bus_dmamap_load(sc->sc_dmat,
869 dma_maps->dmamap_xfer,
870 databuf, datalen, NULL, BUS_DMA_NOWAIT);
871 if (error) {
872 		printf("%s:%d: unable to load xfer DMA map for "
873 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
874 channel, drive, error);
875 return error;
876 }
877
878 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
879 dma_maps->dmamap_xfer->dm_mapsize,
880 (flags & WDC_DMA_READ) ?
881 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
882
883 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
884 #ifdef DIAGNOSTIC
885 /* A segment must not cross a 64k boundary */
886 {
887 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
888 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
889 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
890 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
891 printf("pciide_dma: segment %d physical addr 0x%lx"
892 " len 0x%lx not properly aligned\n",
893 seg, phys, len);
894 panic("pciide_dma: buf align");
895 }
896 }
897 #endif
898 dma_maps->dma_table[seg].base_addr =
899 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
900 dma_maps->dma_table[seg].byte_count =
901 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
902 IDEDMA_BYTE_COUNT_MASK);
903 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
904 seg, le32toh(dma_maps->dma_table[seg].byte_count),
905 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
906
907 }
908 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
909 htole32(IDEDMA_BYTE_COUNT_EOT);
910
911 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
912 dma_maps->dmamap_table->dm_mapsize,
913 BUS_DMASYNC_PREWRITE);
914
915 /* Maps are ready. Start DMA function */
916 #ifdef DIAGNOSTIC
917 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
918 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
919 dma_maps->dmamap_table->dm_segs[0].ds_addr);
920 panic("pciide_dma_init: table align");
921 }
922 #endif
923
924 /* Clear status bits */
925 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
926 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
927 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
928 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
929 /* Write table addr */
930 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
931 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
932 dma_maps->dmamap_table->dm_segs[0].ds_addr);
933 /* set read/write */
934 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
935 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
936 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
937 /* remember flags */
938 dma_maps->dma_flags = flags;
939 return 0;
940 }
941
942 void
943 pciide_dma_start(v, channel, drive)
944 void *v;
945 int channel, drive;
946 {
947 struct pciide_softc *sc = v;
948
949 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
950 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
951 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
952 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
953 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
954 }
955
956 int
957 pciide_dma_finish(v, channel, drive, force)
958 void *v;
959 int channel, drive;
960 int force;
961 {
962 struct pciide_softc *sc = v;
963 u_int8_t status;
964 int error = 0;
965 struct pciide_dma_maps *dma_maps =
966 &sc->pciide_channels[channel].dma_maps[drive];
967
968 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
969 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
970 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
971 DEBUG_XFERS);
972
973 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
974 return WDC_DMAST_NOIRQ;
975
976 /* stop DMA channel */
977 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
978 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
979 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
980 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
981
982 /* Unload the map of the data buffer */
983 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
984 dma_maps->dmamap_xfer->dm_mapsize,
985 (dma_maps->dma_flags & WDC_DMA_READ) ?
986 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
987 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
988
989 if ((status & IDEDMA_CTL_ERR) != 0) {
990 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
991 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
992 error |= WDC_DMAST_ERR;
993 }
994
995 if ((status & IDEDMA_CTL_INTR) == 0) {
996 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
997 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
998 drive, status);
999 error |= WDC_DMAST_NOIRQ;
1000 }
1001
1002 if ((status & IDEDMA_CTL_ACT) != 0) {
1003 /* data underrun, may be a valid condition for ATAPI */
1004 error |= WDC_DMAST_UNDER;
1005 }
1006 return error;
1007 }
1008
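/*
 * Acknowledge a channel interrupt.  Reading IDEDMA_CTL and writing the
 * value back clears the interrupt/error status bits, which the bus-master
 * IDE programming interface defines as write-one-to-clear (the same trick
 * is used when clearing status in pciide_dma_init() above).
 */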
1009 void
1010 pciide_irqack(chp)
1011 struct channel_softc *chp;
1012 {
1013 struct pciide_channel *cp = (struct pciide_channel*)chp;
1014 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1015
1016 /* clear status bits in IDE DMA registers */
1017 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1018 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1019 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1020 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1021 }
1022
1023 /* some common code used by several chip_map */
1024 int
1025 pciide_chansetup(sc, channel, interface)
1026 struct pciide_softc *sc;
1027 int channel;
1028 pcireg_t interface;
1029 {
1030 struct pciide_channel *cp = &sc->pciide_channels[channel];
1031 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1032 cp->name = PCIIDE_CHANNEL_NAME(channel);
1033 cp->wdc_channel.channel = channel;
1034 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1035 cp->wdc_channel.ch_queue =
1036 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1037 if (cp->wdc_channel.ch_queue == NULL) {
1038 		printf("%s %s channel: "
1039 		    "can't allocate memory for command queue\n",
1040 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1041 return 0;
1042 }
1043 printf("%s: %s channel %s to %s mode\n",
1044 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1045 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1046 "configured" : "wired",
1047 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1048 "native-PCI" : "compatibility");
1049 return 1;
1050 }
1051
1052 /* some common code used by several chip channel_map */
1053 void
1054 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1055 struct pci_attach_args *pa;
1056 struct pciide_channel *cp;
1057 pcireg_t interface;
1058 bus_size_t *cmdsizep, *ctlsizep;
1059 int (*pci_intr) __P((void *));
1060 {
1061 struct channel_softc *wdc_cp = &cp->wdc_channel;
1062
1063 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1064 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1065 pci_intr);
1066 else
1067 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1068 wdc_cp->channel, cmdsizep, ctlsizep);
1069
1070 if (cp->hw_ok == 0)
1071 return;
1072 wdc_cp->data32iot = wdc_cp->cmd_iot;
1073 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1074 wdcattach(wdc_cp);
1075 }
1076
1077 /*
1078  * Generic code to check whether a channel can be disabled. Returns 1
1079  * if the channel can be disabled, 0 if not.
1080 */
1081 int
1082 pciide_chan_candisable(cp)
1083 struct pciide_channel *cp;
1084 {
1085 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1086 struct channel_softc *wdc_cp = &cp->wdc_channel;
1087
1088 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1089 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1090 printf("%s: disabling %s channel (no drives)\n",
1091 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1092 cp->hw_ok = 0;
1093 return 1;
1094 }
1095 return 0;
1096 }
1097
1098 /*
1099 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1100 * Set hw_ok=0 on failure
1101 */
1102 void
1103 pciide_map_compat_intr(pa, cp, compatchan, interface)
1104 struct pci_attach_args *pa;
1105 struct pciide_channel *cp;
1106 int compatchan, interface;
1107 {
1108 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1109 struct channel_softc *wdc_cp = &cp->wdc_channel;
1110
1111 if (cp->hw_ok == 0)
1112 return;
1113 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1114 return;
1115
1116 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1117 pa, compatchan, pciide_compat_intr, cp);
1118 if (cp->ih == NULL) {
1119 printf("%s: no compatibility interrupt for use by %s "
1120 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1121 cp->hw_ok = 0;
1122 }
1123 }
1124
1125 void
1126 pciide_print_modes(cp)
1127 struct pciide_channel *cp;
1128 {
1129 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1130 int drive;
1131 struct channel_softc *chp;
1132 struct ata_drive_datas *drvp;
1133
1134 chp = &cp->wdc_channel;
1135 for (drive = 0; drive < 2; drive++) {
1136 drvp = &chp->ch_drive[drive];
1137 if ((drvp->drive_flags & DRIVE) == 0)
1138 continue;
1139 printf("%s(%s:%d:%d): using PIO mode %d",
1140 drvp->drv_softc->dv_xname,
1141 sc->sc_wdcdev.sc_dev.dv_xname,
1142 chp->channel, drive, drvp->PIO_mode);
1143 if (drvp->drive_flags & DRIVE_DMA)
1144 printf(", DMA mode %d", drvp->DMA_mode);
1145 if (drvp->drive_flags & DRIVE_UDMA)
1146 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1147 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1148 printf(" (using DMA data transfers)");
1149 printf("\n");
1150 }
1151 }
1152
1153 void
1154 default_chip_map(sc, pa)
1155 struct pciide_softc *sc;
1156 struct pci_attach_args *pa;
1157 {
1158 struct pciide_channel *cp;
1159 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1160 pcireg_t csr;
1161 int channel, drive;
1162 struct ata_drive_datas *drvp;
1163 u_int8_t idedma_ctl;
1164 bus_size_t cmdsize, ctlsize;
1165 char *failreason;
1166
1167 if (pciide_chipen(sc, pa) == 0)
1168 return;
1169
1170 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1171 printf("%s: bus-master DMA support present",
1172 sc->sc_wdcdev.sc_dev.dv_xname);
1173 if (sc->sc_pp == &default_product_desc &&
1174 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1175 PCIIDE_OPTIONS_DMA) == 0) {
1176 printf(", but unused (no driver support)");
1177 sc->sc_dma_ok = 0;
1178 } else {
1179 pciide_mapreg_dma(sc, pa);
1180 if (sc->sc_dma_ok != 0)
1181 printf(", used without full driver "
1182 "support");
1183 }
1184 } else {
1185 printf("%s: hardware does not support DMA",
1186 sc->sc_wdcdev.sc_dev.dv_xname);
1187 sc->sc_dma_ok = 0;
1188 }
1189 printf("\n");
1190 if (sc->sc_dma_ok) {
1191 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1192 sc->sc_wdcdev.irqack = pciide_irqack;
1193 }
1194 sc->sc_wdcdev.PIO_cap = 0;
1195 sc->sc_wdcdev.DMA_cap = 0;
1196
1197 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1198 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1199 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1200
1201 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1202 cp = &sc->pciide_channels[channel];
1203 if (pciide_chansetup(sc, channel, interface) == 0)
1204 continue;
1205 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1206 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1207 &ctlsize, pciide_pci_intr);
1208 } else {
1209 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1210 channel, &cmdsize, &ctlsize);
1211 }
1212 if (cp->hw_ok == 0)
1213 continue;
1214 /*
1215 * Check to see if something appears to be there.
1216 */
1217 failreason = NULL;
1218 if (!wdcprobe(&cp->wdc_channel)) {
1219 failreason = "not responding; disabled or no drives?";
1220 goto next;
1221 }
1222 /*
1223 * Now, make sure it's actually attributable to this PCI IDE
1224 * channel by trying to access the channel again while the
1225 * PCI IDE controller's I/O space is disabled. (If the
1226 * channel no longer appears to be there, it belongs to
1227 * this controller.) YUCK!
1228 */
1229 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1230 PCI_COMMAND_STATUS_REG);
1231 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1232 csr & ~PCI_COMMAND_IO_ENABLE);
1233 if (wdcprobe(&cp->wdc_channel))
1234 failreason = "other hardware responding at addresses";
1235 pci_conf_write(sc->sc_pc, sc->sc_tag,
1236 PCI_COMMAND_STATUS_REG, csr);
1237 next:
1238 if (failreason) {
1239 printf("%s: %s channel ignored (%s)\n",
1240 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1241 failreason);
1242 cp->hw_ok = 0;
1243 bus_space_unmap(cp->wdc_channel.cmd_iot,
1244 cp->wdc_channel.cmd_ioh, cmdsize);
1245 bus_space_unmap(cp->wdc_channel.ctl_iot,
1246 cp->wdc_channel.ctl_ioh, ctlsize);
1247 } else {
1248 pciide_map_compat_intr(pa, cp, channel, interface);
1249 }
1250 if (cp->hw_ok) {
1251 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1252 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1253 wdcattach(&cp->wdc_channel);
1254 }
1255 }
1256
1257 if (sc->sc_dma_ok == 0)
1258 return;
1259
1260 /* Allocate DMA maps */
1261 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1262 idedma_ctl = 0;
1263 cp = &sc->pciide_channels[channel];
1264 for (drive = 0; drive < 2; drive++) {
1265 drvp = &cp->wdc_channel.ch_drive[drive];
1266 /* If no drive, skip */
1267 if ((drvp->drive_flags & DRIVE) == 0)
1268 continue;
1269 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1270 continue;
1271 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1272 /* Abort DMA setup */
1273 printf("%s:%d:%d: can't allocate DMA maps, "
1274 "using PIO transfers\n",
1275 sc->sc_wdcdev.sc_dev.dv_xname,
1276 channel, drive);
1277 drvp->drive_flags &= ~DRIVE_DMA;
1278 }
1279 printf("%s:%d:%d: using DMA data transfers\n",
1280 sc->sc_wdcdev.sc_dev.dv_xname,
1281 channel, drive);
1282 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1283 }
1284 if (idedma_ctl != 0) {
1285 /* Add software bits in status register */
1286 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1287 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1288 idedma_ctl);
1289 }
1290 }
1291 }
1292
1293 void
1294 piix_chip_map(sc, pa)
1295 struct pciide_softc *sc;
1296 struct pci_attach_args *pa;
1297 {
1298 struct pciide_channel *cp;
1299 int channel;
1300 u_int32_t idetim;
1301 bus_size_t cmdsize, ctlsize;
1302
1303 if (pciide_chipen(sc, pa) == 0)
1304 return;
1305
1306 printf("%s: bus-master DMA support present",
1307 sc->sc_wdcdev.sc_dev.dv_xname);
1308 pciide_mapreg_dma(sc, pa);
1309 printf("\n");
1310 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1311 WDC_CAPABILITY_MODE;
1312 if (sc->sc_dma_ok) {
1313 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1314 sc->sc_wdcdev.irqack = pciide_irqack;
1315 switch(sc->sc_pp->ide_product) {
1316 case PCI_PRODUCT_INTEL_82371AB_IDE:
1317 case PCI_PRODUCT_INTEL_82801AA_IDE:
1318 case PCI_PRODUCT_INTEL_82801AB_IDE:
1319 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1320 }
1321 }
1322 sc->sc_wdcdev.PIO_cap = 4;
1323 sc->sc_wdcdev.DMA_cap = 2;
1324 sc->sc_wdcdev.UDMA_cap =
1325 (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) ? 4 : 2;
1326 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1327 sc->sc_wdcdev.set_modes = piix_setup_channel;
1328 else
1329 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1330 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1331 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1332
1333 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1334 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1335 DEBUG_PROBE);
1336 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1337 WDCDEBUG_PRINT((", sidetim=0x%x",
1338 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1339 DEBUG_PROBE);
1340 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1341 		WDCDEBUG_PRINT((", udmareg 0x%x",
1342 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1343 DEBUG_PROBE);
1344 }
1345 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1346 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1347 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1348 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1349 DEBUG_PROBE);
1350 }
1351
1352 }
1353 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1354
1355 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1356 cp = &sc->pciide_channels[channel];
1357 /* PIIX is compat-only */
1358 if (pciide_chansetup(sc, channel, 0) == 0)
1359 continue;
1360 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1361 if ((PIIX_IDETIM_READ(idetim, channel) &
1362 PIIX_IDETIM_IDE) == 0) {
1363 printf("%s: %s channel ignored (disabled)\n",
1364 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1365 continue;
1366 }
1367 /* PIIX are compat-only pciide devices */
1368 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1369 if (cp->hw_ok == 0)
1370 continue;
1371 if (pciide_chan_candisable(cp)) {
1372 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1373 channel);
1374 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1375 idetim);
1376 }
1377 pciide_map_compat_intr(pa, cp, channel, 0);
1378 if (cp->hw_ok == 0)
1379 continue;
1380 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1381 }
1382
1383 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1384 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1385 DEBUG_PROBE);
1386 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1387 WDCDEBUG_PRINT((", sidetim=0x%x",
1388 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1389 DEBUG_PROBE);
1390 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1391 		WDCDEBUG_PRINT((", udmareg 0x%x",
1392 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1393 DEBUG_PROBE);
1394 }
1395 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1396 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1397 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1398 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1399 DEBUG_PROBE);
1400 }
1401 }
1402 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1403 }
1404
1405 void
1406 piix_setup_channel(chp)
1407 struct channel_softc *chp;
1408 {
1409 u_int8_t mode[2], drive;
1410 u_int32_t oidetim, idetim, idedma_ctl;
1411 struct pciide_channel *cp = (struct pciide_channel*)chp;
1412 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1413 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1414
1415 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1416 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1417 idedma_ctl = 0;
1418
1419 /* set up new idetim: Enable IDE registers decode */
1420 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1421 chp->channel);
1422
1423 /* setup DMA */
1424 pciide_channel_dma_setup(cp);
1425
1426 /*
1427 	 * Here we have to mess with the drives' modes: the PIIX can't have
1428 	 * different timings for master and slave drives, so we need to
1429 	 * find the best combination.
1430 */
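	/*
	 * Illustrative example (not in the original source): if the master
	 * is set up for multiword DMA mode 2 and the slave only for PIO
	 * mode 4, the master keeps DMA mode 2 and the slave is dropped to
	 * PIO mode 0 whenever the shared ISP/RTC timings differ; if both
	 * drives are PIO-only and both are at PIO mode 2 or better, both
	 * are run at the lower of the two PIO modes.
	 */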
1431
1432 	/* If both drives support DMA, take the lower mode */
1433 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1434 (drvp[1].drive_flags & DRIVE_DMA)) {
1435 mode[0] = mode[1] =
1436 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1437 drvp[0].DMA_mode = mode[0];
1438 drvp[1].DMA_mode = mode[1];
1439 goto ok;
1440 }
1441 /*
1442 * If only one drive supports DMA, use its mode, and
1443 	 * put the other one in PIO mode 0 if its mode is not compatible
1444 */
1445 if (drvp[0].drive_flags & DRIVE_DMA) {
1446 mode[0] = drvp[0].DMA_mode;
1447 mode[1] = drvp[1].PIO_mode;
1448 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1449 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1450 mode[1] = drvp[1].PIO_mode = 0;
1451 goto ok;
1452 }
1453 if (drvp[1].drive_flags & DRIVE_DMA) {
1454 mode[1] = drvp[1].DMA_mode;
1455 mode[0] = drvp[0].PIO_mode;
1456 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1457 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1458 mode[0] = drvp[0].PIO_mode = 0;
1459 goto ok;
1460 }
1461 /*
1462 	 * If neither drive is using DMA, take the lower mode, unless
1463 * one of them is PIO mode < 2
1464 */
1465 if (drvp[0].PIO_mode < 2) {
1466 mode[0] = drvp[0].PIO_mode = 0;
1467 mode[1] = drvp[1].PIO_mode;
1468 } else if (drvp[1].PIO_mode < 2) {
1469 mode[1] = drvp[1].PIO_mode = 0;
1470 mode[0] = drvp[0].PIO_mode;
1471 } else {
1472 mode[0] = mode[1] =
1473 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1474 drvp[0].PIO_mode = mode[0];
1475 drvp[1].PIO_mode = mode[1];
1476 }
1477 ok: /* The modes are set up */
1478 for (drive = 0; drive < 2; drive++) {
1479 if (drvp[drive].drive_flags & DRIVE_DMA) {
1480 idetim |= piix_setup_idetim_timings(
1481 mode[drive], 1, chp->channel);
1482 goto end;
1483 }
1484 }
1485 	/* If we get here, none of the drives are using DMA */
1486 if (mode[0] >= 2)
1487 idetim |= piix_setup_idetim_timings(
1488 mode[0], 0, chp->channel);
1489 else
1490 idetim |= piix_setup_idetim_timings(
1491 mode[1], 0, chp->channel);
1492 end: /*
1493 * timing mode is now set up in the controller. Enable
1494 * it per-drive
1495 */
1496 for (drive = 0; drive < 2; drive++) {
1497 /* If no drive, skip */
1498 if ((drvp[drive].drive_flags & DRIVE) == 0)
1499 continue;
1500 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1501 if (drvp[drive].drive_flags & DRIVE_DMA)
1502 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1503 }
1504 if (idedma_ctl != 0) {
1505 /* Add software bits in status register */
1506 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1507 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1508 idedma_ctl);
1509 }
1510 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1511 pciide_print_modes(cp);
1512 }
1513
1514 void
1515 piix3_4_setup_channel(chp)
1516 struct channel_softc *chp;
1517 {
1518 struct ata_drive_datas *drvp;
1519 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1520 struct pciide_channel *cp = (struct pciide_channel*)chp;
1521 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1522 int drive;
1523 int channel = chp->channel;
1524
1525 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1526 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1527 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1528 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1529 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1530 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1531 PIIX_SIDETIM_RTC_MASK(channel));
1532
1533 idedma_ctl = 0;
1534 /* If channel disabled, no need to go further */
1535 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1536 return;
1537 /* set up new idetim: Enable IDE registers decode */
1538 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1539
1540 /* setup DMA if needed */
1541 pciide_channel_dma_setup(cp);
1542
1543 for (drive = 0; drive < 2; drive++) {
1544 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1545 PIIX_UDMATIM_SET(0x3, channel, drive));
1546 drvp = &chp->ch_drive[drive];
1547 /* If no drive, skip */
1548 if ((drvp->drive_flags & DRIVE) == 0)
1549 continue;
1550 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1551 (drvp->drive_flags & DRIVE_UDMA) == 0))
1552 goto pio;
1553
1554 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1555 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1556 ideconf |= PIIX_CONFIG_PINGPONG;
1557 }
1558 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1559 /* setup Ultra/66 */
1560 if (drvp->UDMA_mode > 2 &&
1561 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1562 drvp->UDMA_mode = 2;
1563 if (drvp->UDMA_mode > 2)
1564 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1565 else
1566 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1567 }
1568 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1569 (drvp->drive_flags & DRIVE_UDMA)) {
1570 /* use Ultra/DMA */
1571 drvp->drive_flags &= ~DRIVE_DMA;
1572 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
1573 udmareg |= PIIX_UDMATIM_SET(
1574 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1575 } else {
1576 /* use Multiword DMA */
1577 drvp->drive_flags &= ~DRIVE_UDMA;
1578 if (drive == 0) {
1579 idetim |= piix_setup_idetim_timings(
1580 drvp->DMA_mode, 1, channel);
1581 } else {
1582 sidetim |= piix_setup_sidetim_timings(
1583 drvp->DMA_mode, 1, channel);
1584 idetim =PIIX_IDETIM_SET(idetim,
1585 PIIX_IDETIM_SITRE, channel);
1586 }
1587 }
1588 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1589
1590 pio: /* use PIO mode */
1591 idetim |= piix_setup_idetim_drvs(drvp);
1592 if (drive == 0) {
1593 idetim |= piix_setup_idetim_timings(
1594 drvp->PIO_mode, 0, channel);
1595 } else {
1596 sidetim |= piix_setup_sidetim_timings(
1597 drvp->PIO_mode, 0, channel);
1598 idetim =PIIX_IDETIM_SET(idetim,
1599 PIIX_IDETIM_SITRE, channel);
1600 }
1601 }
1602 if (idedma_ctl != 0) {
1603 /* Add software bits in status register */
1604 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1605 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1606 idedma_ctl);
1607 }
1608 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1609 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1610 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1611 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1612 pciide_print_modes(cp);
1613 }
1614
1615
1616 /* setup ISP and RTC fields, based on mode */
1617 static u_int32_t
1618 piix_setup_idetim_timings(mode, dma, channel)
1619 u_int8_t mode;
1620 u_int8_t dma;
1621 u_int8_t channel;
1622 {
1623
1624 if (dma)
1625 return PIIX_IDETIM_SET(0,
1626 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1627 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1628 channel);
1629 else
1630 return PIIX_IDETIM_SET(0,
1631 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1632 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1633 channel);
1634 }
1635
1636 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1637 static u_int32_t
1638 piix_setup_idetim_drvs(drvp)
1639 struct ata_drive_datas *drvp;
1640 {
1641 u_int32_t ret = 0;
1642 struct channel_softc *chp = drvp->chnl_softc;
1643 u_int8_t channel = chp->channel;
1644 u_int8_t drive = drvp->drive;
1645
1646 /*
1647 	 * If the drive is using UDMA, the timing setups are independent,
1648 	 * so just check DMA and PIO here.
1649 */
1650 if (drvp->drive_flags & DRIVE_DMA) {
1651 		/* if DMA mode is 0, use compatible timings */
1652 if ((drvp->drive_flags & DRIVE_DMA) &&
1653 drvp->DMA_mode == 0) {
1654 drvp->PIO_mode = 0;
1655 return ret;
1656 }
1657 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1658 /*
1659 		 * If PIO and DMA timings are the same, use fast timings for
1660 		 * PIO too; otherwise use compat timings.
1661 */
1662 if ((piix_isp_pio[drvp->PIO_mode] !=
1663 piix_isp_dma[drvp->DMA_mode]) ||
1664 (piix_rtc_pio[drvp->PIO_mode] !=
1665 piix_rtc_dma[drvp->DMA_mode]))
1666 drvp->PIO_mode = 0;
1667 /* if PIO mode <= 2, use compat timings for PIO */
1668 if (drvp->PIO_mode <= 2) {
1669 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1670 channel);
1671 return ret;
1672 }
1673 }
1674
1675 /*
1676 * Now setup PIO modes. If mode < 2, use compat timings.
1677 * Else enable fast timings. Enable IORDY and prefetch/post
1678 * if PIO mode >= 3.
1679 */
1680
1681 if (drvp->PIO_mode < 2)
1682 return ret;
1683
1684 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1685 if (drvp->PIO_mode >= 3) {
1686 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1687 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1688 }
1689 return ret;
1690 }
1691
1692 /* setup values in SIDETIM registers, based on mode */
1693 static u_int32_t
1694 piix_setup_sidetim_timings(mode, dma, channel)
1695 u_int8_t mode;
1696 u_int8_t dma;
1697 u_int8_t channel;
1698 {
1699 if (dma)
1700 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1701 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1702 else
1703 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1704 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1705 }
1706
1707 void
1708 amd756_chip_map(sc, pa)
1709 struct pciide_softc *sc;
1710 struct pci_attach_args *pa;
1711 {
1712 struct pciide_channel *cp;
1713 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1714 int channel;
1715 pcireg_t chanenable;
1716 bus_size_t cmdsize, ctlsize;
1717
1718 if (pciide_chipen(sc, pa) == 0)
1719 return;
1720 printf("%s: bus-master DMA support present",
1721 sc->sc_wdcdev.sc_dev.dv_xname);
1722 pciide_mapreg_dma(sc, pa);
1723 printf("\n");
1724 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1725 WDC_CAPABILITY_MODE;
1726 if (sc->sc_dma_ok) {
1727 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1728 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1729 sc->sc_wdcdev.irqack = pciide_irqack;
1730 }
1731 sc->sc_wdcdev.PIO_cap = 4;
1732 sc->sc_wdcdev.DMA_cap = 2;
1733 sc->sc_wdcdev.UDMA_cap = 4;
1734 sc->sc_wdcdev.set_modes = amd756_setup_channel;
1735 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1736 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1737 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1738
1739 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1740 DEBUG_PROBE);
1741 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1742 cp = &sc->pciide_channels[channel];
1743 if (pciide_chansetup(sc, channel, interface) == 0)
1744 continue;
1745
1746 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1747 printf("%s: %s channel ignored (disabled)\n",
1748 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1749 continue;
1750 }
1751 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1752 pciide_pci_intr);
1753
1754 if (pciide_chan_candisable(cp))
1755 chanenable &= ~AMD756_CHAN_EN(channel);
1756 pciide_map_compat_intr(pa, cp, channel, interface);
1757 if (cp->hw_ok == 0)
1758 continue;
1759
1760 amd756_setup_channel(&cp->wdc_channel);
1761 }
1762 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1763 chanenable);
1764 return;
1765 }
1766
1767 void
1768 amd756_setup_channel(chp)
1769 struct channel_softc *chp;
1770 {
1771 u_int32_t udmatim_reg, datatim_reg;
1772 u_int8_t idedma_ctl;
1773 int mode, drive;
1774 struct ata_drive_datas *drvp;
1775 struct pciide_channel *cp = (struct pciide_channel*)chp;
1776 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1777 int rev = PCI_REVISION(
1778 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1779
1780 idedma_ctl = 0;
1781 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1782 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1783 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1784 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1785
1786 /* setup DMA if needed */
1787 pciide_channel_dma_setup(cp);
1788
1789 for (drive = 0; drive < 2; drive++) {
1790 drvp = &chp->ch_drive[drive];
1791 /* If no drive, skip */
1792 if ((drvp->drive_flags & DRIVE) == 0)
1793 continue;
1794 /* add timing values, setup DMA if needed */
1795 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1796 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1797 mode = drvp->PIO_mode;
1798 goto pio;
1799 }
1800 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1801 (drvp->drive_flags & DRIVE_UDMA)) {
1802 /* use Ultra/DMA */
1803 drvp->drive_flags &= ~DRIVE_DMA;
1804 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1805 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1806 AMD756_UDMA_TIME(chp->channel, drive,
1807 amd756_udma_tim[drvp->UDMA_mode]);
1808 /* can use PIO timings, MW DMA unused */
1809 mode = drvp->PIO_mode;
1810 } else {
1811 /* use Multiword DMA, but only if revision is OK */
1812 drvp->drive_flags &= ~DRIVE_UDMA;
1813 #ifndef PCIIDE_AMD756_ENABLEDMA
1814 			/*
1815 			 * The workaround (disabling multi-word DMA) doesn't
1816 			 * seem to be necessary with all drives, so it can be
1817 			 * bypassed by defining PCIIDE_AMD756_ENABLEDMA. The
1818 			 * bug causes a hard hang if triggered.
1819 			 */
1820 if (AMD756_CHIPREV_DISABLEDMA(rev)) {
1821 printf("%s:%d:%d: multi-word DMA disabled due "
1822 "to chip revision\n",
1823 sc->sc_wdcdev.sc_dev.dv_xname,
1824 chp->channel, drive);
1825 mode = drvp->PIO_mode;
1826 drvp->drive_flags &= ~DRIVE_DMA;
1827 goto pio;
1828 }
1829 #endif
1830 /* mode = min(pio, dma+2) */
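			/*
			 * e.g. PIO 4 + MW DMA 1 -> mode 3, PIO 3 + MW DMA 2
			 * -> mode 3 (illustrative arithmetic only).
			 */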
1831 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1832 mode = drvp->PIO_mode;
1833 else
1834 mode = drvp->DMA_mode + 2;
1835 }
1836 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1837
1838 pio: /* setup PIO mode */
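		/*
		 * Map the combined mode back into PIO/DMA fields: modes 0-2
		 * collapse to compatible PIO 0 with DMA timing cleared,
		 * e.g. mode 4 -> PIO 4 / MW DMA 2 (illustrative).
		 */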
1839 if (mode <= 2) {
1840 drvp->DMA_mode = 0;
1841 drvp->PIO_mode = 0;
1842 mode = 0;
1843 } else {
1844 drvp->PIO_mode = mode;
1845 drvp->DMA_mode = mode - 2;
1846 }
1847 datatim_reg |=
1848 AMD756_DATATIM_PULSE(chp->channel, drive,
1849 amd756_pio_set[mode]) |
1850 AMD756_DATATIM_RECOV(chp->channel, drive,
1851 amd756_pio_rec[mode]);
1852 }
1853 if (idedma_ctl != 0) {
1854 /* Add software bits in status register */
1855 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1856 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1857 idedma_ctl);
1858 }
1859 pciide_print_modes(cp);
1860 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1861 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1862 }
1863
1864 void
1865 apollo_chip_map(sc, pa)
1866 struct pciide_softc *sc;
1867 struct pci_attach_args *pa;
1868 {
1869 struct pciide_channel *cp;
1870 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1871 int channel;
1872 u_int32_t ideconf;
1873 bus_size_t cmdsize, ctlsize;
1874
1875 if (pciide_chipen(sc, pa) == 0)
1876 return;
1877 printf("%s: bus-master DMA support present",
1878 sc->sc_wdcdev.sc_dev.dv_xname);
1879 pciide_mapreg_dma(sc, pa);
1880 printf("\n");
1881 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1882 WDC_CAPABILITY_MODE;
1883 if (sc->sc_dma_ok) {
1884 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1885 sc->sc_wdcdev.irqack = pciide_irqack;
1886 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE)
1887 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1888 }
1889 sc->sc_wdcdev.PIO_cap = 4;
1890 sc->sc_wdcdev.DMA_cap = 2;
1891 sc->sc_wdcdev.UDMA_cap = 2;
1892 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1893 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1894 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1895
1896 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1897 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1898 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1899 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1900 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1901 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
1902 DEBUG_PROBE);
1903
1904 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1905 cp = &sc->pciide_channels[channel];
1906 if (pciide_chansetup(sc, channel, interface) == 0)
1907 continue;
1908
1909 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
1910 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
1911 printf("%s: %s channel ignored (disabled)\n",
1912 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1913 continue;
1914 }
1915 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1916 pciide_pci_intr);
1917 if (cp->hw_ok == 0)
1918 continue;
1919 if (pciide_chan_candisable(cp)) {
1920 ideconf &= ~APO_IDECONF_EN(channel);
1921 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
1922 ideconf);
1923 }
1924 pciide_map_compat_intr(pa, cp, channel, interface);
1925
1926 if (cp->hw_ok == 0)
1927 continue;
1928 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
1929 }
1930 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1931 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1932 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
1933 }
1934
1935 void
1936 apollo_setup_channel(chp)
1937 struct channel_softc *chp;
1938 {
1939 u_int32_t udmatim_reg, datatim_reg;
1940 u_int8_t idedma_ctl;
1941 int mode, drive;
1942 struct ata_drive_datas *drvp;
1943 struct pciide_channel *cp = (struct pciide_channel*)chp;
1944 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1945
1946 idedma_ctl = 0;
1947 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
1948 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1949 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
1950 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
1951
1952 /* setup DMA if needed */
1953 pciide_channel_dma_setup(cp);
1954
1955 for (drive = 0; drive < 2; drive++) {
1956 drvp = &chp->ch_drive[drive];
1957 /* If no drive, skip */
1958 if ((drvp->drive_flags & DRIVE) == 0)
1959 continue;
1960 /* add timing values, setup DMA if needed */
1961 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1962 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1963 mode = drvp->PIO_mode;
1964 goto pio;
1965 }
1966 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1967 (drvp->drive_flags & DRIVE_UDMA)) {
1968 /* use Ultra/DMA */
1969 drvp->drive_flags &= ~DRIVE_DMA;
1970 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
1971 APO_UDMA_EN_MTH(chp->channel, drive) |
1972 APO_UDMA_TIME(chp->channel, drive,
1973 apollo_udma_tim[drvp->UDMA_mode]);
1974 /* can use PIO timings, MW DMA unused */
1975 mode = drvp->PIO_mode;
1976 } else {
1977 /* use Multiword DMA */
1978 drvp->drive_flags &= ~DRIVE_UDMA;
1979 /* mode = min(pio, dma+2) */
1980 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1981 mode = drvp->PIO_mode;
1982 else
1983 mode = drvp->DMA_mode + 2;
1984 }
1985 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1986
1987 pio: /* setup PIO mode */
1988 if (mode <= 2) {
1989 drvp->DMA_mode = 0;
1990 drvp->PIO_mode = 0;
1991 mode = 0;
1992 } else {
1993 drvp->PIO_mode = mode;
1994 drvp->DMA_mode = mode - 2;
1995 }
1996 datatim_reg |=
1997 APO_DATATIM_PULSE(chp->channel, drive,
1998 apollo_pio_set[mode]) |
1999 APO_DATATIM_RECOV(chp->channel, drive,
2000 apollo_pio_rec[mode]);
2001 }
2002 if (idedma_ctl != 0) {
2003 /* Add software bits in status register */
2004 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2005 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2006 idedma_ctl);
2007 }
2008 pciide_print_modes(cp);
2009 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2010 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2011 }
2012
2013 void
2014 cmd_channel_map(pa, sc, channel)
2015 struct pci_attach_args *pa;
2016 struct pciide_softc *sc;
2017 int channel;
2018 {
2019 struct pciide_channel *cp = &sc->pciide_channels[channel];
2020 bus_size_t cmdsize, ctlsize;
2021 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2022 int interface;
2023
2024 /*
2025 * The 0648/0649 can be told to identify as a RAID controller.
2026 	 * In this case, we have to fake the interface.
2027 */
2028 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2029 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2030 PCIIDE_INTERFACE_SETTABLE(1);
2031 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2032 CMD_CONF_DSA1)
2033 interface |= PCIIDE_INTERFACE_PCI(0) |
2034 PCIIDE_INTERFACE_PCI(1);
2035 } else {
2036 interface = PCI_INTERFACE(pa->pa_class);
2037 }
2038
2039 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2040 cp->name = PCIIDE_CHANNEL_NAME(channel);
2041 cp->wdc_channel.channel = channel;
2042 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2043
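	/*
	 * Both channels share a single command queue (the secondary reuses
	 * the primary's), presumably because the controller cannot service
	 * commands on both channels at the same time.
	 */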
2044 if (channel > 0) {
2045 cp->wdc_channel.ch_queue =
2046 sc->pciide_channels[0].wdc_channel.ch_queue;
2047 } else {
2048 cp->wdc_channel.ch_queue =
2049 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2050 }
2051 if (cp->wdc_channel.ch_queue == NULL) {
2052 printf("%s %s channel: "
2053 "can't allocate memory for command queue",
2054 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2055 return;
2056 }
2057
2058 printf("%s: %s channel %s to %s mode\n",
2059 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2060 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2061 "configured" : "wired",
2062 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2063 "native-PCI" : "compatibility");
2064
2065 /*
2066 * with a CMD PCI64x, if we get here, the first channel is enabled:
2067 * there's no way to disable the first channel without disabling
2068 * the whole device
2069 */
2070 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2071 printf("%s: %s channel ignored (disabled)\n",
2072 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2073 return;
2074 }
2075
2076 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2077 if (cp->hw_ok == 0)
2078 return;
2079 if (channel == 1) {
2080 if (pciide_chan_candisable(cp)) {
2081 ctrl &= ~CMD_CTRL_2PORT;
2082 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2083 CMD_CTRL, ctrl);
2084 }
2085 }
2086 pciide_map_compat_intr(pa, cp, channel, interface);
2087 }
2088
2089 int
2090 cmd_pci_intr(arg)
2091 void *arg;
2092 {
2093 struct pciide_softc *sc = arg;
2094 struct pciide_channel *cp;
2095 struct channel_softc *wdc_cp;
2096 int i, rv, crv;
2097 u_int32_t priirq, secirq;
2098
2099 rv = 0;
2100 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2101 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2102 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2103 cp = &sc->pciide_channels[i];
2104 wdc_cp = &cp->wdc_channel;
2105 		/* If a compat channel, skip. */
2106 if (cp->compat)
2107 continue;
2108 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2109 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2110 crv = wdcintr(wdc_cp);
2111 if (crv == 0)
2112 printf("%s:%d: bogus intr\n",
2113 sc->sc_wdcdev.sc_dev.dv_xname, i);
2114 else
2115 rv = 1;
2116 }
2117 }
2118 return rv;
2119 }
2120
2121 void
2122 cmd_chip_map(sc, pa)
2123 struct pciide_softc *sc;
2124 struct pci_attach_args *pa;
2125 {
2126 int channel;
2127
2128 /*
2129 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2130 	 * and the base address registers can be disabled at the
2131 	 * hardware level. In this case, the device is wired
2132 * in compat mode and its first channel is always enabled,
2133 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2134 * In fact, it seems that the first channel of the CMD PCI0640
2135 * can't be disabled.
2136 */
2137
2138 #ifdef PCIIDE_CMD064x_DISABLE
2139 if (pciide_chipen(sc, pa) == 0)
2140 return;
2141 #endif
2142
2143 printf("%s: hardware does not support DMA\n",
2144 sc->sc_wdcdev.sc_dev.dv_xname);
2145 sc->sc_dma_ok = 0;
2146
2147 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2148 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2149 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2150
2151 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2152 cmd_channel_map(pa, sc, channel);
2153 }
2154 }
2155
2156 void
2157 cmd0643_9_chip_map(sc, pa)
2158 struct pciide_softc *sc;
2159 struct pci_attach_args *pa;
2160 {
2161 struct pciide_channel *cp;
2162 int channel;
2163
2164 /*
2165 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2166 	 * and the base address registers can be disabled at the
2167 	 * hardware level. In this case, the device is wired
2168 * in compat mode and its first channel is always enabled,
2169 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2170 * In fact, it seems that the first channel of the CMD PCI0640
2171 * can't be disabled.
2172 */
2173
2174 #ifdef PCIIDE_CMD064x_DISABLE
2175 if (pciide_chipen(sc, pa) == 0)
2176 return;
2177 #endif
2178 printf("%s: bus-master DMA support present",
2179 sc->sc_wdcdev.sc_dev.dv_xname);
2180 pciide_mapreg_dma(sc, pa);
2181 printf("\n");
2182 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2183 WDC_CAPABILITY_MODE;
2184 if (sc->sc_dma_ok) {
2185 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2186 switch (sc->sc_pp->ide_product) {
2187 case PCI_PRODUCT_CMDTECH_649:
2188 case PCI_PRODUCT_CMDTECH_648:
2189 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2190 sc->sc_wdcdev.UDMA_cap = 4;
2191 sc->sc_wdcdev.irqack = cmd648_9_irqack;
2192 break;
2193 default:
2194 sc->sc_wdcdev.irqack = pciide_irqack;
2195 }
2196 }
2197
2198 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2199 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2200 sc->sc_wdcdev.PIO_cap = 4;
2201 sc->sc_wdcdev.DMA_cap = 2;
2202 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2203
2204 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2205 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2206 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2207 DEBUG_PROBE);
2208
2209 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2210 cp = &sc->pciide_channels[channel];
2211 cmd_channel_map(pa, sc, channel);
2212 if (cp->hw_ok == 0)
2213 continue;
2214 cmd0643_9_setup_channel(&cp->wdc_channel);
2215 }
2216 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2217 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2218 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2219 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2220 DEBUG_PROBE);
2221 }
2222
2223 void
2224 cmd0643_9_setup_channel(chp)
2225 struct channel_softc *chp;
2226 {
2227 struct ata_drive_datas *drvp;
2228 u_int8_t tim;
2229 u_int32_t idedma_ctl, udma_reg;
2230 int drive;
2231 struct pciide_channel *cp = (struct pciide_channel*)chp;
2232 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2233
2234 idedma_ctl = 0;
2235 /* setup DMA if needed */
2236 pciide_channel_dma_setup(cp);
2237
2238 for (drive = 0; drive < 2; drive++) {
2239 drvp = &chp->ch_drive[drive];
2240 /* If no drive, skip */
2241 if ((drvp->drive_flags & DRIVE) == 0)
2242 continue;
2243 /* add timing values, setup DMA if needed */
2244 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2245 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2246 if (drvp->drive_flags & DRIVE_UDMA) {
2247 /* UltraDMA on a 0648 or 0649 */
2248 udma_reg = pciide_pci_read(sc->sc_pc,
2249 sc->sc_tag, CMD_UDMATIM(chp->channel));
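				/*
				 * If the BICSR 80-wire bit for this channel
				 * is clear (presumably no 80-conductor cable
				 * detected), cap the drive at UDMA2 below.
				 */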
2250 if (drvp->UDMA_mode > 2 &&
2251 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2252 CMD_BICSR) &
2253 CMD_BICSR_80(chp->channel)) == 0)
2254 drvp->UDMA_mode = 2;
2255 if (drvp->UDMA_mode > 2)
2256 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2257 else
2258 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2259 udma_reg |= CMD_UDMATIM_UDMA(drive);
2260 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2261 CMD_UDMATIM_TIM_OFF(drive));
2262 udma_reg |=
2263 (cmd0648_9_tim_udma[drvp->UDMA_mode] <<
2264 CMD_UDMATIM_TIM_OFF(drive));
2265 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2266 CMD_UDMATIM(chp->channel), udma_reg);
2267 } else {
2268 /*
2269 * use Multiword DMA.
2270 * Timings will be used for both PIO and DMA,
2271 * so adjust DMA mode if needed
2272 * if we have a 0648/9, turn off UDMA
2273 */
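				/*
				 * e.g. PIO 4 with MW DMA 2 keeps DMA 2, but
				 * PIO 3 with MW DMA 2 is trimmed to DMA 1
				 * (illustrative arithmetic only).
				 */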
2274 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2275 udma_reg = pciide_pci_read(sc->sc_pc,
2276 sc->sc_tag,
2277 CMD_UDMATIM(chp->channel));
2278 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2279 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2280 CMD_UDMATIM(chp->channel),
2281 udma_reg);
2282 }
2283 if (drvp->PIO_mode >= 3 &&
2284 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2285 drvp->DMA_mode = drvp->PIO_mode - 2;
2286 }
2287 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2288 }
2289 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2290 }
2291 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2292 CMD_DATA_TIM(chp->channel, drive), tim);
2293 }
2294 if (idedma_ctl != 0) {
2295 /* Add software bits in status register */
2296 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2297 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2298 idedma_ctl);
2299 }
2300 pciide_print_modes(cp);
2301 }
2302
2303 void
2304 cmd648_9_irqack(chp)
2305 struct channel_softc *chp;
2306 {
2307 u_int32_t priirq, secirq;
2308 struct pciide_channel *cp = (struct pciide_channel*)chp;
2309 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2310
2311 if (chp->channel == 0) {
2312 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2313 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2314 } else {
2315 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2316 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2317 }
2318 pciide_irqack(chp);
2319 }
2320
2321 void
2322 cy693_chip_map(sc, pa)
2323 struct pciide_softc *sc;
2324 struct pci_attach_args *pa;
2325 {
2326 struct pciide_channel *cp;
2327 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2328 bus_size_t cmdsize, ctlsize;
2329
2330 if (pciide_chipen(sc, pa) == 0)
2331 return;
2332 /*
2333 	 * This chip has 2 PCI IDE functions, one for primary and one for
2334 * secondary. So we need to call pciide_mapregs_compat() with
2335 * the real channel
2336 */
2337 if (pa->pa_function == 1) {
2338 sc->sc_cy_compatchan = 0;
2339 } else if (pa->pa_function == 2) {
2340 sc->sc_cy_compatchan = 1;
2341 } else {
2342 printf("%s: unexpected PCI function %d\n",
2343 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2344 return;
2345 }
2346 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2347 printf("%s: bus-master DMA support present, "
2348 "but unused (no driver support)",
2349 sc->sc_wdcdev.sc_dev.dv_xname);
2350 } else {
2351 printf("%s: hardware does not support DMA",
2352 sc->sc_wdcdev.sc_dev.dv_xname);
2353 }
2354 sc->sc_dma_ok = 0;
2355 printf("\n");
2356
2357 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2358 WDC_CAPABILITY_MODE;
2359 sc->sc_wdcdev.PIO_cap = 4;
2360 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2361
2362 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2363 sc->sc_wdcdev.nchannels = 1;
2364
2365 /* Only one channel for this chip; if we are here it's enabled */
2366 cp = &sc->pciide_channels[0];
2367 sc->wdc_chanarray[0] = &cp->wdc_channel;
2368 cp->name = PCIIDE_CHANNEL_NAME(0);
2369 cp->wdc_channel.channel = 0;
2370 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2371 cp->wdc_channel.ch_queue =
2372 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2373 if (cp->wdc_channel.ch_queue == NULL) {
2374 printf("%s primary channel: "
2375 "can't allocate memory for command queue",
2376 sc->sc_wdcdev.sc_dev.dv_xname);
2377 return;
2378 }
2379 printf("%s: primary channel %s to ",
2380 sc->sc_wdcdev.sc_dev.dv_xname,
2381 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2382 "configured" : "wired");
2383 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2384 printf("native-PCI");
2385 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2386 pciide_pci_intr);
2387 } else {
2388 printf("compatibility");
2389 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2390 &cmdsize, &ctlsize);
2391 }
2392 printf(" mode\n");
2393 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2394 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2395 wdcattach(&cp->wdc_channel);
2396 if (pciide_chan_candisable(cp)) {
2397 pci_conf_write(sc->sc_pc, sc->sc_tag,
2398 PCI_COMMAND_STATUS_REG, 0);
2399 }
2400 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2401 if (cp->hw_ok == 0)
2402 return;
2403 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2404 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2405 cy693_setup_channel(&cp->wdc_channel);
2406 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2407 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2408 }
2409
2410 void
2411 cy693_setup_channel(chp)
2412 struct channel_softc *chp;
2413 {
2414 struct ata_drive_datas *drvp;
2415 int drive;
2416 u_int32_t cy_cmd_ctrl;
2417 u_int32_t idedma_ctl;
2418 struct pciide_channel *cp = (struct pciide_channel*)chp;
2419 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2420 cy_cmd_ctrl = idedma_ctl = 0;
2421
2422 for (drive = 0; drive < 2; drive++) {
2423 drvp = &chp->ch_drive[drive];
2424 /* If no drive, skip */
2425 if ((drvp->drive_flags & DRIVE) == 0)
2426 continue;
2427 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2428 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2429 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2430 CY_CMD_CTRL_IOW_REC_OFF(drive));
2431 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2432 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2433 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2434 CY_CMD_CTRL_IOR_REC_OFF(drive));
2435 }
2436 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2437
2438 pciide_print_modes(cp);
2439
2440 if (idedma_ctl != 0) {
2441 /* Add software bits in status register */
2442 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2443 IDEDMA_CTL, idedma_ctl);
2444 }
2445 }
2446
2447 void
2448 sis_chip_map(sc, pa)
2449 struct pciide_softc *sc;
2450 struct pci_attach_args *pa;
2451 {
2452 struct pciide_channel *cp;
2453 int channel;
2454 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2455 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2456 pcireg_t rev = PCI_REVISION(pa->pa_class);
2457 bus_size_t cmdsize, ctlsize;
2458
2459 if (pciide_chipen(sc, pa) == 0)
2460 return;
2461 printf("%s: bus-master DMA support present",
2462 sc->sc_wdcdev.sc_dev.dv_xname);
2463 pciide_mapreg_dma(sc, pa);
2464 printf("\n");
2465 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2466 WDC_CAPABILITY_MODE;
2467 if (sc->sc_dma_ok) {
2468 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2469 sc->sc_wdcdev.irqack = pciide_irqack;
2470 if (rev >= 0xd0)
2471 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2472 }
2473
2474 sc->sc_wdcdev.PIO_cap = 4;
2475 sc->sc_wdcdev.DMA_cap = 2;
2476 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2477 sc->sc_wdcdev.UDMA_cap = 2;
2478 sc->sc_wdcdev.set_modes = sis_setup_channel;
2479
2480 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2481 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2482
2483 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2484 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2485 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2486
2487 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2488 cp = &sc->pciide_channels[channel];
2489 if (pciide_chansetup(sc, channel, interface) == 0)
2490 continue;
2491 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2492 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2493 printf("%s: %s channel ignored (disabled)\n",
2494 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2495 continue;
2496 }
2497 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2498 pciide_pci_intr);
2499 if (cp->hw_ok == 0)
2500 continue;
2501 if (pciide_chan_candisable(cp)) {
2502 if (channel == 0)
2503 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2504 else
2505 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2506 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2507 sis_ctr0);
2508 }
2509 pciide_map_compat_intr(pa, cp, channel, interface);
2510 if (cp->hw_ok == 0)
2511 continue;
2512 sis_setup_channel(&cp->wdc_channel);
2513 }
2514 }
2515
2516 void
2517 sis_setup_channel(chp)
2518 struct channel_softc *chp;
2519 {
2520 struct ata_drive_datas *drvp;
2521 int drive;
2522 u_int32_t sis_tim;
2523 u_int32_t idedma_ctl;
2524 struct pciide_channel *cp = (struct pciide_channel*)chp;
2525 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2526
2527 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2528 "channel %d 0x%x\n", chp->channel,
2529 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2530 DEBUG_PROBE);
2531 sis_tim = 0;
2532 idedma_ctl = 0;
2533 /* setup DMA if needed */
2534 pciide_channel_dma_setup(cp);
2535
2536 for (drive = 0; drive < 2; drive++) {
2537 drvp = &chp->ch_drive[drive];
2538 /* If no drive, skip */
2539 if ((drvp->drive_flags & DRIVE) == 0)
2540 continue;
2541 /* add timing values, setup DMA if needed */
2542 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2543 (drvp->drive_flags & DRIVE_UDMA) == 0)
2544 goto pio;
2545
2546 if (drvp->drive_flags & DRIVE_UDMA) {
2547 /* use Ultra/DMA */
2548 drvp->drive_flags &= ~DRIVE_DMA;
2549 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2550 SIS_TIM_UDMA_TIME_OFF(drive);
2551 sis_tim |= SIS_TIM_UDMA_EN(drive);
2552 } else {
2553 /*
2554 * use Multiword DMA
2555 * Timings will be used for both PIO and DMA,
2556 * so adjust DMA mode if needed
2557 */
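			/*
			 * e.g. PIO 4 with MW DMA 1 becomes PIO 3 / DMA 1,
			 * while PIO 1 with MW DMA 1 degrades to DMA 0 and
			 * thus PIO 0 (illustrative arithmetic only).
			 */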
2558 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2559 drvp->PIO_mode = drvp->DMA_mode + 2;
2560 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2561 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2562 drvp->PIO_mode - 2 : 0;
2563 if (drvp->DMA_mode == 0)
2564 drvp->PIO_mode = 0;
2565 }
2566 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2567 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2568 SIS_TIM_ACT_OFF(drive);
2569 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2570 SIS_TIM_REC_OFF(drive);
2571 }
2572 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2573 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2574 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2575 if (idedma_ctl != 0) {
2576 /* Add software bits in status register */
2577 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2578 IDEDMA_CTL, idedma_ctl);
2579 }
2580 pciide_print_modes(cp);
2581 }
2582
2583 void
2584 acer_chip_map(sc, pa)
2585 struct pciide_softc *sc;
2586 struct pci_attach_args *pa;
2587 {
2588 struct pciide_channel *cp;
2589 int channel;
2590 pcireg_t cr, interface;
2591 bus_size_t cmdsize, ctlsize;
2592
2593 if (pciide_chipen(sc, pa) == 0)
2594 return;
2595 printf("%s: bus-master DMA support present",
2596 sc->sc_wdcdev.sc_dev.dv_xname);
2597 pciide_mapreg_dma(sc, pa);
2598 printf("\n");
2599 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2600 WDC_CAPABILITY_MODE;
2601 if (sc->sc_dma_ok) {
2602 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2603 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2604 sc->sc_wdcdev.irqack = pciide_irqack;
2605 }
2606
2607 sc->sc_wdcdev.PIO_cap = 4;
2608 sc->sc_wdcdev.DMA_cap = 2;
2609 sc->sc_wdcdev.UDMA_cap = 2;
2610 sc->sc_wdcdev.set_modes = acer_setup_channel;
2611 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2612 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2613
2614 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2615 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2616 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2617
2618 /* Enable "microsoft register bits" R/W. */
2619 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2620 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2621 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2622 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2623 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2624 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2625 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2626 ~ACER_CHANSTATUSREGS_RO);
2627 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2628 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2629 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2630 /* Don't use cr, re-read the real register content instead */
2631 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2632 PCI_CLASS_REG));
2633
2634 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2635 cp = &sc->pciide_channels[channel];
2636 if (pciide_chansetup(sc, channel, interface) == 0)
2637 continue;
2638 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2639 printf("%s: %s channel ignored (disabled)\n",
2640 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2641 continue;
2642 }
2643 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2644 acer_pci_intr);
2645 if (cp->hw_ok == 0)
2646 continue;
2647 if (pciide_chan_candisable(cp)) {
2648 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2649 pci_conf_write(sc->sc_pc, sc->sc_tag,
2650 PCI_CLASS_REG, cr);
2651 }
2652 pciide_map_compat_intr(pa, cp, channel, interface);
2653 acer_setup_channel(&cp->wdc_channel);
2654 }
2655 }
2656
2657 void
2658 acer_setup_channel(chp)
2659 struct channel_softc *chp;
2660 {
2661 struct ata_drive_datas *drvp;
2662 int drive;
2663 u_int32_t acer_fifo_udma;
2664 u_int32_t idedma_ctl;
2665 struct pciide_channel *cp = (struct pciide_channel*)chp;
2666 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2667
2668 idedma_ctl = 0;
2669 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2670 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2671 acer_fifo_udma), DEBUG_PROBE);
2672 /* setup DMA if needed */
2673 pciide_channel_dma_setup(cp);
2674
2675 for (drive = 0; drive < 2; drive++) {
2676 drvp = &chp->ch_drive[drive];
2677 /* If no drive, skip */
2678 if ((drvp->drive_flags & DRIVE) == 0)
2679 continue;
2680 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2681 "channel %d drive %d 0x%x\n", chp->channel, drive,
2682 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2683 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2684 /* clear FIFO/DMA mode */
2685 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2686 ACER_UDMA_EN(chp->channel, drive) |
2687 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2688
2689 /* add timing values, setup DMA if needed */
2690 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2691 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2692 acer_fifo_udma |=
2693 ACER_FTH_OPL(chp->channel, drive, 0x1);
2694 goto pio;
2695 }
2696
2697 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2698 if (drvp->drive_flags & DRIVE_UDMA) {
2699 /* use Ultra/DMA */
2700 drvp->drive_flags &= ~DRIVE_DMA;
2701 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2702 acer_fifo_udma |=
2703 ACER_UDMA_TIM(chp->channel, drive,
2704 acer_udma[drvp->UDMA_mode]);
2705 } else {
2706 /*
2707 * use Multiword DMA
2708 * Timings will be used for both PIO and DMA,
2709 * so adjust DMA mode if needed
2710 */
2711 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2712 drvp->PIO_mode = drvp->DMA_mode + 2;
2713 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2714 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2715 drvp->PIO_mode - 2 : 0;
2716 if (drvp->DMA_mode == 0)
2717 drvp->PIO_mode = 0;
2718 }
2719 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2720 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2721 ACER_IDETIM(chp->channel, drive),
2722 acer_pio[drvp->PIO_mode]);
2723 }
2724 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2725 acer_fifo_udma), DEBUG_PROBE);
2726 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2727 if (idedma_ctl != 0) {
2728 /* Add software bits in status register */
2729 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2730 IDEDMA_CTL, idedma_ctl);
2731 }
2732 pciide_print_modes(cp);
2733 }
2734
2735 int
2736 acer_pci_intr(arg)
2737 void *arg;
2738 {
2739 struct pciide_softc *sc = arg;
2740 struct pciide_channel *cp;
2741 struct channel_softc *wdc_cp;
2742 int i, rv, crv;
2743 u_int32_t chids;
2744
2745 rv = 0;
2746 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2747 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2748 cp = &sc->pciide_channels[i];
2749 wdc_cp = &cp->wdc_channel;
2750 		/* If a compat channel, skip. */
2751 if (cp->compat)
2752 continue;
2753 if (chids & ACER_CHIDS_INT(i)) {
2754 crv = wdcintr(wdc_cp);
2755 if (crv == 0)
2756 printf("%s:%d: bogus intr\n",
2757 sc->sc_wdcdev.sc_dev.dv_xname, i);
2758 else
2759 rv = 1;
2760 }
2761 }
2762 return rv;
2763 }
2764
2765 void
2766 hpt_chip_map(sc, pa)
2767 struct pciide_softc *sc;
2768 struct pci_attach_args *pa;
2769 {
2770 struct pciide_channel *cp;
2771 int i, compatchan, revision;
2772 pcireg_t interface;
2773 bus_size_t cmdsize, ctlsize;
2774
2775 if (pciide_chipen(sc, pa) == 0)
2776 return;
2777 revision = PCI_REVISION(pa->pa_class);
2778
2779 /*
2780 	 * When the chip is in native mode it identifies itself as a
2781 	 * 'misc mass storage' device. Fake the interface in this case.
2782 */
2783 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
2784 interface = PCI_INTERFACE(pa->pa_class);
2785 } else {
2786 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2787 PCIIDE_INTERFACE_PCI(0);
2788 if (revision == HPT370_REV)
2789 interface |= PCIIDE_INTERFACE_PCI(1);
2790 }
2791
2792 printf("%s: bus-master DMA support present",
2793 sc->sc_wdcdev.sc_dev.dv_xname);
2794 pciide_mapreg_dma(sc, pa);
2795 printf("\n");
2796 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2797 WDC_CAPABILITY_MODE;
2798 if (sc->sc_dma_ok) {
2799 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2800 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2801 sc->sc_wdcdev.irqack = pciide_irqack;
2802 }
2803 sc->sc_wdcdev.PIO_cap = 4;
2804 sc->sc_wdcdev.DMA_cap = 2;
2805 sc->sc_wdcdev.UDMA_cap = 4;
2806
2807 sc->sc_wdcdev.set_modes = hpt_setup_channel;
2808 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2809 if (revision == HPT366_REV) {
2810 /*
2811 * The 366 has 2 PCI IDE functions, one for primary and one
2812 * for secondary. So we need to call pciide_mapregs_compat()
2813 * with the real channel
2814 */
2815 if (pa->pa_function == 0) {
2816 compatchan = 0;
2817 } else if (pa->pa_function == 1) {
2818 compatchan = 1;
2819 } else {
2820 printf("%s: unexpected PCI function %d\n",
2821 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2822 return;
2823 }
2824 sc->sc_wdcdev.nchannels = 1;
2825 } else {
2826 sc->sc_wdcdev.nchannels = 2;
2827 }
2828 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2829 cp = &sc->pciide_channels[i];
2830 if (sc->sc_wdcdev.nchannels > 1) {
2831 compatchan = i;
2832 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
2833 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
2834 printf("%s: %s channel ignored (disabled)\n",
2835 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2836 continue;
2837 }
2838 }
2839 if (pciide_chansetup(sc, i, interface) == 0)
2840 continue;
2841 if (interface & PCIIDE_INTERFACE_PCI(i)) {
2842 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
2843 &ctlsize, hpt_pci_intr);
2844 } else {
2845 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
2846 &cmdsize, &ctlsize);
2847 }
2848 if (cp->hw_ok == 0)
2849 return;
2850 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2851 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2852 wdcattach(&cp->wdc_channel);
2853 hpt_setup_channel(&cp->wdc_channel);
2854 }
2855
2856 return;
2857 }
2858
2859
2860 void
2861 hpt_setup_channel(chp)
2862 struct channel_softc *chp;
2863 {
2864 struct ata_drive_datas *drvp;
2865 int drive;
2866 int cable;
2867 u_int32_t before, after;
2868 u_int32_t idedma_ctl;
2869 struct pciide_channel *cp = (struct pciide_channel*)chp;
2870 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2871
2872 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
2873
2874 /* setup DMA if needed */
2875 pciide_channel_dma_setup(cp);
2876
2877 idedma_ctl = 0;
2878
2879 /* Per drive settings */
2880 for (drive = 0; drive < 2; drive++) {
2881 drvp = &chp->ch_drive[drive];
2882 /* If no drive, skip */
2883 if ((drvp->drive_flags & DRIVE) == 0)
2884 continue;
2885 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
2886 HPT_IDETIM(chp->channel, drive));
2887
2888 /* add timing values, setup DMA if needed */
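		/*
		 * If the cable-ID bit is set for this channel (presumably
		 * meaning no 80-wire cable was detected), UDMA is limited
		 * to mode 2 (Ultra/33) below.
		 */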
2889 if (drvp->drive_flags & DRIVE_UDMA) {
2890 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
2891 drvp->UDMA_mode > 2)
2892 drvp->UDMA_mode = 2;
2893 after = (sc->sc_wdcdev.nchannels == 2) ?
2894 hpt370_udma[drvp->UDMA_mode] :
2895 hpt366_udma[drvp->UDMA_mode];
2896 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2897 } else if (drvp->drive_flags & DRIVE_DMA) {
2898 /*
2899 * use Multiword DMA.
2900 * Timings will be used for both PIO and DMA, so adjust
2901 * DMA mode if needed
2902 */
2903 if (drvp->PIO_mode >= 3 &&
2904 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2905 drvp->DMA_mode = drvp->PIO_mode - 2;
2906 }
2907 after = (sc->sc_wdcdev.nchannels == 2) ?
2908 hpt370_dma[drvp->DMA_mode] :
2909 hpt366_dma[drvp->DMA_mode];
2910 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2911 } else {
2912 /* PIO only */
2913 after = (sc->sc_wdcdev.nchannels == 2) ?
2914 hpt370_pio[drvp->PIO_mode] :
2915 hpt366_pio[drvp->PIO_mode];
2916 }
2917 pci_conf_write(sc->sc_pc, sc->sc_tag,
2918 HPT_IDETIM(chp->channel, drive), after);
2919 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
2920 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
2921 after, before), DEBUG_PROBE);
2922 }
2923 if (idedma_ctl != 0) {
2924 /* Add software bits in status register */
2925 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2926 IDEDMA_CTL, idedma_ctl);
2927 }
2928 pciide_print_modes(cp);
2929 }
2930
2931 int
2932 hpt_pci_intr(arg)
2933 void *arg;
2934 {
2935 struct pciide_softc *sc = arg;
2936 struct pciide_channel *cp;
2937 struct channel_softc *wdc_cp;
2938 int rv = 0;
2939 int dmastat, i, crv;
2940
2941 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2942 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2943 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
2944 if((dmastat & IDEDMA_CTL_INTR) == 0)
2945 continue;
2946 cp = &sc->pciide_channels[i];
2947 wdc_cp = &cp->wdc_channel;
2948 crv = wdcintr(wdc_cp);
2949 if (crv == 0) {
2950 printf("%s:%d: bogus intr\n",
2951 sc->sc_wdcdev.sc_dev.dv_xname, i);
2952 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2953 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
2954 } else
2955 rv = 1;
2956 }
2957 return rv;
2958 }
2959
2960
2961 /* A macro to test product */
2962 #define PDC_IS_262(sc) (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66)
2963
2964 void
2965 pdc202xx_chip_map(sc, pa)
2966 struct pciide_softc *sc;
2967 struct pci_attach_args *pa;
2968 {
2969 struct pciide_channel *cp;
2970 int channel;
2971 pcireg_t interface, st, mode;
2972 bus_size_t cmdsize, ctlsize;
2973
2974 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2975 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
2976 DEBUG_PROBE);
2977 if (pciide_chipen(sc, pa) == 0)
2978 return;
2979
2980 /* turn off RAID mode */
2981 st &= ~PDC2xx_STATE_IDERAID;
2982
2983 /*
2984 * can't rely on the PCI_CLASS_REG content if the chip was in raid
2985 	 * mode. We have to fake the interface.
2986 */
2987 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
2988 if (st & PDC2xx_STATE_NATIVE)
2989 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2990
2991 printf("%s: bus-master DMA support present",
2992 sc->sc_wdcdev.sc_dev.dv_xname);
2993 pciide_mapreg_dma(sc, pa);
2994 printf("\n");
2995 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2996 WDC_CAPABILITY_MODE;
2997 if (sc->sc_dma_ok) {
2998 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2999 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3000 sc->sc_wdcdev.irqack = pciide_irqack;
3001 }
3002 sc->sc_wdcdev.PIO_cap = 4;
3003 sc->sc_wdcdev.DMA_cap = 2;
3004 if (PDC_IS_262(sc))
3005 sc->sc_wdcdev.UDMA_cap = 4;
3006 else
3007 sc->sc_wdcdev.UDMA_cap = 2;
3008 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3009 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3010 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3011
3012 /* setup failsafe defaults */
3013 mode = 0;
3014 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3015 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3016 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3017 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3018 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3019 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3020 "initial timings 0x%x, now 0x%x\n", channel,
3021 pci_conf_read(sc->sc_pc, sc->sc_tag,
3022 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3023 DEBUG_PROBE);
3024 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3025 mode | PDC2xx_TIM_IORDYp);
3026 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3027 "initial timings 0x%x, now 0x%x\n", channel,
3028 pci_conf_read(sc->sc_pc, sc->sc_tag,
3029 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3030 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3031 mode);
3032 }
3033
3034 mode = PDC2xx_SCR_DMA;
3035 if (PDC_IS_262(sc)) {
3036 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3037 } else {
3038 /* the BIOS set it up this way */
3039 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3040 }
3041 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3042 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3043 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3044 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3045 DEBUG_PROBE);
3046 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3047
3048 /* controller initial state register is OK even without BIOS */
3049 /* Set DMA mode to IDE DMA compatibility */
3050 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3051 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3052 DEBUG_PROBE);
3053 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3054 mode | 0x1);
3055 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3056 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3057 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3058 mode | 0x1);
3059
3060 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3061 cp = &sc->pciide_channels[channel];
3062 if (pciide_chansetup(sc, channel, interface) == 0)
3063 continue;
3064 if ((st & (PDC_IS_262(sc) ?
3065 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3066 printf("%s: %s channel ignored (disabled)\n",
3067 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3068 continue;
3069 }
3070 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3071 pdc202xx_pci_intr);
3072 if (cp->hw_ok == 0)
3073 continue;
3074 if (pciide_chan_candisable(cp))
3075 st &= ~(PDC_IS_262(sc) ?
3076 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3077 pciide_map_compat_intr(pa, cp, channel, interface);
3078 pdc202xx_setup_channel(&cp->wdc_channel);
3079 }
3080 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3081 DEBUG_PROBE);
3082 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3083 return;
3084 }
3085
3086 void
3087 pdc202xx_setup_channel(chp)
3088 struct channel_softc *chp;
3089 {
3090 struct ata_drive_datas *drvp;
3091 int drive;
3092 pcireg_t mode, st;
3093 u_int32_t idedma_ctl, scr, atapi;
3094 struct pciide_channel *cp = (struct pciide_channel*)chp;
3095 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3096 int channel = chp->channel;
3097
3098 /* setup DMA if needed */
3099 pciide_channel_dma_setup(cp);
3100
3101 idedma_ctl = 0;
3102
3103 /* Per channel settings */
3104 if (PDC_IS_262(sc)) {
3105 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3106 PDC262_U66);
3107 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3108 		/* Trim UDMA mode */
3109 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3110 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3111 chp->ch_drive[0].UDMA_mode <= 2) ||
3112 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3113 chp->ch_drive[1].UDMA_mode <= 2)) {
3114 if (chp->ch_drive[0].UDMA_mode > 2)
3115 chp->ch_drive[0].UDMA_mode = 2;
3116 if (chp->ch_drive[1].UDMA_mode > 2)
3117 chp->ch_drive[1].UDMA_mode = 2;
3118 }
3119 /* Set U66 if needed */
3120 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3121 chp->ch_drive[0].UDMA_mode > 2) ||
3122 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3123 chp->ch_drive[1].UDMA_mode > 2))
3124 scr |= PDC262_U66_EN(channel);
3125 else
3126 scr &= ~PDC262_U66_EN(channel);
3127 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3128 PDC262_U66, scr);
3129 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3130 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3131 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3132 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3133 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3134 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3135 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3136 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3137 atapi = 0;
3138 else
3139 atapi = PDC262_ATAPI_UDMA;
3140 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3141 PDC262_ATAPI(channel), atapi);
3142 }
3143 }
3144 for (drive = 0; drive < 2; drive++) {
3145 drvp = &chp->ch_drive[drive];
3146 /* If no drive, skip */
3147 if ((drvp->drive_flags & DRIVE) == 0)
3148 continue;
3149 mode = 0;
3150 if (drvp->drive_flags & DRIVE_UDMA) {
3151 mode = PDC2xx_TIM_SET_MB(mode,
3152 pdc2xx_udma_mb[drvp->UDMA_mode]);
3153 mode = PDC2xx_TIM_SET_MC(mode,
3154 pdc2xx_udma_mc[drvp->UDMA_mode]);
3155 drvp->drive_flags &= ~DRIVE_DMA;
3156 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3157 } else if (drvp->drive_flags & DRIVE_DMA) {
3158 mode = PDC2xx_TIM_SET_MB(mode,
3159 pdc2xx_dma_mb[drvp->DMA_mode]);
3160 mode = PDC2xx_TIM_SET_MC(mode,
3161 pdc2xx_dma_mc[drvp->DMA_mode]);
3162 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3163 } else {
3164 mode = PDC2xx_TIM_SET_MB(mode,
3165 pdc2xx_dma_mb[0]);
3166 mode = PDC2xx_TIM_SET_MC(mode,
3167 pdc2xx_dma_mc[0]);
3168 }
3169 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3170 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3171 if (drvp->drive_flags & DRIVE_ATA)
3172 mode |= PDC2xx_TIM_PRE;
3173 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3174 if (drvp->PIO_mode >= 3) {
3175 mode |= PDC2xx_TIM_IORDY;
3176 if (drive == 0)
3177 mode |= PDC2xx_TIM_IORDYp;
3178 }
3179 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3180 "timings 0x%x\n",
3181 sc->sc_wdcdev.sc_dev.dv_xname,
3182 chp->channel, drive, mode), DEBUG_PROBE);
3183 pci_conf_write(sc->sc_pc, sc->sc_tag,
3184 PDC2xx_TIM(chp->channel, drive), mode);
3185 }
3186 if (idedma_ctl != 0) {
3187 /* Add software bits in status register */
3188 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3189 IDEDMA_CTL, idedma_ctl);
3190 }
3191 pciide_print_modes(cp);
3192 }
3193
3194 int
3195 pdc202xx_pci_intr(arg)
3196 void *arg;
3197 {
3198 struct pciide_softc *sc = arg;
3199 struct pciide_channel *cp;
3200 struct channel_softc *wdc_cp;
3201 int i, rv, crv;
3202 u_int32_t scr;
3203
3204 rv = 0;
3205 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3206 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3207 cp = &sc->pciide_channels[i];
3208 wdc_cp = &cp->wdc_channel;
3209 		/* If a compat channel, skip. */
3210 if (cp->compat)
3211 continue;
3212 if (scr & PDC2xx_SCR_INT(i)) {
3213 crv = wdcintr(wdc_cp);
3214 if (crv == 0)
3215 printf("%s:%d: bogus intr\n",
3216 sc->sc_wdcdev.sc_dev.dv_xname, i);
3217 else
3218 rv = 1;
3219 }
3220 }
3221 return rv;
3222 }
3223
3224 void
3225 opti_chip_map(sc, pa)
3226 struct pciide_softc *sc;
3227 struct pci_attach_args *pa;
3228 {
3229 struct pciide_channel *cp;
3230 bus_size_t cmdsize, ctlsize;
3231 pcireg_t interface;
3232 u_int8_t init_ctrl;
3233 int channel;
3234
3235 if (pciide_chipen(sc, pa) == 0)
3236 return;
3237 printf("%s: bus-master DMA support present",
3238 sc->sc_wdcdev.sc_dev.dv_xname);
3239 pciide_mapreg_dma(sc, pa);
3240 printf("\n");
3241
3242 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3243 WDC_CAPABILITY_MODE;
3244 sc->sc_wdcdev.PIO_cap = 4;
3245 if (sc->sc_dma_ok) {
3246 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3247 sc->sc_wdcdev.irqack = pciide_irqack;
3248 sc->sc_wdcdev.DMA_cap = 2;
3249 }
3250 sc->sc_wdcdev.set_modes = opti_setup_channel;
3251
3252 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3253 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3254
3255 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3256 OPTI_REG_INIT_CONTROL);
3257
3258 interface = PCI_INTERFACE(pa->pa_class);
3259
3260 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3261 cp = &sc->pciide_channels[channel];
3262 if (pciide_chansetup(sc, channel, interface) == 0)
3263 continue;
3264 if (channel == 1 &&
3265 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3266 printf("%s: %s channel ignored (disabled)\n",
3267 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3268 continue;
3269 }
3270 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3271 pciide_pci_intr);
3272 if (cp->hw_ok == 0)
3273 continue;
3274 pciide_map_compat_intr(pa, cp, channel, interface);
3275 if (cp->hw_ok == 0)
3276 continue;
3277 opti_setup_channel(&cp->wdc_channel);
3278 }
3279 }
3280
3281 void
3282 opti_setup_channel(chp)
3283 struct channel_softc *chp;
3284 {
3285 struct ata_drive_datas *drvp;
3286 struct pciide_channel *cp = (struct pciide_channel*)chp;
3287 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3288 int drive, spd;
3289 int mode[2];
3290 u_int8_t rv, mr;
3291
3292 /*
3293 * The `Delay' and `Address Setup Time' fields of the
3294 * Miscellaneous Register are always zero initially.
3295 */
3296 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3297 mr &= ~(OPTI_MISC_DELAY_MASK |
3298 OPTI_MISC_ADDR_SETUP_MASK |
3299 OPTI_MISC_INDEX_MASK);
3300
3301 /* Prime the control register before setting timing values */
3302 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3303
3304 	/* Determine the clock rate of the PCI bus the chip is attached to */
3305 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3306 spd &= OPTI_STRAP_PCI_SPEED_MASK;
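	/*
	 * spd is later used as the first index into the opti_tim_* tables,
	 * which apparently hold one row of timing values per supported PCI
	 * clock rate.
	 */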
3307
3308 /* setup DMA if needed */
3309 pciide_channel_dma_setup(cp);
3310
3311 for (drive = 0; drive < 2; drive++) {
3312 drvp = &chp->ch_drive[drive];
3313 /* If no drive, skip */
3314 if ((drvp->drive_flags & DRIVE) == 0) {
3315 mode[drive] = -1;
3316 continue;
3317 }
3318
3319 if ((drvp->drive_flags & DRIVE_DMA)) {
3320 /*
3321 * Timings will be used for both PIO and DMA,
3322 * so adjust DMA mode if needed
3323 */
3324 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3325 drvp->PIO_mode = drvp->DMA_mode + 2;
3326 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3327 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3328 drvp->PIO_mode - 2 : 0;
3329 if (drvp->DMA_mode == 0)
3330 drvp->PIO_mode = 0;
3331
3332 mode[drive] = drvp->DMA_mode + 5;
3333 } else
3334 mode[drive] = drvp->PIO_mode;
3335
3336 if (drive && mode[0] >= 0 &&
3337 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3338 /*
3339 * Can't have two drives using different values
3340 * for `Address Setup Time'.
3341 * Slow down the faster drive to compensate.
3342 */
3343 int d = (opti_tim_as[spd][mode[0]] >
3344 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3345
3346 mode[d] = mode[1-d];
3347 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3348 chp->ch_drive[d].DMA_mode = 0;
3349 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3350 }
3351 }
3352
3353 for (drive = 0; drive < 2; drive++) {
3354 int m;
3355 if ((m = mode[drive]) < 0)
3356 continue;
3357
3358 /* Set the Address Setup Time and select appropriate index */
3359 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3360 rv |= OPTI_MISC_INDEX(drive);
3361 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3362
3363 /* Set the pulse width and recovery timing parameters */
3364 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3365 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3366 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3367 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3368
3369 /* Set the Enhanced Mode register appropriately */
3370 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3371 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3372 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3373 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3374 }
3375
3376 /* Finally, enable the timings */
3377 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3378
3379 pciide_print_modes(cp);
3380 }
3381