/* $NetBSD: pciide.c,v 1.72 2000/06/27 05:57:05 tron Exp $ */


/*
 * Copyright (c) 1999 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christopher G. Demetriou
 *	for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#ifndef WDCDEBUG
#define WDCDEBUG
#endif

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10
#ifdef WDCDEBUG
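/*
 * Debug mask for this driver: OR together DEBUG_* values above (e.g. from
 * the kernel debugger) to enable the corresponding WDCDEBUG_PRINT() traces.
 */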
int wdcdebug_pciide_mask = 0;
#define WDCDEBUG_PRINT(args, level) \
	if (wdcdebug_pciide_mask & (level)) printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <machine/endian.h>

#include <vm/vm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/cy82c693var.h>

/* inlines for reading/writing 8-bit PCI registers */
static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
    int));
static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
    int, u_int8_t));

static __inline u_int8_t
pciide_pci_read(pc, pa, reg)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
{

	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

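/*
 * PCI configuration space is only accessible as aligned 32-bit words, so a
 * byte-sized write is done as a read-modify-write of the enclosing word.
 */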
static __inline void
pciide_pci_write(pc, pa, reg, val)
	pci_chipset_tag_t pc;
	pcitag_t pa;
	int reg;
	u_int8_t val;
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}

void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));

void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void piix_setup_channel __P((struct channel_softc*));
void piix3_4_setup_channel __P((struct channel_softc*));
static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));

void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void amd756_setup_channel __P((struct channel_softc*));

void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void apollo_setup_channel __P((struct channel_softc*));

void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_setup_channel __P((struct channel_softc*));
void cmd_channel_map __P((struct pci_attach_args *,
    struct pciide_softc *, int));
int cmd_pci_intr __P((void *));
void cmd648_9_irqack __P((struct channel_softc *));

void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cy693_setup_channel __P((struct channel_softc*));

void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void sis_setup_channel __P((struct channel_softc*));

void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void acer_setup_channel __P((struct channel_softc*));
int acer_pci_intr __P((void *));

void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void pdc202xx_setup_channel __P((struct channel_softc*));
int pdc202xx_pci_intr __P((void *));

void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void opti_setup_channel __P((struct channel_softc*));

void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void hpt_setup_channel __P((struct channel_softc*));
int hpt_pci_intr __P((void *));

void pciide_channel_dma_setup __P((struct pciide_channel *));
int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
int pciide_dma_init __P((void*, int, int, void *, size_t, int));
void pciide_dma_start __P((void*, int, int));
int pciide_dma_finish __P((void*, int, int, int));
void pciide_irqack __P((struct channel_softc *));
void pciide_print_modes __P((struct pciide_channel *));

struct pciide_product_desc {
	u_int32_t ide_product;
	int ide_flags;
	const char *ide_name;
	/* map and setup chip, probe drives */
	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,
	0,
	"Generic PCI IDE controller",
	default_chip_map,
};

const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_82092AA,
	  0,
	  "Intel 82092AA IDE controller",
	  default_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371FB_IDE,
	  0,
	  "Intel 82371FB IDE controller (PIIX)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371SB_IDE,
	  0,
	  "Intel 82371SB IDE Interface (PIIX3)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371AB_IDE,
	  0,
	  "Intel 82371AB IDE controller (PIIX4)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801AA_IDE,
	  0,
	  "Intel 82801AA IDE Controller (ICH)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801AB_IDE,
	  0,
	  "Intel 82801AB IDE Controller (ICH0)",
	  piix_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,
	  0,
	  "Advanced Micro Devices AMD756 IDE Controller",
	  amd756_chip_map
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  0,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  0,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  0,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
	  0,
	  "VIA Tech VT82C586 IDE Controller",
	  apollo_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
	  0,
	  "VIA Tech VT82C586A IDE Controller",
	  apollo_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693,
	  0,
	  "Cypress 82C693 IDE Controller",
	  cy693_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5597_IDE,
	  0,
	  "Silicon Integrated System 5597/5598 IDE controller",
	  sis_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,
	  0,
	  "Acer Labs M5229 UDMA IDE Controller",
	  acer_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_ULTRA33,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA66,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621,
	  0,
	  "OPTi 82c621 PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82C568,
	  0,
	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82D568,
	  0,
	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Triones/Highpoint HPT366/370 IDE Controller",
	  hpt_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	}
};

struct pciide_vendor_desc {
	u_int32_t ide_vendor;
	const struct pciide_product_desc *ide_products;
};

const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products },
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
	{ PCI_VENDOR_VIATECH, pciide_via_products },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
	{ PCI_VENDOR_SIS, pciide_sis_products },
	{ PCI_VENDOR_ALI, pciide_acer_products },
	{ PCI_VENDOR_PROMISE, pciide_promise_products },
	{ PCI_VENDOR_AMD, pciide_amd_products },
	{ PCI_VENDOR_OPTI, pciide_opti_products },
	{ PCI_VENDOR_TRIONES, pciide_triones_products },
	{ 0, NULL }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01
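/*
 * PCIIDE_OPTIONS_DMA lets the config file request bus-master DMA for
 * controllers that are only handled by default_chip_map() (see its
 * "no driver support" check below).
 */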

int pciide_match __P((struct device *, struct cfdata *, void *));
void pciide_attach __P((struct device *, struct device *, void *));

struct cfattach pciide_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};
int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
int pciide_mapregs_compat __P((struct pci_attach_args *,
    struct pciide_channel *, int, bus_size_t *, bus_size_t *));
int pciide_mapregs_native __P((struct pci_attach_args *,
    struct pciide_channel *, bus_size_t *, bus_size_t *,
    int (*pci_intr) __P((void *))));
void pciide_mapreg_dma __P((struct pciide_softc *,
    struct pci_attach_args *));
int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
void pciide_mapchan __P((struct pci_attach_args *,
    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
    int (*pci_intr) __P((void *))));
int pciide_chan_candisable __P((struct pciide_channel *));
void pciide_map_compat_intr __P((struct pci_attach_args *,
    struct pciide_channel *, int, int));
int pciide_print __P((void *, const char *pnp));
int pciide_compat_intr __P((void *));
int pciide_pci_intr __P((void *));
const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));

const struct pciide_product_desc *
pciide_lookup_product(id)
	u_int32_t id;
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;

	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if ((pp = vp->ide_products) == NULL)
		return NULL;

	for (; pp->ide_name != NULL; pp++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (pp->ide_name == NULL)
		return NULL;
	return pp;
}

int
pciide_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		return (1);
	}

	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers. Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
		return (1);
	}

	return (0);
}

void
pciide_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	struct pciide_softc *sc = (struct pciide_softc *)self;
	pcireg_t csr;
	char devinfo[256];
	const char *displaydev;

	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL) {
		sc->sc_pp = &default_product_desc;
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
		displaydev = devinfo;
	} else
		displaydev = sc->sc_pp->ide_name;

	printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
#ifdef WDCDEBUG
	if (wdcdebug_pciide_mask & DEBUG_PROBE)
		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
	sc->sc_pp->chip_map(sc, pa);

	if (sc->sc_dma_ok) {
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	}
	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}

/* tell whether the chip is enabled or not */
int
pciide_chipen(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t csr;
	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		printf("%s: device disabled (at %s)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
		    "device" : "bridge");
		return 0;
	}
	return 1;
}

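/*
 * Map the compatibility-mode command and control register blocks for a
 * channel; PCIIDE_COMPAT_CMD_BASE()/PCIIDE_COMPAT_CTL_BASE() give the
 * fixed legacy I/O addresses for the given channel.
 */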
int
pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan;
	bus_size_t *cmdsizep, *ctlsizep;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	cp->compat = 1;
	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

	wdc_cp->cmd_iot = pa->pa_iot;
	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
		printf("%s: couldn't map %s channel cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return (0);
	}

	wdc_cp->ctl_iot = pa->pa_iot;
	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
		printf("%s: couldn't map %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
		    PCIIDE_COMPAT_CMD_SIZE);
		return (0);
	}

	return (1);
}

int
pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr) __P((void *));
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;
	const char *intrstr;
	pci_intr_handle_t intrhandle;

	cp->compat = 0;

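	/*
	 * All native-PCI channels of a controller share one interrupt
	 * handler, so establish it only once (on the first channel).
	 */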
	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
		    pa->pa_intrline, &intrhandle) != 0) {
			printf("%s: couldn't map native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			return 0;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc);
		if (sc->sc_pci_ih != NULL) {
			printf("%s: using %s for native-PCI interrupt\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			printf("%s: couldn't establish native-PCI interrupt",
			    sc->sc_wdcdev.sc_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return 0;
		}
	}
	cp->ih = sc->sc_pci_ih;
	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
	    PCI_MAPREG_TYPE_IO, 0,
	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
		printf("%s: couldn't map %s channel cmd regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}

	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
	    PCI_MAPREG_TYPE_IO, 0,
	    &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) {
		printf("%s: couldn't map %s channel ctl regs\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		return 0;
	}
	return (1);
}

void
pciide_mapreg_dma(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t maptype;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done. If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
	 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't map registers)");
		} else {
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		printf(", but unsupported register maptype (0x%x)", maptype);
	}
}

int
pciide_compat_intr(arg)
	void *arg;
{
	struct pciide_channel *cp = arg;

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p\n", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}

int
pciide_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* If a compat channel, skip. */
		if (cp->compat)
			continue;
		/* if this channel not waiting for intr, skip */
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

void
pciide_channel_dma_setup(cp)
	struct pciide_channel *cp;
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

int
pciide_dma_table_setup(sc, channel, drive)
	struct pciide_softc *sc;
	int channel, drive;
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return 0;

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
	    seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
	    &dma_maps->dmamap_table)) != 0) {
		printf("%s:%d: unable to create table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_table,
	    dma_maps->dma_table,
	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to load table DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
	    dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
	/* Create a xfer DMA map for this drive */
	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dma_maps->dmamap_xfer)) != 0) {
		printf("%s:%d: unable to create xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}
	return 0;
}

int
pciide_dma_init(v, channel, drive, databuf, datalen, flags)
	void *v;
	int channel, drive;
	void *databuf;
	size_t datalen;
	int flags;
{
	struct pciide_softc *sc = v;
	int error, seg;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s:%d: unable to load xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

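	/*
	 * Fill one bus-master IDE descriptor per DMA segment: the physical
	 * base address and byte count, stored little-endian, with the
	 * end-of-table bit set on the last entry below.
	 */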
	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Maps are ready. Start DMA function */
#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
		    dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif

	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0);
	/* remember flags */
	dma_maps->dma_flags = flags;
	return 0;
}

void
pciide_dma_start(v, channel, drive)
	void *v;
	int channel, drive;
{
	struct pciide_softc *sc = v;

	WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
}

int
pciide_dma_finish(v, channel, drive, force)
	void *v;
	int channel, drive;
	int force;
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	if ((status & IDEDMA_CTL_ERR) != 0) {
		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
		error |= WDC_DMAST_ERR;
	}

	if ((status & IDEDMA_CTL_INTR) == 0) {
		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}

void
pciide_irqack(chp)
	struct channel_softc *chp;
{
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/* clear status bits in IDE DMA registers */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
}

/* some common code used by several chip_map */
int
pciide_chansetup(sc, channel, interface)
	struct pciide_softc *sc;
	int channel;
	pcireg_t interface;
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.channel = channel;
	cp->wdc_channel.wdc = &sc->sc_wdcdev;
	cp->wdc_channel.ch_queue =
	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		printf("%s %s channel: "
		    "can't allocate memory for command queue\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return 0;
	}
	printf("%s: %s channel %s to %s mode\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");
	return 1;
}

/* some common code used by several chip channel_map */
void
pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	pcireg_t interface;
	bus_size_t *cmdsizep, *ctlsizep;
	int (*pci_intr) __P((void *));
{
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
		    pci_intr);
	else
		cp->hw_ok = pciide_mapregs_compat(pa, cp,
		    wdc_cp->channel, cmdsizep, ctlsizep);

	if (cp->hw_ok == 0)
		return;
	wdc_cp->data32iot = wdc_cp->cmd_iot;
	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
	wdcattach(wdc_cp);
}

/*
 * Generic code to determine whether a channel can be disabled. Return 1
 * if the channel can be disabled, 0 if not.
 */
int
pciide_chan_candisable(cp)
	struct pciide_channel *cp;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
		printf("%s: disabling %s channel (no drives)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
		return 1;
	}
	return 0;
}

/*
 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
 * Set hw_ok=0 on failure
 */
void
pciide_map_compat_intr(pa, cp, compatchan, interface)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan, interface;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (cp->hw_ok == 0)
		return;
	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
		return;

	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
	    pa, compatchan, pciide_compat_intr, cp);
	if (cp->ih == NULL) {
		printf("%s: no compatibility interrupt for use by %s "
		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
	}
}

void
pciide_print_modes(cp)
	struct pciide_channel *cp;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive;
	struct channel_softc *chp;
	struct ata_drive_datas *drvp;

	chp = &cp->wdc_channel;
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		printf("%s(%s:%d:%d): using PIO mode %d",
		    drvp->drv_softc->dv_xname,
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    chp->channel, drive, drvp->PIO_mode);
		if (drvp->drive_flags & DRIVE_DMA)
			printf(", DMA mode %d", drvp->DMA_mode);
		if (drvp->drive_flags & DRIVE_UDMA)
			printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
			printf(" (using DMA data transfers)");
		printf("\n");
	}
}

void
default_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t csr;
	int channel, drive;
	struct ata_drive_datas *drvp;
	u_int8_t idedma_ctl;
	bus_size_t cmdsize, ctlsize;
	char *failreason;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
		printf("%s: bus-master DMA support present",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (sc->sc_pp == &default_product_desc &&
		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
		    PCIIDE_OPTIONS_DMA) == 0) {
			printf(", but unused (no driver support)");
			sc->sc_dma_ok = 0;
		} else {
			pciide_mapreg_dma(sc, pa);
			if (sc->sc_dma_ok != 0)
				printf(", used without full driver "
				    "support");
		}
	} else {
		printf("%s: hardware does not support DMA",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		sc->sc_dma_ok = 0;
	}
	printf("\n");
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 0;
	sc->sc_wdcdev.DMA_cap = 0;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, pciide_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp,
			    channel, &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			continue;
		/*
		 * Check to see if something appears to be there.
		 */
		failreason = NULL;
		if (!wdcprobe(&cp->wdc_channel)) {
			failreason = "not responding; disabled or no drives?";
			goto next;
		}
		/*
		 * Now, make sure it's actually attributable to this PCI IDE
		 * channel by trying to access the channel again while the
		 * PCI IDE controller's I/O space is disabled. (If the
		 * channel no longer appears to be there, it belongs to
		 * this controller.) YUCK!
		 */
		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
		    csr & ~PCI_COMMAND_IO_ENABLE);
		if (wdcprobe(&cp->wdc_channel))
			failreason = "other hardware responding at addresses";
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG, csr);
next:
		if (failreason) {
			printf("%s: %s channel ignored (%s)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
			    failreason);
			cp->hw_ok = 0;
			bus_space_unmap(cp->wdc_channel.cmd_iot,
			    cp->wdc_channel.cmd_ioh, cmdsize);
			bus_space_unmap(cp->wdc_channel.ctl_iot,
			    cp->wdc_channel.ctl_ioh, ctlsize);
		} else {
			pciide_map_compat_intr(pa, cp, channel, interface);
		}
		if (cp->hw_ok) {
			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
			wdcattach(&cp->wdc_channel);
		}
	}

	if (sc->sc_dma_ok == 0)
		return;

	/* Allocate DMA maps */
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		idedma_ctl = 0;
		cp = &sc->pciide_channels[channel];
		for (drive = 0; drive < 2; drive++) {
			drvp = &cp->wdc_channel.ch_drive[drive];
			/* If no drive, skip */
			if ((drvp->drive_flags & DRIVE) == 0)
				continue;
			if ((drvp->drive_flags & DRIVE_DMA) == 0)
				continue;
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup */
				printf("%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive);
				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
			}
			printf("%s:%d:%d: using DMA data transfers\n",
			    sc->sc_wdcdev.sc_dev.dv_xname,
			    channel, drive);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		if (idedma_ctl != 0) {
			/* Add software bits in status register */
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
			    idedma_ctl);
		}
	}
}
void
piix_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		}
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap =
	    (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) ? 4 : 2;
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		/* PIIX is compat-only */
		if (pciide_chansetup(sc, channel, 0) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* PIIX are compat-only pciide devices */
		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
		}
		pciide_map_compat_intr(pa, cp, channel, 0);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}

	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}

void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess with the drive modes: the PIIX can't have
	 * different timings for master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives support DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		drvp[0].DMA_mode = mode[0];
		drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if its mode is not compatible
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If neither drive uses DMA, take the lower mode, unless
	 * one of them is below PIO mode 2
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are set up */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we get here, neither drive uses DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * The timing mode is now set up in the controller. Enable
	 * it per drive.
	 */
	for (drive = 0; drive < 2; drive++) {
		/* If no drive, skip */
		if ((drvp[drive].drive_flags & DRIVE) == 0)
			continue;
		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
		if (drvp[drive].drive_flags & DRIVE_DMA)
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pciide_print_modes(cp);
}

void
piix3_4_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive;
	int channel = chp->channel;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
	    PIIX_SIDETIM_RTC_MASK(channel));

	idedma_ctl = 0;
	/* If channel disabled, no need to go further */
	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
		return;
	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
		    PIIX_UDMATIM_SET(0x3, channel, drive));
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0))
			goto pio;

		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
			ideconf |= PIIX_CONFIG_PINGPONG;
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
			/* setup Ultra/66 */
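			/*
			 * PIIX_CONFIG_CR reflects 80-conductor cable
			 * detection; without it, cap the drive at UDMA2
			 * (Ultra/33).
			 */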
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 2)
				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
			else
				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
			udmareg |= PIIX_UDMATIM_SET(
			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			if (drive == 0) {
				idetim |= piix_setup_idetim_timings(
				    drvp->DMA_mode, 1, channel);
			} else {
				sidetim |= piix_setup_sidetim_timings(
				    drvp->DMA_mode, 1, channel);
				idetim = PIIX_IDETIM_SET(idetim,
				    PIIX_IDETIM_SITRE, channel);
			}
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

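		/*
		 * Note: DMA-capable drives fall through to here as well,
		 * so PIO timings are always programmed.
		 */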
pio:		/* use PIO mode */
		idetim |= piix_setup_idetim_drvs(drvp);
		if (drive == 0) {
			idetim |= piix_setup_idetim_timings(
			    drvp->PIO_mode, 0, channel);
		} else {
			sidetim |= piix_setup_sidetim_timings(
			    drvp->PIO_mode, 0, channel);
			idetim = PIIX_IDETIM_SET(idetim,
			    PIIX_IDETIM_SITRE, channel);
		}
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
	pciide_print_modes(cp);
}


/* setup ISP and RTC fields, based on mode */
static u_int32_t
piix_setup_idetim_timings(mode, dma, channel)
	u_int8_t mode;
	u_int8_t dma;
	u_int8_t channel;
{

	if (dma)
		return PIIX_IDETIM_SET(0,
		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
		    channel);
	else
		return PIIX_IDETIM_SET(0,
		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
		    channel);
}

/* setup DTE, PPE, IE and TIME field based on PIO mode */
static u_int32_t
piix_setup_idetim_drvs(drvp)
	struct ata_drive_datas *drvp;
{
	u_int32_t ret = 0;
	struct channel_softc *chp = drvp->chnl_softc;
	u_int8_t channel = chp->channel;
	u_int8_t drive = drvp->drive;

	/*
	 * If the drive is using UDMA, the timing setup is independent,
	 * so just check DMA and PIO here.
	 */
	if (drvp->drive_flags & DRIVE_DMA) {
		/* if mode = DMA mode 0, use compatible timings */
		if ((drvp->drive_flags & DRIVE_DMA) &&
		    drvp->DMA_mode == 0) {
			drvp->PIO_mode = 0;
			return ret;
		}
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
		/*
		 * If the PIO and DMA timings are the same, use fast timings
		 * for PIO too, else use compat timings.
		 */
		if ((piix_isp_pio[drvp->PIO_mode] !=
		    piix_isp_dma[drvp->DMA_mode]) ||
		    (piix_rtc_pio[drvp->PIO_mode] !=
		    piix_rtc_dma[drvp->DMA_mode]))
			drvp->PIO_mode = 0;
		/* if PIO mode <= 2, use compat timings for PIO */
		if (drvp->PIO_mode <= 2) {
			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
			    channel);
			return ret;
		}
	}

	/*
	 * Now setup PIO modes. If mode < 2, use compat timings.
	 * Else enable fast timings. Enable IORDY and prefetch/post
	 * if PIO mode >= 3.
	 */

	if (drvp->PIO_mode < 2)
		return ret;

	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
	if (drvp->PIO_mode >= 3) {
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
	}
	return ret;
}

/* setup values in SIDETIM registers, based on mode */
static u_int32_t
piix_setup_sidetim_timings(mode, dma, channel)
	u_int8_t mode;
	u_int8_t dma;
	u_int8_t channel;
{
	if (dma)
		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
	else
		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
}

void
amd756_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;
	pcireg_t chanenable;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 4;
	sc->sc_wdcdev.set_modes = amd756_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);

	WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
	    DEBUG_PROBE);
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);

		if (pciide_chan_candisable(cp))
			chanenable &= ~AMD756_CHAN_EN(channel);
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;

		amd756_setup_channel(&cp->wdc_channel);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
	    chanenable);
	return;
}
1766
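/*
 * amd756_setup_channel: program the AMD756_DATATIM and AMD756_UDMA
 * registers for one channel. Ultra-DMA drives keep their PIO timings and
 * get the UDMA enable/timing bits; multiword-DMA drives share the PIO
 * timing entry, so mode is taken as min(PIO, DMA + 2), and modes <= 2
 * fall back to the mode 0 entry.
 */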
1767 void
1768 amd756_setup_channel(chp)
1769 struct channel_softc *chp;
1770 {
1771 u_int32_t udmatim_reg, datatim_reg;
1772 u_int8_t idedma_ctl;
1773 int mode, drive;
1774 struct ata_drive_datas *drvp;
1775 struct pciide_channel *cp = (struct pciide_channel*)chp;
1776 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1777
1778 idedma_ctl = 0;
1779 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1780 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1781 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1782 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1783
1784 /* setup DMA if needed */
1785 pciide_channel_dma_setup(cp);
1786
1787 for (drive = 0; drive < 2; drive++) {
1788 drvp = &chp->ch_drive[drive];
1789 /* If no drive, skip */
1790 if ((drvp->drive_flags & DRIVE) == 0)
1791 continue;
1792 /* add timing values, setup DMA if needed */
1793 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1794 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1795 mode = drvp->PIO_mode;
1796 goto pio;
1797 }
1798 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1799 (drvp->drive_flags & DRIVE_UDMA)) {
1800 /* use Ultra/DMA */
1801 drvp->drive_flags &= ~DRIVE_DMA;
1802 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1803 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1804 AMD756_UDMA_TIME(chp->channel, drive,
1805 amd756_udma_tim[drvp->UDMA_mode]);
1806 /* can use PIO timings, MW DMA unused */
1807 mode = drvp->PIO_mode;
1808 } else {
1809 /* use Multiword DMA */
1810 drvp->drive_flags &= ~DRIVE_UDMA;
1811 /* mode = min(pio, dma+2) */
1812			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1813 mode = drvp->PIO_mode;
1814 else
1815 mode = drvp->DMA_mode + 2;
1816 }
1817 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1818
1819 pio: /* setup PIO mode */
1820 if (mode <= 2) {
1821 drvp->DMA_mode = 0;
1822 drvp->PIO_mode = 0;
1823 mode = 0;
1824 } else {
1825 drvp->PIO_mode = mode;
1826 drvp->DMA_mode = mode - 2;
1827 }
1828 datatim_reg |=
1829 AMD756_DATATIM_PULSE(chp->channel, drive,
1830 amd756_pio_set[mode]) |
1831 AMD756_DATATIM_RECOV(chp->channel, drive,
1832 amd756_pio_rec[mode]);
1833 }
1834 if (idedma_ctl != 0) {
1835 /* Add software bits in status register */
1836 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1837 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1838 idedma_ctl);
1839 }
1840 pciide_print_modes(cp);
1841 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1842 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1843 }
1844
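/*
 * apollo_chip_map: attach-time setup for the VIA Apollo IDE function.
 * Ultra-DMA is only advertised on the VT82C586A variant; each channel is
 * checked against its enable bit in APO_IDECONF before being mapped and
 * programmed.
 */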
1845 void
1846 apollo_chip_map(sc, pa)
1847 struct pciide_softc *sc;
1848 struct pci_attach_args *pa;
1849 {
1850 struct pciide_channel *cp;
1851 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1852 int channel;
1853 u_int32_t ideconf;
1854 bus_size_t cmdsize, ctlsize;
1855
1856 if (pciide_chipen(sc, pa) == 0)
1857 return;
1858 printf("%s: bus-master DMA support present",
1859 sc->sc_wdcdev.sc_dev.dv_xname);
1860 pciide_mapreg_dma(sc, pa);
1861 printf("\n");
1862 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1863 WDC_CAPABILITY_MODE;
1864 if (sc->sc_dma_ok) {
1865 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1866 sc->sc_wdcdev.irqack = pciide_irqack;
1867 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE)
1868 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1869 }
1870 sc->sc_wdcdev.PIO_cap = 4;
1871 sc->sc_wdcdev.DMA_cap = 2;
1872 sc->sc_wdcdev.UDMA_cap = 2;
1873 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1874 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1875 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1876
1877 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1878 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1879 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1880 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1881 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1882 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
1883 DEBUG_PROBE);
1884
1885 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1886 cp = &sc->pciide_channels[channel];
1887 if (pciide_chansetup(sc, channel, interface) == 0)
1888 continue;
1889
1890 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
1891 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
1892 printf("%s: %s channel ignored (disabled)\n",
1893 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1894 continue;
1895 }
1896 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1897 pciide_pci_intr);
1898 if (cp->hw_ok == 0)
1899 continue;
1900 if (pciide_chan_candisable(cp)) {
1901 ideconf &= ~APO_IDECONF_EN(channel);
1902 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
1903 ideconf);
1904 }
1905 pciide_map_compat_intr(pa, cp, channel, interface);
1906
1907 if (cp->hw_ok == 0)
1908 continue;
1909 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
1910 }
1911 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1912 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1913 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
1914 }
1915
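/*
 * apollo_setup_channel: per-channel timing setup, structured like
 * amd756_setup_channel but using the APO_DATATIM and APO_UDMA registers
 * of the VIA chip.
 */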
1916 void
1917 apollo_setup_channel(chp)
1918 struct channel_softc *chp;
1919 {
1920 u_int32_t udmatim_reg, datatim_reg;
1921 u_int8_t idedma_ctl;
1922 int mode, drive;
1923 struct ata_drive_datas *drvp;
1924 struct pciide_channel *cp = (struct pciide_channel*)chp;
1925 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1926
1927 idedma_ctl = 0;
1928 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
1929 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1930 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
1931	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
1932
1933 /* setup DMA if needed */
1934 pciide_channel_dma_setup(cp);
1935
1936 for (drive = 0; drive < 2; drive++) {
1937 drvp = &chp->ch_drive[drive];
1938 /* If no drive, skip */
1939 if ((drvp->drive_flags & DRIVE) == 0)
1940 continue;
1941 /* add timing values, setup DMA if needed */
1942 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1943 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1944 mode = drvp->PIO_mode;
1945 goto pio;
1946 }
1947 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1948 (drvp->drive_flags & DRIVE_UDMA)) {
1949 /* use Ultra/DMA */
1950 drvp->drive_flags &= ~DRIVE_DMA;
1951 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
1952 APO_UDMA_EN_MTH(chp->channel, drive) |
1953 APO_UDMA_TIME(chp->channel, drive,
1954 apollo_udma_tim[drvp->UDMA_mode]);
1955 /* can use PIO timings, MW DMA unused */
1956 mode = drvp->PIO_mode;
1957 } else {
1958 /* use Multiword DMA */
1959 drvp->drive_flags &= ~DRIVE_UDMA;
1960 /* mode = min(pio, dma+2) */
1961			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1962 mode = drvp->PIO_mode;
1963 else
1964 mode = drvp->DMA_mode + 2;
1965 }
1966 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1967
1968 pio: /* setup PIO mode */
1969 if (mode <= 2) {
1970 drvp->DMA_mode = 0;
1971 drvp->PIO_mode = 0;
1972 mode = 0;
1973 } else {
1974 drvp->PIO_mode = mode;
1975 drvp->DMA_mode = mode - 2;
1976 }
1977 datatim_reg |=
1978 APO_DATATIM_PULSE(chp->channel, drive,
1979 apollo_pio_set[mode]) |
1980 APO_DATATIM_RECOV(chp->channel, drive,
1981 apollo_pio_rec[mode]);
1982 }
1983 if (idedma_ctl != 0) {
1984 /* Add software bits in status register */
1985 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1986 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1987 idedma_ctl);
1988 }
1989 pciide_print_modes(cp);
1990 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
1991 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
1992 }
1993
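/*
 * cmd_channel_map: per-channel attach helper shared by the CMD064x code.
 * When the chip identifies itself as a RAID controller the interface
 * value is faked; both channels share the command queue of channel 0,
 * and a secondary channel is only used when CMD_CTRL_2PORT is set.
 */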
1994 void
1995 cmd_channel_map(pa, sc, channel)
1996 struct pci_attach_args *pa;
1997 struct pciide_softc *sc;
1998 int channel;
1999 {
2000 struct pciide_channel *cp = &sc->pciide_channels[channel];
2001 bus_size_t cmdsize, ctlsize;
2002 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2003 int interface;
2004
2005 /*
2006 * The 0648/0649 can be told to identify as a RAID controller.
2007	 * In this case, we have to fake the interface value.
2008 */
2009 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2010 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2011 PCIIDE_INTERFACE_SETTABLE(1);
2012 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2013 CMD_CONF_DSA1)
2014 interface |= PCIIDE_INTERFACE_PCI(0) |
2015 PCIIDE_INTERFACE_PCI(1);
2016 } else {
2017 interface = PCI_INTERFACE(pa->pa_class);
2018 }
2019
2020 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2021 cp->name = PCIIDE_CHANNEL_NAME(channel);
2022 cp->wdc_channel.channel = channel;
2023 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2024
2025 if (channel > 0) {
2026 cp->wdc_channel.ch_queue =
2027 sc->pciide_channels[0].wdc_channel.ch_queue;
2028 } else {
2029 cp->wdc_channel.ch_queue =
2030 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2031 }
2032 if (cp->wdc_channel.ch_queue == NULL) {
2033		printf("%s: %s channel: "
2034		    "can't allocate memory for command queue\n",
2035 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2036 return;
2037 }
2038
2039 printf("%s: %s channel %s to %s mode\n",
2040 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2041 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2042 "configured" : "wired",
2043 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2044 "native-PCI" : "compatibility");
2045
2046 /*
2047	 * With a CMD PCI064x, if we get here, the first channel is enabled:
2048 * there's no way to disable the first channel without disabling
2049 * the whole device
2050 */
2051 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2052 printf("%s: %s channel ignored (disabled)\n",
2053 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2054 return;
2055 }
2056
2057 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2058 if (cp->hw_ok == 0)
2059 return;
2060 if (channel == 1) {
2061 if (pciide_chan_candisable(cp)) {
2062 ctrl &= ~CMD_CTRL_2PORT;
2063 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2064 CMD_CTRL, ctrl);
2065 }
2066 }
2067 pciide_map_compat_intr(pa, cp, channel, interface);
2068 }
2069
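/*
 * cmd_pci_intr: interrupt handler for native-mode CMD channels. The
 * per-channel interrupt status apparently lives in CMD_CONF (primary,
 * CMD_CONF_DRV0_INTR) and CMD_ARTTIM23 (secondary, CMD_ARTTIM23_IRQ);
 * only channels flagging an interrupt are passed to wdcintr().
 */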
2070 int
2071 cmd_pci_intr(arg)
2072 void *arg;
2073 {
2074 struct pciide_softc *sc = arg;
2075 struct pciide_channel *cp;
2076 struct channel_softc *wdc_cp;
2077 int i, rv, crv;
2078 u_int32_t priirq, secirq;
2079
2080 rv = 0;
2081 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2082 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2083 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2084 cp = &sc->pciide_channels[i];
2085 wdc_cp = &cp->wdc_channel;
2086		/* If this is a compat channel, skip it. */
2087 if (cp->compat)
2088 continue;
2089 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2090 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2091 crv = wdcintr(wdc_cp);
2092 if (crv == 0)
2093 printf("%s:%d: bogus intr\n",
2094 sc->sc_wdcdev.sc_dev.dv_xname, i);
2095 else
2096 rv = 1;
2097 }
2098 }
2099 return rv;
2100 }
2101
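/*
 * cmd_chip_map: attach for the PIO-only CMD064x variants; bus-master DMA
 * is never used here, so only 16-bit data transfers are advertised.
 */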
2102 void
2103 cmd_chip_map(sc, pa)
2104 struct pciide_softc *sc;
2105 struct pci_attach_args *pa;
2106 {
2107 int channel;
2108
2109 /*
2110 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2111	 * and the base address registers can be disabled at
2112	 * the hardware level. In this case, the device is wired
2113 * in compat mode and its first channel is always enabled,
2114 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2115 * In fact, it seems that the first channel of the CMD PCI0640
2116 * can't be disabled.
2117 */
2118
2119 #ifdef PCIIDE_CMD064x_DISABLE
2120 if (pciide_chipen(sc, pa) == 0)
2121 return;
2122 #endif
2123
2124 printf("%s: hardware does not support DMA\n",
2125 sc->sc_wdcdev.sc_dev.dv_xname);
2126 sc->sc_dma_ok = 0;
2127
2128 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2129 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2130 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2131
2132 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2133 cmd_channel_map(pa, sc, channel);
2134 }
2135 }
2136
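/*
 * cmd0643_9_chip_map: attach for the DMA-capable CMD064x parts
 * (0643 through 0649). Ultra-DMA (up to mode 4) and the special
 * cmd648_9_irqack hook are only enabled on the 0648/0649 products.
 */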
2137 void
2138 cmd0643_9_chip_map(sc, pa)
2139 struct pciide_softc *sc;
2140 struct pci_attach_args *pa;
2141 {
2142 struct pciide_channel *cp;
2143 int channel;
2144
2145 /*
2146 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2147	 * and the base address registers can be disabled at
2148	 * the hardware level. In this case, the device is wired
2149 * in compat mode and its first channel is always enabled,
2150 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2151 * In fact, it seems that the first channel of the CMD PCI0640
2152 * can't be disabled.
2153 */
2154
2155 #ifdef PCIIDE_CMD064x_DISABLE
2156 if (pciide_chipen(sc, pa) == 0)
2157 return;
2158 #endif
2159 printf("%s: bus-master DMA support present",
2160 sc->sc_wdcdev.sc_dev.dv_xname);
2161 pciide_mapreg_dma(sc, pa);
2162 printf("\n");
2163 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2164 WDC_CAPABILITY_MODE;
2165 if (sc->sc_dma_ok) {
2166 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2167 switch (sc->sc_pp->ide_product) {
2168 case PCI_PRODUCT_CMDTECH_649:
2169 case PCI_PRODUCT_CMDTECH_648:
2170 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2171 sc->sc_wdcdev.UDMA_cap = 4;
2172 sc->sc_wdcdev.irqack = cmd648_9_irqack;
2173 break;
2174 default:
2175 sc->sc_wdcdev.irqack = pciide_irqack;
2176 }
2177 }
2178
2179 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2180 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2181 sc->sc_wdcdev.PIO_cap = 4;
2182 sc->sc_wdcdev.DMA_cap = 2;
2183 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2184
2185 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2186 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2187 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2188 DEBUG_PROBE);
2189
2190 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2191 cp = &sc->pciide_channels[channel];
2192 cmd_channel_map(pa, sc, channel);
2193 if (cp->hw_ok == 0)
2194 continue;
2195 cmd0643_9_setup_channel(&cp->wdc_channel);
2196 }
2197 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2198 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2199 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2200 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2201 DEBUG_PROBE);
2202 }
2203
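/*
 * cmd0643_9_setup_channel: program the per-drive CMD_DATA_TIM entry.
 * The same timing byte serves PIO and multiword DMA, so the DMA mode is
 * clamped to PIO_mode - 2 when necessary. Ultra-DMA enable and timing
 * bits go into CMD_UDMATIM; modes above 2 are only kept when
 * CMD_BICSR_80 (presumably the 80-wire cable detect) is set for the
 * channel.
 */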
2204 void
2205 cmd0643_9_setup_channel(chp)
2206 struct channel_softc *chp;
2207 {
2208 struct ata_drive_datas *drvp;
2209 u_int8_t tim;
2210 u_int32_t idedma_ctl, udma_reg;
2211 int drive;
2212 struct pciide_channel *cp = (struct pciide_channel*)chp;
2213 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2214
2215 idedma_ctl = 0;
2216 /* setup DMA if needed */
2217 pciide_channel_dma_setup(cp);
2218
2219 for (drive = 0; drive < 2; drive++) {
2220 drvp = &chp->ch_drive[drive];
2221 /* If no drive, skip */
2222 if ((drvp->drive_flags & DRIVE) == 0)
2223 continue;
2224 /* add timing values, setup DMA if needed */
2225 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2226 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2227 if (drvp->drive_flags & DRIVE_UDMA) {
2228 /* UltraDMA on a 0648 or 0649 */
2229 udma_reg = pciide_pci_read(sc->sc_pc,
2230 sc->sc_tag, CMD_UDMATIM(chp->channel));
2231 if (drvp->UDMA_mode > 2 &&
2232 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2233 CMD_BICSR) &
2234 CMD_BICSR_80(chp->channel)) == 0)
2235 drvp->UDMA_mode = 2;
2236 if (drvp->UDMA_mode > 2)
2237 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2238 else
2239 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2240 udma_reg |= CMD_UDMATIM_UDMA(drive);
2241 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2242 CMD_UDMATIM_TIM_OFF(drive));
2243 udma_reg |=
2244 (cmd0648_9_tim_udma[drvp->UDMA_mode] <<
2245 CMD_UDMATIM_TIM_OFF(drive));
2246 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2247 CMD_UDMATIM(chp->channel), udma_reg);
2248 } else {
2249 /*
2250 * use Multiword DMA.
2251 * Timings will be used for both PIO and DMA,
2252 * so adjust DMA mode if needed
2253 * if we have a 0648/9, turn off UDMA
2254 */
2255 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2256 udma_reg = pciide_pci_read(sc->sc_pc,
2257 sc->sc_tag,
2258 CMD_UDMATIM(chp->channel));
2259 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2260 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2261 CMD_UDMATIM(chp->channel),
2262 udma_reg);
2263 }
2264 if (drvp->PIO_mode >= 3 &&
2265 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2266 drvp->DMA_mode = drvp->PIO_mode - 2;
2267 }
2268 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2269 }
2270 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2271 }
2272 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2273 CMD_DATA_TIM(chp->channel, drive), tim);
2274 }
2275 if (idedma_ctl != 0) {
2276 /* Add software bits in status register */
2277 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2278 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2279 idedma_ctl);
2280 }
2281 pciide_print_modes(cp);
2282 }
2283
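/*
 * cmd648_9_irqack: interrupt acknowledge for the 0648/0649. Reading the
 * per-channel status register and writing the value back presumably
 * clears the latched interrupt bit before the generic pciide_irqack()
 * is called.
 */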
2284 void
2285 cmd648_9_irqack(chp)
2286 struct channel_softc *chp;
2287 {
2288 u_int32_t priirq, secirq;
2289 struct pciide_channel *cp = (struct pciide_channel*)chp;
2290 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2291
2292 if (chp->channel == 0) {
2293 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2294 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2295 } else {
2296 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2297 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2298 }
2299 pciide_irqack(chp);
2300 }
2301
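/*
 * cy693_chip_map: attach for the Cypress CY82C693. The chip has one IDE
 * channel per PCI function, so the compat channel number is derived from
 * pa_function, and the hyperCache control registers are mapped through
 * cy82c693_init() for the DMA mode index used below.
 */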
2302 void
2303 cy693_chip_map(sc, pa)
2304 struct pciide_softc *sc;
2305 struct pci_attach_args *pa;
2306 {
2307 struct pciide_channel *cp;
2308 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2309 bus_size_t cmdsize, ctlsize;
2310
2311 if (pciide_chipen(sc, pa) == 0)
2312 return;
2313 /*
2314 * this chip has 2 PCI IDE functions, one for primary and one for
2315 * secondary. So we need to call pciide_mapregs_compat() with
2316 * the real channel
2317 */
2318 if (pa->pa_function == 1) {
2319 sc->sc_cy_compatchan = 0;
2320 } else if (pa->pa_function == 2) {
2321 sc->sc_cy_compatchan = 1;
2322 } else {
2323 printf("%s: unexpected PCI function %d\n",
2324 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2325 return;
2326 }
2327 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2328 printf("%s: bus-master DMA support present",
2329 sc->sc_wdcdev.sc_dev.dv_xname);
2330 pciide_mapreg_dma(sc, pa);
2331 } else {
2332 printf("%s: hardware does not support DMA",
2333 sc->sc_wdcdev.sc_dev.dv_xname);
2334 sc->sc_dma_ok = 0;
2335 }
2336 printf("\n");
2337
2338 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2339 if (sc->sc_cy_handle == NULL) {
2340 printf("%s: unable to map hyperCache control registers\n",
2341 sc->sc_wdcdev.sc_dev.dv_xname);
2342 sc->sc_dma_ok = 0;
2343 }
2344
2345 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2346 WDC_CAPABILITY_MODE;
2347 if (sc->sc_dma_ok) {
2348 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2349 sc->sc_wdcdev.irqack = pciide_irqack;
2350 }
2351 sc->sc_wdcdev.PIO_cap = 4;
2352 sc->sc_wdcdev.DMA_cap = 2;
2353 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2354
2355 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2356 sc->sc_wdcdev.nchannels = 1;
2357
2358 /* Only one channel for this chip; if we are here it's enabled */
2359 cp = &sc->pciide_channels[0];
2360 sc->wdc_chanarray[0] = &cp->wdc_channel;
2361 cp->name = PCIIDE_CHANNEL_NAME(0);
2362 cp->wdc_channel.channel = 0;
2363 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2364 cp->wdc_channel.ch_queue =
2365 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2366 if (cp->wdc_channel.ch_queue == NULL) {
2367		printf("%s: primary channel: "
2368		    "can't allocate memory for command queue\n",
2369 sc->sc_wdcdev.sc_dev.dv_xname);
2370 return;
2371 }
2372 printf("%s: primary channel %s to ",
2373 sc->sc_wdcdev.sc_dev.dv_xname,
2374 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2375 "configured" : "wired");
2376 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2377 printf("native-PCI");
2378 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2379 pciide_pci_intr);
2380 } else {
2381 printf("compatibility");
2382 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2383 &cmdsize, &ctlsize);
2384 }
2385 printf(" mode\n");
2386 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2387 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2388 wdcattach(&cp->wdc_channel);
2389 if (pciide_chan_candisable(cp)) {
2390 pci_conf_write(sc->sc_pc, sc->sc_tag,
2391 PCI_COMMAND_STATUS_REG, 0);
2392 }
2393 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2394 if (cp->hw_ok == 0)
2395 return;
2396 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2397 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2398 cy693_setup_channel(&cp->wdc_channel);
2399 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2400 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2401 }
2402
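/*
 * cy693_setup_channel: program the CY_CMD_CTRL PIO pulse/recovery fields
 * for both drives and pick a single multiword DMA mode for the channel
 * (the slowest of the DMA-capable drives), written through the
 * hyperCache index register.
 */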
2403 void
2404 cy693_setup_channel(chp)
2405 struct channel_softc *chp;
2406 {
2407 struct ata_drive_datas *drvp;
2408 int drive;
2409 u_int32_t cy_cmd_ctrl;
2410 u_int32_t idedma_ctl;
2411 struct pciide_channel *cp = (struct pciide_channel*)chp;
2412 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2413 int dma_mode = -1;
2414
2415 cy_cmd_ctrl = idedma_ctl = 0;
2416
2417 /* setup DMA if needed */
2418 pciide_channel_dma_setup(cp);
2419
2420 for (drive = 0; drive < 2; drive++) {
2421 drvp = &chp->ch_drive[drive];
2422 /* If no drive, skip */
2423 if ((drvp->drive_flags & DRIVE) == 0)
2424 continue;
2425 /* add timing values, setup DMA if needed */
2426 if (drvp->drive_flags & DRIVE_DMA) {
2427 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2428 /* use Multiword DMA */
2429 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2430 dma_mode = drvp->DMA_mode;
2431 }
2432 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2433 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2434 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2435 CY_CMD_CTRL_IOW_REC_OFF(drive));
2436 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2437 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2438 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2439 CY_CMD_CTRL_IOR_REC_OFF(drive));
2440 }
2441 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2442 chp->ch_drive[0].DMA_mode = dma_mode;
2443 chp->ch_drive[1].DMA_mode = dma_mode;
2444
2445 if (dma_mode == -1)
2446 dma_mode = 0;
2447
2448 if (sc->sc_cy_handle != NULL) {
2449 /* Note: `multiple' is implied. */
2450 cy82c693_write(sc->sc_cy_handle,
2451 (sc->sc_cy_compatchan == 0) ?
2452 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2453 }
2454
2455 pciide_print_modes(cp);
2456
2457 if (idedma_ctl != 0) {
2458 /* Add software bits in status register */
2459 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2460 IDEDMA_CTL, idedma_ctl);
2461 }
2462 }
2463
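/*
 * sis_chip_map: attach for the SiS IDE controller. Ultra-DMA is only
 * advertised for revisions >= 0xd0; the channel enable bits live in
 * SIS_CTRL0 and are cleared again when a channel turns out to be
 * unusable.
 */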
2464 void
2465 sis_chip_map(sc, pa)
2466 struct pciide_softc *sc;
2467 struct pci_attach_args *pa;
2468 {
2469 struct pciide_channel *cp;
2470 int channel;
2471 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2472 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2473 pcireg_t rev = PCI_REVISION(pa->pa_class);
2474 bus_size_t cmdsize, ctlsize;
2475
2476 if (pciide_chipen(sc, pa) == 0)
2477 return;
2478 printf("%s: bus-master DMA support present",
2479 sc->sc_wdcdev.sc_dev.dv_xname);
2480 pciide_mapreg_dma(sc, pa);
2481 printf("\n");
2482 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2483 WDC_CAPABILITY_MODE;
2484 if (sc->sc_dma_ok) {
2485 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2486 sc->sc_wdcdev.irqack = pciide_irqack;
2487 if (rev >= 0xd0)
2488 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2489 }
2490
2491 sc->sc_wdcdev.PIO_cap = 4;
2492 sc->sc_wdcdev.DMA_cap = 2;
2493 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2494 sc->sc_wdcdev.UDMA_cap = 2;
2495 sc->sc_wdcdev.set_modes = sis_setup_channel;
2496
2497 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2498 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2499
2500 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2501 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2502 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2503
2504 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2505 cp = &sc->pciide_channels[channel];
2506 if (pciide_chansetup(sc, channel, interface) == 0)
2507 continue;
2508 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2509 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2510 printf("%s: %s channel ignored (disabled)\n",
2511 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2512 continue;
2513 }
2514 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2515 pciide_pci_intr);
2516 if (cp->hw_ok == 0)
2517 continue;
2518 if (pciide_chan_candisable(cp)) {
2519 if (channel == 0)
2520 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2521 else
2522 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2523 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2524 sis_ctr0);
2525 }
2526 pciide_map_compat_intr(pa, cp, channel, interface);
2527 if (cp->hw_ok == 0)
2528 continue;
2529 sis_setup_channel(&cp->wdc_channel);
2530 }
2531 }
2532
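/*
 * sis_setup_channel: build the per-channel SIS_TIM word. Ultra-DMA
 * drives get the UDMA time/enable fields; for multiword DMA the PIO
 * active/recovery timings are shared, so the PIO and DMA modes are first
 * made consistent with each other.
 */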
2533 void
2534 sis_setup_channel(chp)
2535 struct channel_softc *chp;
2536 {
2537 struct ata_drive_datas *drvp;
2538 int drive;
2539 u_int32_t sis_tim;
2540 u_int32_t idedma_ctl;
2541 struct pciide_channel *cp = (struct pciide_channel*)chp;
2542 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2543
2544 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2545 "channel %d 0x%x\n", chp->channel,
2546 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2547 DEBUG_PROBE);
2548 sis_tim = 0;
2549 idedma_ctl = 0;
2550 /* setup DMA if needed */
2551 pciide_channel_dma_setup(cp);
2552
2553 for (drive = 0; drive < 2; drive++) {
2554 drvp = &chp->ch_drive[drive];
2555 /* If no drive, skip */
2556 if ((drvp->drive_flags & DRIVE) == 0)
2557 continue;
2558 /* add timing values, setup DMA if needed */
2559 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2560 (drvp->drive_flags & DRIVE_UDMA) == 0)
2561 goto pio;
2562
2563 if (drvp->drive_flags & DRIVE_UDMA) {
2564 /* use Ultra/DMA */
2565 drvp->drive_flags &= ~DRIVE_DMA;
2566 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2567 SIS_TIM_UDMA_TIME_OFF(drive);
2568 sis_tim |= SIS_TIM_UDMA_EN(drive);
2569 } else {
2570 /*
2571 * use Multiword DMA
2572 * Timings will be used for both PIO and DMA,
2573 * so adjust DMA mode if needed
2574 */
2575 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2576 drvp->PIO_mode = drvp->DMA_mode + 2;
2577 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2578 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2579 drvp->PIO_mode - 2 : 0;
2580 if (drvp->DMA_mode == 0)
2581 drvp->PIO_mode = 0;
2582 }
2583 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2584 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2585 SIS_TIM_ACT_OFF(drive);
2586 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2587 SIS_TIM_REC_OFF(drive);
2588 }
2589 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2590 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2591 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2592 if (idedma_ctl != 0) {
2593 /* Add software bits in status register */
2594 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2595 IDEDMA_CTL, idedma_ctl);
2596 }
2597 pciide_print_modes(cp);
2598 }
2599
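/*
 * acer_chip_map: attach for the Acer Labs (ALi) IDE controller. Besides
 * the usual capability setup this sets ACER_CDRC_DMA_EN and clears the
 * FIFO-disable bit, makes the "microsoft register bits" writable, and
 * re-reads PCI_CLASS_REG to get the real interface byte after enabling
 * the channel status bits.
 */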
2600 void
2601 acer_chip_map(sc, pa)
2602 struct pciide_softc *sc;
2603 struct pci_attach_args *pa;
2604 {
2605 struct pciide_channel *cp;
2606 int channel;
2607 pcireg_t cr, interface;
2608 bus_size_t cmdsize, ctlsize;
2609
2610 if (pciide_chipen(sc, pa) == 0)
2611 return;
2612 printf("%s: bus-master DMA support present",
2613 sc->sc_wdcdev.sc_dev.dv_xname);
2614 pciide_mapreg_dma(sc, pa);
2615 printf("\n");
2616 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2617 WDC_CAPABILITY_MODE;
2618 if (sc->sc_dma_ok) {
2619 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2620 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2621 sc->sc_wdcdev.irqack = pciide_irqack;
2622 }
2623
2624 sc->sc_wdcdev.PIO_cap = 4;
2625 sc->sc_wdcdev.DMA_cap = 2;
2626 sc->sc_wdcdev.UDMA_cap = 2;
2627 sc->sc_wdcdev.set_modes = acer_setup_channel;
2628 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2629 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2630
2631 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2632 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2633 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2634
2635 /* Enable "microsoft register bits" R/W. */
2636 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2637 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2638 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2639 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2640 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2641 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2642 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2643 ~ACER_CHANSTATUSREGS_RO);
2644 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2645 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2646 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2647 /* Don't use cr, re-read the real register content instead */
2648 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2649 PCI_CLASS_REG));
2650
2651 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2652 cp = &sc->pciide_channels[channel];
2653 if (pciide_chansetup(sc, channel, interface) == 0)
2654 continue;
2655 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2656 printf("%s: %s channel ignored (disabled)\n",
2657 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2658 continue;
2659 }
2660 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2661 acer_pci_intr);
2662 if (cp->hw_ok == 0)
2663 continue;
2664 if (pciide_chan_candisable(cp)) {
2665 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2666 pci_conf_write(sc->sc_pc, sc->sc_tag,
2667 PCI_CLASS_REG, cr);
2668 }
2669 pciide_map_compat_intr(pa, cp, channel, interface);
2670 acer_setup_channel(&cp->wdc_channel);
2671 }
2672 }
2673
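/*
 * acer_setup_channel: program the ACER_FTH_UDMA FIFO/UDMA register and
 * the per-drive ACER_IDETIM PIO timing. PIO-only drives get an
 * ACER_FTH_OPL value of 0x1, DMA/UDMA drives 0x2 (presumably FIFO
 * thresholds); multiword DMA reuses the PIO timing entry, so the two
 * modes are reconciled first.
 */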
2674 void
2675 acer_setup_channel(chp)
2676 struct channel_softc *chp;
2677 {
2678 struct ata_drive_datas *drvp;
2679 int drive;
2680 u_int32_t acer_fifo_udma;
2681 u_int32_t idedma_ctl;
2682 struct pciide_channel *cp = (struct pciide_channel*)chp;
2683 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2684
2685 idedma_ctl = 0;
2686 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2687 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2688 acer_fifo_udma), DEBUG_PROBE);
2689 /* setup DMA if needed */
2690 pciide_channel_dma_setup(cp);
2691
2692 for (drive = 0; drive < 2; drive++) {
2693 drvp = &chp->ch_drive[drive];
2694 /* If no drive, skip */
2695 if ((drvp->drive_flags & DRIVE) == 0)
2696 continue;
2697 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2698 "channel %d drive %d 0x%x\n", chp->channel, drive,
2699 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2700 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2701 /* clear FIFO/DMA mode */
2702 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2703 ACER_UDMA_EN(chp->channel, drive) |
2704 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2705
2706 /* add timing values, setup DMA if needed */
2707 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2708 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2709 acer_fifo_udma |=
2710 ACER_FTH_OPL(chp->channel, drive, 0x1);
2711 goto pio;
2712 }
2713
2714 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2715 if (drvp->drive_flags & DRIVE_UDMA) {
2716 /* use Ultra/DMA */
2717 drvp->drive_flags &= ~DRIVE_DMA;
2718 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2719 acer_fifo_udma |=
2720 ACER_UDMA_TIM(chp->channel, drive,
2721 acer_udma[drvp->UDMA_mode]);
2722 } else {
2723 /*
2724 * use Multiword DMA
2725 * Timings will be used for both PIO and DMA,
2726 * so adjust DMA mode if needed
2727 */
2728 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2729 drvp->PIO_mode = drvp->DMA_mode + 2;
2730 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2731 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2732 drvp->PIO_mode - 2 : 0;
2733 if (drvp->DMA_mode == 0)
2734 drvp->PIO_mode = 0;
2735 }
2736 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2737 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2738 ACER_IDETIM(chp->channel, drive),
2739 acer_pio[drvp->PIO_mode]);
2740 }
2741 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2742 acer_fifo_udma), DEBUG_PROBE);
2743 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2744 if (idedma_ctl != 0) {
2745 /* Add software bits in status register */
2746 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2747 IDEDMA_CTL, idedma_ctl);
2748 }
2749 pciide_print_modes(cp);
2750 }
2751
2752 int
2753 acer_pci_intr(arg)
2754 void *arg;
2755 {
2756 struct pciide_softc *sc = arg;
2757 struct pciide_channel *cp;
2758 struct channel_softc *wdc_cp;
2759 int i, rv, crv;
2760 u_int32_t chids;
2761
2762 rv = 0;
2763 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2764 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2765 cp = &sc->pciide_channels[i];
2766 wdc_cp = &cp->wdc_channel;
2767		/* If this is a compat channel, skip it. */
2768 if (cp->compat)
2769 continue;
2770 if (chids & ACER_CHIDS_INT(i)) {
2771 crv = wdcintr(wdc_cp);
2772 if (crv == 0)
2773 printf("%s:%d: bogus intr\n",
2774 sc->sc_wdcdev.sc_dev.dv_xname, i);
2775 else
2776 rv = 1;
2777 }
2778 }
2779 return rv;
2780 }
2781
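/*
 * hpt_chip_map: attach for the HighPoint HPT366/HPT370. The HPT366
 * appears as two single-channel PCI functions, while the HPT370
 * (revision HPT370_REV) is a single function with two channels and
 * per-channel enable bits in HPT370_CTRL1.
 */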
2782 void
2783 hpt_chip_map(sc, pa)
2784 struct pciide_softc *sc;
2785 struct pci_attach_args *pa;
2786 {
2787 struct pciide_channel *cp;
2788 int i, compatchan, revision;
2789 pcireg_t interface;
2790 bus_size_t cmdsize, ctlsize;
2791
2792 if (pciide_chipen(sc, pa) == 0)
2793 return;
2794 revision = PCI_REVISION(pa->pa_class);
2795
2796 /*
2797	 * When the chip is in native mode it identifies itself as
2798	 * 'misc mass storage'. Fake the interface value in this case.
2799 */
2800 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
2801 interface = PCI_INTERFACE(pa->pa_class);
2802 } else {
2803 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2804 PCIIDE_INTERFACE_PCI(0);
2805 if (revision == HPT370_REV)
2806 interface |= PCIIDE_INTERFACE_PCI(1);
2807 }
2808
2809 printf("%s: bus-master DMA support present",
2810 sc->sc_wdcdev.sc_dev.dv_xname);
2811 pciide_mapreg_dma(sc, pa);
2812 printf("\n");
2813 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2814 WDC_CAPABILITY_MODE;
2815 if (sc->sc_dma_ok) {
2816 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2817 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2818 sc->sc_wdcdev.irqack = pciide_irqack;
2819 }
2820 sc->sc_wdcdev.PIO_cap = 4;
2821 sc->sc_wdcdev.DMA_cap = 2;
2822 sc->sc_wdcdev.UDMA_cap = 4;
2823
2824 sc->sc_wdcdev.set_modes = hpt_setup_channel;
2825 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2826 sc->sc_wdcdev.nchannels = (revision == HPT366_REV) ? 1 : 2;
2827 if (revision == HPT366_REV) {
2828 /*
2829 * The 366 has 2 PCI IDE functions, one for primary and one
2830 * for secondary. So we need to call pciide_mapregs_compat()
2831 * with the real channel
2832 */
2833 if (pa->pa_function == 0) {
2834 compatchan = 0;
2835 } else if (pa->pa_function == 1) {
2836 compatchan = 1;
2837 } else {
2838 printf("%s: unexpected PCI function %d\n",
2839 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2840 return;
2841 }
2842 sc->sc_wdcdev.nchannels = 1;
2843 } else {
2844 sc->sc_wdcdev.nchannels = 2;
2845 }
2846 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2847		cp = &sc->pciide_channels[i];
2848 if (sc->sc_wdcdev.nchannels > 1) {
2849 compatchan = i;
2850			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
2851 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
2852 printf("%s: %s channel ignored (disabled)\n",
2853 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2854 continue;
2855 }
2856 }
2857 if (pciide_chansetup(sc, i, interface) == 0)
2858 continue;
2859 if (interface & PCIIDE_INTERFACE_PCI(i)) {
2860 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
2861 &ctlsize, hpt_pci_intr);
2862 } else {
2863 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
2864 &cmdsize, &ctlsize);
2865 }
2866 if (cp->hw_ok == 0)
2867 return;
2868 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2869 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2870 wdcattach(&cp->wdc_channel);
2871 hpt_setup_channel(&cp->wdc_channel);
2872 }
2873
2874 return;
2875 }
2876
2877
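/*
 * hpt_setup_channel: pick the HPT366 or HPT370 timing word for each
 * drive (Ultra-DMA, multiword DMA or PIO tables) and write it to
 * HPT_IDETIM. HPT_CSEL is read first; when the cable-detect bit for the
 * channel is set, Ultra-DMA modes above 2 are clamped to 2, presumably
 * because no 80-wire cable is present.
 */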
2878 void
2879 hpt_setup_channel(chp)
2880 struct channel_softc *chp;
2881 {
2882 struct ata_drive_datas *drvp;
2883 int drive;
2884 int cable;
2885 u_int32_t before, after;
2886 u_int32_t idedma_ctl;
2887 struct pciide_channel *cp = (struct pciide_channel*)chp;
2888 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2889
2890 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
2891
2892 /* setup DMA if needed */
2893 pciide_channel_dma_setup(cp);
2894
2895 idedma_ctl = 0;
2896
2897 /* Per drive settings */
2898 for (drive = 0; drive < 2; drive++) {
2899 drvp = &chp->ch_drive[drive];
2900 /* If no drive, skip */
2901 if ((drvp->drive_flags & DRIVE) == 0)
2902 continue;
2903 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
2904 HPT_IDETIM(chp->channel, drive));
2905
2906 /* add timing values, setup DMA if needed */
2907 if (drvp->drive_flags & DRIVE_UDMA) {
2908 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
2909 drvp->UDMA_mode > 2)
2910 drvp->UDMA_mode = 2;
2911 after = (sc->sc_wdcdev.nchannels == 2) ?
2912 hpt370_udma[drvp->UDMA_mode] :
2913 hpt366_udma[drvp->UDMA_mode];
2914 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2915 } else if (drvp->drive_flags & DRIVE_DMA) {
2916 /*
2917 * use Multiword DMA.
2918 * Timings will be used for both PIO and DMA, so adjust
2919 * DMA mode if needed
2920 */
2921 if (drvp->PIO_mode >= 3 &&
2922 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2923 drvp->DMA_mode = drvp->PIO_mode - 2;
2924 }
2925 after = (sc->sc_wdcdev.nchannels == 2) ?
2926 hpt370_dma[drvp->DMA_mode] :
2927 hpt366_dma[drvp->DMA_mode];
2928 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2929 } else {
2930 /* PIO only */
2931 after = (sc->sc_wdcdev.nchannels == 2) ?
2932 hpt370_pio[drvp->PIO_mode] :
2933 hpt366_pio[drvp->PIO_mode];
2934 }
2935 pci_conf_write(sc->sc_pc, sc->sc_tag,
2936 HPT_IDETIM(chp->channel, drive), after);
2937 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
2938 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
2939 after, before), DEBUG_PROBE);
2940 }
2941 if (idedma_ctl != 0) {
2942 /* Add software bits in status register */
2943 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2944 IDEDMA_CTL, idedma_ctl);
2945 }
2946 pciide_print_modes(cp);
2947 }
2948
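/*
 * hpt_pci_intr: shared interrupt handler. A channel is only serviced
 * when its IDEDMA_CTL_INTR bit is set; a bogus interrupt is cleared by
 * writing the DMA status back.
 */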
2949 int
2950 hpt_pci_intr(arg)
2951 void *arg;
2952 {
2953 struct pciide_softc *sc = arg;
2954 struct pciide_channel *cp;
2955 struct channel_softc *wdc_cp;
2956 int rv = 0;
2957 int dmastat, i, crv;
2958
2959 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2960 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2961 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
2962		if ((dmastat & IDEDMA_CTL_INTR) == 0)
2963 continue;
2964 cp = &sc->pciide_channels[i];
2965 wdc_cp = &cp->wdc_channel;
2966 crv = wdcintr(wdc_cp);
2967 if (crv == 0) {
2968 printf("%s:%d: bogus intr\n",
2969 sc->sc_wdcdev.sc_dev.dv_xname, i);
2970 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2971 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
2972 } else
2973 rv = 1;
2974 }
2975 return rv;
2976 }
2977
2978
2979 /* A macro to test whether the chip is the Ultra/66 (PDC20262) product */
2980 #define PDC_IS_262(sc) ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66)
2981
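/*
 * pdc202xx_chip_map: attach for the Promise PDC202xx (Ultra/33 and
 * Ultra/66). RAID mode is switched off in PDC2xx_STATE, the interface
 * value is faked from the native-mode bit, fail-safe timings are loaded
 * for every drive, and the SCR and primary/secondary mode registers are
 * put into IDE DMA compatible operation.
 */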
2982 void
2983 pdc202xx_chip_map(sc, pa)
2984 struct pciide_softc *sc;
2985 struct pci_attach_args *pa;
2986 {
2987 struct pciide_channel *cp;
2988 int channel;
2989 pcireg_t interface, st, mode;
2990 bus_size_t cmdsize, ctlsize;
2991
2992 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2993 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
2994 DEBUG_PROBE);
2995 if (pciide_chipen(sc, pa) == 0)
2996 return;
2997
2998 /* turn off RAID mode */
2999 st &= ~PDC2xx_STATE_IDERAID;
3000
3001 /*
3002	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3003	 * RAID mode; we have to fake the interface value.
3004 */
3005 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3006 if (st & PDC2xx_STATE_NATIVE)
3007 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3008
3009 printf("%s: bus-master DMA support present",
3010 sc->sc_wdcdev.sc_dev.dv_xname);
3011 pciide_mapreg_dma(sc, pa);
3012 printf("\n");
3013 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3014 WDC_CAPABILITY_MODE;
3015 if (sc->sc_dma_ok) {
3016 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3017 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3018 sc->sc_wdcdev.irqack = pciide_irqack;
3019 }
3020 sc->sc_wdcdev.PIO_cap = 4;
3021 sc->sc_wdcdev.DMA_cap = 2;
3022 if (PDC_IS_262(sc))
3023 sc->sc_wdcdev.UDMA_cap = 4;
3024 else
3025 sc->sc_wdcdev.UDMA_cap = 2;
3026 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3027 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3028 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3029
3030 /* setup failsafe defaults */
3031 mode = 0;
3032 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3033 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3034 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3035 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3036 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3037 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3038 "initial timings 0x%x, now 0x%x\n", channel,
3039 pci_conf_read(sc->sc_pc, sc->sc_tag,
3040 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3041 DEBUG_PROBE);
3042 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3043 mode | PDC2xx_TIM_IORDYp);
3044 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3045 "initial timings 0x%x, now 0x%x\n", channel,
3046 pci_conf_read(sc->sc_pc, sc->sc_tag,
3047 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3048 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3049 mode);
3050 }
3051
3052 mode = PDC2xx_SCR_DMA;
3053 if (PDC_IS_262(sc)) {
3054 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3055 } else {
3056 /* the BIOS set it up this way */
3057 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3058 }
3059 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3060 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3061 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3062 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3063 DEBUG_PROBE);
3064 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3065
3066 /* controller initial state register is OK even without BIOS */
3067 /* Set DMA mode to IDE DMA compatibility */
3068 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3069 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3070 DEBUG_PROBE);
3071 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3072 mode | 0x1);
3073 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3074 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3075 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3076 mode | 0x1);
3077
3078 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3079 cp = &sc->pciide_channels[channel];
3080 if (pciide_chansetup(sc, channel, interface) == 0)
3081 continue;
3082 if ((st & (PDC_IS_262(sc) ?
3083 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3084 printf("%s: %s channel ignored (disabled)\n",
3085 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3086 continue;
3087 }
3088 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3089 pdc202xx_pci_intr);
3090 if (cp->hw_ok == 0)
3091 continue;
3092 if (pciide_chan_candisable(cp))
3093 st &= ~(PDC_IS_262(sc) ?
3094 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3095 pciide_map_compat_intr(pa, cp, channel, interface);
3096 pdc202xx_setup_channel(&cp->wdc_channel);
3097 }
3098 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3099 DEBUG_PROBE);
3100 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3101 return;
3102 }
3103
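/*
 * pdc202xx_setup_channel: per-channel Promise setup. On the Ultra/66,
 * Ultra-DMA modes above 2 are trimmed when PDC262_STATE_80P is set for
 * the channel or when a drive on the channel is limited to mode 2;
 * PDC262_U66 is then enabled only if a drive still runs above mode 2,
 * and the PDC262_ATAPI register is adjusted when an ATAPI device shares
 * the channel. Each drive finally gets its MB/MC (DMA) and PA/PB (PIO)
 * fields in PDC2xx_TIM.
 */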
3104 void
3105 pdc202xx_setup_channel(chp)
3106 struct channel_softc *chp;
3107 {
3108 struct ata_drive_datas *drvp;
3109 int drive;
3110 pcireg_t mode, st;
3111 u_int32_t idedma_ctl, scr, atapi;
3112 struct pciide_channel *cp = (struct pciide_channel*)chp;
3113 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3114 int channel = chp->channel;
3115
3116 /* setup DMA if needed */
3117 pciide_channel_dma_setup(cp);
3118
3119 idedma_ctl = 0;
3120
3121 /* Per channel settings */
3122 if (PDC_IS_262(sc)) {
3123 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3124 PDC262_U66);
3125 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3126		/* Trim the UDMA mode */
3127 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3128 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3129 chp->ch_drive[0].UDMA_mode <= 2) ||
3130 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3131 chp->ch_drive[1].UDMA_mode <= 2)) {
3132 if (chp->ch_drive[0].UDMA_mode > 2)
3133 chp->ch_drive[0].UDMA_mode = 2;
3134 if (chp->ch_drive[1].UDMA_mode > 2)
3135 chp->ch_drive[1].UDMA_mode = 2;
3136 }
3137 /* Set U66 if needed */
3138 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3139 chp->ch_drive[0].UDMA_mode > 2) ||
3140 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3141 chp->ch_drive[1].UDMA_mode > 2))
3142 scr |= PDC262_U66_EN(channel);
3143 else
3144 scr &= ~PDC262_U66_EN(channel);
3145 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3146 PDC262_U66, scr);
3147 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3148 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3149 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3150 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3151 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3152 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3153 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3154 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3155 atapi = 0;
3156 else
3157 atapi = PDC262_ATAPI_UDMA;
3158 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3159 PDC262_ATAPI(channel), atapi);
3160 }
3161 }
3162 for (drive = 0; drive < 2; drive++) {
3163 drvp = &chp->ch_drive[drive];
3164 /* If no drive, skip */
3165 if ((drvp->drive_flags & DRIVE) == 0)
3166 continue;
3167 mode = 0;
3168 if (drvp->drive_flags & DRIVE_UDMA) {
3169 mode = PDC2xx_TIM_SET_MB(mode,
3170 pdc2xx_udma_mb[drvp->UDMA_mode]);
3171 mode = PDC2xx_TIM_SET_MC(mode,
3172 pdc2xx_udma_mc[drvp->UDMA_mode]);
3173 drvp->drive_flags &= ~DRIVE_DMA;
3174 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3175 } else if (drvp->drive_flags & DRIVE_DMA) {
3176 mode = PDC2xx_TIM_SET_MB(mode,
3177 pdc2xx_dma_mb[drvp->DMA_mode]);
3178 mode = PDC2xx_TIM_SET_MC(mode,
3179 pdc2xx_dma_mc[drvp->DMA_mode]);
3180 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3181 } else {
3182 mode = PDC2xx_TIM_SET_MB(mode,
3183 pdc2xx_dma_mb[0]);
3184 mode = PDC2xx_TIM_SET_MC(mode,
3185 pdc2xx_dma_mc[0]);
3186 }
3187 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3188 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3189 if (drvp->drive_flags & DRIVE_ATA)
3190 mode |= PDC2xx_TIM_PRE;
3191 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3192 if (drvp->PIO_mode >= 3) {
3193 mode |= PDC2xx_TIM_IORDY;
3194 if (drive == 0)
3195 mode |= PDC2xx_TIM_IORDYp;
3196 }
3197 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3198 "timings 0x%x\n",
3199 sc->sc_wdcdev.sc_dev.dv_xname,
3200 chp->channel, drive, mode), DEBUG_PROBE);
3201 pci_conf_write(sc->sc_pc, sc->sc_tag,
3202 PDC2xx_TIM(chp->channel, drive), mode);
3203 }
3204 if (idedma_ctl != 0) {
3205 /* Add software bits in status register */
3206 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3207 IDEDMA_CTL, idedma_ctl);
3208 }
3209 pciide_print_modes(cp);
3210 }
3211
3212 int
3213 pdc202xx_pci_intr(arg)
3214 void *arg;
3215 {
3216 struct pciide_softc *sc = arg;
3217 struct pciide_channel *cp;
3218 struct channel_softc *wdc_cp;
3219 int i, rv, crv;
3220 u_int32_t scr;
3221
3222 rv = 0;
3223 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3224 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3225 cp = &sc->pciide_channels[i];
3226 wdc_cp = &cp->wdc_channel;
3227		/* If this is a compat channel, skip it. */
3228 if (cp->compat)
3229 continue;
3230 if (scr & PDC2xx_SCR_INT(i)) {
3231 crv = wdcintr(wdc_cp);
3232 if (crv == 0)
3233 printf("%s:%d: bogus intr\n",
3234 sc->sc_wdcdev.sc_dev.dv_xname, i);
3235 else
3236 rv = 1;
3237 }
3238 }
3239 return rv;
3240 }
3241
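/*
 * opti_chip_map: attach for the OPTi IDE controller. No Ultra-DMA is
 * advertised; the second channel can be disabled by the
 * OPTI_INIT_CONTROL_CH2_DISABLE bit of the init-control register.
 */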
3242 void
3243 opti_chip_map(sc, pa)
3244 struct pciide_softc *sc;
3245 struct pci_attach_args *pa;
3246 {
3247 struct pciide_channel *cp;
3248 bus_size_t cmdsize, ctlsize;
3249 pcireg_t interface;
3250 u_int8_t init_ctrl;
3251 int channel;
3252
3253 if (pciide_chipen(sc, pa) == 0)
3254 return;
3255 printf("%s: bus-master DMA support present",
3256 sc->sc_wdcdev.sc_dev.dv_xname);
3257 pciide_mapreg_dma(sc, pa);
3258 printf("\n");
3259
3260 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3261 WDC_CAPABILITY_MODE;
3262 sc->sc_wdcdev.PIO_cap = 4;
3263 if (sc->sc_dma_ok) {
3264 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3265 sc->sc_wdcdev.irqack = pciide_irqack;
3266 sc->sc_wdcdev.DMA_cap = 2;
3267 }
3268 sc->sc_wdcdev.set_modes = opti_setup_channel;
3269
3270 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3271 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3272
3273 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3274 OPTI_REG_INIT_CONTROL);
3275
3276 interface = PCI_INTERFACE(pa->pa_class);
3277
3278 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3279 cp = &sc->pciide_channels[channel];
3280 if (pciide_chansetup(sc, channel, interface) == 0)
3281 continue;
3282 if (channel == 1 &&
3283 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3284 printf("%s: %s channel ignored (disabled)\n",
3285 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3286 continue;
3287 }
3288 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3289 pciide_pci_intr);
3290 if (cp->hw_ok == 0)
3291 continue;
3292 pciide_map_compat_intr(pa, cp, channel, interface);
3293 if (cp->hw_ok == 0)
3294 continue;
3295 opti_setup_channel(&cp->wdc_channel);
3296 }
3297 }
3298
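/*
 * opti_setup_channel: program the OPTi timing registers through the
 * indexed opti_read_config()/opti_write_config() helpers. Both drives
 * must use the same `Address Setup Time', so a mismatching drive is
 * forced to the other drive's mode; the timings are written with the
 * controller disabled and only re-enabled at the end.
 */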
3299 void
3300 opti_setup_channel(chp)
3301 struct channel_softc *chp;
3302 {
3303 struct ata_drive_datas *drvp;
3304 struct pciide_channel *cp = (struct pciide_channel*)chp;
3305 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3306 int drive, spd;
3307 int mode[2];
3308 u_int8_t rv, mr;
3309
3310 /*
3311	 * The `Delay' and `Address Setup Time' fields of the Miscellaneous
3312	 * Register should be zero initially; clear them anyway.
3313 */
3314 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3315 mr &= ~(OPTI_MISC_DELAY_MASK |
3316 OPTI_MISC_ADDR_SETUP_MASK |
3317 OPTI_MISC_INDEX_MASK);
3318
3319 /* Prime the control register before setting timing values */
3320 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3321
3322 /* Determine the clockrate of the PCIbus the chip is attached to */
3323 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3324 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3325
3326 /* setup DMA if needed */
3327 pciide_channel_dma_setup(cp);
3328
3329 for (drive = 0; drive < 2; drive++) {
3330 drvp = &chp->ch_drive[drive];
3331 /* If no drive, skip */
3332 if ((drvp->drive_flags & DRIVE) == 0) {
3333 mode[drive] = -1;
3334 continue;
3335 }
3336
3337 if ((drvp->drive_flags & DRIVE_DMA)) {
3338 /*
3339 * Timings will be used for both PIO and DMA,
3340 * so adjust DMA mode if needed
3341 */
3342 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3343 drvp->PIO_mode = drvp->DMA_mode + 2;
3344 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3345 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3346 drvp->PIO_mode - 2 : 0;
3347 if (drvp->DMA_mode == 0)
3348 drvp->PIO_mode = 0;
3349
3350 mode[drive] = drvp->DMA_mode + 5;
3351 } else
3352 mode[drive] = drvp->PIO_mode;
3353
3354 if (drive && mode[0] >= 0 &&
3355 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3356 /*
3357 * Can't have two drives using different values
3358 * for `Address Setup Time'.
3359 * Slow down the faster drive to compensate.
3360 */
3361 int d = (opti_tim_as[spd][mode[0]] >
3362 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3363
3364 mode[d] = mode[1-d];
3365 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3366 chp->ch_drive[d].DMA_mode = 0;
3367			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3368 }
3369 }
3370
3371 for (drive = 0; drive < 2; drive++) {
3372 int m;
3373 if ((m = mode[drive]) < 0)
3374 continue;
3375
3376 /* Set the Address Setup Time and select appropriate index */
3377 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3378 rv |= OPTI_MISC_INDEX(drive);
3379 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3380
3381 /* Set the pulse width and recovery timing parameters */
3382 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3383 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3384 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3385 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3386
3387 /* Set the Enhanced Mode register appropriately */
3388 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3389 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3390 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3391 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3392 }
3393
3394 /* Finally, enable the timings */
3395 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3396
3397 pciide_print_modes(cp);
3398 }
3399