     1 /*	$NetBSD: pciide.c,v 1.68 2000/06/12 21:25:01 bouyer Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
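
/*
 * Example (a sketch, not part of the driver): the debug mask can be
 * raised from ddb or a patched kernel to trace specific areas, e.g.
 *	wdcdebug_pciide_mask = DEBUG_DMA | DEBUG_PROBE;
 * after which the WDCDEBUG_PRINT() calls at those levels print to the
 * console.
 */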
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/cy82c693var.h>
121
122 /* inlines for reading/writing 8-bit PCI registers */
123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
124 int));
125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
126 int, u_int8_t));
127
128 static __inline u_int8_t
129 pciide_pci_read(pc, pa, reg)
130 pci_chipset_tag_t pc;
131 pcitag_t pa;
132 int reg;
133 {
134
135 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
136 ((reg & 0x03) * 8) & 0xff);
137 }
138
139 static __inline void
140 pciide_pci_write(pc, pa, reg, val)
141 pci_chipset_tag_t pc;
142 pcitag_t pa;
143 int reg;
144 u_int8_t val;
145 {
146 pcireg_t pcival;
147
148 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
149 pcival &= ~(0xff << ((reg & 0x03) * 8));
150 pcival |= (val << ((reg & 0x03) * 8));
151 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
152 }
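
/*
 * Illustrative usage (a sketch, not compiled; "pc", "tag" and the
 * offset 0x41 are placeholders): byte-wide accesses are built from
 * aligned 32-bit configuration cycles, so reading offset 0x41 reads
 * the dword at 0x40 and extracts bits 15:8, and writing it is a
 * read-modify-write of that same dword.
 */
#if 0
	u_int8_t v;

	v = pciide_pci_read(pc, tag, 0x41);		/* dword 0x40 >> 8 */
	pciide_pci_write(pc, tag, 0x41, v | 0x01);	/* RMW of dword 0x40 */
#endif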
153
154 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
155
156 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157 void piix_setup_channel __P((struct channel_softc*));
158 void piix3_4_setup_channel __P((struct channel_softc*));
159 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
160 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
161 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162
163 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164 void amd756_setup_channel __P((struct channel_softc*));
165
166 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167 void apollo_setup_channel __P((struct channel_softc*));
168
169 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
170 void cmd0643_6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_6_setup_channel __P((struct channel_softc*));
172 void cmd_channel_map __P((struct pci_attach_args *,
173 struct pciide_softc *, int));
174 int cmd_pci_intr __P((void *));
175
176 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
177 void cy693_setup_channel __P((struct channel_softc*));
178
179 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
180 void sis_setup_channel __P((struct channel_softc*));
181
182 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void acer_setup_channel __P((struct channel_softc*));
184 int acer_pci_intr __P((void *));
185
186 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
187 void pdc202xx_setup_channel __P((struct channel_softc*));
188 int pdc202xx_pci_intr __P((void *));
189
190 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
191 void opti_setup_channel __P((struct channel_softc*));
192
193 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void hpt_setup_channel __P((struct channel_softc*));
195 int hpt_pci_intr __P((void *));
196
197 void pciide_channel_dma_setup __P((struct pciide_channel *));
198 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
199 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
200 void pciide_dma_start __P((void*, int, int));
201 int pciide_dma_finish __P((void*, int, int, int));
202 void pciide_irqack __P((struct channel_softc *));
203 void pciide_print_modes __P((struct pciide_channel *));
204
205 struct pciide_product_desc {
206 u_int32_t ide_product;
207 int ide_flags;
208 const char *ide_name;
209 /* map and setup chip, probe drives */
210 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
211 };
212
213 /* Flags for ide_flags */
214 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
215
 216 /* Default product description for devices not known to this driver */
217 const struct pciide_product_desc default_product_desc = {
218 0,
219 0,
220 "Generic PCI IDE controller",
221 default_chip_map,
222 };
223
224 const struct pciide_product_desc pciide_intel_products[] = {
225 { PCI_PRODUCT_INTEL_82092AA,
226 0,
227 "Intel 82092AA IDE controller",
228 default_chip_map,
229 },
230 { PCI_PRODUCT_INTEL_82371FB_IDE,
231 0,
232 "Intel 82371FB IDE controller (PIIX)",
233 piix_chip_map,
234 },
235 { PCI_PRODUCT_INTEL_82371SB_IDE,
236 0,
237 "Intel 82371SB IDE Interface (PIIX3)",
238 piix_chip_map,
239 },
240 { PCI_PRODUCT_INTEL_82371AB_IDE,
241 0,
242 "Intel 82371AB IDE controller (PIIX4)",
243 piix_chip_map,
244 },
245 { PCI_PRODUCT_INTEL_82801AA_IDE,
246 0,
247 "Intel 82801AA IDE Controller (ICH)",
248 piix_chip_map,
249 },
250 { PCI_PRODUCT_INTEL_82801AB_IDE,
251 0,
252 "Intel 82801AB IDE Controller (ICH0)",
253 piix_chip_map,
254 },
255 { 0,
256 0,
257 NULL,
258 }
259 };
260
261 const struct pciide_product_desc pciide_amd_products[] = {
262 { PCI_PRODUCT_AMD_PBC756_IDE,
263 0,
264 "Advanced Micro Devices AMD756 IDE Controller",
265 amd756_chip_map
266 },
267 { 0,
268 0,
269 NULL,
270 }
271 };
272
273 const struct pciide_product_desc pciide_cmd_products[] = {
274 { PCI_PRODUCT_CMDTECH_640,
275 0,
276 "CMD Technology PCI0640",
277 cmd_chip_map
278 },
279 { PCI_PRODUCT_CMDTECH_643,
280 0,
281 "CMD Technology PCI0643",
282 cmd0643_6_chip_map,
283 },
284 { PCI_PRODUCT_CMDTECH_646,
285 0,
286 "CMD Technology PCI0646",
287 cmd0643_6_chip_map,
288 },
289 { 0,
290 0,
291 NULL,
292 }
293 };
294
295 const struct pciide_product_desc pciide_via_products[] = {
296 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
297 0,
298 "VIA Tech VT82C586 IDE Controller",
299 apollo_chip_map,
300 },
301 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
302 0,
303 "VIA Tech VT82C586A IDE Controller",
304 apollo_chip_map,
305 },
306 { 0,
307 0,
308 NULL,
309 }
310 };
311
312 const struct pciide_product_desc pciide_cypress_products[] = {
313 { PCI_PRODUCT_CONTAQ_82C693,
314 0,
315 "Cypress 82C693 IDE Controller",
316 cy693_chip_map,
317 },
318 { 0,
319 0,
320 NULL,
321 }
322 };
323
324 const struct pciide_product_desc pciide_sis_products[] = {
325 { PCI_PRODUCT_SIS_5597_IDE,
326 0,
327 "Silicon Integrated System 5597/5598 IDE controller",
328 sis_chip_map,
329 },
330 { 0,
331 0,
332 NULL,
333 }
334 };
335
336 const struct pciide_product_desc pciide_acer_products[] = {
337 { PCI_PRODUCT_ALI_M5229,
338 0,
339 "Acer Labs M5229 UDMA IDE Controller",
340 acer_chip_map,
341 },
342 { 0,
343 0,
344 NULL,
345 }
346 };
347
348 const struct pciide_product_desc pciide_promise_products[] = {
349 { PCI_PRODUCT_PROMISE_ULTRA33,
350 IDE_PCI_CLASS_OVERRIDE,
351 "Promise Ultra33/ATA Bus Master IDE Accelerator",
352 pdc202xx_chip_map,
353 },
354 { PCI_PRODUCT_PROMISE_ULTRA66,
355 IDE_PCI_CLASS_OVERRIDE,
356 "Promise Ultra66/ATA Bus Master IDE Accelerator",
357 pdc202xx_chip_map,
358 },
359 { 0,
360 0,
361 NULL,
362 }
363 };
364
365 const struct pciide_product_desc pciide_opti_products[] = {
366 { PCI_PRODUCT_OPTI_82C621,
367 0,
368 "OPTi 82c621 PCI IDE controller",
369 opti_chip_map,
370 },
371 { PCI_PRODUCT_OPTI_82C568,
372 0,
373 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
374 opti_chip_map,
375 },
376 { PCI_PRODUCT_OPTI_82D568,
377 0,
378 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
379 opti_chip_map,
380 },
381 { 0,
382 0,
383 NULL,
384 }
385 };
386
387 const struct pciide_product_desc pciide_triones_products[] = {
388 { PCI_PRODUCT_TRIONES_HPT366,
389 IDE_PCI_CLASS_OVERRIDE,
390 "Triones/Highpoint HPT366/370 IDE Controller",
391 hpt_chip_map,
392 },
393 { 0,
394 0,
395 NULL,
396 }
397 };
398
399 struct pciide_vendor_desc {
400 u_int32_t ide_vendor;
401 const struct pciide_product_desc *ide_products;
402 };
403
404 const struct pciide_vendor_desc pciide_vendors[] = {
405 { PCI_VENDOR_INTEL, pciide_intel_products },
406 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
407 { PCI_VENDOR_VIATECH, pciide_via_products },
408 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
409 { PCI_VENDOR_SIS, pciide_sis_products },
410 { PCI_VENDOR_ALI, pciide_acer_products },
411 { PCI_VENDOR_PROMISE, pciide_promise_products },
412 { PCI_VENDOR_AMD, pciide_amd_products },
413 { PCI_VENDOR_OPTI, pciide_opti_products },
414 { PCI_VENDOR_TRIONES, pciide_triones_products },
415 { 0, NULL }
416 };
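
/*
 * Sketch of how an additional controller would be hooked in
 * (hypothetical product ID and name, not a real device): add a
 * pciide_product_desc to the matching vendor table above, before the
 * terminating NULL entry, pointing at default_chip_map or a
 * chip-specific chip_map function.
 */
#if 0
	{ PCI_PRODUCT_FOO_IDE,		/* hypothetical product ID */
	  0,				/* or IDE_PCI_CLASS_OVERRIDE */
	  "Foo Tech IDE controller",
	  default_chip_map,
	},
#endif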
417
418 /* options passed via the 'flags' config keyword */
419 #define PCIIDE_OPTIONS_DMA 0x01
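/*
 * Example (kernel config sketch, assuming the usual pciide attachment
 * line): setting bit 0x01 in "flags" lets default_chip_map use
 * bus-master DMA on an otherwise unrecognized controller:
 *	pciide* at pci? dev ? function ? flags 0x0001
 */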
420
421 int pciide_match __P((struct device *, struct cfdata *, void *));
422 void pciide_attach __P((struct device *, struct device *, void *));
423
424 struct cfattach pciide_ca = {
425 sizeof(struct pciide_softc), pciide_match, pciide_attach
426 };
427 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
428 int pciide_mapregs_compat __P(( struct pci_attach_args *,
429 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
430 int pciide_mapregs_native __P((struct pci_attach_args *,
431 struct pciide_channel *, bus_size_t *, bus_size_t *,
432 int (*pci_intr) __P((void *))));
433 void pciide_mapreg_dma __P((struct pciide_softc *,
434 struct pci_attach_args *));
435 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
436 void pciide_mapchan __P((struct pci_attach_args *,
437 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
438 int (*pci_intr) __P((void *))));
439 int pciide_chan_candisable __P((struct pciide_channel *));
440 void pciide_map_compat_intr __P(( struct pci_attach_args *,
441 struct pciide_channel *, int, int));
442 int pciide_print __P((void *, const char *pnp));
443 int pciide_compat_intr __P((void *));
444 int pciide_pci_intr __P((void *));
445 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
446
447 const struct pciide_product_desc *
448 pciide_lookup_product(id)
449 u_int32_t id;
450 {
451 const struct pciide_product_desc *pp;
452 const struct pciide_vendor_desc *vp;
453
454 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
455 if (PCI_VENDOR(id) == vp->ide_vendor)
456 break;
457
458 if ((pp = vp->ide_products) == NULL)
459 return NULL;
460
461 for (; pp->ide_name != NULL; pp++)
462 if (PCI_PRODUCT(id) == pp->ide_product)
463 break;
464
465 if (pp->ide_name == NULL)
466 return NULL;
467 return pp;
468 }
469
470 int
471 pciide_match(parent, match, aux)
472 struct device *parent;
473 struct cfdata *match;
474 void *aux;
475 {
476 struct pci_attach_args *pa = aux;
477 const struct pciide_product_desc *pp;
478
479 /*
 480 	 * Check the class register to see that it's a PCI IDE controller.
481 * If it is, we assume that we can deal with it; it _should_
482 * work in a standardized way...
483 */
484 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
485 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
486 return (1);
487 }
488
489 /*
 490 	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
 491 	 * controllers. Let's see if we can deal with them anyway.
492 */
493 pp = pciide_lookup_product(pa->pa_id);
494 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
495 return (1);
496 }
497
498 return (0);
499 }
500
501 void
502 pciide_attach(parent, self, aux)
503 struct device *parent, *self;
504 void *aux;
505 {
506 struct pci_attach_args *pa = aux;
507 pci_chipset_tag_t pc = pa->pa_pc;
508 pcitag_t tag = pa->pa_tag;
509 struct pciide_softc *sc = (struct pciide_softc *)self;
510 pcireg_t csr;
511 char devinfo[256];
512 const char *displaydev;
513
514 sc->sc_pp = pciide_lookup_product(pa->pa_id);
515 if (sc->sc_pp == NULL) {
516 sc->sc_pp = &default_product_desc;
517 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
518 displaydev = devinfo;
519 } else
520 displaydev = sc->sc_pp->ide_name;
521
522 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
523
524 sc->sc_pc = pa->pa_pc;
525 sc->sc_tag = pa->pa_tag;
526 #ifdef WDCDEBUG
527 if (wdcdebug_pciide_mask & DEBUG_PROBE)
528 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
529 #endif
530 sc->sc_pp->chip_map(sc, pa);
531
532 if (sc->sc_dma_ok) {
533 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
534 csr |= PCI_COMMAND_MASTER_ENABLE;
535 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
536 }
537 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
538 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
539 }
540
 541 /* tell whether the chip is enabled or not */
542 int
543 pciide_chipen(sc, pa)
544 struct pciide_softc *sc;
545 struct pci_attach_args *pa;
546 {
547 pcireg_t csr;
548 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
549 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
550 PCI_COMMAND_STATUS_REG);
551 printf("%s: device disabled (at %s)\n",
552 sc->sc_wdcdev.sc_dev.dv_xname,
553 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
554 "device" : "bridge");
555 return 0;
556 }
557 return 1;
558 }
559
560 int
561 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
562 struct pci_attach_args *pa;
563 struct pciide_channel *cp;
564 int compatchan;
565 bus_size_t *cmdsizep, *ctlsizep;
566 {
567 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
568 struct channel_softc *wdc_cp = &cp->wdc_channel;
569
570 cp->compat = 1;
571 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
572 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
573
574 wdc_cp->cmd_iot = pa->pa_iot;
575 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
576 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
577 printf("%s: couldn't map %s channel cmd regs\n",
578 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
579 return (0);
580 }
581
582 wdc_cp->ctl_iot = pa->pa_iot;
583 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
584 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
585 printf("%s: couldn't map %s channel ctl regs\n",
586 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
587 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
588 PCIIDE_COMPAT_CMD_SIZE);
589 return (0);
590 }
591
592 return (1);
593 }
594
595 int
596 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
597 struct pci_attach_args * pa;
598 struct pciide_channel *cp;
599 bus_size_t *cmdsizep, *ctlsizep;
600 int (*pci_intr) __P((void *));
601 {
602 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
603 struct channel_softc *wdc_cp = &cp->wdc_channel;
604 const char *intrstr;
605 pci_intr_handle_t intrhandle;
606
607 cp->compat = 0;
608
609 if (sc->sc_pci_ih == NULL) {
610 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
611 pa->pa_intrline, &intrhandle) != 0) {
612 printf("%s: couldn't map native-PCI interrupt\n",
613 sc->sc_wdcdev.sc_dev.dv_xname);
614 return 0;
615 }
616 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
617 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
618 intrhandle, IPL_BIO, pci_intr, sc);
619 if (sc->sc_pci_ih != NULL) {
620 printf("%s: using %s for native-PCI interrupt\n",
621 sc->sc_wdcdev.sc_dev.dv_xname,
622 intrstr ? intrstr : "unknown interrupt");
623 } else {
624 printf("%s: couldn't establish native-PCI interrupt",
625 sc->sc_wdcdev.sc_dev.dv_xname);
626 if (intrstr != NULL)
627 printf(" at %s", intrstr);
628 printf("\n");
629 return 0;
630 }
631 }
632 cp->ih = sc->sc_pci_ih;
633 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
634 PCI_MAPREG_TYPE_IO, 0,
635 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
636 printf("%s: couldn't map %s channel cmd regs\n",
637 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
638 return 0;
639 }
640
641 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
642 PCI_MAPREG_TYPE_IO, 0,
643 &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) {
644 printf("%s: couldn't map %s channel ctl regs\n",
645 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
646 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
647 return 0;
648 }
649 return (1);
650 }
651
652 void
653 pciide_mapreg_dma(sc, pa)
654 struct pciide_softc *sc;
655 struct pci_attach_args *pa;
656 {
657 pcireg_t maptype;
658
659 /*
660 * Map DMA registers
661 *
662 * Note that sc_dma_ok is the right variable to test to see if
663 * DMA can be done. If the interface doesn't support DMA,
664 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
665 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
666 * non-zero if the interface supports DMA and the registers
667 * could be mapped.
668 *
669 * XXX Note that despite the fact that the Bus Master IDE specs
670 * XXX say that "The bus master IDE function uses 16 bytes of IO
671 * XXX space," some controllers (at least the United
672 * XXX Microelectronics UM8886BF) place it in memory space.
673 */
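	/*
	 * For reference (per the Bus Master IDE spec cited at the top of
	 * this file): the block holds, per channel, a command register
	 * (IDEDMA_CMD: start/stop and transfer direction), a status
	 * register (IDEDMA_CTL: interrupt/error bits plus the per-drive
	 * "DMA capable" software bits) and the physical address of the
	 * PRD table (IDEDMA_TBL), the second channel's copy living
	 * IDEDMA_SCH_OFFSET bytes further in.
	 */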
674 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
675 PCIIDE_REG_BUS_MASTER_DMA);
676
677 switch (maptype) {
678 case PCI_MAPREG_TYPE_IO:
679 case PCI_MAPREG_MEM_TYPE_32BIT:
680 sc->sc_dma_ok = (pci_mapreg_map(pa,
681 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
682 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
683 sc->sc_dmat = pa->pa_dmat;
684 if (sc->sc_dma_ok == 0) {
685 printf(", but unused (couldn't map registers)");
686 } else {
687 sc->sc_wdcdev.dma_arg = sc;
688 sc->sc_wdcdev.dma_init = pciide_dma_init;
689 sc->sc_wdcdev.dma_start = pciide_dma_start;
690 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
691 }
692 break;
693
694 default:
695 sc->sc_dma_ok = 0;
696 printf(", but unsupported register maptype (0x%x)", maptype);
697 }
698 }
699
700 int
701 pciide_compat_intr(arg)
702 void *arg;
703 {
704 struct pciide_channel *cp = arg;
705
706 #ifdef DIAGNOSTIC
707 /* should only be called for a compat channel */
708 if (cp->compat == 0)
709 panic("pciide compat intr called for non-compat chan %p\n", cp);
710 #endif
711 return (wdcintr(&cp->wdc_channel));
712 }
713
714 int
715 pciide_pci_intr(arg)
716 void *arg;
717 {
718 struct pciide_softc *sc = arg;
719 struct pciide_channel *cp;
720 struct channel_softc *wdc_cp;
721 int i, rv, crv;
722
723 rv = 0;
724 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
725 cp = &sc->pciide_channels[i];
726 wdc_cp = &cp->wdc_channel;
727
 728 		/* If a compat channel, skip. */
729 if (cp->compat)
730 continue;
 731 		/* if this channel isn't waiting for an interrupt, skip */
732 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
733 continue;
734
735 crv = wdcintr(wdc_cp);
736 if (crv == 0)
737 ; /* leave rv alone */
738 else if (crv == 1)
739 rv = 1; /* claim the intr */
740 else if (rv == 0) /* crv should be -1 in this case */
741 rv = crv; /* if we've done no better, take it */
742 }
743 return (rv);
744 }
745
746 void
747 pciide_channel_dma_setup(cp)
748 struct pciide_channel *cp;
749 {
750 int drive;
751 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
752 struct ata_drive_datas *drvp;
753
754 for (drive = 0; drive < 2; drive++) {
755 drvp = &cp->wdc_channel.ch_drive[drive];
756 /* If no drive, skip */
757 if ((drvp->drive_flags & DRIVE) == 0)
758 continue;
759 /* setup DMA if needed */
760 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
761 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
762 sc->sc_dma_ok == 0) {
763 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
764 continue;
765 }
766 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
767 != 0) {
768 /* Abort DMA setup */
769 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
770 continue;
771 }
772 }
773 }
774
775 int
776 pciide_dma_table_setup(sc, channel, drive)
777 struct pciide_softc *sc;
778 int channel, drive;
779 {
780 bus_dma_segment_t seg;
781 int error, rseg;
782 const bus_size_t dma_table_size =
783 sizeof(struct idedma_table) * NIDEDMA_TABLES;
784 struct pciide_dma_maps *dma_maps =
785 &sc->pciide_channels[channel].dma_maps[drive];
786
787 /* If table was already allocated, just return */
788 if (dma_maps->dma_table)
789 return 0;
790
791 /* Allocate memory for the DMA tables and map it */
792 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
793 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
794 BUS_DMA_NOWAIT)) != 0) {
795 printf("%s:%d: unable to allocate table DMA for "
796 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
797 channel, drive, error);
798 return error;
799 }
800 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
801 dma_table_size,
802 (caddr_t *)&dma_maps->dma_table,
803 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
 804 		printf("%s:%d: unable to map table DMA for "
805 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
806 channel, drive, error);
807 return error;
808 }
809 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
810 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
811 seg.ds_addr), DEBUG_PROBE);
812
813 /* Create and load table DMA map for this disk */
814 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
815 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
816 &dma_maps->dmamap_table)) != 0) {
817 printf("%s:%d: unable to create table DMA map for "
818 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
819 channel, drive, error);
820 return error;
821 }
822 if ((error = bus_dmamap_load(sc->sc_dmat,
823 dma_maps->dmamap_table,
824 dma_maps->dma_table,
825 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
826 printf("%s:%d: unable to load table DMA map for "
827 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
828 channel, drive, error);
829 return error;
830 }
831 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
832 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
833 /* Create a xfer DMA map for this drive */
834 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
835 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
836 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
837 &dma_maps->dmamap_xfer)) != 0) {
838 printf("%s:%d: unable to create xfer DMA map for "
839 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
840 channel, drive, error);
841 return error;
842 }
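	/*
	 * Note: the xfer map created above covers at most
	 * IDEDMA_BYTE_COUNT_MAX bytes, split over up to NIDEDMA_TABLES
	 * segments -- one per slot in the descriptor table allocated
	 * earlier in this function.
	 */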
843 return 0;
844 }
845
846 int
847 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
848 void *v;
849 int channel, drive;
850 void *databuf;
851 size_t datalen;
852 int flags;
853 {
854 struct pciide_softc *sc = v;
855 int error, seg;
856 struct pciide_dma_maps *dma_maps =
857 &sc->pciide_channels[channel].dma_maps[drive];
858
859 error = bus_dmamap_load(sc->sc_dmat,
860 dma_maps->dmamap_xfer,
861 databuf, datalen, NULL, BUS_DMA_NOWAIT);
862 if (error) {
 863 		printf("%s:%d: unable to load xfer DMA map for "
864 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
865 channel, drive, error);
866 return error;
867 }
868
869 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
870 dma_maps->dmamap_xfer->dm_mapsize,
871 (flags & WDC_DMA_READ) ?
872 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
873
874 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
875 #ifdef DIAGNOSTIC
876 /* A segment must not cross a 64k boundary */
877 {
878 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
879 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
880 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
881 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
882 printf("pciide_dma: segment %d physical addr 0x%lx"
883 " len 0x%lx not properly aligned\n",
884 seg, phys, len);
885 panic("pciide_dma: buf align");
886 }
887 }
888 #endif
889 dma_maps->dma_table[seg].base_addr =
890 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
891 dma_maps->dma_table[seg].byte_count =
892 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
893 IDEDMA_BYTE_COUNT_MASK);
894 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
895 seg, le32toh(dma_maps->dma_table[seg].byte_count),
896 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
897
898 }
899 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
900 htole32(IDEDMA_BYTE_COUNT_EOT);
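	/*
	 * Each idedma_table entry filled in above is a bus-master IDE
	 * "physical region descriptor": a 32-bit physical base address
	 * plus a byte count in the low 16 bits of the second word (0
	 * meaning 64KB), with the high bit of that word
	 * (IDEDMA_BYTE_COUNT_EOT) marking the last descriptor.
	 */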
901
902 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
903 dma_maps->dmamap_table->dm_mapsize,
904 BUS_DMASYNC_PREWRITE);
905
906 /* Maps are ready. Start DMA function */
907 #ifdef DIAGNOSTIC
908 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
909 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
910 dma_maps->dmamap_table->dm_segs[0].ds_addr);
911 panic("pciide_dma_init: table align");
912 }
913 #endif
914
915 /* Clear status bits */
916 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
917 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
918 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
919 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
920 /* Write table addr */
921 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
922 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
923 dma_maps->dmamap_table->dm_segs[0].ds_addr);
924 /* set read/write */
925 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
926 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
927 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
928 /* remember flags */
929 dma_maps->dma_flags = flags;
930 return 0;
931 }
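
/*
 * For orientation, the wdc layer drives the three hooks installed by
 * pciide_mapreg_dma() roughly in this order (a simplified sketch, not
 * the actual wdc code; wdc, chp, xfer, databuf and datalen are
 * placeholders):
 */
#if 0
	if ((*wdc->dma_init)(wdc->dma_arg, chp->channel, xfer->drive,
	    databuf, datalen, WDC_DMA_READ) == 0) {
		/* issue the ATA command, then: */
		(*wdc->dma_start)(wdc->dma_arg, chp->channel, xfer->drive);
		/* ... and from the interrupt (or timeout) path: */
		error = (*wdc->dma_finish)(wdc->dma_arg, chp->channel,
		    xfer->drive, 0);
	}
#endif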
932
933 void
934 pciide_dma_start(v, channel, drive)
935 void *v;
936 int channel, drive;
937 {
938 struct pciide_softc *sc = v;
939
940 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
941 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
942 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
943 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
944 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
945 }
946
947 int
948 pciide_dma_finish(v, channel, drive, force)
949 void *v;
950 int channel, drive;
951 int force;
952 {
953 struct pciide_softc *sc = v;
954 u_int8_t status;
955 int error = 0;
956 struct pciide_dma_maps *dma_maps =
957 &sc->pciide_channels[channel].dma_maps[drive];
958
959 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
960 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
961 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
962 DEBUG_XFERS);
963
964 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
965 return WDC_DMAST_NOIRQ;
966
967 /* stop DMA channel */
968 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
969 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
970 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
971 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
972
973 /* Unload the map of the data buffer */
974 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
975 dma_maps->dmamap_xfer->dm_mapsize,
976 (dma_maps->dma_flags & WDC_DMA_READ) ?
977 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
978 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
979
980 if ((status & IDEDMA_CTL_ERR) != 0) {
981 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
982 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
983 error |= WDC_DMAST_ERR;
984 }
985
986 if ((status & IDEDMA_CTL_INTR) == 0) {
987 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
988 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
989 drive, status);
990 error |= WDC_DMAST_NOIRQ;
991 }
992
993 if ((status & IDEDMA_CTL_ACT) != 0) {
994 /* data underrun, may be a valid condition for ATAPI */
995 error |= WDC_DMAST_UNDER;
996 }
997 return error;
998 }
999
1000 void
1001 pciide_irqack(chp)
1002 struct channel_softc *chp;
1003 {
1004 struct pciide_channel *cp = (struct pciide_channel*)chp;
1005 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1006
1007 /* clear status bits in IDE DMA registers */
1008 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1009 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1010 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1011 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1012 }
1013
1014 /* some common code used by several chip_map */
1015 int
1016 pciide_chansetup(sc, channel, interface)
1017 struct pciide_softc *sc;
1018 int channel;
1019 pcireg_t interface;
1020 {
1021 struct pciide_channel *cp = &sc->pciide_channels[channel];
1022 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1023 cp->name = PCIIDE_CHANNEL_NAME(channel);
1024 cp->wdc_channel.channel = channel;
1025 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1026 cp->wdc_channel.ch_queue =
1027 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1028 if (cp->wdc_channel.ch_queue == NULL) {
 1029 		printf("%s: %s channel: "
 1030 		    "can't allocate memory for command queue\n",
1031 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1032 return 0;
1033 }
1034 printf("%s: %s channel %s to %s mode\n",
1035 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1036 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1037 "configured" : "wired",
1038 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1039 "native-PCI" : "compatibility");
1040 return 1;
1041 }
1042
1043 /* some common code used by several chip channel_map */
1044 void
1045 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1046 struct pci_attach_args *pa;
1047 struct pciide_channel *cp;
1048 pcireg_t interface;
1049 bus_size_t *cmdsizep, *ctlsizep;
1050 int (*pci_intr) __P((void *));
1051 {
1052 struct channel_softc *wdc_cp = &cp->wdc_channel;
1053
1054 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1055 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1056 pci_intr);
1057 else
1058 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1059 wdc_cp->channel, cmdsizep, ctlsizep);
1060
1061 if (cp->hw_ok == 0)
1062 return;
1063 wdc_cp->data32iot = wdc_cp->cmd_iot;
1064 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1065 wdcattach(wdc_cp);
1066 }
1067
1068 /*
 1069  * Generic code to check whether a channel can be disabled. Returns 1
 1070  * if the channel can be disabled, 0 if not.
1071 */
1072 int
1073 pciide_chan_candisable(cp)
1074 struct pciide_channel *cp;
1075 {
1076 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1077 struct channel_softc *wdc_cp = &cp->wdc_channel;
1078
1079 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1080 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1081 printf("%s: disabling %s channel (no drives)\n",
1082 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1083 cp->hw_ok = 0;
1084 return 1;
1085 }
1086 return 0;
1087 }
1088
1089 /*
 1090  * Generic code to map the compat interrupt if hw_ok=1 and it is a compat
 1091  * channel. Sets hw_ok=0 on failure.
1092 */
1093 void
1094 pciide_map_compat_intr(pa, cp, compatchan, interface)
1095 struct pci_attach_args *pa;
1096 struct pciide_channel *cp;
1097 int compatchan, interface;
1098 {
1099 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1100 struct channel_softc *wdc_cp = &cp->wdc_channel;
1101
1102 if (cp->hw_ok == 0)
1103 return;
1104 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1105 return;
1106
1107 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1108 pa, compatchan, pciide_compat_intr, cp);
1109 if (cp->ih == NULL) {
1110 printf("%s: no compatibility interrupt for use by %s "
1111 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1112 cp->hw_ok = 0;
1113 }
1114 }
1115
1116 void
1117 pciide_print_modes(cp)
1118 struct pciide_channel *cp;
1119 {
1120 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1121 int drive;
1122 struct channel_softc *chp;
1123 struct ata_drive_datas *drvp;
1124
1125 chp = &cp->wdc_channel;
1126 for (drive = 0; drive < 2; drive++) {
1127 drvp = &chp->ch_drive[drive];
1128 if ((drvp->drive_flags & DRIVE) == 0)
1129 continue;
1130 printf("%s(%s:%d:%d): using PIO mode %d",
1131 drvp->drv_softc->dv_xname,
1132 sc->sc_wdcdev.sc_dev.dv_xname,
1133 chp->channel, drive, drvp->PIO_mode);
1134 if (drvp->drive_flags & DRIVE_DMA)
1135 printf(", DMA mode %d", drvp->DMA_mode);
1136 if (drvp->drive_flags & DRIVE_UDMA)
1137 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1138 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1139 printf(" (using DMA data transfers)");
1140 printf("\n");
1141 }
1142 }
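
/*
 * The resulting console line looks like (an illustration only; actual
 * device names depend on the configuration):
 *	wd0(pciide0:0:0): using PIO mode 4, Ultra-DMA mode 2
 *	    (using DMA data transfers)
 */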
1143
1144 void
1145 default_chip_map(sc, pa)
1146 struct pciide_softc *sc;
1147 struct pci_attach_args *pa;
1148 {
1149 struct pciide_channel *cp;
1150 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1151 pcireg_t csr;
1152 int channel, drive;
1153 struct ata_drive_datas *drvp;
1154 u_int8_t idedma_ctl;
1155 bus_size_t cmdsize, ctlsize;
1156 char *failreason;
1157
1158 if (pciide_chipen(sc, pa) == 0)
1159 return;
1160
1161 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1162 printf("%s: bus-master DMA support present",
1163 sc->sc_wdcdev.sc_dev.dv_xname);
1164 if (sc->sc_pp == &default_product_desc &&
1165 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1166 PCIIDE_OPTIONS_DMA) == 0) {
1167 printf(", but unused (no driver support)");
1168 sc->sc_dma_ok = 0;
1169 } else {
1170 pciide_mapreg_dma(sc, pa);
1171 if (sc->sc_dma_ok != 0)
1172 printf(", used without full driver "
1173 "support");
1174 }
1175 } else {
1176 printf("%s: hardware does not support DMA",
1177 sc->sc_wdcdev.sc_dev.dv_xname);
1178 sc->sc_dma_ok = 0;
1179 }
1180 printf("\n");
1181 if (sc->sc_dma_ok) {
1182 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1183 sc->sc_wdcdev.irqack = pciide_irqack;
1184 }
1185 sc->sc_wdcdev.PIO_cap = 0;
1186 sc->sc_wdcdev.DMA_cap = 0;
1187
1188 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1189 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1190 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1191
1192 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1193 cp = &sc->pciide_channels[channel];
1194 if (pciide_chansetup(sc, channel, interface) == 0)
1195 continue;
1196 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1197 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1198 &ctlsize, pciide_pci_intr);
1199 } else {
1200 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1201 channel, &cmdsize, &ctlsize);
1202 }
1203 if (cp->hw_ok == 0)
1204 continue;
1205 /*
1206 * Check to see if something appears to be there.
1207 */
1208 failreason = NULL;
1209 if (!wdcprobe(&cp->wdc_channel)) {
1210 failreason = "not responding; disabled or no drives?";
1211 goto next;
1212 }
1213 /*
1214 * Now, make sure it's actually attributable to this PCI IDE
1215 * channel by trying to access the channel again while the
1216 * PCI IDE controller's I/O space is disabled. (If the
1217 * channel no longer appears to be there, it belongs to
1218 * this controller.) YUCK!
1219 */
1220 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1221 PCI_COMMAND_STATUS_REG);
1222 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1223 csr & ~PCI_COMMAND_IO_ENABLE);
1224 if (wdcprobe(&cp->wdc_channel))
1225 failreason = "other hardware responding at addresses";
1226 pci_conf_write(sc->sc_pc, sc->sc_tag,
1227 PCI_COMMAND_STATUS_REG, csr);
1228 next:
1229 if (failreason) {
1230 printf("%s: %s channel ignored (%s)\n",
1231 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1232 failreason);
1233 cp->hw_ok = 0;
1234 bus_space_unmap(cp->wdc_channel.cmd_iot,
1235 cp->wdc_channel.cmd_ioh, cmdsize);
1236 bus_space_unmap(cp->wdc_channel.ctl_iot,
1237 cp->wdc_channel.ctl_ioh, ctlsize);
1238 } else {
1239 pciide_map_compat_intr(pa, cp, channel, interface);
1240 }
1241 if (cp->hw_ok) {
1242 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1243 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1244 wdcattach(&cp->wdc_channel);
1245 }
1246 }
1247
1248 if (sc->sc_dma_ok == 0)
1249 return;
1250
1251 /* Allocate DMA maps */
1252 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1253 idedma_ctl = 0;
1254 cp = &sc->pciide_channels[channel];
1255 for (drive = 0; drive < 2; drive++) {
1256 drvp = &cp->wdc_channel.ch_drive[drive];
1257 /* If no drive, skip */
1258 if ((drvp->drive_flags & DRIVE) == 0)
1259 continue;
1260 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1261 continue;
1262 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1263 /* Abort DMA setup */
1264 printf("%s:%d:%d: can't allocate DMA maps, "
1265 "using PIO transfers\n",
1266 sc->sc_wdcdev.sc_dev.dv_xname,
1267 channel, drive);
 1268 				drvp->drive_flags &= ~DRIVE_DMA;
				continue;	/* fall back to PIO for this drive */
1269 }
1270 printf("%s:%d:%d: using DMA data transfers\n",
1271 sc->sc_wdcdev.sc_dev.dv_xname,
1272 channel, drive);
1273 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1274 }
1275 if (idedma_ctl != 0) {
1276 /* Add software bits in status register */
1277 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1278 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1279 idedma_ctl);
1280 }
1281 }
1282 }
1283
1284 void
1285 piix_chip_map(sc, pa)
1286 struct pciide_softc *sc;
1287 struct pci_attach_args *pa;
1288 {
1289 struct pciide_channel *cp;
1290 int channel;
1291 u_int32_t idetim;
1292 bus_size_t cmdsize, ctlsize;
1293
1294 if (pciide_chipen(sc, pa) == 0)
1295 return;
1296
1297 printf("%s: bus-master DMA support present",
1298 sc->sc_wdcdev.sc_dev.dv_xname);
1299 pciide_mapreg_dma(sc, pa);
1300 printf("\n");
1301 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1302 WDC_CAPABILITY_MODE;
1303 if (sc->sc_dma_ok) {
1304 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1305 sc->sc_wdcdev.irqack = pciide_irqack;
1306 switch(sc->sc_pp->ide_product) {
1307 case PCI_PRODUCT_INTEL_82371AB_IDE:
1308 case PCI_PRODUCT_INTEL_82801AA_IDE:
1309 case PCI_PRODUCT_INTEL_82801AB_IDE:
1310 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1311 }
1312 }
1313 sc->sc_wdcdev.PIO_cap = 4;
1314 sc->sc_wdcdev.DMA_cap = 2;
1315 sc->sc_wdcdev.UDMA_cap =
1316 (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) ? 4 : 2;
1317 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1318 sc->sc_wdcdev.set_modes = piix_setup_channel;
1319 else
1320 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1321 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1322 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1323
1324 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1325 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1326 DEBUG_PROBE);
1327 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1328 WDCDEBUG_PRINT((", sidetim=0x%x",
1329 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1330 DEBUG_PROBE);
1331 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
 1332 			WDCDEBUG_PRINT((", udmareg 0x%x",
1333 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1334 DEBUG_PROBE);
1335 }
1336 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1337 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1338 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1339 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1340 DEBUG_PROBE);
1341 }
1342
1343 }
1344 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1345
1346 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1347 cp = &sc->pciide_channels[channel];
1348 /* PIIX is compat-only */
1349 if (pciide_chansetup(sc, channel, 0) == 0)
1350 continue;
1351 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1352 if ((PIIX_IDETIM_READ(idetim, channel) &
1353 PIIX_IDETIM_IDE) == 0) {
1354 printf("%s: %s channel ignored (disabled)\n",
1355 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1356 continue;
1357 }
1358 /* PIIX are compat-only pciide devices */
1359 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1360 if (cp->hw_ok == 0)
1361 continue;
1362 if (pciide_chan_candisable(cp)) {
1363 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1364 channel);
1365 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1366 idetim);
1367 }
1368 pciide_map_compat_intr(pa, cp, channel, 0);
1369 if (cp->hw_ok == 0)
1370 continue;
1371 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1372 }
1373
1374 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1375 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1376 DEBUG_PROBE);
1377 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1378 WDCDEBUG_PRINT((", sidetim=0x%x",
1379 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1380 DEBUG_PROBE);
1381 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
 1382 			WDCDEBUG_PRINT((", udmareg 0x%x",
1383 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1384 DEBUG_PROBE);
1385 }
1386 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1387 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1388 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1389 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1390 DEBUG_PROBE);
1391 }
1392 }
1393 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1394 }
1395
1396 void
1397 piix_setup_channel(chp)
1398 struct channel_softc *chp;
1399 {
1400 u_int8_t mode[2], drive;
1401 u_int32_t oidetim, idetim, idedma_ctl;
1402 struct pciide_channel *cp = (struct pciide_channel*)chp;
1403 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1404 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1405
1406 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1407 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1408 idedma_ctl = 0;
1409
1410 /* set up new idetim: Enable IDE registers decode */
1411 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1412 chp->channel);
1413
1414 /* setup DMA */
1415 pciide_channel_dma_setup(cp);
1416
1417 /*
 1418 	 * Here we have to mess with the drive modes: the PIIX can't have
 1419 	 * different timings for the master and slave drives.
1420 * We need to find the best combination.
1421 */
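	/*
	 * For instance (an illustration of the rules below): with a
	 * DMA-capable master and a PIO-only slave, the master's DMA mode
	 * is kept, and the slave keeps its PIO mode only if its ISP/RTC
	 * timings match the master's DMA timings; otherwise the slave is
	 * forced back to PIO mode 0.
	 */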
1422
 1423 	/* If both drives support DMA, take the lower mode */
1424 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1425 (drvp[1].drive_flags & DRIVE_DMA)) {
1426 mode[0] = mode[1] =
1427 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1428 drvp[0].DMA_mode = mode[0];
1429 drvp[1].DMA_mode = mode[1];
1430 goto ok;
1431 }
1432 /*
1433 * If only one drive supports DMA, use its mode, and
 1434 	 * put the other one in PIO mode 0 if its mode is not compatible
1435 */
1436 if (drvp[0].drive_flags & DRIVE_DMA) {
1437 mode[0] = drvp[0].DMA_mode;
1438 mode[1] = drvp[1].PIO_mode;
1439 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1440 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1441 mode[1] = drvp[1].PIO_mode = 0;
1442 goto ok;
1443 }
1444 if (drvp[1].drive_flags & DRIVE_DMA) {
1445 mode[1] = drvp[1].DMA_mode;
1446 mode[0] = drvp[0].PIO_mode;
1447 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1448 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1449 mode[0] = drvp[0].PIO_mode = 0;
1450 goto ok;
1451 }
1452 /*
 1453 	 * If neither drive uses DMA, take the lower mode, unless one of
 1454 	 * them is below PIO mode 2
1455 */
1456 if (drvp[0].PIO_mode < 2) {
1457 mode[0] = drvp[0].PIO_mode = 0;
1458 mode[1] = drvp[1].PIO_mode;
1459 } else if (drvp[1].PIO_mode < 2) {
1460 mode[1] = drvp[1].PIO_mode = 0;
1461 mode[0] = drvp[0].PIO_mode;
1462 } else {
1463 mode[0] = mode[1] =
1464 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1465 drvp[0].PIO_mode = mode[0];
1466 drvp[1].PIO_mode = mode[1];
1467 }
1468 ok: /* The modes are setup */
1469 for (drive = 0; drive < 2; drive++) {
1470 if (drvp[drive].drive_flags & DRIVE_DMA) {
1471 idetim |= piix_setup_idetim_timings(
1472 mode[drive], 1, chp->channel);
1473 goto end;
1474 }
1475 }
 1476 	/* If we get here, neither drive is using DMA */
1477 if (mode[0] >= 2)
1478 idetim |= piix_setup_idetim_timings(
1479 mode[0], 0, chp->channel);
1480 else
1481 idetim |= piix_setup_idetim_timings(
1482 mode[1], 0, chp->channel);
1483 end: /*
1484 * timing mode is now set up in the controller. Enable
1485 * it per-drive
1486 */
1487 for (drive = 0; drive < 2; drive++) {
1488 /* If no drive, skip */
1489 if ((drvp[drive].drive_flags & DRIVE) == 0)
1490 continue;
1491 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1492 if (drvp[drive].drive_flags & DRIVE_DMA)
1493 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1494 }
1495 if (idedma_ctl != 0) {
1496 /* Add software bits in status register */
1497 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1498 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1499 idedma_ctl);
1500 }
1501 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1502 pciide_print_modes(cp);
1503 }
1504
1505 void
1506 piix3_4_setup_channel(chp)
1507 struct channel_softc *chp;
1508 {
1509 struct ata_drive_datas *drvp;
1510 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1511 struct pciide_channel *cp = (struct pciide_channel*)chp;
1512 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1513 int drive;
1514 int channel = chp->channel;
1515
1516 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1517 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1518 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1519 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1520 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1521 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1522 PIIX_SIDETIM_RTC_MASK(channel));
1523
1524 idedma_ctl = 0;
1525 /* If channel disabled, no need to go further */
1526 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1527 return;
1528 /* set up new idetim: Enable IDE registers decode */
1529 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1530
1531 /* setup DMA if needed */
1532 pciide_channel_dma_setup(cp);
1533
1534 for (drive = 0; drive < 2; drive++) {
1535 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1536 PIIX_UDMATIM_SET(0x3, channel, drive));
1537 drvp = &chp->ch_drive[drive];
1538 /* If no drive, skip */
1539 if ((drvp->drive_flags & DRIVE) == 0)
1540 continue;
1541 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1542 (drvp->drive_flags & DRIVE_UDMA) == 0))
1543 goto pio;
1544
1545 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1546 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1547 ideconf |= PIIX_CONFIG_PINGPONG;
1548 }
1549 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1550 /* setup Ultra/66 */
1551 if (drvp->UDMA_mode > 2 &&
1552 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1553 drvp->UDMA_mode = 2;
1554 if (drvp->UDMA_mode > 2)
1555 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1556 else
1557 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1558 }
1559 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1560 (drvp->drive_flags & DRIVE_UDMA)) {
1561 /* use Ultra/DMA */
1562 drvp->drive_flags &= ~DRIVE_DMA;
1563 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
1564 udmareg |= PIIX_UDMATIM_SET(
1565 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1566 } else {
1567 /* use Multiword DMA */
1568 drvp->drive_flags &= ~DRIVE_UDMA;
1569 if (drive == 0) {
1570 idetim |= piix_setup_idetim_timings(
1571 drvp->DMA_mode, 1, channel);
1572 } else {
1573 sidetim |= piix_setup_sidetim_timings(
1574 drvp->DMA_mode, 1, channel);
 1575 				idetim = PIIX_IDETIM_SET(idetim,
1576 PIIX_IDETIM_SITRE, channel);
1577 }
1578 }
1579 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1580
1581 pio: /* use PIO mode */
1582 idetim |= piix_setup_idetim_drvs(drvp);
1583 if (drive == 0) {
1584 idetim |= piix_setup_idetim_timings(
1585 drvp->PIO_mode, 0, channel);
1586 } else {
1587 sidetim |= piix_setup_sidetim_timings(
1588 drvp->PIO_mode, 0, channel);
 1589 			idetim = PIIX_IDETIM_SET(idetim,
1590 PIIX_IDETIM_SITRE, channel);
1591 }
1592 }
1593 if (idedma_ctl != 0) {
1594 /* Add software bits in status register */
1595 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1596 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1597 idedma_ctl);
1598 }
1599 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1600 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1601 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1602 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1603 pciide_print_modes(cp);
1604 }
1605
1606
1607 /* setup ISP and RTC fields, based on mode */
1608 static u_int32_t
1609 piix_setup_idetim_timings(mode, dma, channel)
1610 u_int8_t mode;
1611 u_int8_t dma;
1612 u_int8_t channel;
1613 {
1614
1615 if (dma)
1616 return PIIX_IDETIM_SET(0,
1617 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1618 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1619 channel);
1620 else
1621 return PIIX_IDETIM_SET(0,
1622 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1623 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1624 channel);
1625 }
1626
1627 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1628 static u_int32_t
1629 piix_setup_idetim_drvs(drvp)
1630 struct ata_drive_datas *drvp;
1631 {
1632 u_int32_t ret = 0;
1633 struct channel_softc *chp = drvp->chnl_softc;
1634 u_int8_t channel = chp->channel;
1635 u_int8_t drive = drvp->drive;
1636
1637 /*
 1638 	 * If the drive is using UDMA, the timing setups are independent,
 1639 	 * so just check DMA and PIO here.
1640 */
1641 if (drvp->drive_flags & DRIVE_DMA) {
1642 /* if mode = DMA mode 0, use compatible timings */
1643 if ((drvp->drive_flags & DRIVE_DMA) &&
1644 drvp->DMA_mode == 0) {
1645 drvp->PIO_mode = 0;
1646 return ret;
1647 }
1648 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1649 /*
 1650 		 * If the PIO and DMA timings are the same, use fast timings
 1651 		 * for PIO too, else use compat timings.
1652 */
1653 if ((piix_isp_pio[drvp->PIO_mode] !=
1654 piix_isp_dma[drvp->DMA_mode]) ||
1655 (piix_rtc_pio[drvp->PIO_mode] !=
1656 piix_rtc_dma[drvp->DMA_mode]))
1657 drvp->PIO_mode = 0;
1658 /* if PIO mode <= 2, use compat timings for PIO */
1659 if (drvp->PIO_mode <= 2) {
1660 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1661 channel);
1662 return ret;
1663 }
1664 }
1665
1666 /*
1667 * Now setup PIO modes. If mode < 2, use compat timings.
1668 * Else enable fast timings. Enable IORDY and prefetch/post
1669 * if PIO mode >= 3.
1670 */
1671
1672 if (drvp->PIO_mode < 2)
1673 return ret;
1674
1675 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1676 if (drvp->PIO_mode >= 3) {
1677 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1678 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1679 }
1680 return ret;
1681 }
1682
1683 /* setup values in SIDETIM registers, based on mode */
1684 static u_int32_t
1685 piix_setup_sidetim_timings(mode, dma, channel)
1686 u_int8_t mode;
1687 u_int8_t dma;
1688 u_int8_t channel;
1689 {
1690 if (dma)
1691 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1692 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1693 else
1694 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1695 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1696 }
1697
1698 void
1699 amd756_chip_map(sc, pa)
1700 struct pciide_softc *sc;
1701 struct pci_attach_args *pa;
1702 {
1703 struct pciide_channel *cp;
1704 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1705 int channel;
1706 pcireg_t chanenable;
1707 bus_size_t cmdsize, ctlsize;
1708
1709 if (pciide_chipen(sc, pa) == 0)
1710 return;
1711 printf("%s: bus-master DMA support present",
1712 sc->sc_wdcdev.sc_dev.dv_xname);
1713 pciide_mapreg_dma(sc, pa);
1714 printf("\n");
1715 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1716 WDC_CAPABILITY_MODE;
1717 if (sc->sc_dma_ok) {
1718 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1719 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1720 sc->sc_wdcdev.irqack = pciide_irqack;
1721 }
1722 sc->sc_wdcdev.PIO_cap = 4;
1723 sc->sc_wdcdev.DMA_cap = 2;
1724 sc->sc_wdcdev.UDMA_cap = 4;
1725 sc->sc_wdcdev.set_modes = amd756_setup_channel;
1726 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1727 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1728 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1729
1730 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1731 DEBUG_PROBE);
1732 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1733 cp = &sc->pciide_channels[channel];
1734 if (pciide_chansetup(sc, channel, interface) == 0)
1735 continue;
1736
1737 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1738 printf("%s: %s channel ignored (disabled)\n",
1739 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1740 continue;
1741 }
1742 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1743 pciide_pci_intr);
1744
1745 if (pciide_chan_candisable(cp))
1746 chanenable &= ~AMD756_CHAN_EN(channel);
1747 pciide_map_compat_intr(pa, cp, channel, interface);
1748 if (cp->hw_ok == 0)
1749 continue;
1750
1751 amd756_setup_channel(&cp->wdc_channel);
1752 }
1753 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1754 chanenable);
1755 return;
1756 }
1757
1758 void
1759 amd756_setup_channel(chp)
1760 struct channel_softc *chp;
1761 {
1762 u_int32_t udmatim_reg, datatim_reg;
1763 u_int8_t idedma_ctl;
1764 int mode, drive;
1765 struct ata_drive_datas *drvp;
1766 struct pciide_channel *cp = (struct pciide_channel*)chp;
1767 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1768
1769 idedma_ctl = 0;
1770 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1771 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1772 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1773 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1774
1775 /* setup DMA if needed */
1776 pciide_channel_dma_setup(cp);
1777
1778 for (drive = 0; drive < 2; drive++) {
1779 drvp = &chp->ch_drive[drive];
1780 /* If no drive, skip */
1781 if ((drvp->drive_flags & DRIVE) == 0)
1782 continue;
1783 /* add timing values, setup DMA if needed */
1784 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1785 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1786 mode = drvp->PIO_mode;
1787 goto pio;
1788 }
1789 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1790 (drvp->drive_flags & DRIVE_UDMA)) {
1791 /* use Ultra/DMA */
1792 drvp->drive_flags &= ~DRIVE_DMA;
1793 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1794 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1795 AMD756_UDMA_TIME(chp->channel, drive,
1796 amd756_udma_tim[drvp->UDMA_mode]);
1797 /* can use PIO timings, MW DMA unused */
1798 mode = drvp->PIO_mode;
1799 } else {
1800 /* use Multiword DMA */
1801 drvp->drive_flags &= ~DRIVE_UDMA;
1802 /* mode = min(pio, dma+2) */
1803 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1804 mode = drvp->PIO_mode;
1805 else
1806 mode = drvp->DMA_mode + 2;
1807 }
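		/*
		 * (e.g. a drive at PIO 4 / MW DMA 1 ends up with mode 3
		 * here, so the shared timing register is never programmed
		 * faster than the DMA mode can sustain.)
		 */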
1808 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1809
1810 pio: /* setup PIO mode */
1811 if (mode <= 2) {
1812 drvp->DMA_mode = 0;
1813 drvp->PIO_mode = 0;
1814 mode = 0;
1815 } else {
1816 drvp->PIO_mode = mode;
1817 drvp->DMA_mode = mode - 2;
1818 }
1819 datatim_reg |=
1820 AMD756_DATATIM_PULSE(chp->channel, drive,
1821 amd756_pio_set[mode]) |
1822 AMD756_DATATIM_RECOV(chp->channel, drive,
1823 amd756_pio_rec[mode]);
1824 }
1825 if (idedma_ctl != 0) {
1826 /* Add software bits in status register */
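		/*
		 * (IDEDMA_CTL_DRV_DMA are the software-defined "drive is
		 * DMA capable" bits of the bus-master status register.)
		 */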
1827 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1828 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1829 idedma_ctl);
1830 }
1831 pciide_print_modes(cp);
1832 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1833 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1834 }
1835
1836 void
1837 apollo_chip_map(sc, pa)
1838 struct pciide_softc *sc;
1839 struct pci_attach_args *pa;
1840 {
1841 struct pciide_channel *cp;
1842 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1843 int channel;
1844 u_int32_t ideconf;
1845 bus_size_t cmdsize, ctlsize;
1846
1847 if (pciide_chipen(sc, pa) == 0)
1848 return;
1849 printf("%s: bus-master DMA support present",
1850 sc->sc_wdcdev.sc_dev.dv_xname);
1851 pciide_mapreg_dma(sc, pa);
1852 printf("\n");
1853 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1854 WDC_CAPABILITY_MODE;
1855 if (sc->sc_dma_ok) {
1856 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1857 sc->sc_wdcdev.irqack = pciide_irqack;
1858 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE)
1859 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1860 }
1861 sc->sc_wdcdev.PIO_cap = 4;
1862 sc->sc_wdcdev.DMA_cap = 2;
1863 sc->sc_wdcdev.UDMA_cap = 2;
1864 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1865 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1866 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1867
1868 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1869 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1870 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1871 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1872 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1873 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
1874 DEBUG_PROBE);
1875
1876 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1877 cp = &sc->pciide_channels[channel];
1878 if (pciide_chansetup(sc, channel, interface) == 0)
1879 continue;
1880
1881 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
1882 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
1883 printf("%s: %s channel ignored (disabled)\n",
1884 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1885 continue;
1886 }
1887 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1888 pciide_pci_intr);
1889 if (cp->hw_ok == 0)
1890 continue;
1891 if (pciide_chan_candisable(cp)) {
1892 ideconf &= ~APO_IDECONF_EN(channel);
1893 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
1894 ideconf);
1895 }
1896 pciide_map_compat_intr(pa, cp, channel, interface);
1897
1898 if (cp->hw_ok == 0)
1899 continue;
1900 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
1901 }
1902 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1903 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1904 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
1905 }
1906
1907 void
1908 apollo_setup_channel(chp)
1909 struct channel_softc *chp;
1910 {
1911 u_int32_t udmatim_reg, datatim_reg;
1912 u_int8_t idedma_ctl;
1913 int mode, drive;
1914 struct ata_drive_datas *drvp;
1915 struct pciide_channel *cp = (struct pciide_channel*)chp;
1916 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1917
1918 idedma_ctl = 0;
1919 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
1920 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1921 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
1922 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
1923
1924 /* setup DMA if needed */
1925 pciide_channel_dma_setup(cp);
1926
1927 for (drive = 0; drive < 2; drive++) {
1928 drvp = &chp->ch_drive[drive];
1929 /* If no drive, skip */
1930 if ((drvp->drive_flags & DRIVE) == 0)
1931 continue;
1932 /* add timing values, setup DMA if needed */
1933 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1934 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1935 mode = drvp->PIO_mode;
1936 goto pio;
1937 }
1938 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1939 (drvp->drive_flags & DRIVE_UDMA)) {
1940 /* use Ultra/DMA */
1941 drvp->drive_flags &= ~DRIVE_DMA;
1942 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
1943 APO_UDMA_EN_MTH(chp->channel, drive) |
1944 APO_UDMA_TIME(chp->channel, drive,
1945 apollo_udma_tim[drvp->UDMA_mode]);
1946 /* can use PIO timings, MW DMA unused */
1947 mode = drvp->PIO_mode;
1948 } else {
1949 /* use Multiword DMA */
1950 drvp->drive_flags &= ~DRIVE_UDMA;
1951 /* mode = min(pio, dma+2) */
1952 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1953 mode = drvp->PIO_mode;
1954 else
1955 mode = drvp->DMA_mode + 2;
1956 }
1957 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1958
1959 pio: /* setup PIO mode */
1960 if (mode <= 2) {
1961 drvp->DMA_mode = 0;
1962 drvp->PIO_mode = 0;
1963 mode = 0;
1964 } else {
1965 drvp->PIO_mode = mode;
1966 drvp->DMA_mode = mode - 2;
1967 }
1968 datatim_reg |=
1969 APO_DATATIM_PULSE(chp->channel, drive,
1970 apollo_pio_set[mode]) |
1971 APO_DATATIM_RECOV(chp->channel, drive,
1972 apollo_pio_rec[mode]);
1973 }
1974 if (idedma_ctl != 0) {
1975 /* Add software bits in status register */
1976 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1977 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1978 idedma_ctl);
1979 }
1980 pciide_print_modes(cp);
1981 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
1982 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
1983 }
1984
1985 void
1986 cmd_channel_map(pa, sc, channel)
1987 struct pci_attach_args *pa;
1988 struct pciide_softc *sc;
1989 int channel;
1990 {
1991 struct pciide_channel *cp = &sc->pciide_channels[channel];
1992 bus_size_t cmdsize, ctlsize;
1993 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
1994 int interface = PCI_INTERFACE(pa->pa_class);
1995
1996 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1997 cp->name = PCIIDE_CHANNEL_NAME(channel);
1998 cp->wdc_channel.channel = channel;
1999 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2000
2001 if (channel > 0) {
2002 cp->wdc_channel.ch_queue =
2003 sc->pciide_channels[0].wdc_channel.ch_queue;
2004 } else {
2005 cp->wdc_channel.ch_queue =
2006 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2007 }
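	/*
	 * (Both channels share channel 0's queue above, presumably because
	 * the CMD064x can only have one channel transferring at a time.)
	 */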
2008 if (cp->wdc_channel.ch_queue == NULL) {
2009 printf("%s %s channel: "
2010 		    "can't allocate memory for command queue\n",
2011 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2012 return;
2013 }
2014
2015 printf("%s: %s channel %s to %s mode\n",
2016 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2017 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2018 "configured" : "wired",
2019 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2020 "native-PCI" : "compatibility");
2021
2022 /*
2023 	 * with a CMD PCI064x, if we get here, the first channel is enabled:
2024 	 * there's no way to disable the first channel without disabling
2025 	 * the whole device.
2026 */
2027 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2028 printf("%s: %s channel ignored (disabled)\n",
2029 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2030 return;
2031 }
2032
2033 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2034 if (cp->hw_ok == 0)
2035 return;
2036 if (channel == 1) {
2037 if (pciide_chan_candisable(cp)) {
2038 ctrl &= ~CMD_CTRL_2PORT;
2039 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2040 CMD_CTRL, ctrl);
2041 }
2042 }
2043 pciide_map_compat_intr(pa, cp, channel, interface);
2044 }
2045
2046 int
2047 cmd_pci_intr(arg)
2048 void *arg;
2049 {
2050 struct pciide_softc *sc = arg;
2051 struct pciide_channel *cp;
2052 struct channel_softc *wdc_cp;
2053 int i, rv, crv;
2054 u_int32_t priirq, secirq;
2055
2056 rv = 0;
2057 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2058 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2059 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2060 cp = &sc->pciide_channels[i];
2061 wdc_cp = &cp->wdc_channel;
2062 /* If a compat channel skip. */
2063 if (cp->compat)
2064 continue;
2065 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2066 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2067 crv = wdcintr(wdc_cp);
2068 if (crv == 0)
2069 printf("%s:%d: bogus intr\n",
2070 sc->sc_wdcdev.sc_dev.dv_xname, i);
2071 else
2072 rv = 1;
2073 }
2074 }
2075 return rv;
2076 }
2077
2078 void
2079 cmd_chip_map(sc, pa)
2080 struct pciide_softc *sc;
2081 struct pci_attach_args *pa;
2082 {
2083 int channel;
2084
2085 /*
2086 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2087 	 * and of the base address registers can be disabled at
2088 	 * the hardware level. In this case, the device is wired
2089 	 * in compat mode and its first channel is always enabled,
2090 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2091 	 * In fact, it seems that the first channel of the CMD PCI0640
2092 	 * can't be disabled.
2093 */
2094
2095 #ifdef PCIIDE_CMD064x_DISABLE
2096 if (pciide_chipen(sc, pa) == 0)
2097 return;
2098 #endif
2099
2100 printf("%s: hardware does not support DMA\n",
2101 sc->sc_wdcdev.sc_dev.dv_xname);
2102 sc->sc_dma_ok = 0;
2103
2104 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2105 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2106 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2107
2108 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2109 cmd_channel_map(pa, sc, channel);
2110 }
2111 }
2112
2113 void
2114 cmd0643_6_chip_map(sc, pa)
2115 struct pciide_softc *sc;
2116 struct pci_attach_args *pa;
2117 {
2118 struct pciide_channel *cp;
2119 int channel;
2120
2121 /*
2122 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2123 	 * and of the base address registers can be disabled at
2124 	 * the hardware level. In this case, the device is wired
2125 	 * in compat mode and its first channel is always enabled,
2126 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2127 	 * In fact, it seems that the first channel of the CMD PCI0640
2128 	 * can't be disabled.
2129 */
2130
2131 #ifdef PCIIDE_CMD064x_DISABLE
2132 if (pciide_chipen(sc, pa) == 0)
2133 return;
2134 #endif
2135 printf("%s: bus-master DMA support present",
2136 sc->sc_wdcdev.sc_dev.dv_xname);
2137 pciide_mapreg_dma(sc, pa);
2138 printf("\n");
2139 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2140 WDC_CAPABILITY_MODE;
2141 if (sc->sc_dma_ok) {
2142 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2143 sc->sc_wdcdev.irqack = pciide_irqack;
2144 }
2145
2146 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2147 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2148 sc->sc_wdcdev.PIO_cap = 4;
2149 sc->sc_wdcdev.DMA_cap = 2;
2150 sc->sc_wdcdev.set_modes = cmd0643_6_setup_channel;
2151
2152 WDCDEBUG_PRINT(("cmd0643_6_chip_map: old timings reg 0x%x 0x%x\n",
2153 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2154 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2155 DEBUG_PROBE);
2156
2157 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2158 cp = &sc->pciide_channels[channel];
2159 cmd_channel_map(pa, sc, channel);
2160 if (cp->hw_ok == 0)
2161 continue;
2162 cmd0643_6_setup_channel(&cp->wdc_channel);
2163 }
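	/*
	 * (CMD_DMA_MULTIPLE apparently selects multi-word DMA operation
	 * for the chip's DMA engine.)
	 */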
2164 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2165 WDCDEBUG_PRINT(("cmd0643_6_chip_map: timings reg now 0x%x 0x%x\n",
2166 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2167 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2168 DEBUG_PROBE);
2169 }
2170
2171 void
2172 cmd0643_6_setup_channel(chp)
2173 struct channel_softc *chp;
2174 {
2175 struct ata_drive_datas *drvp;
2176 u_int8_t tim;
2177 u_int32_t idedma_ctl;
2178 int drive;
2179 struct pciide_channel *cp = (struct pciide_channel*)chp;
2180 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2181
2182 idedma_ctl = 0;
2183 /* setup DMA if needed */
2184 pciide_channel_dma_setup(cp);
2185
2186 for (drive = 0; drive < 2; drive++) {
2187 drvp = &chp->ch_drive[drive];
2188 /* If no drive, skip */
2189 if ((drvp->drive_flags & DRIVE) == 0)
2190 continue;
2191 /* add timing values, setup DMA if needed */
2192 tim = cmd0643_6_data_tim_pio[drvp->PIO_mode];
2193 if (drvp->drive_flags & DRIVE_DMA) {
2194 /*
2195 * use Multiword DMA.
2196 * Timings will be used for both PIO and DMA, so adjust
2197 * DMA mode if needed
2198 */
2199 if (drvp->PIO_mode >= 3 &&
2200 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2201 drvp->DMA_mode = drvp->PIO_mode - 2;
2202 }
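			/*
			 * (e.g. PIO 3 / MW DMA 2 is brought down to MW DMA 1
			 * so the shared timing still satisfies PIO 3.)
			 */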
2203 tim = cmd0643_6_data_tim_dma[drvp->DMA_mode];
2204 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2205 }
2206 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2207 CMD_DATA_TIM(chp->channel, drive), tim);
2208 }
2209 if (idedma_ctl != 0) {
2210 /* Add software bits in status register */
2211 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2212 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2213 idedma_ctl);
2214 }
2215 pciide_print_modes(cp);
2216 }
2217
2218 void
2219 cy693_chip_map(sc, pa)
2220 struct pciide_softc *sc;
2221 struct pci_attach_args *pa;
2222 {
2223 struct pciide_channel *cp;
2224 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2225 bus_size_t cmdsize, ctlsize;
2226
2227 if (pciide_chipen(sc, pa) == 0)
2228 return;
2229 /*
2230 * this chip has 2 PCI IDE functions, one for primary and one for
2231 * secondary. So we need to call pciide_mapregs_compat() with
2232 * the real channel
2233 */
2234 if (pa->pa_function == 1) {
2235 sc->sc_cy_compatchan = 0;
2236 } else if (pa->pa_function == 2) {
2237 sc->sc_cy_compatchan = 1;
2238 } else {
2239 printf("%s: unexpected PCI function %d\n",
2240 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2241 return;
2242 }
2243 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2244 printf("%s: bus-master DMA support present",
2245 sc->sc_wdcdev.sc_dev.dv_xname);
2246 pciide_mapreg_dma(sc, pa);
2247 } else {
2248 printf("%s: hardware does not support DMA",
2249 sc->sc_wdcdev.sc_dev.dv_xname);
2250 sc->sc_dma_ok = 0;
2251 }
2252 printf("\n");
2253
2254 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2255 if (sc->sc_cy_handle == NULL) {
2256 printf("%s: unable to map hyperCache control registers\n",
2257 sc->sc_wdcdev.sc_dev.dv_xname);
2258 sc->sc_dma_ok = 0;
2259 }
2260
2261 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2262 WDC_CAPABILITY_MODE;
2263 if (sc->sc_dma_ok) {
2264 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2265 sc->sc_wdcdev.irqack = pciide_irqack;
2266 }
2267 sc->sc_wdcdev.PIO_cap = 4;
2268 sc->sc_wdcdev.DMA_cap = 2;
2269 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2270
2271 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2272 sc->sc_wdcdev.nchannels = 1;
2273
2274 /* Only one channel for this chip; if we are here it's enabled */
2275 cp = &sc->pciide_channels[0];
2276 sc->wdc_chanarray[0] = &cp->wdc_channel;
2277 cp->name = PCIIDE_CHANNEL_NAME(0);
2278 cp->wdc_channel.channel = 0;
2279 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2280 cp->wdc_channel.ch_queue =
2281 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2282 if (cp->wdc_channel.ch_queue == NULL) {
2283 printf("%s primary channel: "
2284 		    "can't allocate memory for command queue\n",
2285 sc->sc_wdcdev.sc_dev.dv_xname);
2286 return;
2287 }
2288 printf("%s: primary channel %s to ",
2289 sc->sc_wdcdev.sc_dev.dv_xname,
2290 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2291 "configured" : "wired");
2292 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2293 printf("native-PCI");
2294 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2295 pciide_pci_intr);
2296 } else {
2297 printf("compatibility");
2298 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2299 &cmdsize, &ctlsize);
2300 }
2301 printf(" mode\n");
2302 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2303 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2304 wdcattach(&cp->wdc_channel);
2305 if (pciide_chan_candisable(cp)) {
2306 pci_conf_write(sc->sc_pc, sc->sc_tag,
2307 PCI_COMMAND_STATUS_REG, 0);
2308 }
2309 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2310 if (cp->hw_ok == 0)
2311 return;
2312 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2313 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2314 cy693_setup_channel(&cp->wdc_channel);
2315 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2316 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2317 }
2318
2319 void
2320 cy693_setup_channel(chp)
2321 struct channel_softc *chp;
2322 {
2323 struct ata_drive_datas *drvp;
2324 int drive;
2325 u_int32_t cy_cmd_ctrl;
2326 u_int32_t idedma_ctl;
2327 struct pciide_channel *cp = (struct pciide_channel*)chp;
2328 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2329 int dma_mode = -1;
2330
2331 cy_cmd_ctrl = idedma_ctl = 0;
2332
2333 /* setup DMA if needed */
2334 pciide_channel_dma_setup(cp);
2335
2336 for (drive = 0; drive < 2; drive++) {
2337 drvp = &chp->ch_drive[drive];
2338 /* If no drive, skip */
2339 if ((drvp->drive_flags & DRIVE) == 0)
2340 continue;
2341 /* add timing values, setup DMA if needed */
2342 if (drvp->drive_flags & DRIVE_DMA) {
2343 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2344 /* use Multiword DMA */
2345 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2346 dma_mode = drvp->DMA_mode;
2347 }
2348 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2349 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2350 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2351 CY_CMD_CTRL_IOW_REC_OFF(drive));
2352 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2353 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2354 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2355 CY_CMD_CTRL_IOR_REC_OFF(drive));
2356 }
2357 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
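	/*
	 * The chip has a single DMA timing setting per channel, so both
	 * drives are forced to the slowest DMA mode requested above.
	 */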
2358 chp->ch_drive[0].DMA_mode = dma_mode;
2359 chp->ch_drive[1].DMA_mode = dma_mode;
2360
2361 if (dma_mode == -1)
2362 dma_mode = 0;
2363
2364 if (sc->sc_cy_handle != NULL) {
2365 /* Note: `multiple' is implied. */
2366 cy82c693_write(sc->sc_cy_handle,
2367 (sc->sc_cy_compatchan == 0) ?
2368 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2369 }
2370
2371 pciide_print_modes(cp);
2372
2373 if (idedma_ctl != 0) {
2374 /* Add software bits in status register */
2375 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2376 IDEDMA_CTL, idedma_ctl);
2377 }
2378 }
2379
2380 void
2381 sis_chip_map(sc, pa)
2382 struct pciide_softc *sc;
2383 struct pci_attach_args *pa;
2384 {
2385 struct pciide_channel *cp;
2386 int channel;
2387 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2388 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2389 pcireg_t rev = PCI_REVISION(pa->pa_class);
2390 bus_size_t cmdsize, ctlsize;
2391
2392 if (pciide_chipen(sc, pa) == 0)
2393 return;
2394 printf("%s: bus-master DMA support present",
2395 sc->sc_wdcdev.sc_dev.dv_xname);
2396 pciide_mapreg_dma(sc, pa);
2397 printf("\n");
2398 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2399 WDC_CAPABILITY_MODE;
2400 if (sc->sc_dma_ok) {
2401 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2402 sc->sc_wdcdev.irqack = pciide_irqack;
2403 if (rev >= 0xd0)
2404 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2405 }
2406
2407 sc->sc_wdcdev.PIO_cap = 4;
2408 sc->sc_wdcdev.DMA_cap = 2;
2409 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2410 sc->sc_wdcdev.UDMA_cap = 2;
2411 sc->sc_wdcdev.set_modes = sis_setup_channel;
2412
2413 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2414 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2415
2416 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2417 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2418 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2419
2420 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2421 cp = &sc->pciide_channels[channel];
2422 if (pciide_chansetup(sc, channel, interface) == 0)
2423 continue;
2424 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2425 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2426 printf("%s: %s channel ignored (disabled)\n",
2427 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2428 continue;
2429 }
2430 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2431 pciide_pci_intr);
2432 if (cp->hw_ok == 0)
2433 continue;
2434 if (pciide_chan_candisable(cp)) {
2435 if (channel == 0)
2436 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2437 else
2438 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2439 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2440 sis_ctr0);
2441 }
2442 pciide_map_compat_intr(pa, cp, channel, interface);
2443 if (cp->hw_ok == 0)
2444 continue;
2445 sis_setup_channel(&cp->wdc_channel);
2446 }
2447 }
2448
2449 void
2450 sis_setup_channel(chp)
2451 struct channel_softc *chp;
2452 {
2453 struct ata_drive_datas *drvp;
2454 int drive;
2455 u_int32_t sis_tim;
2456 u_int32_t idedma_ctl;
2457 struct pciide_channel *cp = (struct pciide_channel*)chp;
2458 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2459
2460 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2461 "channel %d 0x%x\n", chp->channel,
2462 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2463 DEBUG_PROBE);
2464 sis_tim = 0;
2465 idedma_ctl = 0;
2466 /* setup DMA if needed */
2467 pciide_channel_dma_setup(cp);
2468
2469 for (drive = 0; drive < 2; drive++) {
2470 drvp = &chp->ch_drive[drive];
2471 /* If no drive, skip */
2472 if ((drvp->drive_flags & DRIVE) == 0)
2473 continue;
2474 /* add timing values, setup DMA if needed */
2475 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2476 (drvp->drive_flags & DRIVE_UDMA) == 0)
2477 goto pio;
2478
2479 if (drvp->drive_flags & DRIVE_UDMA) {
2480 /* use Ultra/DMA */
2481 drvp->drive_flags &= ~DRIVE_DMA;
2482 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2483 SIS_TIM_UDMA_TIME_OFF(drive);
2484 sis_tim |= SIS_TIM_UDMA_EN(drive);
2485 } else {
2486 /*
2487 * use Multiword DMA
2488 * Timings will be used for both PIO and DMA,
2489 * so adjust DMA mode if needed
2490 */
2491 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2492 drvp->PIO_mode = drvp->DMA_mode + 2;
2493 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2494 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2495 drvp->PIO_mode - 2 : 0;
2496 if (drvp->DMA_mode == 0)
2497 drvp->PIO_mode = 0;
2498 }
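		/*
		 * (e.g. PIO 4 / MW DMA 1 is clamped to PIO 3 / MW DMA 1
		 * so one set of timings fits both.)
		 */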
2499 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2500 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2501 SIS_TIM_ACT_OFF(drive);
2502 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2503 SIS_TIM_REC_OFF(drive);
2504 }
2505 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2506 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2507 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2508 if (idedma_ctl != 0) {
2509 /* Add software bits in status register */
2510 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2511 IDEDMA_CTL, idedma_ctl);
2512 }
2513 pciide_print_modes(cp);
2514 }
2515
2516 void
2517 acer_chip_map(sc, pa)
2518 struct pciide_softc *sc;
2519 struct pci_attach_args *pa;
2520 {
2521 struct pciide_channel *cp;
2522 int channel;
2523 pcireg_t cr, interface;
2524 bus_size_t cmdsize, ctlsize;
2525
2526 if (pciide_chipen(sc, pa) == 0)
2527 return;
2528 printf("%s: bus-master DMA support present",
2529 sc->sc_wdcdev.sc_dev.dv_xname);
2530 pciide_mapreg_dma(sc, pa);
2531 printf("\n");
2532 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2533 WDC_CAPABILITY_MODE;
2534 if (sc->sc_dma_ok) {
2535 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2536 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2537 sc->sc_wdcdev.irqack = pciide_irqack;
2538 }
2539
2540 sc->sc_wdcdev.PIO_cap = 4;
2541 sc->sc_wdcdev.DMA_cap = 2;
2542 sc->sc_wdcdev.UDMA_cap = 2;
2543 sc->sc_wdcdev.set_modes = acer_setup_channel;
2544 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2545 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2546
2547 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2548 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2549 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2550
2551 /* Enable "microsoft register bits" R/W. */
2552 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2553 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2554 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2555 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2556 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2557 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2558 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2559 ~ACER_CHANSTATUSREGS_RO);
2560 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2561 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2562 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2563 /* Don't use cr, re-read the real register content instead */
2564 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2565 PCI_CLASS_REG));
2566
2567 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2568 cp = &sc->pciide_channels[channel];
2569 if (pciide_chansetup(sc, channel, interface) == 0)
2570 continue;
2571 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2572 printf("%s: %s channel ignored (disabled)\n",
2573 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2574 continue;
2575 }
2576 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2577 acer_pci_intr);
2578 if (cp->hw_ok == 0)
2579 continue;
2580 if (pciide_chan_candisable(cp)) {
2581 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2582 pci_conf_write(sc->sc_pc, sc->sc_tag,
2583 PCI_CLASS_REG, cr);
2584 }
2585 pciide_map_compat_intr(pa, cp, channel, interface);
2586 acer_setup_channel(&cp->wdc_channel);
2587 }
2588 }
2589
2590 void
2591 acer_setup_channel(chp)
2592 struct channel_softc *chp;
2593 {
2594 struct ata_drive_datas *drvp;
2595 int drive;
2596 u_int32_t acer_fifo_udma;
2597 u_int32_t idedma_ctl;
2598 struct pciide_channel *cp = (struct pciide_channel*)chp;
2599 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2600
2601 idedma_ctl = 0;
2602 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2603 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2604 acer_fifo_udma), DEBUG_PROBE);
2605 /* setup DMA if needed */
2606 pciide_channel_dma_setup(cp);
2607
2608 for (drive = 0; drive < 2; drive++) {
2609 drvp = &chp->ch_drive[drive];
2610 /* If no drive, skip */
2611 if ((drvp->drive_flags & DRIVE) == 0)
2612 continue;
2613 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2614 "channel %d drive %d 0x%x\n", chp->channel, drive,
2615 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2616 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2617 /* clear FIFO/DMA mode */
2618 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2619 ACER_UDMA_EN(chp->channel, drive) |
2620 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2621
2622 /* add timing values, setup DMA if needed */
2623 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2624 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2625 acer_fifo_udma |=
2626 ACER_FTH_OPL(chp->channel, drive, 0x1);
2627 goto pio;
2628 }
2629
2630 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2631 if (drvp->drive_flags & DRIVE_UDMA) {
2632 /* use Ultra/DMA */
2633 drvp->drive_flags &= ~DRIVE_DMA;
2634 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2635 acer_fifo_udma |=
2636 ACER_UDMA_TIM(chp->channel, drive,
2637 acer_udma[drvp->UDMA_mode]);
2638 } else {
2639 /*
2640 * use Multiword DMA
2641 * Timings will be used for both PIO and DMA,
2642 * so adjust DMA mode if needed
2643 */
2644 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2645 drvp->PIO_mode = drvp->DMA_mode + 2;
2646 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2647 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2648 drvp->PIO_mode - 2 : 0;
2649 if (drvp->DMA_mode == 0)
2650 drvp->PIO_mode = 0;
2651 }
2652 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2653 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2654 ACER_IDETIM(chp->channel, drive),
2655 acer_pio[drvp->PIO_mode]);
2656 }
2657 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2658 acer_fifo_udma), DEBUG_PROBE);
2659 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2660 if (idedma_ctl != 0) {
2661 /* Add software bits in status register */
2662 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2663 IDEDMA_CTL, idedma_ctl);
2664 }
2665 pciide_print_modes(cp);
2666 }
2667
2668 int
2669 acer_pci_intr(arg)
2670 void *arg;
2671 {
2672 struct pciide_softc *sc = arg;
2673 struct pciide_channel *cp;
2674 struct channel_softc *wdc_cp;
2675 int i, rv, crv;
2676 u_int32_t chids;
2677
2678 rv = 0;
2679 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2680 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2681 cp = &sc->pciide_channels[i];
2682 wdc_cp = &cp->wdc_channel;
2683 /* If a compat channel skip. */
2684 if (cp->compat)
2685 continue;
2686 if (chids & ACER_CHIDS_INT(i)) {
2687 crv = wdcintr(wdc_cp);
2688 if (crv == 0)
2689 printf("%s:%d: bogus intr\n",
2690 sc->sc_wdcdev.sc_dev.dv_xname, i);
2691 else
2692 rv = 1;
2693 }
2694 }
2695 return rv;
2696 }
2697
2698 void
2699 hpt_chip_map(sc, pa)
2700 struct pciide_softc *sc;
2701 struct pci_attach_args *pa;
2702 {
2703 struct pciide_channel *cp;
2704 int i, compatchan, revision;
2705 pcireg_t interface;
2706 bus_size_t cmdsize, ctlsize;
2707
2708 if (pciide_chipen(sc, pa) == 0)
2709 return;
2710 revision = PCI_REVISION(pa->pa_class);
2711
2712 /*
2713 	 * When the chip is in native mode it identifies itself as a
2714 	 * 'misc mass storage' device. Fake the interface in this case.
2715 */
2716 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
2717 interface = PCI_INTERFACE(pa->pa_class);
2718 } else {
2719 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2720 PCIIDE_INTERFACE_PCI(0);
2721 if (revision == HPT370_REV)
2722 interface |= PCIIDE_INTERFACE_PCI(1);
2723 }
2724
2725 printf("%s: bus-master DMA support present",
2726 sc->sc_wdcdev.sc_dev.dv_xname);
2727 pciide_mapreg_dma(sc, pa);
2728 printf("\n");
2729 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2730 WDC_CAPABILITY_MODE;
2731 if (sc->sc_dma_ok) {
2732 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2733 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2734 sc->sc_wdcdev.irqack = pciide_irqack;
2735 }
2736 sc->sc_wdcdev.PIO_cap = 4;
2737 sc->sc_wdcdev.DMA_cap = 2;
2738 sc->sc_wdcdev.UDMA_cap = 4;
2739
2740 sc->sc_wdcdev.set_modes = hpt_setup_channel;
2741 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2742 sc->sc_wdcdev.nchannels = (revision == HPT366_REV) ? 1 : 2;
2743 if (revision == HPT366_REV) {
2744 /*
2745 * The 366 has 2 PCI IDE functions, one for primary and one
2746 * for secondary. So we need to call pciide_mapregs_compat()
2747 * with the real channel
2748 */
2749 if (pa->pa_function == 0) {
2750 compatchan = 0;
2751 } else if (pa->pa_function == 1) {
2752 compatchan = 1;
2753 } else {
2754 printf("%s: unexpected PCI function %d\n",
2755 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2756 return;
2757 }
2758 sc->sc_wdcdev.nchannels = 1;
2759 } else {
2760 sc->sc_wdcdev.nchannels = 2;
2761 }
2762 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2763 		cp = &sc->pciide_channels[i];
2764 if (sc->sc_wdcdev.nchannels > 1) {
2765 compatchan = i;
2766 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
2767 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
2768 printf("%s: %s channel ignored (disabled)\n",
2769 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2770 continue;
2771 }
2772 }
2773 if (pciide_chansetup(sc, i, interface) == 0)
2774 continue;
2775 if (interface & PCIIDE_INTERFACE_PCI(i)) {
2776 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
2777 &ctlsize, hpt_pci_intr);
2778 } else {
2779 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
2780 &cmdsize, &ctlsize);
2781 }
2782 if (cp->hw_ok == 0)
2783 return;
2784 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2785 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2786 wdcattach(&cp->wdc_channel);
2787 hpt_setup_channel(&cp->wdc_channel);
2788 }
2789
2790 return;
2791 }
2792
2793
2794 void
2795 hpt_setup_channel(chp)
2796 struct channel_softc *chp;
2797 {
2798 struct ata_drive_datas *drvp;
2799 int drive;
2800 int cable;
2801 u_int32_t before, after;
2802 u_int32_t idedma_ctl;
2803 struct pciide_channel *cp = (struct pciide_channel*)chp;
2804 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2805
2806 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
2807
2808 /* setup DMA if needed */
2809 pciide_channel_dma_setup(cp);
2810
2811 idedma_ctl = 0;
2812
2813 /* Per drive settings */
2814 for (drive = 0; drive < 2; drive++) {
2815 drvp = &chp->ch_drive[drive];
2816 /* If no drive, skip */
2817 if ((drvp->drive_flags & DRIVE) == 0)
2818 continue;
2819 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
2820 HPT_IDETIM(chp->channel, drive));
2821
2822 /* add timing values, setup DMA if needed */
2823 if (drvp->drive_flags & DRIVE_UDMA) {
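			/*
			 * If the cable-ID bit indicates no 80-wire cable is
			 * present, limit Ultra/DMA to mode 2, since the
			 * faster modes need an 80-wire cable.
			 */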
2824 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
2825 drvp->UDMA_mode > 2)
2826 drvp->UDMA_mode = 2;
2827 after = (sc->sc_wdcdev.nchannels == 2) ?
2828 hpt370_udma[drvp->UDMA_mode] :
2829 hpt366_udma[drvp->UDMA_mode];
2830 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2831 } else if (drvp->drive_flags & DRIVE_DMA) {
2832 /*
2833 * use Multiword DMA.
2834 * Timings will be used for both PIO and DMA, so adjust
2835 * DMA mode if needed
2836 */
2837 if (drvp->PIO_mode >= 3 &&
2838 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2839 drvp->DMA_mode = drvp->PIO_mode - 2;
2840 }
2841 after = (sc->sc_wdcdev.nchannels == 2) ?
2842 hpt370_dma[drvp->DMA_mode] :
2843 hpt366_dma[drvp->DMA_mode];
2844 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2845 } else {
2846 /* PIO only */
2847 after = (sc->sc_wdcdev.nchannels == 2) ?
2848 hpt370_pio[drvp->PIO_mode] :
2849 hpt366_pio[drvp->PIO_mode];
2850 }
2851 pci_conf_write(sc->sc_pc, sc->sc_tag,
2852 HPT_IDETIM(chp->channel, drive), after);
2853 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
2854 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
2855 after, before), DEBUG_PROBE);
2856 }
2857 if (idedma_ctl != 0) {
2858 /* Add software bits in status register */
2859 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2860 IDEDMA_CTL, idedma_ctl);
2861 }
2862 pciide_print_modes(cp);
2863 }
2864
2865 int
2866 hpt_pci_intr(arg)
2867 void *arg;
2868 {
2869 struct pciide_softc *sc = arg;
2870 struct pciide_channel *cp;
2871 struct channel_softc *wdc_cp;
2872 int rv = 0;
2873 int dmastat, i, crv;
2874
2875 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2876 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2877 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
2878 if((dmastat & IDEDMA_CTL_INTR) == 0)
2879 continue;
2880 cp = &sc->pciide_channels[i];
2881 wdc_cp = &cp->wdc_channel;
2882 crv = wdcintr(wdc_cp);
2883 if (crv == 0) {
2884 printf("%s:%d: bogus intr\n",
2885 sc->sc_wdcdev.sc_dev.dv_xname, i);
2886 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2887 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
2888 } else
2889 rv = 1;
2890 }
2891 return rv;
2892 }
2893
2894
2895 /* A macro to test whether the chip is a PDC20262 (Ultra/66) */
2896 #define PDC_IS_262(sc) ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66)
2897
2898 void
2899 pdc202xx_chip_map(sc, pa)
2900 struct pciide_softc *sc;
2901 struct pci_attach_args *pa;
2902 {
2903 struct pciide_channel *cp;
2904 int channel;
2905 pcireg_t interface, st, mode;
2906 bus_size_t cmdsize, ctlsize;
2907
2908 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2909 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
2910 DEBUG_PROBE);
2911 if (pciide_chipen(sc, pa) == 0)
2912 return;
2913
2914 /* turn off RAID mode */
2915 st &= ~PDC2xx_STATE_IDERAID;
2916
2917 /*
2918 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
2919 	 * RAID mode; we have to fake the interface.
2920 */
2921 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
2922 if (st & PDC2xx_STATE_NATIVE)
2923 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2924
2925 printf("%s: bus-master DMA support present",
2926 sc->sc_wdcdev.sc_dev.dv_xname);
2927 pciide_mapreg_dma(sc, pa);
2928 printf("\n");
2929 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2930 WDC_CAPABILITY_MODE;
2931 if (sc->sc_dma_ok) {
2932 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2933 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2934 sc->sc_wdcdev.irqack = pciide_irqack;
2935 }
2936 sc->sc_wdcdev.PIO_cap = 4;
2937 sc->sc_wdcdev.DMA_cap = 2;
2938 if (PDC_IS_262(sc))
2939 sc->sc_wdcdev.UDMA_cap = 4;
2940 else
2941 sc->sc_wdcdev.UDMA_cap = 2;
2942 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
2943 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2944 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2945
2946 /* setup failsafe defaults */
2947 mode = 0;
2948 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
2949 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
2950 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
2951 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
2952 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2953 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
2954 "initial timings 0x%x, now 0x%x\n", channel,
2955 pci_conf_read(sc->sc_pc, sc->sc_tag,
2956 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
2957 DEBUG_PROBE);
2958 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
2959 mode | PDC2xx_TIM_IORDYp);
2960 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
2961 "initial timings 0x%x, now 0x%x\n", channel,
2962 pci_conf_read(sc->sc_pc, sc->sc_tag,
2963 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
2964 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
2965 mode);
2966 }
2967
2968 mode = PDC2xx_SCR_DMA;
2969 if (PDC_IS_262(sc)) {
2970 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
2971 } else {
2972 /* the BIOS set it up this way */
2973 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
2974 }
2975 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
2976 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
2977 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
2978 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
2979 DEBUG_PROBE);
2980 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
2981
2982 /* controller initial state register is OK even without BIOS */
2983 /* Set DMA mode to IDE DMA compatibility */
2984 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
2985 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
2986 DEBUG_PROBE);
2987 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
2988 mode | 0x1);
2989 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
2990 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
2991 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
2992 mode | 0x1);
2993
2994 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2995 cp = &sc->pciide_channels[channel];
2996 if (pciide_chansetup(sc, channel, interface) == 0)
2997 continue;
2998 if ((st & (PDC_IS_262(sc) ?
2999 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3000 printf("%s: %s channel ignored (disabled)\n",
3001 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3002 continue;
3003 }
3004 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3005 pdc202xx_pci_intr);
3006 if (cp->hw_ok == 0)
3007 continue;
3008 if (pciide_chan_candisable(cp))
3009 st &= ~(PDC_IS_262(sc) ?
3010 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3011 pciide_map_compat_intr(pa, cp, channel, interface);
3012 pdc202xx_setup_channel(&cp->wdc_channel);
3013 }
3014 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3015 DEBUG_PROBE);
3016 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3017 return;
3018 }
3019
3020 void
3021 pdc202xx_setup_channel(chp)
3022 struct channel_softc *chp;
3023 {
3024 struct ata_drive_datas *drvp;
3025 int drive;
3026 pcireg_t mode, st;
3027 u_int32_t idedma_ctl, scr, atapi;
3028 struct pciide_channel *cp = (struct pciide_channel*)chp;
3029 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3030 int channel = chp->channel;
3031
3032 /* setup DMA if needed */
3033 pciide_channel_dma_setup(cp);
3034
3035 idedma_ctl = 0;
3036
3037 /* Per channel settings */
3038 if (PDC_IS_262(sc)) {
3039 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3040 PDC262_U66);
3041 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3042 		/* Trim UDMA mode */
3043 if ((st & PDC262_STATE_80P(channel)) == 0 ||
3044 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3045 chp->ch_drive[0].UDMA_mode <= 2) ||
3046 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3047 chp->ch_drive[1].UDMA_mode <= 2)) {
3048 if (chp->ch_drive[0].UDMA_mode > 2)
3049 chp->ch_drive[0].UDMA_mode = 2;
3050 if (chp->ch_drive[1].UDMA_mode > 2)
3051 chp->ch_drive[1].UDMA_mode = 2;
3052 }
3053 /* Set U66 if needed */
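		/*
		 * (U66 apparently selects the Ultra/66 clocking required for
		 * UDMA modes above 2; it is left off when no drive on the
		 * channel uses such a mode.)
		 */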
3054 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3055 chp->ch_drive[0].UDMA_mode > 2) ||
3056 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3057 chp->ch_drive[1].UDMA_mode > 2))
3058 scr |= PDC262_U66_EN(channel);
3059 else
3060 scr &= ~PDC262_U66_EN(channel);
3061 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3062 PDC262_U66, scr);
3063 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3064 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
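			/*
			 * Enable UDMA timing for ATAPI devices unless the
			 * channel mixes one UDMA drive with one multi-word
			 * DMA drive, which the chip apparently can't handle.
			 */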
3065 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3066 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3067 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3068 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3069 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3070 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3071 atapi = 0;
3072 else
3073 atapi = PDC262_ATAPI_UDMA;
3074 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3075 PDC262_ATAPI(channel), atapi);
3076 }
3077 }
3078 for (drive = 0; drive < 2; drive++) {
3079 drvp = &chp->ch_drive[drive];
3080 /* If no drive, skip */
3081 if ((drvp->drive_flags & DRIVE) == 0)
3082 continue;
3083 mode = 0;
3084 if (drvp->drive_flags & DRIVE_UDMA) {
3085 mode = PDC2xx_TIM_SET_MB(mode,
3086 pdc2xx_udma_mb[drvp->UDMA_mode]);
3087 mode = PDC2xx_TIM_SET_MC(mode,
3088 pdc2xx_udma_mc[drvp->UDMA_mode]);
3089 drvp->drive_flags &= ~DRIVE_DMA;
3090 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3091 } else if (drvp->drive_flags & DRIVE_DMA) {
3092 mode = PDC2xx_TIM_SET_MB(mode,
3093 pdc2xx_dma_mb[drvp->DMA_mode]);
3094 mode = PDC2xx_TIM_SET_MC(mode,
3095 pdc2xx_dma_mc[drvp->DMA_mode]);
3096 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3097 } else {
3098 mode = PDC2xx_TIM_SET_MB(mode,
3099 pdc2xx_dma_mb[0]);
3100 mode = PDC2xx_TIM_SET_MC(mode,
3101 pdc2xx_dma_mc[0]);
3102 }
3103 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3104 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3105 if (drvp->drive_flags & DRIVE_ATA)
3106 mode |= PDC2xx_TIM_PRE;
3107 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3108 if (drvp->PIO_mode >= 3) {
3109 mode |= PDC2xx_TIM_IORDY;
3110 if (drive == 0)
3111 mode |= PDC2xx_TIM_IORDYp;
3112 }
3113 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3114 "timings 0x%x\n",
3115 sc->sc_wdcdev.sc_dev.dv_xname,
3116 chp->channel, drive, mode), DEBUG_PROBE);
3117 pci_conf_write(sc->sc_pc, sc->sc_tag,
3118 PDC2xx_TIM(chp->channel, drive), mode);
3119 }
3120 if (idedma_ctl != 0) {
3121 /* Add software bits in status register */
3122 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3123 IDEDMA_CTL, idedma_ctl);
3124 }
3125 pciide_print_modes(cp);
3126 }
3127
3128 int
3129 pdc202xx_pci_intr(arg)
3130 void *arg;
3131 {
3132 struct pciide_softc *sc = arg;
3133 struct pciide_channel *cp;
3134 struct channel_softc *wdc_cp;
3135 int i, rv, crv;
3136 u_int32_t scr;
3137
3138 rv = 0;
3139 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3140 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3141 cp = &sc->pciide_channels[i];
3142 wdc_cp = &cp->wdc_channel;
3143 /* If a compat channel skip. */
3144 if (cp->compat)
3145 continue;
3146 if (scr & PDC2xx_SCR_INT(i)) {
3147 crv = wdcintr(wdc_cp);
3148 if (crv == 0)
3149 printf("%s:%d: bogus intr\n",
3150 sc->sc_wdcdev.sc_dev.dv_xname, i);
3151 else
3152 rv = 1;
3153 }
3154 }
3155 return rv;
3156 }
3157
3158 void
3159 opti_chip_map(sc, pa)
3160 struct pciide_softc *sc;
3161 struct pci_attach_args *pa;
3162 {
3163 struct pciide_channel *cp;
3164 bus_size_t cmdsize, ctlsize;
3165 pcireg_t interface;
3166 u_int8_t init_ctrl;
3167 int channel;
3168
3169 if (pciide_chipen(sc, pa) == 0)
3170 return;
3171 printf("%s: bus-master DMA support present",
3172 sc->sc_wdcdev.sc_dev.dv_xname);
3173 pciide_mapreg_dma(sc, pa);
3174 printf("\n");
3175
3176 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3177 WDC_CAPABILITY_MODE;
3178 sc->sc_wdcdev.PIO_cap = 4;
3179 if (sc->sc_dma_ok) {
3180 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3181 sc->sc_wdcdev.irqack = pciide_irqack;
3182 sc->sc_wdcdev.DMA_cap = 2;
3183 }
3184 sc->sc_wdcdev.set_modes = opti_setup_channel;
3185
3186 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3187 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3188
3189 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3190 OPTI_REG_INIT_CONTROL);
3191
3192 interface = PCI_INTERFACE(pa->pa_class);
3193
3194 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3195 cp = &sc->pciide_channels[channel];
3196 if (pciide_chansetup(sc, channel, interface) == 0)
3197 continue;
3198 if (channel == 1 &&
3199 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3200 printf("%s: %s channel ignored (disabled)\n",
3201 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3202 continue;
3203 }
3204 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3205 pciide_pci_intr);
3206 if (cp->hw_ok == 0)
3207 continue;
3208 pciide_map_compat_intr(pa, cp, channel, interface);
3209 if (cp->hw_ok == 0)
3210 continue;
3211 opti_setup_channel(&cp->wdc_channel);
3212 }
3213 }
3214
3215 void
3216 opti_setup_channel(chp)
3217 struct channel_softc *chp;
3218 {
3219 struct ata_drive_datas *drvp;
3220 struct pciide_channel *cp = (struct pciide_channel*)chp;
3221 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3222 int drive, spd;
3223 int mode[2];
3224 u_int8_t rv, mr;
3225
3226 /*
3227 * The `Delay' and `Address Setup Time' fields of the
3228 * Miscellaneous Register are always zero initially.
3229 */
3230 	mr = opti_read_config(chp, OPTI_REG_MISC);
3231 mr &= ~(OPTI_MISC_DELAY_MASK |
3232 OPTI_MISC_ADDR_SETUP_MASK |
3233 OPTI_MISC_INDEX_MASK);
3234
3235 /* Prime the control register before setting timing values */
3236 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3237
3238 /* Determine the clockrate of the PCIbus the chip is attached to */
3239 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3240 spd &= OPTI_STRAP_PCI_SPEED_MASK;
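	/*
	 * (The opti_tim_* tables are indexed by this strap value, since the
	 * timing register values presumably count PCI clocks.)
	 */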
3241
3242 /* setup DMA if needed */
3243 pciide_channel_dma_setup(cp);
3244
3245 for (drive = 0; drive < 2; drive++) {
3246 drvp = &chp->ch_drive[drive];
3247 /* If no drive, skip */
3248 if ((drvp->drive_flags & DRIVE) == 0) {
3249 mode[drive] = -1;
3250 continue;
3251 }
3252
3253 if ((drvp->drive_flags & DRIVE_DMA)) {
3254 /*
3255 * Timings will be used for both PIO and DMA,
3256 * so adjust DMA mode if needed
3257 */
3258 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3259 drvp->PIO_mode = drvp->DMA_mode + 2;
3260 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3261 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3262 drvp->PIO_mode - 2 : 0;
3263 if (drvp->DMA_mode == 0)
3264 drvp->PIO_mode = 0;
3265
3266 mode[drive] = drvp->DMA_mode + 5;
3267 } else
3268 mode[drive] = drvp->PIO_mode;
3269
3270 if (drive && mode[0] >= 0 &&
3271 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3272 /*
3273 * Can't have two drives using different values
3274 * for `Address Setup Time'.
3275 * Slow down the faster drive to compensate.
3276 */
3277 int d = (opti_tim_as[spd][mode[0]] >
3278 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3279
3280 mode[d] = mode[1-d];
3281 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3282 chp->ch_drive[d].DMA_mode = 0;
3283 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3284 }
3285 }
3286
3287 for (drive = 0; drive < 2; drive++) {
3288 int m;
3289 if ((m = mode[drive]) < 0)
3290 continue;
3291
3292 /* Set the Address Setup Time and select appropriate index */
3293 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3294 rv |= OPTI_MISC_INDEX(drive);
3295 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3296
3297 /* Set the pulse width and recovery timing parameters */
3298 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3299 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3300 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3301 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3302
3303 /* Set the Enhanced Mode register appropriately */
3304 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3305 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3306 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3307 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3308 }
3309
3310 /* Finally, enable the timings */
3311 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3312
3313 pciide_print_modes(cp);
3314 }
3315