1 /*	$NetBSD: pciide.c,v 1.71 2000/06/26 14:21:12 mrg Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
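/*
 * Setting the corresponding bits in wdcdebug_pciide_mask (e.g. from the
 * kernel debugger, or by patching the kernel) selects which of the
 * WDCDEBUG_PRINT() messages below are emitted when WDCDEBUG is defined.
 */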
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102
103 #include <dev/pci/pcireg.h>
104 #include <dev/pci/pcivar.h>
105 #include <dev/pci/pcidevs.h>
106 #include <dev/pci/pciidereg.h>
107 #include <dev/pci/pciidevar.h>
108 #include <dev/pci/pciide_piix_reg.h>
109 #include <dev/pci/pciide_amd_reg.h>
110 #include <dev/pci/pciide_apollo_reg.h>
111 #include <dev/pci/pciide_cmd_reg.h>
112 #include <dev/pci/pciide_cy693_reg.h>
113 #include <dev/pci/pciide_sis_reg.h>
114 #include <dev/pci/pciide_acer_reg.h>
115 #include <dev/pci/pciide_pdc202xx_reg.h>
116 #include <dev/pci/pciide_opti_reg.h>
117 #include <dev/pci/pciide_hpt_reg.h>
118 #include <dev/pci/cy82c693var.h>
119
120 /* inlines for reading/writing 8-bit PCI registers */
121 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
122 int));
123 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
124 int, u_int8_t));
125
126 static __inline u_int8_t
127 pciide_pci_read(pc, pa, reg)
128 pci_chipset_tag_t pc;
129 pcitag_t pa;
130 int reg;
131 {
132
133 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
134 ((reg & 0x03) * 8) & 0xff);
135 }
136
137 static __inline void
138 pciide_pci_write(pc, pa, reg, val)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 u_int8_t val;
143 {
144 pcireg_t pcival;
145
146 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
147 pcival &= ~(0xff << ((reg & 0x03) * 8));
148 pcival |= (val << ((reg & 0x03) * 8));
149 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
150 }
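/*
 * Illustrative example (the register offset is made up): a call such as
 * pciide_pci_write(pc, tag, 0x41, 0x80) reads the 32-bit configuration
 * dword at offset 0x40, replaces byte 1 with 0x80 and writes the dword
 * back, leaving the other three bytes untouched. Chip-specific code uses
 * these helpers for the byte-wide registers defined in the various
 * pciide_*_reg.h headers.
 */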
151
152 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
153
154 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
155 void piix_setup_channel __P((struct channel_softc*));
156 void piix3_4_setup_channel __P((struct channel_softc*));
157 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
158 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
159 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
160
161 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
162 void amd756_setup_channel __P((struct channel_softc*));
163
164 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
165 void apollo_setup_channel __P((struct channel_softc*));
166
167 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
169 void cmd0643_9_setup_channel __P((struct channel_softc*));
170 void cmd_channel_map __P((struct pci_attach_args *,
171 struct pciide_softc *, int));
172 int cmd_pci_intr __P((void *));
173
174 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cy693_setup_channel __P((struct channel_softc*));
176
177 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void sis_setup_channel __P((struct channel_softc*));
179
180 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void acer_setup_channel __P((struct channel_softc*));
182 int acer_pci_intr __P((void *));
183
184 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void pdc202xx_setup_channel __P((struct channel_softc*));
186 int pdc202xx_pci_intr __P((void *));
187
188 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void opti_setup_channel __P((struct channel_softc*));
190
191 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
192 void hpt_setup_channel __P((struct channel_softc*));
193 int hpt_pci_intr __P((void *));
194
195 void pciide_channel_dma_setup __P((struct pciide_channel *));
196 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
197 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
198 void pciide_dma_start __P((void*, int, int));
199 int pciide_dma_finish __P((void*, int, int, int));
200 void pciide_irqack __P((struct channel_softc *));
201 void pciide_print_modes __P((struct pciide_channel *));
202
203 struct pciide_product_desc {
204 u_int32_t ide_product;
205 int ide_flags;
206 const char *ide_name;
207 /* map and setup chip, probe drives */
208 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
209 };
210
211 /* Flags for ide_flags */
212 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
213
214 /* Default product description for devices not known to this driver */
215 const struct pciide_product_desc default_product_desc = {
216 0,
217 0,
218 "Generic PCI IDE controller",
219 default_chip_map,
220 };
221
222 const struct pciide_product_desc pciide_intel_products[] = {
223 { PCI_PRODUCT_INTEL_82092AA,
224 0,
225 "Intel 82092AA IDE controller",
226 default_chip_map,
227 },
228 { PCI_PRODUCT_INTEL_82371FB_IDE,
229 0,
230 "Intel 82371FB IDE controller (PIIX)",
231 piix_chip_map,
232 },
233 { PCI_PRODUCT_INTEL_82371SB_IDE,
234 0,
235 "Intel 82371SB IDE Interface (PIIX3)",
236 piix_chip_map,
237 },
238 { PCI_PRODUCT_INTEL_82371AB_IDE,
239 0,
240 "Intel 82371AB IDE controller (PIIX4)",
241 piix_chip_map,
242 },
243 { PCI_PRODUCT_INTEL_82801AA_IDE,
244 0,
245 "Intel 82801AA IDE Controller (ICH)",
246 piix_chip_map,
247 },
248 { PCI_PRODUCT_INTEL_82801AB_IDE,
249 0,
250 "Intel 82801AB IDE Controller (ICH0)",
251 piix_chip_map,
252 },
253 { 0,
254 0,
255 NULL,
256 }
257 };
258
259 const struct pciide_product_desc pciide_amd_products[] = {
260 { PCI_PRODUCT_AMD_PBC756_IDE,
261 0,
262 "Advanced Micro Devices AMD756 IDE Controller",
263 amd756_chip_map
264 },
265 { 0,
266 0,
267 NULL,
268 }
269 };
270
271 const struct pciide_product_desc pciide_cmd_products[] = {
272 { PCI_PRODUCT_CMDTECH_640,
273 0,
274 "CMD Technology PCI0640",
275 cmd_chip_map
276 },
277 { PCI_PRODUCT_CMDTECH_643,
278 0,
279 "CMD Technology PCI0643",
280 cmd0643_9_chip_map,
281 },
282 { PCI_PRODUCT_CMDTECH_646,
283 0,
284 "CMD Technology PCI0646",
285 cmd0643_9_chip_map,
286 },
287 { PCI_PRODUCT_CMDTECH_648,
288 IDE_PCI_CLASS_OVERRIDE,
289 "CMD Technology PCI0648",
290 cmd0643_9_chip_map,
291 },
292 { PCI_PRODUCT_CMDTECH_649,
293 IDE_PCI_CLASS_OVERRIDE,
294 "CMD Technology PCI0649",
295 cmd0643_9_chip_map,
296 },
297 { 0,
298 0,
299 NULL,
300 }
301 };
302
303 const struct pciide_product_desc pciide_via_products[] = {
304 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
305 0,
306 "VIA Tech VT82C586 IDE Controller",
307 apollo_chip_map,
308 },
309 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
310 0,
311 "VIA Tech VT82C586A IDE Controller",
312 apollo_chip_map,
313 },
314 { 0,
315 0,
316 NULL,
317 }
318 };
319
320 const struct pciide_product_desc pciide_cypress_products[] = {
321 { PCI_PRODUCT_CONTAQ_82C693,
322 0,
323 "Cypress 82C693 IDE Controller",
324 cy693_chip_map,
325 },
326 { 0,
327 0,
328 NULL,
329 }
330 };
331
332 const struct pciide_product_desc pciide_sis_products[] = {
333 { PCI_PRODUCT_SIS_5597_IDE,
334 0,
335 "Silicon Integrated System 5597/5598 IDE controller",
336 sis_chip_map,
337 },
338 { 0,
339 0,
340 NULL,
341 }
342 };
343
344 const struct pciide_product_desc pciide_acer_products[] = {
345 { PCI_PRODUCT_ALI_M5229,
346 0,
347 "Acer Labs M5229 UDMA IDE Controller",
348 acer_chip_map,
349 },
350 { 0,
351 0,
352 NULL,
353 }
354 };
355
356 const struct pciide_product_desc pciide_promise_products[] = {
357 { PCI_PRODUCT_PROMISE_ULTRA33,
358 IDE_PCI_CLASS_OVERRIDE,
359 "Promise Ultra33/ATA Bus Master IDE Accelerator",
360 pdc202xx_chip_map,
361 },
362 { PCI_PRODUCT_PROMISE_ULTRA66,
363 IDE_PCI_CLASS_OVERRIDE,
364 "Promise Ultra66/ATA Bus Master IDE Accelerator",
365 pdc202xx_chip_map,
366 },
367 { 0,
368 0,
369 NULL,
370 }
371 };
372
373 const struct pciide_product_desc pciide_opti_products[] = {
374 { PCI_PRODUCT_OPTI_82C621,
375 0,
376 "OPTi 82c621 PCI IDE controller",
377 opti_chip_map,
378 },
379 { PCI_PRODUCT_OPTI_82C568,
380 0,
381 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
382 opti_chip_map,
383 },
384 { PCI_PRODUCT_OPTI_82D568,
385 0,
386 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
387 opti_chip_map,
388 },
389 { 0,
390 0,
391 NULL,
392 }
393 };
394
395 const struct pciide_product_desc pciide_triones_products[] = {
396 { PCI_PRODUCT_TRIONES_HPT366,
397 IDE_PCI_CLASS_OVERRIDE,
398 "Triones/Highpoint HPT366/370 IDE Controller",
399 hpt_chip_map,
400 },
401 { 0,
402 0,
403 NULL,
404 }
405 };
406
407 struct pciide_vendor_desc {
408 u_int32_t ide_vendor;
409 const struct pciide_product_desc *ide_products;
410 };
411
412 const struct pciide_vendor_desc pciide_vendors[] = {
413 { PCI_VENDOR_INTEL, pciide_intel_products },
414 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
415 { PCI_VENDOR_VIATECH, pciide_via_products },
416 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
417 { PCI_VENDOR_SIS, pciide_sis_products },
418 { PCI_VENDOR_ALI, pciide_acer_products },
419 { PCI_VENDOR_PROMISE, pciide_promise_products },
420 { PCI_VENDOR_AMD, pciide_amd_products },
421 { PCI_VENDOR_OPTI, pciide_opti_products },
422 { PCI_VENDOR_TRIONES, pciide_triones_products },
423 { 0, NULL }
424 };
425
426 /* options passed via the 'flags' config keyword */
427 #define PCIIDE_OPTIONS_DMA 0x01
428
429 int pciide_match __P((struct device *, struct cfdata *, void *));
430 void pciide_attach __P((struct device *, struct device *, void *));
431
432 struct cfattach pciide_ca = {
433 sizeof(struct pciide_softc), pciide_match, pciide_attach
434 };
435 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
436 int pciide_mapregs_compat __P(( struct pci_attach_args *,
437 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
438 int pciide_mapregs_native __P((struct pci_attach_args *,
439 struct pciide_channel *, bus_size_t *, bus_size_t *,
440 int (*pci_intr) __P((void *))));
441 void pciide_mapreg_dma __P((struct pciide_softc *,
442 struct pci_attach_args *));
443 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
444 void pciide_mapchan __P((struct pci_attach_args *,
445 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
446 int (*pci_intr) __P((void *))));
447 int pciide_chan_candisable __P((struct pciide_channel *));
448 void pciide_map_compat_intr __P(( struct pci_attach_args *,
449 struct pciide_channel *, int, int));
450 int pciide_print __P((void *, const char *pnp));
451 int pciide_compat_intr __P((void *));
452 int pciide_pci_intr __P((void *));
453 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
454
455 const struct pciide_product_desc *
456 pciide_lookup_product(id)
457 u_int32_t id;
458 {
459 const struct pciide_product_desc *pp;
460 const struct pciide_vendor_desc *vp;
461
462 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
463 if (PCI_VENDOR(id) == vp->ide_vendor)
464 break;
465
466 if ((pp = vp->ide_products) == NULL)
467 return NULL;
468
469 for (; pp->ide_name != NULL; pp++)
470 if (PCI_PRODUCT(id) == pp->ide_product)
471 break;
472
473 if (pp->ide_name == NULL)
474 return NULL;
475 return pp;
476 }
477
478 int
479 pciide_match(parent, match, aux)
480 struct device *parent;
481 struct cfdata *match;
482 void *aux;
483 {
484 struct pci_attach_args *pa = aux;
485 const struct pciide_product_desc *pp;
486
487 /*
488 * Check the ID register to see that it's a PCI IDE controller.
489 * If it is, we assume that we can deal with it; it _should_
490 * work in a standardized way...
491 */
492 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
493 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
494 return (1);
495 }
496
497 /*
498 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
499 * controllers. Let's see if we can deal with them anyway.
500 */
501 pp = pciide_lookup_product(pa->pa_id);
502 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
503 return (1);
504 }
505
506 return (0);
507 }
508
509 void
510 pciide_attach(parent, self, aux)
511 struct device *parent, *self;
512 void *aux;
513 {
514 struct pci_attach_args *pa = aux;
515 pci_chipset_tag_t pc = pa->pa_pc;
516 pcitag_t tag = pa->pa_tag;
517 struct pciide_softc *sc = (struct pciide_softc *)self;
518 pcireg_t csr;
519 char devinfo[256];
520 const char *displaydev;
521
522 sc->sc_pp = pciide_lookup_product(pa->pa_id);
523 if (sc->sc_pp == NULL) {
524 sc->sc_pp = &default_product_desc;
525 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
526 displaydev = devinfo;
527 } else
528 displaydev = sc->sc_pp->ide_name;
529
530 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
531
532 sc->sc_pc = pa->pa_pc;
533 sc->sc_tag = pa->pa_tag;
534 #ifdef WDCDEBUG
535 if (wdcdebug_pciide_mask & DEBUG_PROBE)
536 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
537 #endif
538 sc->sc_pp->chip_map(sc, pa);
539
540 if (sc->sc_dma_ok) {
541 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
542 csr |= PCI_COMMAND_MASTER_ENABLE;
543 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
544 }
545 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
546 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
547 }
548
549 /* tell whether the chip is enabled or not */
550 int
551 pciide_chipen(sc, pa)
552 struct pciide_softc *sc;
553 struct pci_attach_args *pa;
554 {
555 pcireg_t csr;
556 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
557 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
558 PCI_COMMAND_STATUS_REG);
559 printf("%s: device disabled (at %s)\n",
560 sc->sc_wdcdev.sc_dev.dv_xname,
561 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
562 "device" : "bridge");
563 return 0;
564 }
565 return 1;
566 }
567
568 int
569 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
570 struct pci_attach_args *pa;
571 struct pciide_channel *cp;
572 int compatchan;
573 bus_size_t *cmdsizep, *ctlsizep;
574 {
575 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
576 struct channel_softc *wdc_cp = &cp->wdc_channel;
577
578 cp->compat = 1;
579 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
580 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
581
582 wdc_cp->cmd_iot = pa->pa_iot;
583 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
584 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
585 printf("%s: couldn't map %s channel cmd regs\n",
586 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
587 return (0);
588 }
589
590 wdc_cp->ctl_iot = pa->pa_iot;
591 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
592 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
593 printf("%s: couldn't map %s channel ctl regs\n",
594 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
595 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
596 PCIIDE_COMPAT_CMD_SIZE);
597 return (0);
598 }
599
600 return (1);
601 }
602
603 int
604 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
605 struct pci_attach_args * pa;
606 struct pciide_channel *cp;
607 bus_size_t *cmdsizep, *ctlsizep;
608 int (*pci_intr) __P((void *));
609 {
610 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
611 struct channel_softc *wdc_cp = &cp->wdc_channel;
612 const char *intrstr;
613 pci_intr_handle_t intrhandle;
614
615 cp->compat = 0;
616
617 if (sc->sc_pci_ih == NULL) {
618 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
619 pa->pa_intrline, &intrhandle) != 0) {
620 printf("%s: couldn't map native-PCI interrupt\n",
621 sc->sc_wdcdev.sc_dev.dv_xname);
622 return 0;
623 }
624 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
625 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
626 intrhandle, IPL_BIO, pci_intr, sc);
627 if (sc->sc_pci_ih != NULL) {
628 printf("%s: using %s for native-PCI interrupt\n",
629 sc->sc_wdcdev.sc_dev.dv_xname,
630 intrstr ? intrstr : "unknown interrupt");
631 } else {
632 printf("%s: couldn't establish native-PCI interrupt",
633 sc->sc_wdcdev.sc_dev.dv_xname);
634 if (intrstr != NULL)
635 printf(" at %s", intrstr);
636 printf("\n");
637 return 0;
638 }
639 }
640 cp->ih = sc->sc_pci_ih;
641 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
642 PCI_MAPREG_TYPE_IO, 0,
643 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
644 printf("%s: couldn't map %s channel cmd regs\n",
645 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
646 return 0;
647 }
648
649 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
650 PCI_MAPREG_TYPE_IO, 0,
651 &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) {
652 printf("%s: couldn't map %s channel ctl regs\n",
653 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
654 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
655 return 0;
656 }
657 return (1);
658 }
659
660 void
661 pciide_mapreg_dma(sc, pa)
662 struct pciide_softc *sc;
663 struct pci_attach_args *pa;
664 {
665 pcireg_t maptype;
666
667 /*
668 * Map DMA registers
669 *
670 * Note that sc_dma_ok is the right variable to test to see if
671 * DMA can be done. If the interface doesn't support DMA,
672 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
673 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
674 * non-zero if the interface supports DMA and the registers
675 * could be mapped.
676 *
677 * XXX Note that despite the fact that the Bus Master IDE specs
678 * XXX say that "The bus master IDE function uses 16 bytes of IO
679 * XXX space," some controllers (at least the United
680 * XXX Microelectronics UM8886BF) place it in memory space.
681 */
682 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
683 PCIIDE_REG_BUS_MASTER_DMA);
684
685 switch (maptype) {
686 case PCI_MAPREG_TYPE_IO:
687 case PCI_MAPREG_MEM_TYPE_32BIT:
688 sc->sc_dma_ok = (pci_mapreg_map(pa,
689 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
690 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
691 sc->sc_dmat = pa->pa_dmat;
692 if (sc->sc_dma_ok == 0) {
693 printf(", but unused (couldn't map registers)");
694 } else {
695 sc->sc_wdcdev.dma_arg = sc;
696 sc->sc_wdcdev.dma_init = pciide_dma_init;
697 sc->sc_wdcdev.dma_start = pciide_dma_start;
698 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
699 }
700 break;
701
702 default:
703 sc->sc_dma_ok = 0;
704 printf(", but unsupported register maptype (0x%x)", maptype);
705 }
706 }
707
708 int
709 pciide_compat_intr(arg)
710 void *arg;
711 {
712 struct pciide_channel *cp = arg;
713
714 #ifdef DIAGNOSTIC
715 /* should only be called for a compat channel */
716 if (cp->compat == 0)
717 panic("pciide compat intr called for non-compat chan %p\n", cp);
718 #endif
719 return (wdcintr(&cp->wdc_channel));
720 }
721
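/*
 * Interrupt handler for channels mapped in native-PCI mode. Both channels
 * may share the single PCI interrupt established in pciide_mapregs_native(),
 * so poll every non-compat channel that is waiting for an interrupt
 * (WDCF_IRQ_WAIT) and let wdcintr() decide whether it was the source.
 */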
722 int
723 pciide_pci_intr(arg)
724 void *arg;
725 {
726 struct pciide_softc *sc = arg;
727 struct pciide_channel *cp;
728 struct channel_softc *wdc_cp;
729 int i, rv, crv;
730
731 rv = 0;
732 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
733 cp = &sc->pciide_channels[i];
734 wdc_cp = &cp->wdc_channel;
735
736 /* If a compat channel, skip. */
737 if (cp->compat)
738 continue;
739 /* if this channel is not waiting for an intr, skip */
740 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
741 continue;
742
743 crv = wdcintr(wdc_cp);
744 if (crv == 0)
745 ; /* leave rv alone */
746 else if (crv == 1)
747 rv = 1; /* claim the intr */
748 else if (rv == 0) /* crv should be -1 in this case */
749 rv = crv; /* if we've done no better, take it */
750 }
751 return (rv);
752 }
753
754 void
755 pciide_channel_dma_setup(cp)
756 struct pciide_channel *cp;
757 {
758 int drive;
759 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
760 struct ata_drive_datas *drvp;
761
762 for (drive = 0; drive < 2; drive++) {
763 drvp = &cp->wdc_channel.ch_drive[drive];
764 /* If no drive, skip */
765 if ((drvp->drive_flags & DRIVE) == 0)
766 continue;
767 /* setup DMA if needed */
768 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
769 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
770 sc->sc_dma_ok == 0) {
771 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
772 continue;
773 }
774 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
775 != 0) {
776 /* Abort DMA setup */
777 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
778 continue;
779 }
780 }
781 }
782
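/*
 * Allocate and map the bus-master IDE descriptor table for one drive.
 * Per the Bus Master IDE specification referenced at the top of this file,
 * the table is a list of physical region descriptors (a 32-bit base
 * address plus a byte count each), with IDEDMA_BYTE_COUNT_EOT marking the
 * last entry; pciide_dma_init() fills it in for each transfer.
 */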
783 int
784 pciide_dma_table_setup(sc, channel, drive)
785 struct pciide_softc *sc;
786 int channel, drive;
787 {
788 bus_dma_segment_t seg;
789 int error, rseg;
790 const bus_size_t dma_table_size =
791 sizeof(struct idedma_table) * NIDEDMA_TABLES;
792 struct pciide_dma_maps *dma_maps =
793 &sc->pciide_channels[channel].dma_maps[drive];
794
795 /* If table was already allocated, just return */
796 if (dma_maps->dma_table)
797 return 0;
798
799 /* Allocate memory for the DMA tables and map it */
800 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
801 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
802 BUS_DMA_NOWAIT)) != 0) {
803 printf("%s:%d: unable to allocate table DMA for "
804 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
805 channel, drive, error);
806 return error;
807 }
808 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
809 dma_table_size,
810 (caddr_t *)&dma_maps->dma_table,
811 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
812 printf("%s:%d: unable to map table DMA for"
813 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
814 channel, drive, error);
815 return error;
816 }
817 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
818 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
819 seg.ds_addr), DEBUG_PROBE);
820
821 /* Create and load table DMA map for this disk */
822 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
823 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
824 &dma_maps->dmamap_table)) != 0) {
825 printf("%s:%d: unable to create table DMA map for "
826 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
827 channel, drive, error);
828 return error;
829 }
830 if ((error = bus_dmamap_load(sc->sc_dmat,
831 dma_maps->dmamap_table,
832 dma_maps->dma_table,
833 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
834 printf("%s:%d: unable to load table DMA map for "
835 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
836 channel, drive, error);
837 return error;
838 }
839 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
840 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
841 /* Create a xfer DMA map for this drive */
842 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
843 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
844 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
845 &dma_maps->dmamap_xfer)) != 0) {
846 printf("%s:%d: unable to create xfer DMA map for "
847 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
848 channel, drive, error);
849 return error;
850 }
851 return 0;
852 }
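/*
 * Note that the xfer map above is created with a boundary of
 * IDEDMA_BYTE_COUNT_ALIGN, so bus_dma guarantees that no segment crosses
 * that boundary (the DIAGNOSTIC code in pciide_dma_init() double-checks
 * this), and with at most NIDEDMA_TABLES segments, so every segment has a
 * slot in the descriptor table.
 */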
853
854 int
855 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
856 void *v;
857 int channel, drive;
858 void *databuf;
859 size_t datalen;
860 int flags;
861 {
862 struct pciide_softc *sc = v;
863 int error, seg;
864 struct pciide_dma_maps *dma_maps =
865 &sc->pciide_channels[channel].dma_maps[drive];
866
867 error = bus_dmamap_load(sc->sc_dmat,
868 dma_maps->dmamap_xfer,
869 databuf, datalen, NULL, BUS_DMA_NOWAIT);
870 if (error) {
871 printf("%s:%d: unable to load xfer DMA map for"
872 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
873 channel, drive, error);
874 return error;
875 }
876
877 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
878 dma_maps->dmamap_xfer->dm_mapsize,
879 (flags & WDC_DMA_READ) ?
880 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
881
882 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
883 #ifdef DIAGNOSTIC
884 /* A segment must not cross a 64k boundary */
885 {
886 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
887 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
888 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
889 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
890 printf("pciide_dma: segment %d physical addr 0x%lx"
891 " len 0x%lx not properly aligned\n",
892 seg, phys, len);
893 panic("pciide_dma: buf align");
894 }
895 }
896 #endif
897 dma_maps->dma_table[seg].base_addr =
898 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
899 dma_maps->dma_table[seg].byte_count =
900 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
901 IDEDMA_BYTE_COUNT_MASK);
902 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
903 seg, le32toh(dma_maps->dma_table[seg].byte_count),
904 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
905
906 }
907 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
908 htole32(IDEDMA_BYTE_COUNT_EOT);
909
910 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
911 dma_maps->dmamap_table->dm_mapsize,
912 BUS_DMASYNC_PREWRITE);
913
914 /* Maps are ready. Start DMA function */
915 #ifdef DIAGNOSTIC
916 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
917 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
918 dma_maps->dmamap_table->dm_segs[0].ds_addr);
919 panic("pciide_dma_init: table align");
920 }
921 #endif
922
923 /* Clear status bits */
924 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
925 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
926 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
927 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
928 /* Write table addr */
929 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
930 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
931 dma_maps->dmamap_table->dm_segs[0].ds_addr);
932 /* set read/write */
933 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
934 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
935 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
936 /* remember flags */
937 dma_maps->dma_flags = flags;
938 return 0;
939 }
940
941 void
942 pciide_dma_start(v, channel, drive)
943 void *v;
944 int channel, drive;
945 {
946 struct pciide_softc *sc = v;
947
948 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
949 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
950 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
951 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
952 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
953 }
954
955 int
956 pciide_dma_finish(v, channel, drive, force)
957 void *v;
958 int channel, drive;
959 int force;
960 {
961 struct pciide_softc *sc = v;
962 u_int8_t status;
963 int error = 0;
964 struct pciide_dma_maps *dma_maps =
965 &sc->pciide_channels[channel].dma_maps[drive];
966
967 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
968 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
969 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
970 DEBUG_XFERS);
971
972 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
973 return WDC_DMAST_NOIRQ;
974
975 /* stop DMA channel */
976 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
977 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
978 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
979 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
980
981 /* Unload the map of the data buffer */
982 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
983 dma_maps->dmamap_xfer->dm_mapsize,
984 (dma_maps->dma_flags & WDC_DMA_READ) ?
985 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
986 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
987
988 if ((status & IDEDMA_CTL_ERR) != 0) {
989 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
990 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
991 error |= WDC_DMAST_ERR;
992 }
993
994 if ((status & IDEDMA_CTL_INTR) == 0) {
995 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
996 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
997 drive, status);
998 error |= WDC_DMAST_NOIRQ;
999 }
1000
1001 if ((status & IDEDMA_CTL_ACT) != 0) {
1002 /* data underrun, may be a valid condition for ATAPI */
1003 error |= WDC_DMAST_UNDER;
1004 }
1005 return error;
1006 }
1007
1008 void
1009 pciide_irqack(chp)
1010 struct channel_softc *chp;
1011 {
1012 struct pciide_channel *cp = (struct pciide_channel*)chp;
1013 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1014
1015 /* clear status bits in IDE DMA registers */
1016 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1017 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1018 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1019 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1020 }
1021
1022 /* some common code used by several chip_map functions */
1023 int
1024 pciide_chansetup(sc, channel, interface)
1025 struct pciide_softc *sc;
1026 int channel;
1027 pcireg_t interface;
1028 {
1029 struct pciide_channel *cp = &sc->pciide_channels[channel];
1030 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1031 cp->name = PCIIDE_CHANNEL_NAME(channel);
1032 cp->wdc_channel.channel = channel;
1033 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1034 cp->wdc_channel.ch_queue =
1035 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1036 if (cp->wdc_channel.ch_queue == NULL) {
1037 printf("%s %s channel: "
1038 "can't allocate memory for command queue",
1039 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1040 return 0;
1041 }
1042 printf("%s: %s channel %s to %s mode\n",
1043 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1044 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1045 "configured" : "wired",
1046 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1047 "native-PCI" : "compatibility");
1048 return 1;
1049 }
1050
1051 /* some common code used by several chip_map functions to map a channel */
1052 void
1053 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1054 struct pci_attach_args *pa;
1055 struct pciide_channel *cp;
1056 pcireg_t interface;
1057 bus_size_t *cmdsizep, *ctlsizep;
1058 int (*pci_intr) __P((void *));
1059 {
1060 struct channel_softc *wdc_cp = &cp->wdc_channel;
1061
1062 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1063 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1064 pci_intr);
1065 else
1066 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1067 wdc_cp->channel, cmdsizep, ctlsizep);
1068
1069 if (cp->hw_ok == 0)
1070 return;
1071 wdc_cp->data32iot = wdc_cp->cmd_iot;
1072 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1073 wdcattach(wdc_cp);
1074 }
1075
1076 /*
1077 * Generic code to check whether a channel can be disabled. Returns 1
1078 * if the channel can be disabled, 0 if not.
1079 */
1080 int
1081 pciide_chan_candisable(cp)
1082 struct pciide_channel *cp;
1083 {
1084 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1085 struct channel_softc *wdc_cp = &cp->wdc_channel;
1086
1087 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1088 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1089 printf("%s: disabling %s channel (no drives)\n",
1090 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1091 cp->hw_ok = 0;
1092 return 1;
1093 }
1094 return 0;
1095 }
1096
1097 /*
1098 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1099 * Set hw_ok=0 on failure
1100 */
1101 void
1102 pciide_map_compat_intr(pa, cp, compatchan, interface)
1103 struct pci_attach_args *pa;
1104 struct pciide_channel *cp;
1105 int compatchan, interface;
1106 {
1107 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1108 struct channel_softc *wdc_cp = &cp->wdc_channel;
1109
1110 if (cp->hw_ok == 0)
1111 return;
1112 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1113 return;
1114
1115 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1116 pa, compatchan, pciide_compat_intr, cp);
1117 if (cp->ih == NULL) {
1118 printf("%s: no compatibility interrupt for use by %s "
1119 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1120 cp->hw_ok = 0;
1121 }
1122 }
1123
1124 void
1125 pciide_print_modes(cp)
1126 struct pciide_channel *cp;
1127 {
1128 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1129 int drive;
1130 struct channel_softc *chp;
1131 struct ata_drive_datas *drvp;
1132
1133 chp = &cp->wdc_channel;
1134 for (drive = 0; drive < 2; drive++) {
1135 drvp = &chp->ch_drive[drive];
1136 if ((drvp->drive_flags & DRIVE) == 0)
1137 continue;
1138 printf("%s(%s:%d:%d): using PIO mode %d",
1139 drvp->drv_softc->dv_xname,
1140 sc->sc_wdcdev.sc_dev.dv_xname,
1141 chp->channel, drive, drvp->PIO_mode);
1142 if (drvp->drive_flags & DRIVE_DMA)
1143 printf(", DMA mode %d", drvp->DMA_mode);
1144 if (drvp->drive_flags & DRIVE_UDMA)
1145 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1146 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1147 printf(" (using DMA data transfers)");
1148 printf("\n");
1149 }
1150 }
1151
1152 void
1153 default_chip_map(sc, pa)
1154 struct pciide_softc *sc;
1155 struct pci_attach_args *pa;
1156 {
1157 struct pciide_channel *cp;
1158 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1159 pcireg_t csr;
1160 int channel, drive;
1161 struct ata_drive_datas *drvp;
1162 u_int8_t idedma_ctl;
1163 bus_size_t cmdsize, ctlsize;
1164 char *failreason;
1165
1166 if (pciide_chipen(sc, pa) == 0)
1167 return;
1168
1169 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1170 printf("%s: bus-master DMA support present",
1171 sc->sc_wdcdev.sc_dev.dv_xname);
1172 if (sc->sc_pp == &default_product_desc &&
1173 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1174 PCIIDE_OPTIONS_DMA) == 0) {
1175 printf(", but unused (no driver support)");
1176 sc->sc_dma_ok = 0;
1177 } else {
1178 pciide_mapreg_dma(sc, pa);
1179 if (sc->sc_dma_ok != 0)
1180 printf(", used without full driver "
1181 "support");
1182 }
1183 } else {
1184 printf("%s: hardware does not support DMA",
1185 sc->sc_wdcdev.sc_dev.dv_xname);
1186 sc->sc_dma_ok = 0;
1187 }
1188 printf("\n");
1189 if (sc->sc_dma_ok) {
1190 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1191 sc->sc_wdcdev.irqack = pciide_irqack;
1192 }
1193 sc->sc_wdcdev.PIO_cap = 0;
1194 sc->sc_wdcdev.DMA_cap = 0;
1195
1196 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1197 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1198 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1199
1200 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1201 cp = &sc->pciide_channels[channel];
1202 if (pciide_chansetup(sc, channel, interface) == 0)
1203 continue;
1204 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1205 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1206 &ctlsize, pciide_pci_intr);
1207 } else {
1208 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1209 channel, &cmdsize, &ctlsize);
1210 }
1211 if (cp->hw_ok == 0)
1212 continue;
1213 /*
1214 * Check to see if something appears to be there.
1215 */
1216 failreason = NULL;
1217 if (!wdcprobe(&cp->wdc_channel)) {
1218 failreason = "not responding; disabled or no drives?";
1219 goto next;
1220 }
1221 /*
1222 * Now, make sure it's actually attributable to this PCI IDE
1223 * channel by trying to access the channel again while the
1224 * PCI IDE controller's I/O space is disabled. (If the
1225 * channel no longer appears to be there, it belongs to
1226 * this controller.) YUCK!
1227 */
1228 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1229 PCI_COMMAND_STATUS_REG);
1230 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1231 csr & ~PCI_COMMAND_IO_ENABLE);
1232 if (wdcprobe(&cp->wdc_channel))
1233 failreason = "other hardware responding at addresses";
1234 pci_conf_write(sc->sc_pc, sc->sc_tag,
1235 PCI_COMMAND_STATUS_REG, csr);
1236 next:
1237 if (failreason) {
1238 printf("%s: %s channel ignored (%s)\n",
1239 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1240 failreason);
1241 cp->hw_ok = 0;
1242 bus_space_unmap(cp->wdc_channel.cmd_iot,
1243 cp->wdc_channel.cmd_ioh, cmdsize);
1244 bus_space_unmap(cp->wdc_channel.ctl_iot,
1245 cp->wdc_channel.ctl_ioh, ctlsize);
1246 } else {
1247 pciide_map_compat_intr(pa, cp, channel, interface);
1248 }
1249 if (cp->hw_ok) {
1250 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1251 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1252 wdcattach(&cp->wdc_channel);
1253 }
1254 }
1255
1256 if (sc->sc_dma_ok == 0)
1257 return;
1258
1259 /* Allocate DMA maps */
1260 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1261 idedma_ctl = 0;
1262 cp = &sc->pciide_channels[channel];
1263 for (drive = 0; drive < 2; drive++) {
1264 drvp = &cp->wdc_channel.ch_drive[drive];
1265 /* If no drive, skip */
1266 if ((drvp->drive_flags & DRIVE) == 0)
1267 continue;
1268 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1269 continue;
1270 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1271 /* Abort DMA setup */
1272 printf("%s:%d:%d: can't allocate DMA maps, "
1273 "using PIO transfers\n",
1274 sc->sc_wdcdev.sc_dev.dv_xname,
1275 channel, drive);
1276 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1277 }
1278 printf("%s:%d:%d: using DMA data transfers\n",
1279 sc->sc_wdcdev.sc_dev.dv_xname,
1280 channel, drive);
1281 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1282 }
1283 if (idedma_ctl != 0) {
1284 /* Add software bits in status register */
1285 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1286 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1287 idedma_ctl);
1288 }
1289 }
1290 }
1291
1292 void
1293 piix_chip_map(sc, pa)
1294 struct pciide_softc *sc;
1295 struct pci_attach_args *pa;
1296 {
1297 struct pciide_channel *cp;
1298 int channel;
1299 u_int32_t idetim;
1300 bus_size_t cmdsize, ctlsize;
1301
1302 if (pciide_chipen(sc, pa) == 0)
1303 return;
1304
1305 printf("%s: bus-master DMA support present",
1306 sc->sc_wdcdev.sc_dev.dv_xname);
1307 pciide_mapreg_dma(sc, pa);
1308 printf("\n");
1309 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1310 WDC_CAPABILITY_MODE;
1311 if (sc->sc_dma_ok) {
1312 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1313 sc->sc_wdcdev.irqack = pciide_irqack;
1314 switch(sc->sc_pp->ide_product) {
1315 case PCI_PRODUCT_INTEL_82371AB_IDE:
1316 case PCI_PRODUCT_INTEL_82801AA_IDE:
1317 case PCI_PRODUCT_INTEL_82801AB_IDE:
1318 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1319 }
1320 }
1321 sc->sc_wdcdev.PIO_cap = 4;
1322 sc->sc_wdcdev.DMA_cap = 2;
1323 sc->sc_wdcdev.UDMA_cap =
1324 (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) ? 4 : 2;
1325 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1326 sc->sc_wdcdev.set_modes = piix_setup_channel;
1327 else
1328 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1329 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1330 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1331
1332 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1333 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1334 DEBUG_PROBE);
1335 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1336 WDCDEBUG_PRINT((", sidetim=0x%x",
1337 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1338 DEBUG_PROBE);
1339 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1340 WDCDEBUG_PRINT((", udamreg 0x%x",
1341 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1342 DEBUG_PROBE);
1343 }
1344 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1345 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1346 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1347 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1348 DEBUG_PROBE);
1349 }
1350
1351 }
1352 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1353
1354 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1355 cp = &sc->pciide_channels[channel];
1356 /* PIIX is compat-only */
1357 if (pciide_chansetup(sc, channel, 0) == 0)
1358 continue;
1359 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1360 if ((PIIX_IDETIM_READ(idetim, channel) &
1361 PIIX_IDETIM_IDE) == 0) {
1362 printf("%s: %s channel ignored (disabled)\n",
1363 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1364 continue;
1365 }
1366 /* PIIX are compat-only pciide devices */
1367 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1368 if (cp->hw_ok == 0)
1369 continue;
1370 if (pciide_chan_candisable(cp)) {
1371 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1372 channel);
1373 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1374 idetim);
1375 }
1376 pciide_map_compat_intr(pa, cp, channel, 0);
1377 if (cp->hw_ok == 0)
1378 continue;
1379 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1380 }
1381
1382 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1383 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1384 DEBUG_PROBE);
1385 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1386 WDCDEBUG_PRINT((", sidetim=0x%x",
1387 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1388 DEBUG_PROBE);
1389 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1390 WDCDEBUG_PRINT((", udamreg 0x%x",
1391 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1392 DEBUG_PROBE);
1393 }
1394 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1395 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1396 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1397 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1398 DEBUG_PROBE);
1399 }
1400 }
1401 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1402 }
1403
1404 void
1405 piix_setup_channel(chp)
1406 struct channel_softc *chp;
1407 {
1408 u_int8_t mode[2], drive;
1409 u_int32_t oidetim, idetim, idedma_ctl;
1410 struct pciide_channel *cp = (struct pciide_channel*)chp;
1411 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1412 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1413
1414 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1415 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1416 idedma_ctl = 0;
1417
1418 /* set up new idetim: Enable IDE registers decode */
1419 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1420 chp->channel);
1421
1422 /* setup DMA */
1423 pciide_channel_dma_setup(cp);
1424
1425 /*
1426 * Here we have to mess with the drives' modes: the PIIX can't have
1427 * different timings for the master and slave drives.
1428 * We need to find the best combination.
1429 */
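/*
 * For example, if one drive negotiated DMA mode 2 and the other DMA mode 1,
 * both end up being programmed for DMA mode 1 by the code below.
 */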
1430
1431 /* If both drives support DMA, take the lower mode */
1432 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1433 (drvp[1].drive_flags & DRIVE_DMA)) {
1434 mode[0] = mode[1] =
1435 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1436 drvp[0].DMA_mode = mode[0];
1437 drvp[1].DMA_mode = mode[1];
1438 goto ok;
1439 }
1440 /*
1441 * If only one drive supports DMA, use its mode, and
1442 * put the other one in PIO mode 0 if its mode is not compatible
1443 */
1444 if (drvp[0].drive_flags & DRIVE_DMA) {
1445 mode[0] = drvp[0].DMA_mode;
1446 mode[1] = drvp[1].PIO_mode;
1447 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1448 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1449 mode[1] = drvp[1].PIO_mode = 0;
1450 goto ok;
1451 }
1452 if (drvp[1].drive_flags & DRIVE_DMA) {
1453 mode[1] = drvp[1].DMA_mode;
1454 mode[0] = drvp[0].PIO_mode;
1455 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1456 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1457 mode[0] = drvp[0].PIO_mode = 0;
1458 goto ok;
1459 }
1460 /*
1461 * If neither drive uses DMA, take the lower mode, unless
1462 * one of them is below PIO mode 2
1463 */
1464 if (drvp[0].PIO_mode < 2) {
1465 mode[0] = drvp[0].PIO_mode = 0;
1466 mode[1] = drvp[1].PIO_mode;
1467 } else if (drvp[1].PIO_mode < 2) {
1468 mode[1] = drvp[1].PIO_mode = 0;
1469 mode[0] = drvp[0].PIO_mode;
1470 } else {
1471 mode[0] = mode[1] =
1472 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1473 drvp[0].PIO_mode = mode[0];
1474 drvp[1].PIO_mode = mode[1];
1475 }
1476 ok: /* The modes are setup */
1477 for (drive = 0; drive < 2; drive++) {
1478 if (drvp[drive].drive_flags & DRIVE_DMA) {
1479 idetim |= piix_setup_idetim_timings(
1480 mode[drive], 1, chp->channel);
1481 goto end;
1482 }
1483 }
1484 /* If we get here, neither drive is using DMA */
1485 if (mode[0] >= 2)
1486 idetim |= piix_setup_idetim_timings(
1487 mode[0], 0, chp->channel);
1488 else
1489 idetim |= piix_setup_idetim_timings(
1490 mode[1], 0, chp->channel);
1491 end: /*
1492 * timing mode is now set up in the controller. Enable
1493 * it per-drive
1494 */
1495 for (drive = 0; drive < 2; drive++) {
1496 /* If no drive, skip */
1497 if ((drvp[drive].drive_flags & DRIVE) == 0)
1498 continue;
1499 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1500 if (drvp[drive].drive_flags & DRIVE_DMA)
1501 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1502 }
1503 if (idedma_ctl != 0) {
1504 /* Add software bits in status register */
1505 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1506 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1507 idedma_ctl);
1508 }
1509 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1510 pciide_print_modes(cp);
1511 }
1512
1513 void
1514 piix3_4_setup_channel(chp)
1515 struct channel_softc *chp;
1516 {
1517 struct ata_drive_datas *drvp;
1518 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1519 struct pciide_channel *cp = (struct pciide_channel*)chp;
1520 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1521 int drive;
1522 int channel = chp->channel;
1523
1524 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1525 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1526 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1527 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1528 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1529 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1530 PIIX_SIDETIM_RTC_MASK(channel));
1531
1532 idedma_ctl = 0;
1533 /* If channel disabled, no need to go further */
1534 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1535 return;
1536 /* set up new idetim: Enable IDE registers decode */
1537 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1538
1539 /* setup DMA if needed */
1540 pciide_channel_dma_setup(cp);
1541
1542 for (drive = 0; drive < 2; drive++) {
1543 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1544 PIIX_UDMATIM_SET(0x3, channel, drive));
1545 drvp = &chp->ch_drive[drive];
1546 /* If no drive, skip */
1547 if ((drvp->drive_flags & DRIVE) == 0)
1548 continue;
1549 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1550 (drvp->drive_flags & DRIVE_UDMA) == 0))
1551 goto pio;
1552
1553 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1554 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1555 ideconf |= PIIX_CONFIG_PINGPONG;
1556 }
1557 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1558 /* setup Ultra/66 */
1559 if (drvp->UDMA_mode > 2 &&
1560 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1561 drvp->UDMA_mode = 2;
1562 if (drvp->UDMA_mode > 2)
1563 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1564 else
1565 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1566 }
1567 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1568 (drvp->drive_flags & DRIVE_UDMA)) {
1569 /* use Ultra/DMA */
1570 drvp->drive_flags &= ~DRIVE_DMA;
1571 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1572 udmareg |= PIIX_UDMATIM_SET(
1573 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1574 } else {
1575 /* use Multiword DMA */
1576 drvp->drive_flags &= ~DRIVE_UDMA;
1577 if (drive == 0) {
1578 idetim |= piix_setup_idetim_timings(
1579 drvp->DMA_mode, 1, channel);
1580 } else {
1581 sidetim |= piix_setup_sidetim_timings(
1582 drvp->DMA_mode, 1, channel);
1583 idetim = PIIX_IDETIM_SET(idetim,
1584 PIIX_IDETIM_SITRE, channel);
1585 }
1586 }
1587 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1588
1589 pio: /* use PIO mode */
1590 idetim |= piix_setup_idetim_drvs(drvp);
1591 if (drive == 0) {
1592 idetim |= piix_setup_idetim_timings(
1593 drvp->PIO_mode, 0, channel);
1594 } else {
1595 sidetim |= piix_setup_sidetim_timings(
1596 drvp->PIO_mode, 0, channel);
1597 idetim = PIIX_IDETIM_SET(idetim,
1598 PIIX_IDETIM_SITRE, channel);
1599 }
1600 }
1601 if (idedma_ctl != 0) {
1602 /* Add software bits in status register */
1603 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1604 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1605 idedma_ctl);
1606 }
1607 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1608 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1609 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1610 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1611 pciide_print_modes(cp);
1612 }
1613
1614
1615 /* setup ISP and RTC fields, based on mode */
1616 static u_int32_t
1617 piix_setup_idetim_timings(mode, dma, channel)
1618 u_int8_t mode;
1619 u_int8_t dma;
1620 u_int8_t channel;
1621 {
1622
1623 if (dma)
1624 return PIIX_IDETIM_SET(0,
1625 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1626 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1627 channel);
1628 else
1629 return PIIX_IDETIM_SET(0,
1630 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1631 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1632 channel);
1633 }
1634
1635 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1636 static u_int32_t
1637 piix_setup_idetim_drvs(drvp)
1638 struct ata_drive_datas *drvp;
1639 {
1640 u_int32_t ret = 0;
1641 struct channel_softc *chp = drvp->chnl_softc;
1642 u_int8_t channel = chp->channel;
1643 u_int8_t drive = drvp->drive;
1644
1645 /*
1646 * If the drive is using UDMA, the timing setups are independent,
1647 * so just check DMA and PIO here.
1648 */
1649 if (drvp->drive_flags & DRIVE_DMA) {
1650 /* if mode = DMA mode 0, use compatible timings */
1651 if ((drvp->drive_flags & DRIVE_DMA) &&
1652 drvp->DMA_mode == 0) {
1653 drvp->PIO_mode = 0;
1654 return ret;
1655 }
1656 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1657 /*
1658 * If the PIO and DMA timings are the same, use fast timings for PIO
1659 * too, else fall back to compat timings.
1660 */
1661 if ((piix_isp_pio[drvp->PIO_mode] !=
1662 piix_isp_dma[drvp->DMA_mode]) ||
1663 (piix_rtc_pio[drvp->PIO_mode] !=
1664 piix_rtc_dma[drvp->DMA_mode]))
1665 drvp->PIO_mode = 0;
1666 /* if PIO mode <= 2, use compat timings for PIO */
1667 if (drvp->PIO_mode <= 2) {
1668 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1669 channel);
1670 return ret;
1671 }
1672 }
1673
1674 /*
1675 * Now setup PIO modes. If mode < 2, use compat timings.
1676 * Else enable fast timings. Enable IORDY and prefetch/post
1677 * if PIO mode >= 3.
1678 */
1679
1680 if (drvp->PIO_mode < 2)
1681 return ret;
1682
1683 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1684 if (drvp->PIO_mode >= 3) {
1685 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1686 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1687 }
1688 return ret;
1689 }
1690
1691 /* setup values in SIDETIM registers, based on mode */
1692 static u_int32_t
1693 piix_setup_sidetim_timings(mode, dma, channel)
1694 u_int8_t mode;
1695 u_int8_t dma;
1696 u_int8_t channel;
1697 {
1698 if (dma)
1699 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1700 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1701 else
1702 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1703 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1704 }
1705
1706 void
1707 amd756_chip_map(sc, pa)
1708 struct pciide_softc *sc;
1709 struct pci_attach_args *pa;
1710 {
1711 struct pciide_channel *cp;
1712 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1713 int channel;
1714 pcireg_t chanenable;
1715 bus_size_t cmdsize, ctlsize;
1716
1717 if (pciide_chipen(sc, pa) == 0)
1718 return;
1719 printf("%s: bus-master DMA support present",
1720 sc->sc_wdcdev.sc_dev.dv_xname);
1721 pciide_mapreg_dma(sc, pa);
1722 printf("\n");
1723 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1724 WDC_CAPABILITY_MODE;
1725 if (sc->sc_dma_ok) {
1726 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1727 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1728 sc->sc_wdcdev.irqack = pciide_irqack;
1729 }
1730 sc->sc_wdcdev.PIO_cap = 4;
1731 sc->sc_wdcdev.DMA_cap = 2;
1732 sc->sc_wdcdev.UDMA_cap = 4;
1733 sc->sc_wdcdev.set_modes = amd756_setup_channel;
1734 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1735 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1736 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1737
1738 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1739 DEBUG_PROBE);
1740 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1741 cp = &sc->pciide_channels[channel];
1742 if (pciide_chansetup(sc, channel, interface) == 0)
1743 continue;
1744
1745 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1746 printf("%s: %s channel ignored (disabled)\n",
1747 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1748 continue;
1749 }
1750 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1751 pciide_pci_intr);
1752
1753 if (pciide_chan_candisable(cp))
1754 chanenable &= ~AMD756_CHAN_EN(channel);
1755 pciide_map_compat_intr(pa, cp, channel, interface);
1756 if (cp->hw_ok == 0)
1757 continue;
1758
1759 amd756_setup_channel(&cp->wdc_channel);
1760 }
1761 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1762 chanenable);
1763 return;
1764 }
1765
1766 void
1767 amd756_setup_channel(chp)
1768 struct channel_softc *chp;
1769 {
1770 u_int32_t udmatim_reg, datatim_reg;
1771 u_int8_t idedma_ctl;
1772 int mode, drive;
1773 struct ata_drive_datas *drvp;
1774 struct pciide_channel *cp = (struct pciide_channel*)chp;
1775 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1776
1777 idedma_ctl = 0;
1778 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1779 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1780 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1781 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1782
1783 /* setup DMA if needed */
1784 pciide_channel_dma_setup(cp);
1785
1786 for (drive = 0; drive < 2; drive++) {
1787 drvp = &chp->ch_drive[drive];
1788 /* If no drive, skip */
1789 if ((drvp->drive_flags & DRIVE) == 0)
1790 continue;
1791 /* add timing values, setup DMA if needed */
1792 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1793 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1794 mode = drvp->PIO_mode;
1795 goto pio;
1796 }
1797 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1798 (drvp->drive_flags & DRIVE_UDMA)) {
1799 /* use Ultra/DMA */
1800 drvp->drive_flags &= ~DRIVE_DMA;
1801 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1802 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1803 AMD756_UDMA_TIME(chp->channel, drive,
1804 amd756_udma_tim[drvp->UDMA_mode]);
1805 /* can use PIO timings, MW DMA unused */
1806 mode = drvp->PIO_mode;
1807 } else {
1808 /* use Multiword DMA */
1809 drvp->drive_flags &= ~DRIVE_UDMA;
1810 /* mode = min(pio, dma+2) */
1811 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1812 mode = drvp->PIO_mode;
1813 else
1814 mode = drvp->DMA_mode + 2;
1815 }
1816 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1817
1818 pio: /* setup PIO mode */
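		/* Modes 0-2 fall back to the mode 0 (slowest) timing entry; the drive's PIO and DMA modes are reset accordingly. */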
1819 if (mode <= 2) {
1820 drvp->DMA_mode = 0;
1821 drvp->PIO_mode = 0;
1822 mode = 0;
1823 } else {
1824 drvp->PIO_mode = mode;
1825 drvp->DMA_mode = mode - 2;
1826 }
1827 datatim_reg |=
1828 AMD756_DATATIM_PULSE(chp->channel, drive,
1829 amd756_pio_set[mode]) |
1830 AMD756_DATATIM_RECOV(chp->channel, drive,
1831 amd756_pio_rec[mode]);
1832 }
1833 if (idedma_ctl != 0) {
1834 /* Add software bits in status register */
1835 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1836 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1837 idedma_ctl);
1838 }
1839 pciide_print_modes(cp);
1840 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1841 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1842 }
1843
1844 void
1845 apollo_chip_map(sc, pa)
1846 struct pciide_softc *sc;
1847 struct pci_attach_args *pa;
1848 {
1849 struct pciide_channel *cp;
1850 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1851 int channel;
1852 u_int32_t ideconf;
1853 bus_size_t cmdsize, ctlsize;
1854
1855 if (pciide_chipen(sc, pa) == 0)
1856 return;
1857 printf("%s: bus-master DMA support present",
1858 sc->sc_wdcdev.sc_dev.dv_xname);
1859 pciide_mapreg_dma(sc, pa);
1860 printf("\n");
1861 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1862 WDC_CAPABILITY_MODE;
1863 if (sc->sc_dma_ok) {
1864 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1865 sc->sc_wdcdev.irqack = pciide_irqack;
1866 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE)
1867 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1868 }
1869 sc->sc_wdcdev.PIO_cap = 4;
1870 sc->sc_wdcdev.DMA_cap = 2;
1871 sc->sc_wdcdev.UDMA_cap = 2;
1872 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1873 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1874 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1875
1876 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1877 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1878 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1879 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1880 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1881 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
1882 DEBUG_PROBE);
1883
1884 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1885 cp = &sc->pciide_channels[channel];
1886 if (pciide_chansetup(sc, channel, interface) == 0)
1887 continue;
1888
1889 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
1890 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
1891 printf("%s: %s channel ignored (disabled)\n",
1892 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1893 continue;
1894 }
1895 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1896 pciide_pci_intr);
1897 if (cp->hw_ok == 0)
1898 continue;
1899 if (pciide_chan_candisable(cp)) {
1900 ideconf &= ~APO_IDECONF_EN(channel);
1901 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
1902 ideconf);
1903 }
1904 pciide_map_compat_intr(pa, cp, channel, interface);
1905
1906 if (cp->hw_ok == 0)
1907 continue;
1908 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
1909 }
1910 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1911 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1912 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
1913 }
1914
1915 void
1916 apollo_setup_channel(chp)
1917 struct channel_softc *chp;
1918 {
1919 u_int32_t udmatim_reg, datatim_reg;
1920 u_int8_t idedma_ctl;
1921 int mode, drive;
1922 struct ata_drive_datas *drvp;
1923 struct pciide_channel *cp = (struct pciide_channel*)chp;
1924 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1925
1926 idedma_ctl = 0;
1927 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
1928 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1929 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
1930 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
1931
1932 /* setup DMA if needed */
1933 pciide_channel_dma_setup(cp);
1934
1935 for (drive = 0; drive < 2; drive++) {
1936 drvp = &chp->ch_drive[drive];
1937 /* If no drive, skip */
1938 if ((drvp->drive_flags & DRIVE) == 0)
1939 continue;
1940 /* add timing values, setup DMA if needed */
1941 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1942 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1943 mode = drvp->PIO_mode;
1944 goto pio;
1945 }
1946 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1947 (drvp->drive_flags & DRIVE_UDMA)) {
1948 /* use Ultra/DMA */
1949 drvp->drive_flags &= ~DRIVE_DMA;
1950 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
1951 APO_UDMA_EN_MTH(chp->channel, drive) |
1952 APO_UDMA_TIME(chp->channel, drive,
1953 apollo_udma_tim[drvp->UDMA_mode]);
1954 /* can use PIO timings, MW DMA unused */
1955 mode = drvp->PIO_mode;
1956 } else {
1957 /* use Multiword DMA */
1958 drvp->drive_flags &= ~DRIVE_UDMA;
1959 /* mode = min(pio, dma+2) */
1960 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1961 mode = drvp->PIO_mode;
1962 else
1963 mode = drvp->DMA_mode + 2;
1964 }
1965 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1966
1967 pio: /* setup PIO mode */
1968 if (mode <= 2) {
1969 drvp->DMA_mode = 0;
1970 drvp->PIO_mode = 0;
1971 mode = 0;
1972 } else {
1973 drvp->PIO_mode = mode;
1974 drvp->DMA_mode = mode - 2;
1975 }
1976 datatim_reg |=
1977 APO_DATATIM_PULSE(chp->channel, drive,
1978 apollo_pio_set[mode]) |
1979 APO_DATATIM_RECOV(chp->channel, drive,
1980 apollo_pio_rec[mode]);
1981 }
1982 if (idedma_ctl != 0) {
1983 /* Add software bits in status register */
1984 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1985 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1986 idedma_ctl);
1987 }
1988 pciide_print_modes(cp);
1989 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
1990 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
1991 }
1992
1993 void
1994 cmd_channel_map(pa, sc, channel)
1995 struct pci_attach_args *pa;
1996 struct pciide_softc *sc;
1997 int channel;
1998 {
1999 struct pciide_channel *cp = &sc->pciide_channels[channel];
2000 bus_size_t cmdsize, ctlsize;
2001 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2002 int interface;
2003
2004 /*
2005 * The 0648/0649 can be told to identify as a RAID controller.
2006 	 * In this case, we have to fake the interface.
2007 */
2008 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2009 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2010 PCIIDE_INTERFACE_SETTABLE(1);
2011 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2012 CMD_CONF_DSA1)
2013 interface |= PCIIDE_INTERFACE_PCI(0) |
2014 PCIIDE_INTERFACE_PCI(1);
2015 } else {
2016 interface = PCI_INTERFACE(pa->pa_class);
2017 }
2018
2019 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2020 cp->name = PCIIDE_CHANNEL_NAME(channel);
2021 cp->wdc_channel.channel = channel;
2022 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2023
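	/*
	 * Both channels share the command queue of channel 0, presumably
	 * because these controllers cannot service requests on both
	 * channels at the same time.
	 */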
2024 if (channel > 0) {
2025 cp->wdc_channel.ch_queue =
2026 sc->pciide_channels[0].wdc_channel.ch_queue;
2027 } else {
2028 cp->wdc_channel.ch_queue =
2029 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2030 }
2031 if (cp->wdc_channel.ch_queue == NULL) {
2032 		printf("%s: %s channel: "
2033 		    "can't allocate memory for command queue\n",
2034 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2035 return;
2036 }
2037
2038 printf("%s: %s channel %s to %s mode\n",
2039 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2040 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2041 "configured" : "wired",
2042 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2043 "native-PCI" : "compatibility");
2044
2045 /*
2046 	 * With a CMD PCI064x, if we get here the first channel is enabled:
2047 	 * there's no way to disable the first channel without disabling
2048 	 * the whole device.
2049 */
2050 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2051 printf("%s: %s channel ignored (disabled)\n",
2052 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2053 return;
2054 }
2055
2056 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2057 if (cp->hw_ok == 0)
2058 return;
2059 if (channel == 1) {
2060 if (pciide_chan_candisable(cp)) {
2061 ctrl &= ~CMD_CTRL_2PORT;
2062 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2063 CMD_CTRL, ctrl);
2064 }
2065 }
2066 pciide_map_compat_intr(pa, cp, channel, interface);
2067 }
2068
2069 int
2070 cmd_pci_intr(arg)
2071 void *arg;
2072 {
2073 struct pciide_softc *sc = arg;
2074 struct pciide_channel *cp;
2075 struct channel_softc *wdc_cp;
2076 int i, rv, crv;
2077 u_int32_t priirq, secirq;
2078
2079 rv = 0;
2080 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2081 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
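	/*
	 * The drive interrupt status bits live in the CONF (primary
	 * channel) and ARTTIM23 (secondary channel) config registers.
	 */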
2082 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2083 cp = &sc->pciide_channels[i];
2084 wdc_cp = &cp->wdc_channel;
2085 		/* If a compat channel, skip. */
2086 if (cp->compat)
2087 continue;
2088 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2089 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2090 crv = wdcintr(wdc_cp);
2091 if (crv == 0)
2092 printf("%s:%d: bogus intr\n",
2093 sc->sc_wdcdev.sc_dev.dv_xname, i);
2094 else
2095 rv = 1;
2096 }
2097 }
2098 return rv;
2099 }
2100
2101 void
2102 cmd_chip_map(sc, pa)
2103 struct pciide_softc *sc;
2104 struct pci_attach_args *pa;
2105 {
2106 int channel;
2107
2108 /*
2109 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2110 	 * and the base address registers can be disabled at
2111 	 * the hardware level. In this case, the device is wired
2112 * in compat mode and its first channel is always enabled,
2113 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2114 * In fact, it seems that the first channel of the CMD PCI0640
2115 * can't be disabled.
2116 */
2117
2118 #ifdef PCIIDE_CMD064x_DISABLE
2119 if (pciide_chipen(sc, pa) == 0)
2120 return;
2121 #endif
2122
2123 printf("%s: hardware does not support DMA\n",
2124 sc->sc_wdcdev.sc_dev.dv_xname);
2125 sc->sc_dma_ok = 0;
2126
2127 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2128 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
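	/* Plain 064x: PIO only, 16-bit data port, no timing mode setting. */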
2129 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2130
2131 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2132 cmd_channel_map(pa, sc, channel);
2133 }
2134 }
2135
2136 void
2137 cmd0643_9_chip_map(sc, pa)
2138 struct pciide_softc *sc;
2139 struct pci_attach_args *pa;
2140 {
2141 struct pciide_channel *cp;
2142 int channel;
2143
2144 /*
2145 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2146 	 * and the base address registers can be disabled at
2147 	 * the hardware level. In this case, the device is wired
2148 * in compat mode and its first channel is always enabled,
2149 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2150 * In fact, it seems that the first channel of the CMD PCI0640
2151 * can't be disabled.
2152 */
2153
2154 #ifdef PCIIDE_CMD064x_DISABLE
2155 if (pciide_chipen(sc, pa) == 0)
2156 return;
2157 #endif
2158 printf("%s: bus-master DMA support present",
2159 sc->sc_wdcdev.sc_dev.dv_xname);
2160 pciide_mapreg_dma(sc, pa);
2161 printf("\n");
2162 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2163 WDC_CAPABILITY_MODE;
2164 if (sc->sc_dma_ok) {
2165 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2166 sc->sc_wdcdev.irqack = pciide_irqack;
2167 switch (sc->sc_pp->ide_product) {
2168 case PCI_PRODUCT_CMDTECH_649:
2169 case PCI_PRODUCT_CMDTECH_648:
2170 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2171 sc->sc_wdcdev.UDMA_cap = 4;
2172 }
2173 }
2174
2175 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2176 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2177 sc->sc_wdcdev.PIO_cap = 4;
2178 sc->sc_wdcdev.DMA_cap = 2;
2179 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2180
2181 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2182 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2183 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2184 DEBUG_PROBE);
2185
2186 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2187 cp = &sc->pciide_channels[channel];
2188 cmd_channel_map(pa, sc, channel);
2189 if (cp->hw_ok == 0)
2190 continue;
2191 cmd0643_9_setup_channel(&cp->wdc_channel);
2192 }
2193 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2194 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2195 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2196 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2197 DEBUG_PROBE);
2198 }
2199
2200 void
2201 cmd0643_9_setup_channel(chp)
2202 struct channel_softc *chp;
2203 {
2204 struct ata_drive_datas *drvp;
2205 u_int8_t tim;
2206 u_int32_t idedma_ctl, udma_reg;
2207 int drive;
2208 struct pciide_channel *cp = (struct pciide_channel*)chp;
2209 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2210
2211 idedma_ctl = 0;
2212 /* setup DMA if needed */
2213 pciide_channel_dma_setup(cp);
2214
2215 for (drive = 0; drive < 2; drive++) {
2216 drvp = &chp->ch_drive[drive];
2217 /* If no drive, skip */
2218 if ((drvp->drive_flags & DRIVE) == 0)
2219 continue;
2220 /* add timing values, setup DMA if needed */
2221 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2222 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2223 if (drvp->drive_flags & DRIVE_UDMA) {
2224 /* UltraDMA on a 0648 or 0649 */
2225 udma_reg = pciide_pci_read(sc->sc_pc,
2226 sc->sc_tag, CMD_UDMATIM(chp->channel));
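				/*
				 * Cap at Ultra-DMA 2 unless BICSR reports
				 * (what is presumably) an 80-wire cable on
				 * this channel.
				 */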
2227 if (drvp->UDMA_mode > 2 &&
2228 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2229 CMD_BICSR) &
2230 CMD_BICSR_80(chp->channel)) == 0)
2231 drvp->UDMA_mode = 2;
2232 if (drvp->UDMA_mode > 2)
2233 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2234 else
2235 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2236 udma_reg |= CMD_UDMATIM_UDMA(drive);
2237 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2238 CMD_UDMATIM_TIM_OFF(drive));
2239 udma_reg |=
2240 (cmd0648_9_tim_udma[drvp->UDMA_mode] <<
2241 CMD_UDMATIM_TIM_OFF(drive));
2242 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2243 CMD_UDMATIM(chp->channel), udma_reg);
2244 } else {
2245 /*
2246 * use Multiword DMA.
2247 * Timings will be used for both PIO and DMA,
2248 				 * so adjust DMA mode if needed;
2249 				 * if we have a 0648/9, turn off UDMA.
2250 */
2251 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2252 udma_reg = pciide_pci_read(sc->sc_pc,
2253 sc->sc_tag,
2254 CMD_UDMATIM(chp->channel));
2255 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2256 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2257 CMD_UDMATIM(chp->channel),
2258 udma_reg);
2259 }
2260 if (drvp->PIO_mode >= 3 &&
2261 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2262 drvp->DMA_mode = drvp->PIO_mode - 2;
2263 }
2264 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2265 }
2266 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2267 }
2268 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2269 CMD_DATA_TIM(chp->channel, drive), tim);
2270 }
2271 if (idedma_ctl != 0) {
2272 /* Add software bits in status register */
2273 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2274 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2275 idedma_ctl);
2276 }
2277 pciide_print_modes(cp);
2278 }
2279
2280 void
2281 cy693_chip_map(sc, pa)
2282 struct pciide_softc *sc;
2283 struct pci_attach_args *pa;
2284 {
2285 struct pciide_channel *cp;
2286 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2287 bus_size_t cmdsize, ctlsize;
2288
2289 if (pciide_chipen(sc, pa) == 0)
2290 return;
2291 /*
2292 	 * This chip has 2 PCI IDE functions, one for primary and one for
2293 	 * secondary, so we need to call pciide_mapregs_compat() with
2294 	 * the real channel.
2295 */
2296 if (pa->pa_function == 1) {
2297 sc->sc_cy_compatchan = 0;
2298 } else if (pa->pa_function == 2) {
2299 sc->sc_cy_compatchan = 1;
2300 } else {
2301 printf("%s: unexpected PCI function %d\n",
2302 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2303 return;
2304 }
2305 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2306 printf("%s: bus-master DMA support present",
2307 sc->sc_wdcdev.sc_dev.dv_xname);
2308 pciide_mapreg_dma(sc, pa);
2309 } else {
2310 printf("%s: hardware does not support DMA",
2311 sc->sc_wdcdev.sc_dev.dv_xname);
2312 sc->sc_dma_ok = 0;
2313 }
2314 printf("\n");
2315
2316 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2317 if (sc->sc_cy_handle == NULL) {
2318 printf("%s: unable to map hyperCache control registers\n",
2319 sc->sc_wdcdev.sc_dev.dv_xname);
2320 sc->sc_dma_ok = 0;
2321 }
2322
2323 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2324 WDC_CAPABILITY_MODE;
2325 if (sc->sc_dma_ok) {
2326 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2327 sc->sc_wdcdev.irqack = pciide_irqack;
2328 }
2329 sc->sc_wdcdev.PIO_cap = 4;
2330 sc->sc_wdcdev.DMA_cap = 2;
2331 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2332
2333 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2334 sc->sc_wdcdev.nchannels = 1;
2335
2336 /* Only one channel for this chip; if we are here it's enabled */
2337 cp = &sc->pciide_channels[0];
2338 sc->wdc_chanarray[0] = &cp->wdc_channel;
2339 cp->name = PCIIDE_CHANNEL_NAME(0);
2340 cp->wdc_channel.channel = 0;
2341 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2342 cp->wdc_channel.ch_queue =
2343 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2344 if (cp->wdc_channel.ch_queue == NULL) {
2345 		printf("%s: primary channel: "
2346 		    "can't allocate memory for command queue\n",
2347 		    sc->sc_wdcdev.sc_dev.dv_xname);
2348 return;
2349 }
2350 printf("%s: primary channel %s to ",
2351 sc->sc_wdcdev.sc_dev.dv_xname,
2352 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2353 "configured" : "wired");
2354 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2355 printf("native-PCI");
2356 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2357 pciide_pci_intr);
2358 } else {
2359 printf("compatibility");
2360 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2361 &cmdsize, &ctlsize);
2362 }
2363 printf(" mode\n");
2364 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2365 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2366 wdcattach(&cp->wdc_channel);
2367 if (pciide_chan_candisable(cp)) {
2368 pci_conf_write(sc->sc_pc, sc->sc_tag,
2369 PCI_COMMAND_STATUS_REG, 0);
2370 }
2371 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2372 if (cp->hw_ok == 0)
2373 return;
2374 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2375 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2376 cy693_setup_channel(&cp->wdc_channel);
2377 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2378 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2379 }
2380
2381 void
2382 cy693_setup_channel(chp)
2383 struct channel_softc *chp;
2384 {
2385 struct ata_drive_datas *drvp;
2386 int drive;
2387 u_int32_t cy_cmd_ctrl;
2388 u_int32_t idedma_ctl;
2389 struct pciide_channel *cp = (struct pciide_channel*)chp;
2390 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2391 int dma_mode = -1;
2392
2393 cy_cmd_ctrl = idedma_ctl = 0;
2394
2395 /* setup DMA if needed */
2396 pciide_channel_dma_setup(cp);
2397
2398 for (drive = 0; drive < 2; drive++) {
2399 drvp = &chp->ch_drive[drive];
2400 /* If no drive, skip */
2401 if ((drvp->drive_flags & DRIVE) == 0)
2402 continue;
2403 /* add timing values, setup DMA if needed */
2404 if (drvp->drive_flags & DRIVE_DMA) {
2405 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2406 /* use Multiword DMA */
2407 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2408 dma_mode = drvp->DMA_mode;
2409 }
2410 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2411 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2412 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2413 CY_CMD_CTRL_IOW_REC_OFF(drive));
2414 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2415 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2416 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2417 CY_CMD_CTRL_IOR_REC_OFF(drive));
2418 }
2419 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
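	/*
	 * There is only one DMA timing value per channel on this chip,
	 * so both drives are given the same (lowest) DMA mode.
	 */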
2420 chp->ch_drive[0].DMA_mode = dma_mode;
2421 chp->ch_drive[1].DMA_mode = dma_mode;
2422
2423 if (dma_mode == -1)
2424 dma_mode = 0;
2425
2426 if (sc->sc_cy_handle != NULL) {
2427 /* Note: `multiple' is implied. */
2428 cy82c693_write(sc->sc_cy_handle,
2429 (sc->sc_cy_compatchan == 0) ?
2430 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2431 }
2432
2433 pciide_print_modes(cp);
2434
2435 if (idedma_ctl != 0) {
2436 /* Add software bits in status register */
2437 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2438 IDEDMA_CTL, idedma_ctl);
2439 }
2440 }
2441
2442 void
2443 sis_chip_map(sc, pa)
2444 struct pciide_softc *sc;
2445 struct pci_attach_args *pa;
2446 {
2447 struct pciide_channel *cp;
2448 int channel;
2449 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2450 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2451 pcireg_t rev = PCI_REVISION(pa->pa_class);
2452 bus_size_t cmdsize, ctlsize;
2453
2454 if (pciide_chipen(sc, pa) == 0)
2455 return;
2456 printf("%s: bus-master DMA support present",
2457 sc->sc_wdcdev.sc_dev.dv_xname);
2458 pciide_mapreg_dma(sc, pa);
2459 printf("\n");
2460 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2461 WDC_CAPABILITY_MODE;
2462 if (sc->sc_dma_ok) {
2463 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2464 sc->sc_wdcdev.irqack = pciide_irqack;
2465 if (rev >= 0xd0)
2466 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2467 }
2468
2469 sc->sc_wdcdev.PIO_cap = 4;
2470 sc->sc_wdcdev.DMA_cap = 2;
2471 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2472 sc->sc_wdcdev.UDMA_cap = 2;
2473 sc->sc_wdcdev.set_modes = sis_setup_channel;
2474
2475 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2476 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2477
2478 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2479 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2480 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2481
2482 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2483 cp = &sc->pciide_channels[channel];
2484 if (pciide_chansetup(sc, channel, interface) == 0)
2485 continue;
2486 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2487 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2488 printf("%s: %s channel ignored (disabled)\n",
2489 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2490 continue;
2491 }
2492 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2493 pciide_pci_intr);
2494 if (cp->hw_ok == 0)
2495 continue;
2496 if (pciide_chan_candisable(cp)) {
2497 if (channel == 0)
2498 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2499 else
2500 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2501 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2502 sis_ctr0);
2503 }
2504 pciide_map_compat_intr(pa, cp, channel, interface);
2505 if (cp->hw_ok == 0)
2506 continue;
2507 sis_setup_channel(&cp->wdc_channel);
2508 }
2509 }
2510
2511 void
2512 sis_setup_channel(chp)
2513 struct channel_softc *chp;
2514 {
2515 struct ata_drive_datas *drvp;
2516 int drive;
2517 u_int32_t sis_tim;
2518 u_int32_t idedma_ctl;
2519 struct pciide_channel *cp = (struct pciide_channel*)chp;
2520 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2521
2522 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2523 "channel %d 0x%x\n", chp->channel,
2524 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2525 DEBUG_PROBE);
2526 sis_tim = 0;
2527 idedma_ctl = 0;
2528 /* setup DMA if needed */
2529 pciide_channel_dma_setup(cp);
2530
2531 for (drive = 0; drive < 2; drive++) {
2532 drvp = &chp->ch_drive[drive];
2533 /* If no drive, skip */
2534 if ((drvp->drive_flags & DRIVE) == 0)
2535 continue;
2536 /* add timing values, setup DMA if needed */
2537 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2538 (drvp->drive_flags & DRIVE_UDMA) == 0)
2539 goto pio;
2540
2541 if (drvp->drive_flags & DRIVE_UDMA) {
2542 /* use Ultra/DMA */
2543 drvp->drive_flags &= ~DRIVE_DMA;
2544 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2545 SIS_TIM_UDMA_TIME_OFF(drive);
2546 sis_tim |= SIS_TIM_UDMA_EN(drive);
2547 } else {
2548 /*
2549 * use Multiword DMA
2550 * Timings will be used for both PIO and DMA,
2551 * so adjust DMA mode if needed
2552 */
2553 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2554 drvp->PIO_mode = drvp->DMA_mode + 2;
2555 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2556 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2557 drvp->PIO_mode - 2 : 0;
2558 if (drvp->DMA_mode == 0)
2559 drvp->PIO_mode = 0;
2560 }
2561 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2562 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2563 SIS_TIM_ACT_OFF(drive);
2564 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2565 SIS_TIM_REC_OFF(drive);
2566 }
2567 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2568 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2569 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2570 if (idedma_ctl != 0) {
2571 /* Add software bits in status register */
2572 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2573 IDEDMA_CTL, idedma_ctl);
2574 }
2575 pciide_print_modes(cp);
2576 }
2577
2578 void
2579 acer_chip_map(sc, pa)
2580 struct pciide_softc *sc;
2581 struct pci_attach_args *pa;
2582 {
2583 struct pciide_channel *cp;
2584 int channel;
2585 pcireg_t cr, interface;
2586 bus_size_t cmdsize, ctlsize;
2587
2588 if (pciide_chipen(sc, pa) == 0)
2589 return;
2590 printf("%s: bus-master DMA support present",
2591 sc->sc_wdcdev.sc_dev.dv_xname);
2592 pciide_mapreg_dma(sc, pa);
2593 printf("\n");
2594 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2595 WDC_CAPABILITY_MODE;
2596 if (sc->sc_dma_ok) {
2597 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2598 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2599 sc->sc_wdcdev.irqack = pciide_irqack;
2600 }
2601
2602 sc->sc_wdcdev.PIO_cap = 4;
2603 sc->sc_wdcdev.DMA_cap = 2;
2604 sc->sc_wdcdev.UDMA_cap = 2;
2605 sc->sc_wdcdev.set_modes = acer_setup_channel;
2606 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2607 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2608
2609 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2610 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2611 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2612
2613 /* Enable "microsoft register bits" R/W. */
2614 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2615 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2616 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2617 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2618 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2619 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2620 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2621 ~ACER_CHANSTATUSREGS_RO);
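	/*
	 * Make the per-channel enable bits visible in the programming
	 * interface byte of the class register; they are tested with
	 * PCIIDE_CHAN_EN() in the loop below.
	 */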
2622 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2623 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2624 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2625 /* Don't use cr, re-read the real register content instead */
2626 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2627 PCI_CLASS_REG));
2628
2629 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2630 cp = &sc->pciide_channels[channel];
2631 if (pciide_chansetup(sc, channel, interface) == 0)
2632 continue;
2633 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2634 printf("%s: %s channel ignored (disabled)\n",
2635 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2636 continue;
2637 }
2638 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2639 acer_pci_intr);
2640 if (cp->hw_ok == 0)
2641 continue;
2642 if (pciide_chan_candisable(cp)) {
2643 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2644 pci_conf_write(sc->sc_pc, sc->sc_tag,
2645 PCI_CLASS_REG, cr);
2646 }
2647 pciide_map_compat_intr(pa, cp, channel, interface);
2648 acer_setup_channel(&cp->wdc_channel);
2649 }
2650 }
2651
2652 void
2653 acer_setup_channel(chp)
2654 struct channel_softc *chp;
2655 {
2656 struct ata_drive_datas *drvp;
2657 int drive;
2658 u_int32_t acer_fifo_udma;
2659 u_int32_t idedma_ctl;
2660 struct pciide_channel *cp = (struct pciide_channel*)chp;
2661 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2662
2663 idedma_ctl = 0;
2664 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2665 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2666 acer_fifo_udma), DEBUG_PROBE);
2667 /* setup DMA if needed */
2668 pciide_channel_dma_setup(cp);
2669
2670 for (drive = 0; drive < 2; drive++) {
2671 drvp = &chp->ch_drive[drive];
2672 /* If no drive, skip */
2673 if ((drvp->drive_flags & DRIVE) == 0)
2674 continue;
2675 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2676 "channel %d drive %d 0x%x\n", chp->channel, drive,
2677 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2678 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2679 /* clear FIFO/DMA mode */
2680 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2681 ACER_UDMA_EN(chp->channel, drive) |
2682 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2683
2684 /* add timing values, setup DMA if needed */
2685 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2686 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2687 acer_fifo_udma |=
2688 ACER_FTH_OPL(chp->channel, drive, 0x1);
2689 goto pio;
2690 }
2691
2692 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2693 if (drvp->drive_flags & DRIVE_UDMA) {
2694 /* use Ultra/DMA */
2695 drvp->drive_flags &= ~DRIVE_DMA;
2696 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2697 acer_fifo_udma |=
2698 ACER_UDMA_TIM(chp->channel, drive,
2699 acer_udma[drvp->UDMA_mode]);
2700 } else {
2701 /*
2702 * use Multiword DMA
2703 * Timings will be used for both PIO and DMA,
2704 * so adjust DMA mode if needed
2705 */
2706 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2707 drvp->PIO_mode = drvp->DMA_mode + 2;
2708 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2709 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2710 drvp->PIO_mode - 2 : 0;
2711 if (drvp->DMA_mode == 0)
2712 drvp->PIO_mode = 0;
2713 }
2714 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2715 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2716 ACER_IDETIM(chp->channel, drive),
2717 acer_pio[drvp->PIO_mode]);
2718 }
2719 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2720 acer_fifo_udma), DEBUG_PROBE);
2721 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2722 if (idedma_ctl != 0) {
2723 /* Add software bits in status register */
2724 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2725 IDEDMA_CTL, idedma_ctl);
2726 }
2727 pciide_print_modes(cp);
2728 }
2729
2730 int
2731 acer_pci_intr(arg)
2732 void *arg;
2733 {
2734 struct pciide_softc *sc = arg;
2735 struct pciide_channel *cp;
2736 struct channel_softc *wdc_cp;
2737 int i, rv, crv;
2738 u_int32_t chids;
2739
2740 rv = 0;
2741 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2742 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2743 cp = &sc->pciide_channels[i];
2744 wdc_cp = &cp->wdc_channel;
2745 		/* If a compat channel, skip. */
2746 if (cp->compat)
2747 continue;
2748 if (chids & ACER_CHIDS_INT(i)) {
2749 crv = wdcintr(wdc_cp);
2750 if (crv == 0)
2751 printf("%s:%d: bogus intr\n",
2752 sc->sc_wdcdev.sc_dev.dv_xname, i);
2753 else
2754 rv = 1;
2755 }
2756 }
2757 return rv;
2758 }
2759
2760 void
2761 hpt_chip_map(sc, pa)
2762 struct pciide_softc *sc;
2763 struct pci_attach_args *pa;
2764 {
2765 struct pciide_channel *cp;
2766 int i, compatchan, revision;
2767 pcireg_t interface;
2768 bus_size_t cmdsize, ctlsize;
2769
2770 if (pciide_chipen(sc, pa) == 0)
2771 return;
2772 revision = PCI_REVISION(pa->pa_class);
2773
2774 /*
2775 	 * When the chip is in native mode it identifies itself as a
2776 	 * 'misc mass storage' device. Fake the interface in this case.
2777 */
2778 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
2779 interface = PCI_INTERFACE(pa->pa_class);
2780 } else {
2781 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2782 PCIIDE_INTERFACE_PCI(0);
2783 if (revision == HPT370_REV)
2784 interface |= PCIIDE_INTERFACE_PCI(1);
2785 }
2786
2787 printf("%s: bus-master DMA support present",
2788 sc->sc_wdcdev.sc_dev.dv_xname);
2789 pciide_mapreg_dma(sc, pa);
2790 printf("\n");
2791 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2792 WDC_CAPABILITY_MODE;
2793 if (sc->sc_dma_ok) {
2794 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2795 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2796 sc->sc_wdcdev.irqack = pciide_irqack;
2797 }
2798 sc->sc_wdcdev.PIO_cap = 4;
2799 sc->sc_wdcdev.DMA_cap = 2;
2800 sc->sc_wdcdev.UDMA_cap = 4;
2801
2802 sc->sc_wdcdev.set_modes = hpt_setup_channel;
2803 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2804 sc->sc_wdcdev.nchannels = (revision == HPT366_REV) ? 1 : 2;
2805 if (revision == HPT366_REV) {
2806 /*
2807 * The 366 has 2 PCI IDE functions, one for primary and one
2808 * for secondary. So we need to call pciide_mapregs_compat()
2809 * with the real channel
2810 */
2811 if (pa->pa_function == 0) {
2812 compatchan = 0;
2813 } else if (pa->pa_function == 1) {
2814 compatchan = 1;
2815 } else {
2816 printf("%s: unexpected PCI function %d\n",
2817 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2818 return;
2819 }
2820 sc->sc_wdcdev.nchannels = 1;
2821 } else {
2822 sc->sc_wdcdev.nchannels = 2;
2823 }
2824 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2825 		cp = &sc->pciide_channels[i];
2826 if (sc->sc_wdcdev.nchannels > 1) {
2827 compatchan = i;
2828 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
2829 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
2830 printf("%s: %s channel ignored (disabled)\n",
2831 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2832 continue;
2833 }
2834 }
2835 if (pciide_chansetup(sc, i, interface) == 0)
2836 continue;
2837 if (interface & PCIIDE_INTERFACE_PCI(i)) {
2838 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
2839 &ctlsize, hpt_pci_intr);
2840 } else {
2841 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
2842 &cmdsize, &ctlsize);
2843 }
2844 if (cp->hw_ok == 0)
2845 return;
2846 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2847 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2848 wdcattach(&cp->wdc_channel);
2849 hpt_setup_channel(&cp->wdc_channel);
2850 }
2851
2852 return;
2853 }
2854
2855
2856 void
2857 hpt_setup_channel(chp)
2858 struct channel_softc *chp;
2859 {
2860 struct ata_drive_datas *drvp;
2861 int drive;
2862 int cable;
2863 u_int32_t before, after;
2864 u_int32_t idedma_ctl;
2865 struct pciide_channel *cp = (struct pciide_channel*)chp;
2866 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2867
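	/*
	 * HPT_CSEL holds the cable detect bits; a set CBLID bit for a
	 * channel (presumably a 40-wire cable) limits Ultra-DMA to mode 2
	 * below.
	 */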
2868 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
2869
2870 /* setup DMA if needed */
2871 pciide_channel_dma_setup(cp);
2872
2873 idedma_ctl = 0;
2874
2875 /* Per drive settings */
2876 for (drive = 0; drive < 2; drive++) {
2877 drvp = &chp->ch_drive[drive];
2878 /* If no drive, skip */
2879 if ((drvp->drive_flags & DRIVE) == 0)
2880 continue;
2881 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
2882 HPT_IDETIM(chp->channel, drive));
2883
2884 /* add timing values, setup DMA if needed */
2885 if (drvp->drive_flags & DRIVE_UDMA) {
2886 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
2887 drvp->UDMA_mode > 2)
2888 drvp->UDMA_mode = 2;
2889 after = (sc->sc_wdcdev.nchannels == 2) ?
2890 hpt370_udma[drvp->UDMA_mode] :
2891 hpt366_udma[drvp->UDMA_mode];
2892 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2893 } else if (drvp->drive_flags & DRIVE_DMA) {
2894 /*
2895 * use Multiword DMA.
2896 * Timings will be used for both PIO and DMA, so adjust
2897 * DMA mode if needed
2898 */
2899 if (drvp->PIO_mode >= 3 &&
2900 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2901 drvp->DMA_mode = drvp->PIO_mode - 2;
2902 }
2903 after = (sc->sc_wdcdev.nchannels == 2) ?
2904 hpt370_dma[drvp->DMA_mode] :
2905 hpt366_dma[drvp->DMA_mode];
2906 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2907 } else {
2908 /* PIO only */
2909 after = (sc->sc_wdcdev.nchannels == 2) ?
2910 hpt370_pio[drvp->PIO_mode] :
2911 hpt366_pio[drvp->PIO_mode];
2912 }
2913 pci_conf_write(sc->sc_pc, sc->sc_tag,
2914 HPT_IDETIM(chp->channel, drive), after);
2915 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
2916 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
2917 after, before), DEBUG_PROBE);
2918 }
2919 if (idedma_ctl != 0) {
2920 /* Add software bits in status register */
2921 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2922 IDEDMA_CTL, idedma_ctl);
2923 }
2924 pciide_print_modes(cp);
2925 }
2926
2927 int
2928 hpt_pci_intr(arg)
2929 void *arg;
2930 {
2931 struct pciide_softc *sc = arg;
2932 struct pciide_channel *cp;
2933 struct channel_softc *wdc_cp;
2934 int rv = 0;
2935 int dmastat, i, crv;
2936
2937 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2938 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2939 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
2940 if((dmastat & IDEDMA_CTL_INTR) == 0)
2941 continue;
2942 cp = &sc->pciide_channels[i];
2943 wdc_cp = &cp->wdc_channel;
2944 crv = wdcintr(wdc_cp);
2945 if (crv == 0) {
2946 printf("%s:%d: bogus intr\n",
2947 sc->sc_wdcdev.sc_dev.dv_xname, i);
2948 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2949 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
2950 } else
2951 rv = 1;
2952 }
2953 return rv;
2954 }
2955
2956
2957 /* A macro to test product */
2958 #define PDC_IS_262(sc) (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66)
2959
2960 void
2961 pdc202xx_chip_map(sc, pa)
2962 struct pciide_softc *sc;
2963 struct pci_attach_args *pa;
2964 {
2965 struct pciide_channel *cp;
2966 int channel;
2967 pcireg_t interface, st, mode;
2968 bus_size_t cmdsize, ctlsize;
2969
2970 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2971 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
2972 DEBUG_PROBE);
2973 if (pciide_chipen(sc, pa) == 0)
2974 return;
2975
2976 /* turn off RAID mode */
2977 st &= ~PDC2xx_STATE_IDERAID;
2978
2979 /*
2980 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
2981 	 * RAID mode. We have to fake the interface.
2982 */
2983 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
2984 if (st & PDC2xx_STATE_NATIVE)
2985 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2986
2987 printf("%s: bus-master DMA support present",
2988 sc->sc_wdcdev.sc_dev.dv_xname);
2989 pciide_mapreg_dma(sc, pa);
2990 printf("\n");
2991 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2992 WDC_CAPABILITY_MODE;
2993 if (sc->sc_dma_ok) {
2994 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2995 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2996 sc->sc_wdcdev.irqack = pciide_irqack;
2997 }
2998 sc->sc_wdcdev.PIO_cap = 4;
2999 sc->sc_wdcdev.DMA_cap = 2;
3000 if (PDC_IS_262(sc))
3001 sc->sc_wdcdev.UDMA_cap = 4;
3002 else
3003 sc->sc_wdcdev.UDMA_cap = 2;
3004 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3005 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3006 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3007
3008 /* setup failsafe defaults */
3009 mode = 0;
3010 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3011 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3012 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3013 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3014 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3015 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3016 "initial timings 0x%x, now 0x%x\n", channel,
3017 pci_conf_read(sc->sc_pc, sc->sc_tag,
3018 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3019 DEBUG_PROBE);
3020 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3021 mode | PDC2xx_TIM_IORDYp);
3022 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3023 "initial timings 0x%x, now 0x%x\n", channel,
3024 pci_conf_read(sc->sc_pc, sc->sc_tag,
3025 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3026 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3027 mode);
3028 }
3029
3030 mode = PDC2xx_SCR_DMA;
3031 if (PDC_IS_262(sc)) {
3032 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3033 } else {
3034 /* the BIOS set it up this way */
3035 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3036 }
3037 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3038 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3039 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3040 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3041 DEBUG_PROBE);
3042 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3043
3044 /* controller initial state register is OK even without BIOS */
3045 /* Set DMA mode to IDE DMA compatibility */
3046 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3047 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3048 DEBUG_PROBE);
3049 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3050 mode | 0x1);
3051 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3052 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3053 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3054 mode | 0x1);
3055
3056 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3057 cp = &sc->pciide_channels[channel];
3058 if (pciide_chansetup(sc, channel, interface) == 0)
3059 continue;
3060 if ((st & (PDC_IS_262(sc) ?
3061 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3062 printf("%s: %s channel ignored (disabled)\n",
3063 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3064 continue;
3065 }
3066 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3067 pdc202xx_pci_intr);
3068 if (cp->hw_ok == 0)
3069 continue;
3070 if (pciide_chan_candisable(cp))
3071 st &= ~(PDC_IS_262(sc) ?
3072 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3073 pciide_map_compat_intr(pa, cp, channel, interface);
3074 pdc202xx_setup_channel(&cp->wdc_channel);
3075 }
3076 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3077 DEBUG_PROBE);
3078 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3079 return;
3080 }
3081
3082 void
3083 pdc202xx_setup_channel(chp)
3084 struct channel_softc *chp;
3085 {
3086 struct ata_drive_datas *drvp;
3087 int drive;
3088 pcireg_t mode, st;
3089 u_int32_t idedma_ctl, scr, atapi;
3090 struct pciide_channel *cp = (struct pciide_channel*)chp;
3091 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3092 int channel = chp->channel;
3093
3094 /* setup DMA if needed */
3095 pciide_channel_dma_setup(cp);
3096
3097 idedma_ctl = 0;
3098
3099 /* Per channel settings */
3100 if (PDC_IS_262(sc)) {
3101 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3102 PDC262_U66);
3103 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3104 		/* Trim UDMA mode */
3105 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3106 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3107 chp->ch_drive[0].UDMA_mode <= 2) ||
3108 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3109 chp->ch_drive[1].UDMA_mode <= 2)) {
3110 if (chp->ch_drive[0].UDMA_mode > 2)
3111 chp->ch_drive[0].UDMA_mode = 2;
3112 if (chp->ch_drive[1].UDMA_mode > 2)
3113 chp->ch_drive[1].UDMA_mode = 2;
3114 }
3115 /* Set U66 if needed */
3116 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3117 chp->ch_drive[0].UDMA_mode > 2) ||
3118 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3119 chp->ch_drive[1].UDMA_mode > 2))
3120 scr |= PDC262_U66_EN(channel);
3121 else
3122 scr &= ~PDC262_U66_EN(channel);
3123 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3124 PDC262_U66, scr);
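		/*
		 * With at least one ATAPI device on the channel, enable
		 * ATAPI Ultra-DMA unless one drive runs Ultra-DMA while the
		 * other runs plain multiword DMA.
		 */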
3125 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3126 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3127 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3128 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3129 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3130 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3131 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3132 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3133 atapi = 0;
3134 else
3135 atapi = PDC262_ATAPI_UDMA;
3136 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3137 PDC262_ATAPI(channel), atapi);
3138 }
3139 }
3140 for (drive = 0; drive < 2; drive++) {
3141 drvp = &chp->ch_drive[drive];
3142 /* If no drive, skip */
3143 if ((drvp->drive_flags & DRIVE) == 0)
3144 continue;
3145 mode = 0;
3146 if (drvp->drive_flags & DRIVE_UDMA) {
3147 mode = PDC2xx_TIM_SET_MB(mode,
3148 pdc2xx_udma_mb[drvp->UDMA_mode]);
3149 mode = PDC2xx_TIM_SET_MC(mode,
3150 pdc2xx_udma_mc[drvp->UDMA_mode]);
3151 drvp->drive_flags &= ~DRIVE_DMA;
3152 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3153 } else if (drvp->drive_flags & DRIVE_DMA) {
3154 mode = PDC2xx_TIM_SET_MB(mode,
3155 pdc2xx_dma_mb[drvp->DMA_mode]);
3156 mode = PDC2xx_TIM_SET_MC(mode,
3157 pdc2xx_dma_mc[drvp->DMA_mode]);
3158 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3159 } else {
3160 mode = PDC2xx_TIM_SET_MB(mode,
3161 pdc2xx_dma_mb[0]);
3162 mode = PDC2xx_TIM_SET_MC(mode,
3163 pdc2xx_dma_mc[0]);
3164 }
3165 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3166 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3167 if (drvp->drive_flags & DRIVE_ATA)
3168 mode |= PDC2xx_TIM_PRE;
3169 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3170 if (drvp->PIO_mode >= 3) {
3171 mode |= PDC2xx_TIM_IORDY;
3172 if (drive == 0)
3173 mode |= PDC2xx_TIM_IORDYp;
3174 }
3175 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3176 "timings 0x%x\n",
3177 sc->sc_wdcdev.sc_dev.dv_xname,
3178 chp->channel, drive, mode), DEBUG_PROBE);
3179 pci_conf_write(sc->sc_pc, sc->sc_tag,
3180 PDC2xx_TIM(chp->channel, drive), mode);
3181 }
3182 if (idedma_ctl != 0) {
3183 /* Add software bits in status register */
3184 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3185 IDEDMA_CTL, idedma_ctl);
3186 }
3187 pciide_print_modes(cp);
3188 }
3189
3190 int
3191 pdc202xx_pci_intr(arg)
3192 void *arg;
3193 {
3194 struct pciide_softc *sc = arg;
3195 struct pciide_channel *cp;
3196 struct channel_softc *wdc_cp;
3197 int i, rv, crv;
3198 u_int32_t scr;
3199
3200 rv = 0;
3201 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3202 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3203 cp = &sc->pciide_channels[i];
3204 wdc_cp = &cp->wdc_channel;
3205 		/* If a compat channel, skip. */
3206 if (cp->compat)
3207 continue;
3208 if (scr & PDC2xx_SCR_INT(i)) {
3209 crv = wdcintr(wdc_cp);
3210 if (crv == 0)
3211 printf("%s:%d: bogus intr\n",
3212 sc->sc_wdcdev.sc_dev.dv_xname, i);
3213 else
3214 rv = 1;
3215 }
3216 }
3217 return rv;
3218 }
3219
3220 void
3221 opti_chip_map(sc, pa)
3222 struct pciide_softc *sc;
3223 struct pci_attach_args *pa;
3224 {
3225 struct pciide_channel *cp;
3226 bus_size_t cmdsize, ctlsize;
3227 pcireg_t interface;
3228 u_int8_t init_ctrl;
3229 int channel;
3230
3231 if (pciide_chipen(sc, pa) == 0)
3232 return;
3233 printf("%s: bus-master DMA support present",
3234 sc->sc_wdcdev.sc_dev.dv_xname);
3235 pciide_mapreg_dma(sc, pa);
3236 printf("\n");
3237
3238 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3239 WDC_CAPABILITY_MODE;
3240 sc->sc_wdcdev.PIO_cap = 4;
3241 if (sc->sc_dma_ok) {
3242 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3243 sc->sc_wdcdev.irqack = pciide_irqack;
3244 sc->sc_wdcdev.DMA_cap = 2;
3245 }
3246 sc->sc_wdcdev.set_modes = opti_setup_channel;
3247
3248 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3249 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3250
3251 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3252 OPTI_REG_INIT_CONTROL);
3253
3254 interface = PCI_INTERFACE(pa->pa_class);
3255
3256 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3257 cp = &sc->pciide_channels[channel];
3258 if (pciide_chansetup(sc, channel, interface) == 0)
3259 continue;
3260 if (channel == 1 &&
3261 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3262 printf("%s: %s channel ignored (disabled)\n",
3263 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3264 continue;
3265 }
3266 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3267 pciide_pci_intr);
3268 if (cp->hw_ok == 0)
3269 continue;
3270 pciide_map_compat_intr(pa, cp, channel, interface);
3271 if (cp->hw_ok == 0)
3272 continue;
3273 opti_setup_channel(&cp->wdc_channel);
3274 }
3275 }
3276
3277 void
3278 opti_setup_channel(chp)
3279 struct channel_softc *chp;
3280 {
3281 struct ata_drive_datas *drvp;
3282 struct pciide_channel *cp = (struct pciide_channel*)chp;
3283 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3284 int drive, spd;
3285 int mode[2];
3286 u_int8_t rv, mr;
3287
3288 /*
3289 * The `Delay' and `Address Setup Time' fields of the
3290 * Miscellaneous Register are always zero initially.
3291 */
3292 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3293 mr &= ~(OPTI_MISC_DELAY_MASK |
3294 OPTI_MISC_ADDR_SETUP_MASK |
3295 OPTI_MISC_INDEX_MASK);
3296
3297 /* Prime the control register before setting timing values */
3298 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3299
3300 	/* Determine the clock rate of the PCI bus the chip is attached to */
3301 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3302 spd &= OPTI_STRAP_PCI_SPEED_MASK;
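	/*
	 * spd selects the row of the opti_tim_* tables used below,
	 * presumably one row per supported PCI clock rate.
	 */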
3303
3304 /* setup DMA if needed */
3305 pciide_channel_dma_setup(cp);
3306
3307 for (drive = 0; drive < 2; drive++) {
3308 drvp = &chp->ch_drive[drive];
3309 /* If no drive, skip */
3310 if ((drvp->drive_flags & DRIVE) == 0) {
3311 mode[drive] = -1;
3312 continue;
3313 }
3314
3315 if ((drvp->drive_flags & DRIVE_DMA)) {
3316 /*
3317 * Timings will be used for both PIO and DMA,
3318 * so adjust DMA mode if needed
3319 */
3320 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3321 drvp->PIO_mode = drvp->DMA_mode + 2;
3322 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3323 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3324 drvp->PIO_mode - 2 : 0;
3325 if (drvp->DMA_mode == 0)
3326 drvp->PIO_mode = 0;
3327
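			/*
			 * DMA timing entries follow the five PIO entries
			 * in the opti_tim_* tables.
			 */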
3328 mode[drive] = drvp->DMA_mode + 5;
3329 } else
3330 mode[drive] = drvp->PIO_mode;
3331
3332 if (drive && mode[0] >= 0 &&
3333 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3334 /*
3335 * Can't have two drives using different values
3336 * for `Address Setup Time'.
3337 * Slow down the faster drive to compensate.
3338 */
3339 int d = (opti_tim_as[spd][mode[0]] >
3340 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3341
3342 mode[d] = mode[1-d];
3343 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3344 chp->ch_drive[d].DMA_mode = 0;
3345 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3346 }
3347 }
3348
3349 for (drive = 0; drive < 2; drive++) {
3350 int m;
3351 if ((m = mode[drive]) < 0)
3352 continue;
3353
3354 /* Set the Address Setup Time and select appropriate index */
3355 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3356 rv |= OPTI_MISC_INDEX(drive);
3357 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3358
3359 /* Set the pulse width and recovery timing parameters */
3360 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3361 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3362 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3363 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3364
3365 /* Set the Enhanced Mode register appropriately */
3366 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3367 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3368 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3369 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3370 }
3371
3372 /* Finally, enable the timings */
3373 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3374
3375 pciide_print_modes(cp);
3376 }
3377