1 /* $NetBSD: pciide.c,v 1.68.2.2 2000/06/27 14:57:05 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/cy82c693var.h>
121
122 /* inlines for reading/writing 8-bit PCI registers */
123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
124 int));
125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
126 int, u_int8_t));
127
128 static __inline u_int8_t
129 pciide_pci_read(pc, pa, reg)
130 pci_chipset_tag_t pc;
131 pcitag_t pa;
132 int reg;
133 {
134
135 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
136 ((reg & 0x03) * 8) & 0xff);
137 }
138
139 static __inline void
140 pciide_pci_write(pc, pa, reg, val)
141 pci_chipset_tag_t pc;
142 pcitag_t pa;
143 int reg;
144 u_int8_t val;
145 {
146 pcireg_t pcival;
147
148 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
149 pcival &= ~(0xff << ((reg & 0x03) * 8));
150 pcival |= (val << ((reg & 0x03) * 8));
151 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
152 }
153
154 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
155
156 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157 void piix_setup_channel __P((struct channel_softc*));
158 void piix3_4_setup_channel __P((struct channel_softc*));
159 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
160 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
161 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162
163 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164 void amd756_setup_channel __P((struct channel_softc*));
165
166 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167 void apollo_setup_channel __P((struct channel_softc*));
168
169 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
170 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_9_setup_channel __P((struct channel_softc*));
172 void cmd_channel_map __P((struct pci_attach_args *,
173 struct pciide_softc *, int));
174 int cmd_pci_intr __P((void *));
175
176 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
177 void cy693_setup_channel __P((struct channel_softc*));
178
179 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
180 void sis_setup_channel __P((struct channel_softc*));
181
182 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void acer_setup_channel __P((struct channel_softc*));
184 int acer_pci_intr __P((void *));
185
186 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
187 void pdc202xx_setup_channel __P((struct channel_softc*));
188 int pdc202xx_pci_intr __P((void *));
189
190 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
191 void opti_setup_channel __P((struct channel_softc*));
192
193 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void hpt_setup_channel __P((struct channel_softc*));
195 int hpt_pci_intr __P((void *));
196
197 void pciide_channel_dma_setup __P((struct pciide_channel *));
198 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
199 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
200 void pciide_dma_start __P((void*, int, int));
201 int pciide_dma_finish __P((void*, int, int, int));
202 void pciide_irqack __P((struct channel_softc *));
203 void pciide_print_modes __P((struct pciide_channel *));
204
205 struct pciide_product_desc {
206 u_int32_t ide_product;
207 int ide_flags;
208 const char *ide_name;
209 /* map and setup chip, probe drives */
210 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
211 };
212
213 /* Flags for ide_flags */
214 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
215
216 /* Default product description for devices not specifically known to this driver */
217 const struct pciide_product_desc default_product_desc = {
218 0,
219 0,
220 "Generic PCI IDE controller",
221 default_chip_map,
222 };
223
224 const struct pciide_product_desc pciide_intel_products[] = {
225 { PCI_PRODUCT_INTEL_82092AA,
226 0,
227 "Intel 82092AA IDE controller",
228 default_chip_map,
229 },
230 { PCI_PRODUCT_INTEL_82371FB_IDE,
231 0,
232 "Intel 82371FB IDE controller (PIIX)",
233 piix_chip_map,
234 },
235 { PCI_PRODUCT_INTEL_82371SB_IDE,
236 0,
237 "Intel 82371SB IDE Interface (PIIX3)",
238 piix_chip_map,
239 },
240 { PCI_PRODUCT_INTEL_82371AB_IDE,
241 0,
242 "Intel 82371AB IDE controller (PIIX4)",
243 piix_chip_map,
244 },
245 { PCI_PRODUCT_INTEL_82801AA_IDE,
246 0,
247 "Intel 82801AA IDE Controller (ICH)",
248 piix_chip_map,
249 },
250 { PCI_PRODUCT_INTEL_82801AB_IDE,
251 0,
252 "Intel 82801AB IDE Controller (ICH0)",
253 piix_chip_map,
254 },
255 { 0,
256 0,
257 NULL,
258 }
259 };
260
261 const struct pciide_product_desc pciide_amd_products[] = {
262 { PCI_PRODUCT_AMD_PBC756_IDE,
263 0,
264 "Advanced Micro Devices AMD756 IDE Controller",
265 amd756_chip_map
266 },
267 { 0,
268 0,
269 NULL,
270 }
271 };
272
273 const struct pciide_product_desc pciide_cmd_products[] = {
274 { PCI_PRODUCT_CMDTECH_640,
275 0,
276 "CMD Technology PCI0640",
277 cmd_chip_map
278 },
279 { PCI_PRODUCT_CMDTECH_643,
280 0,
281 "CMD Technology PCI0643",
282 cmd0643_9_chip_map,
283 },
284 { PCI_PRODUCT_CMDTECH_646,
285 0,
286 "CMD Technology PCI0646",
287 cmd0643_9_chip_map,
288 },
289 { PCI_PRODUCT_CMDTECH_648,
290 IDE_PCI_CLASS_OVERRIDE,
291 "CMD Technology PCI0648",
292 cmd0643_9_chip_map,
293 },
294 { PCI_PRODUCT_CMDTECH_649,
295 IDE_PCI_CLASS_OVERRIDE,
296 "CMD Technology PCI0649",
297 cmd0643_9_chip_map,
298 },
299 { 0,
300 0,
301 NULL,
302 }
303 };
304
305 const struct pciide_product_desc pciide_via_products[] = {
306 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
307 0,
308 "VIA Tech VT82C586 IDE Controller",
309 apollo_chip_map,
310 },
311 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
312 0,
313 "VIA Tech VT82C586A IDE Controller",
314 apollo_chip_map,
315 },
316 { 0,
317 0,
318 NULL,
319 }
320 };
321
322 const struct pciide_product_desc pciide_cypress_products[] = {
323 { PCI_PRODUCT_CONTAQ_82C693,
324 0,
325 "Cypress 82C693 IDE Controller",
326 cy693_chip_map,
327 },
328 { 0,
329 0,
330 NULL,
331 }
332 };
333
334 const struct pciide_product_desc pciide_sis_products[] = {
335 { PCI_PRODUCT_SIS_5597_IDE,
336 0,
337 "Silicon Integrated System 5597/5598 IDE controller",
338 sis_chip_map,
339 },
340 { 0,
341 0,
342 NULL,
343 }
344 };
345
346 const struct pciide_product_desc pciide_acer_products[] = {
347 { PCI_PRODUCT_ALI_M5229,
348 0,
349 "Acer Labs M5229 UDMA IDE Controller",
350 acer_chip_map,
351 },
352 { 0,
353 0,
354 NULL,
355 }
356 };
357
358 const struct pciide_product_desc pciide_promise_products[] = {
359 { PCI_PRODUCT_PROMISE_ULTRA33,
360 IDE_PCI_CLASS_OVERRIDE,
361 "Promise Ultra33/ATA Bus Master IDE Accelerator",
362 pdc202xx_chip_map,
363 },
364 { PCI_PRODUCT_PROMISE_ULTRA66,
365 IDE_PCI_CLASS_OVERRIDE,
366 "Promise Ultra66/ATA Bus Master IDE Accelerator",
367 pdc202xx_chip_map,
368 },
369 { 0,
370 0,
371 NULL,
372 }
373 };
374
375 const struct pciide_product_desc pciide_opti_products[] = {
376 { PCI_PRODUCT_OPTI_82C621,
377 0,
378 "OPTi 82c621 PCI IDE controller",
379 opti_chip_map,
380 },
381 { PCI_PRODUCT_OPTI_82C568,
382 0,
383 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
384 opti_chip_map,
385 },
386 { PCI_PRODUCT_OPTI_82D568,
387 0,
388 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
389 opti_chip_map,
390 },
391 { 0,
392 0,
393 NULL,
394 }
395 };
396
397 const struct pciide_product_desc pciide_triones_products[] = {
398 { PCI_PRODUCT_TRIONES_HPT366,
399 IDE_PCI_CLASS_OVERRIDE,
400 "Triones/Highpoint HPT366/370 IDE Controller",
401 hpt_chip_map,
402 },
403 { 0,
404 0,
405 NULL,
406 }
407 };
408
409 struct pciide_vendor_desc {
410 u_int32_t ide_vendor;
411 const struct pciide_product_desc *ide_products;
412 };
413
414 const struct pciide_vendor_desc pciide_vendors[] = {
415 { PCI_VENDOR_INTEL, pciide_intel_products },
416 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
417 { PCI_VENDOR_VIATECH, pciide_via_products },
418 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
419 { PCI_VENDOR_SIS, pciide_sis_products },
420 { PCI_VENDOR_ALI, pciide_acer_products },
421 { PCI_VENDOR_PROMISE, pciide_promise_products },
422 { PCI_VENDOR_AMD, pciide_amd_products },
423 { PCI_VENDOR_OPTI, pciide_opti_products },
424 { PCI_VENDOR_TRIONES, pciide_triones_products },
425 { 0, NULL }
426 };
427
428 /* options passed via the 'flags' config keyword */
429 #define PCIIDE_OPTIONS_DMA 0x01
430
431 int pciide_match __P((struct device *, struct cfdata *, void *));
432 void pciide_attach __P((struct device *, struct device *, void *));
433
434 struct cfattach pciide_ca = {
435 sizeof(struct pciide_softc), pciide_match, pciide_attach
436 };
437 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
438 int pciide_mapregs_compat __P(( struct pci_attach_args *,
439 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
440 int pciide_mapregs_native __P((struct pci_attach_args *,
441 struct pciide_channel *, bus_size_t *, bus_size_t *,
442 int (*pci_intr) __P((void *))));
443 void pciide_mapreg_dma __P((struct pciide_softc *,
444 struct pci_attach_args *));
445 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
446 void pciide_mapchan __P((struct pci_attach_args *,
447 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
448 int (*pci_intr) __P((void *))));
449 int pciide_chan_candisable __P((struct pciide_channel *));
450 void pciide_map_compat_intr __P(( struct pci_attach_args *,
451 struct pciide_channel *, int, int));
452 int pciide_print __P((void *, const char *pnp));
453 int pciide_compat_intr __P((void *));
454 int pciide_pci_intr __P((void *));
455 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
456
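/*
 * Look up a PCI vendor/product ID in the tables above. Returns the
 * matching product descriptor, or NULL if the device is not known.
 */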
457 const struct pciide_product_desc *
458 pciide_lookup_product(id)
459 u_int32_t id;
460 {
461 const struct pciide_product_desc *pp;
462 const struct pciide_vendor_desc *vp;
463
464 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
465 if (PCI_VENDOR(id) == vp->ide_vendor)
466 break;
467
468 if ((pp = vp->ide_products) == NULL)
469 return NULL;
470
471 for (; pp->ide_name != NULL; pp++)
472 if (PCI_PRODUCT(id) == pp->ide_product)
473 break;
474
475 if (pp->ide_name == NULL)
476 return NULL;
477 return pp;
478 }
479
480 int
481 pciide_match(parent, match, aux)
482 struct device *parent;
483 struct cfdata *match;
484 void *aux;
485 {
486 struct pci_attach_args *pa = aux;
487 const struct pciide_product_desc *pp;
488
489 /*
490 * Check the ID register to see that it's a PCI IDE controller.
491 * If it is, we assume that we can deal with it; it _should_
492 * work in a standardized way...
493 */
494 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
495 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
496 return (1);
497 }
498
499 /*
500 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
501 * controllers. Let's see if we can deal with them anyway.
502 */
503 pp = pciide_lookup_product(pa->pa_id);
504 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
505 return (1);
506 }
507
508 return (0);
509 }
510
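/*
 * Attach: identify the controller, run its chip_map routine to set up
 * and attach the channels, then enable PCI bus mastering if DMA was
 * successfully set up.
 */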
511 void
512 pciide_attach(parent, self, aux)
513 struct device *parent, *self;
514 void *aux;
515 {
516 struct pci_attach_args *pa = aux;
517 pci_chipset_tag_t pc = pa->pa_pc;
518 pcitag_t tag = pa->pa_tag;
519 struct pciide_softc *sc = (struct pciide_softc *)self;
520 pcireg_t csr;
521 char devinfo[256];
522 const char *displaydev;
523
524 sc->sc_pp = pciide_lookup_product(pa->pa_id);
525 if (sc->sc_pp == NULL) {
526 sc->sc_pp = &default_product_desc;
527 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
528 displaydev = devinfo;
529 } else
530 displaydev = sc->sc_pp->ide_name;
531
532 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
533
534 sc->sc_pc = pa->pa_pc;
535 sc->sc_tag = pa->pa_tag;
536 #ifdef WDCDEBUG
537 if (wdcdebug_pciide_mask & DEBUG_PROBE)
538 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
539 #endif
540 sc->sc_pp->chip_map(sc, pa);
541
542 if (sc->sc_dma_ok) {
543 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
544 csr |= PCI_COMMAND_MASTER_ENABLE;
545 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
546 }
547 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
548 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
549 }
550
551 /* tell whether the chip is enabled or not */
552 int
553 pciide_chipen(sc, pa)
554 struct pciide_softc *sc;
555 struct pci_attach_args *pa;
556 {
557 pcireg_t csr;
558 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
559 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
560 PCI_COMMAND_STATUS_REG);
561 printf("%s: device disabled (at %s)\n",
562 sc->sc_wdcdev.sc_dev.dv_xname,
563 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
564 "device" : "bridge");
565 return 0;
566 }
567 return 1;
568 }
569
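/*
 * Map the command and control registers of a compatibility-mode channel
 * at the fixed legacy I/O addresses for that channel. Returns 1 on
 * success, 0 on failure.
 */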
570 int
571 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
572 struct pci_attach_args *pa;
573 struct pciide_channel *cp;
574 int compatchan;
575 bus_size_t *cmdsizep, *ctlsizep;
576 {
577 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
578 struct channel_softc *wdc_cp = &cp->wdc_channel;
579
580 cp->compat = 1;
581 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
582 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
583
584 wdc_cp->cmd_iot = pa->pa_iot;
585 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
586 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
587 printf("%s: couldn't map %s channel cmd regs\n",
588 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
589 return (0);
590 }
591
592 wdc_cp->ctl_iot = pa->pa_iot;
593 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
594 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
595 printf("%s: couldn't map %s channel ctl regs\n",
596 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
597 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
598 PCIIDE_COMPAT_CMD_SIZE);
599 return (0);
600 }
601
602 return (1);
603 }
604
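/*
 * Map the command and control registers of a native-PCI channel from its
 * BARs, establishing the controller's shared PCI interrupt the first time
 * through. Returns 1 on success, 0 on failure.
 */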
605 int
606 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
607 struct pci_attach_args * pa;
608 struct pciide_channel *cp;
609 bus_size_t *cmdsizep, *ctlsizep;
610 int (*pci_intr) __P((void *));
611 {
612 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
613 struct channel_softc *wdc_cp = &cp->wdc_channel;
614 const char *intrstr;
615 pci_intr_handle_t intrhandle;
616
617 cp->compat = 0;
618
619 if (sc->sc_pci_ih == NULL) {
620 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
621 pa->pa_intrline, &intrhandle) != 0) {
622 printf("%s: couldn't map native-PCI interrupt\n",
623 sc->sc_wdcdev.sc_dev.dv_xname);
624 return 0;
625 }
626 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
627 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
628 intrhandle, IPL_BIO, pci_intr, sc);
629 if (sc->sc_pci_ih != NULL) {
630 printf("%s: using %s for native-PCI interrupt\n",
631 sc->sc_wdcdev.sc_dev.dv_xname,
632 intrstr ? intrstr : "unknown interrupt");
633 } else {
634 printf("%s: couldn't establish native-PCI interrupt",
635 sc->sc_wdcdev.sc_dev.dv_xname);
636 if (intrstr != NULL)
637 printf(" at %s", intrstr);
638 printf("\n");
639 return 0;
640 }
641 }
642 cp->ih = sc->sc_pci_ih;
643 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
644 PCI_MAPREG_TYPE_IO, 0,
645 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
646 printf("%s: couldn't map %s channel cmd regs\n",
647 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
648 return 0;
649 }
650
651 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
652 PCI_MAPREG_TYPE_IO, 0,
653 &wdc_cp->ctl_iot, &wdc_cp->ctl_ioh, NULL, ctlsizep) != 0) {
654 printf("%s: couldn't map %s channel ctl regs\n",
655 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
656 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
657 return 0;
658 }
659 return (1);
660 }
661
662 void
663 pciide_mapreg_dma(sc, pa)
664 struct pciide_softc *sc;
665 struct pci_attach_args *pa;
666 {
667 pcireg_t maptype;
668
669 /*
670 * Map DMA registers
671 *
672 * Note that sc_dma_ok is the right variable to test to see if
673 * DMA can be done. If the interface doesn't support DMA,
674 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
675 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
676 * non-zero if the interface supports DMA and the registers
677 * could be mapped.
678 *
679 * XXX Note that despite the fact that the Bus Master IDE specs
680 * XXX say that "The bus master IDE function uses 16 bytes of IO
681 * XXX space," some controllers (at least the United
682 * XXX Microelectronics UM8886BF) place it in memory space.
683 */
684 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
685 PCIIDE_REG_BUS_MASTER_DMA);
686
687 switch (maptype) {
688 case PCI_MAPREG_TYPE_IO:
689 case PCI_MAPREG_MEM_TYPE_32BIT:
690 sc->sc_dma_ok = (pci_mapreg_map(pa,
691 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
692 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
693 sc->sc_dmat = pa->pa_dmat;
694 if (sc->sc_dma_ok == 0) {
695 printf(", but unused (couldn't map registers)");
696 } else {
697 sc->sc_wdcdev.dma_arg = sc;
698 sc->sc_wdcdev.dma_init = pciide_dma_init;
699 sc->sc_wdcdev.dma_start = pciide_dma_start;
700 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
701 }
702 break;
703
704 default:
705 sc->sc_dma_ok = 0;
706 printf(", but unsupported register maptype (0x%x)", maptype);
707 }
708 }
709
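/* Interrupt handler for a compatibility-mode channel: hand off to wdcintr(). */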
710 int
711 pciide_compat_intr(arg)
712 void *arg;
713 {
714 struct pciide_channel *cp = arg;
715
716 #ifdef DIAGNOSTIC
717 /* should only be called for a compat channel */
718 if (cp->compat == 0)
719 panic("pciide compat intr called for non-compat chan %p\n", cp);
720 #endif
721 return (wdcintr(&cp->wdc_channel));
722 }
723
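/*
 * Shared interrupt handler for native-PCI channels: poll each non-compat
 * channel that is waiting for an interrupt and let wdcintr() decide
 * whether to claim it.
 */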
724 int
725 pciide_pci_intr(arg)
726 void *arg;
727 {
728 struct pciide_softc *sc = arg;
729 struct pciide_channel *cp;
730 struct channel_softc *wdc_cp;
731 int i, rv, crv;
732
733 rv = 0;
734 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
735 cp = &sc->pciide_channels[i];
736 wdc_cp = &cp->wdc_channel;
737
738 /* If a compat channel, skip. */
739 if (cp->compat)
740 continue;
741 /* if this channel isn't waiting for an intr, skip */
742 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
743 continue;
744
745 crv = wdcintr(wdc_cp);
746 if (crv == 0)
747 ; /* leave rv alone */
748 else if (crv == 1)
749 rv = 1; /* claim the intr */
750 else if (rv == 0) /* crv should be -1 in this case */
751 rv = crv; /* if we've done no better, take it */
752 }
753 return (rv);
754 }
755
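/*
 * Per-channel DMA setup: allocate the DMA tables for each drive that
 * negotiated (U)DMA, clearing the DMA flags if the allocation fails or
 * if bus-master DMA is not available.
 */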
756 void
757 pciide_channel_dma_setup(cp)
758 struct pciide_channel *cp;
759 {
760 int drive;
761 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
762 struct ata_drive_datas *drvp;
763
764 for (drive = 0; drive < 2; drive++) {
765 drvp = &cp->wdc_channel.ch_drive[drive];
766 /* If no drive, skip */
767 if ((drvp->drive_flags & DRIVE) == 0)
768 continue;
769 /* setup DMA if needed */
770 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
771 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
772 sc->sc_dma_ok == 0) {
773 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
774 continue;
775 }
776 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
777 != 0) {
778 /* Abort DMA setup */
779 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
780 continue;
781 }
782 }
783 }
784
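/*
 * Allocate and map the physical region descriptor table for one drive,
 * and create the DMA maps used for the table itself and for data
 * transfers.
 */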
785 int
786 pciide_dma_table_setup(sc, channel, drive)
787 struct pciide_softc *sc;
788 int channel, drive;
789 {
790 bus_dma_segment_t seg;
791 int error, rseg;
792 const bus_size_t dma_table_size =
793 sizeof(struct idedma_table) * NIDEDMA_TABLES;
794 struct pciide_dma_maps *dma_maps =
795 &sc->pciide_channels[channel].dma_maps[drive];
796
797 /* If table was already allocated, just return */
798 if (dma_maps->dma_table)
799 return 0;
800
801 /* Allocate memory for the DMA tables and map it */
802 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
803 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
804 BUS_DMA_NOWAIT)) != 0) {
805 printf("%s:%d: unable to allocate table DMA for "
806 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
807 channel, drive, error);
808 return error;
809 }
810 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
811 dma_table_size,
812 (caddr_t *)&dma_maps->dma_table,
813 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
814 printf("%s:%d: unable to map table DMA for"
815 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
816 channel, drive, error);
817 return error;
818 }
819 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
820 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
821 seg.ds_addr), DEBUG_PROBE);
822
823 /* Create and load table DMA map for this disk */
824 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
825 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
826 &dma_maps->dmamap_table)) != 0) {
827 printf("%s:%d: unable to create table DMA map for "
828 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
829 channel, drive, error);
830 return error;
831 }
832 if ((error = bus_dmamap_load(sc->sc_dmat,
833 dma_maps->dmamap_table,
834 dma_maps->dma_table,
835 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
836 printf("%s:%d: unable to load table DMA map for "
837 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
838 channel, drive, error);
839 return error;
840 }
841 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
842 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
843 /* Create a xfer DMA map for this drive */
844 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
845 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
846 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
847 &dma_maps->dmamap_xfer)) != 0) {
848 printf("%s:%d: unable to create xfer DMA map for "
849 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
850 channel, drive, error);
851 return error;
852 }
853 return 0;
854 }
855
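/*
 * Prepare a DMA transfer: load the data buffer into the xfer map, fill
 * in the descriptor table (flagging the last entry as end-of-table),
 * then point the bus-master engine at the table and set the transfer
 * direction.
 */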
856 int
857 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
858 void *v;
859 int channel, drive;
860 void *databuf;
861 size_t datalen;
862 int flags;
863 {
864 struct pciide_softc *sc = v;
865 int error, seg;
866 struct pciide_dma_maps *dma_maps =
867 &sc->pciide_channels[channel].dma_maps[drive];
868
869 error = bus_dmamap_load(sc->sc_dmat,
870 dma_maps->dmamap_xfer,
871 databuf, datalen, NULL, BUS_DMA_NOWAIT);
872 if (error) {
873 printf("%s:%d: unable to load xfer DMA map for"
874 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
875 channel, drive, error);
876 return error;
877 }
878
879 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
880 dma_maps->dmamap_xfer->dm_mapsize,
881 (flags & WDC_DMA_READ) ?
882 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
883
884 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
885 #ifdef DIAGNOSTIC
886 /* A segment must not cross a 64k boundary */
887 {
888 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
889 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
890 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
891 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
892 printf("pciide_dma: segment %d physical addr 0x%lx"
893 " len 0x%lx not properly aligned\n",
894 seg, phys, len);
895 panic("pciide_dma: buf align");
896 }
897 }
898 #endif
899 dma_maps->dma_table[seg].base_addr =
900 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
901 dma_maps->dma_table[seg].byte_count =
902 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
903 IDEDMA_BYTE_COUNT_MASK);
904 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
905 seg, le32toh(dma_maps->dma_table[seg].byte_count),
906 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
907
908 }
909 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
910 htole32(IDEDMA_BYTE_COUNT_EOT);
911
912 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
913 dma_maps->dmamap_table->dm_mapsize,
914 BUS_DMASYNC_PREWRITE);
915
916 /* Maps are ready. Start DMA function */
917 #ifdef DIAGNOSTIC
918 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
919 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
920 dma_maps->dmamap_table->dm_segs[0].ds_addr);
921 panic("pciide_dma_init: table align");
922 }
923 #endif
924
925 /* Clear status bits */
926 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
927 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
928 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
929 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
930 /* Write table addr */
931 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
932 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
933 dma_maps->dmamap_table->dm_segs[0].ds_addr);
934 /* set read/write */
935 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
936 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
937 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
938 /* remember flags */
939 dma_maps->dma_flags = flags;
940 return 0;
941 }
942
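/* Start the transfer by setting the start bit in the bus-master command register. */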
943 void
944 pciide_dma_start(v, channel, drive)
945 void *v;
946 int channel, drive;
947 {
948 struct pciide_softc *sc = v;
949
950 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
951 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
952 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
953 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
954 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
955 }
956
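/*
 * Finish a DMA transfer: if the interrupt bit isn't set and 'force' is
 * clear, report WDC_DMAST_NOIRQ; otherwise stop the engine, unload the
 * data buffer map and translate the controller status into WDC_DMAST_*
 * flags.
 */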
957 int
958 pciide_dma_finish(v, channel, drive, force)
959 void *v;
960 int channel, drive;
961 int force;
962 {
963 struct pciide_softc *sc = v;
964 u_int8_t status;
965 int error = 0;
966 struct pciide_dma_maps *dma_maps =
967 &sc->pciide_channels[channel].dma_maps[drive];
968
969 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
970 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
971 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
972 DEBUG_XFERS);
973
974 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
975 return WDC_DMAST_NOIRQ;
976
977 /* stop DMA channel */
978 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
979 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
980 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
981 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
982
983 /* Unload the map of the data buffer */
984 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
985 dma_maps->dmamap_xfer->dm_mapsize,
986 (dma_maps->dma_flags & WDC_DMA_READ) ?
987 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
988 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
989
990 if ((status & IDEDMA_CTL_ERR) != 0) {
991 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
992 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
993 error |= WDC_DMAST_ERR;
994 }
995
996 if ((status & IDEDMA_CTL_INTR) == 0) {
997 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
998 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
999 drive, status);
1000 error |= WDC_DMAST_NOIRQ;
1001 }
1002
1003 if ((status & IDEDMA_CTL_ACT) != 0) {
1004 /* data underrun, may be a valid condition for ATAPI */
1005 error |= WDC_DMAST_UNDER;
1006 }
1007 return error;
1008 }
1009
1010 void
1011 pciide_irqack(chp)
1012 struct channel_softc *chp;
1013 {
1014 struct pciide_channel *cp = (struct pciide_channel*)chp;
1015 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1016
1017 /* clear status bits in IDE DMA registers */
1018 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1019 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1020 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1021 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1022 }
1023
1024 /* common code used by several chip_map functions */
1025 int
1026 pciide_chansetup(sc, channel, interface)
1027 struct pciide_softc *sc;
1028 int channel;
1029 pcireg_t interface;
1030 {
1031 struct pciide_channel *cp = &sc->pciide_channels[channel];
1032 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1033 cp->name = PCIIDE_CHANNEL_NAME(channel);
1034 cp->wdc_channel.channel = channel;
1035 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1036 cp->wdc_channel.ch_queue =
1037 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1038 if (cp->wdc_channel.ch_queue == NULL) {
1039 printf("%s %s channel: "
1040 "can't allocate memory for command queue",
1041 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1042 return 0;
1043 }
1044 printf("%s: %s channel %s to %s mode\n",
1045 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1046 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1047 "configured" : "wired",
1048 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1049 "native-PCI" : "compatibility");
1050 return 1;
1051 }
1052
1053 /* common code used by several chip_map functions to map a channel */
1054 void
1055 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1056 struct pci_attach_args *pa;
1057 struct pciide_channel *cp;
1058 pcireg_t interface;
1059 bus_size_t *cmdsizep, *ctlsizep;
1060 int (*pci_intr) __P((void *));
1061 {
1062 struct channel_softc *wdc_cp = &cp->wdc_channel;
1063
1064 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1065 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1066 pci_intr);
1067 else
1068 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1069 wdc_cp->channel, cmdsizep, ctlsizep);
1070
1071 if (cp->hw_ok == 0)
1072 return;
1073 wdc_cp->data32iot = wdc_cp->cmd_iot;
1074 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1075 wdcattach(wdc_cp);
1076 }
1077
1078 /*
1079 * Generic code to determine whether a channel can be disabled. Returns 1
1080 * if the channel can be disabled, 0 if not
1081 */
1082 int
1083 pciide_chan_candisable(cp)
1084 struct pciide_channel *cp;
1085 {
1086 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1087 struct channel_softc *wdc_cp = &cp->wdc_channel;
1088
1089 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1090 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1091 printf("%s: disabling %s channel (no drives)\n",
1092 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1093 cp->hw_ok = 0;
1094 return 1;
1095 }
1096 return 0;
1097 }
1098
1099 /*
1100 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1101 * Set hw_ok=0 on failure
1102 */
1103 void
1104 pciide_map_compat_intr(pa, cp, compatchan, interface)
1105 struct pci_attach_args *pa;
1106 struct pciide_channel *cp;
1107 int compatchan, interface;
1108 {
1109 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1110 struct channel_softc *wdc_cp = &cp->wdc_channel;
1111
1112 if (cp->hw_ok == 0)
1113 return;
1114 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1115 return;
1116
1117 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1118 pa, compatchan, pciide_compat_intr, cp);
1119 if (cp->ih == NULL) {
1120 printf("%s: no compatibility interrupt for use by %s "
1121 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1122 cp->hw_ok = 0;
1123 }
1124 }
1125
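/* Print the PIO/DMA/Ultra-DMA modes selected for each drive on a channel. */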
1126 void
1127 pciide_print_modes(cp)
1128 struct pciide_channel *cp;
1129 {
1130 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1131 int drive;
1132 struct channel_softc *chp;
1133 struct ata_drive_datas *drvp;
1134
1135 chp = &cp->wdc_channel;
1136 for (drive = 0; drive < 2; drive++) {
1137 drvp = &chp->ch_drive[drive];
1138 if ((drvp->drive_flags & DRIVE) == 0)
1139 continue;
1140 printf("%s(%s:%d:%d): using PIO mode %d",
1141 drvp->drv_softc->dv_xname,
1142 sc->sc_wdcdev.sc_dev.dv_xname,
1143 chp->channel, drive, drvp->PIO_mode);
1144 if (drvp->drive_flags & DRIVE_DMA)
1145 printf(", DMA mode %d", drvp->DMA_mode);
1146 if (drvp->drive_flags & DRIVE_UDMA)
1147 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1148 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1149 printf(" (using DMA data transfers)");
1150 printf("\n");
1151 }
1152 }
1153
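/*
 * Generic chip_map, used when the controller is not specifically known.
 * Each channel is probed and cross-checked to make sure it really
 * belongs to this controller; for unknown chips bus-master DMA is only
 * used when the 'flags' option explicitly allows it.
 */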
1154 void
1155 default_chip_map(sc, pa)
1156 struct pciide_softc *sc;
1157 struct pci_attach_args *pa;
1158 {
1159 struct pciide_channel *cp;
1160 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1161 pcireg_t csr;
1162 int channel, drive;
1163 struct ata_drive_datas *drvp;
1164 u_int8_t idedma_ctl;
1165 bus_size_t cmdsize, ctlsize;
1166 char *failreason;
1167
1168 if (pciide_chipen(sc, pa) == 0)
1169 return;
1170
1171 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1172 printf("%s: bus-master DMA support present",
1173 sc->sc_wdcdev.sc_dev.dv_xname);
1174 if (sc->sc_pp == &default_product_desc &&
1175 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1176 PCIIDE_OPTIONS_DMA) == 0) {
1177 printf(", but unused (no driver support)");
1178 sc->sc_dma_ok = 0;
1179 } else {
1180 pciide_mapreg_dma(sc, pa);
1181 if (sc->sc_dma_ok != 0)
1182 printf(", used without full driver "
1183 "support");
1184 }
1185 } else {
1186 printf("%s: hardware does not support DMA",
1187 sc->sc_wdcdev.sc_dev.dv_xname);
1188 sc->sc_dma_ok = 0;
1189 }
1190 printf("\n");
1191 if (sc->sc_dma_ok) {
1192 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1193 sc->sc_wdcdev.irqack = pciide_irqack;
1194 }
1195 sc->sc_wdcdev.PIO_cap = 0;
1196 sc->sc_wdcdev.DMA_cap = 0;
1197
1198 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1199 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1200 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1201
1202 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1203 cp = &sc->pciide_channels[channel];
1204 if (pciide_chansetup(sc, channel, interface) == 0)
1205 continue;
1206 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1207 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1208 &ctlsize, pciide_pci_intr);
1209 } else {
1210 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1211 channel, &cmdsize, &ctlsize);
1212 }
1213 if (cp->hw_ok == 0)
1214 continue;
1215 /*
1216 * Check to see if something appears to be there.
1217 */
1218 failreason = NULL;
1219 if (!wdcprobe(&cp->wdc_channel)) {
1220 failreason = "not responding; disabled or no drives?";
1221 goto next;
1222 }
1223 /*
1224 * Now, make sure it's actually attributable to this PCI IDE
1225 * channel by trying to access the channel again while the
1226 * PCI IDE controller's I/O space is disabled. (If the
1227 * channel no longer appears to be there, it belongs to
1228 * this controller.) YUCK!
1229 */
1230 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1231 PCI_COMMAND_STATUS_REG);
1232 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1233 csr & ~PCI_COMMAND_IO_ENABLE);
1234 if (wdcprobe(&cp->wdc_channel))
1235 failreason = "other hardware responding at addresses";
1236 pci_conf_write(sc->sc_pc, sc->sc_tag,
1237 PCI_COMMAND_STATUS_REG, csr);
1238 next:
1239 if (failreason) {
1240 printf("%s: %s channel ignored (%s)\n",
1241 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1242 failreason);
1243 cp->hw_ok = 0;
1244 bus_space_unmap(cp->wdc_channel.cmd_iot,
1245 cp->wdc_channel.cmd_ioh, cmdsize);
1246 bus_space_unmap(cp->wdc_channel.ctl_iot,
1247 cp->wdc_channel.ctl_ioh, ctlsize);
1248 } else {
1249 pciide_map_compat_intr(pa, cp, channel, interface);
1250 }
1251 if (cp->hw_ok) {
1252 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1253 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1254 wdcattach(&cp->wdc_channel);
1255 }
1256 }
1257
1258 if (sc->sc_dma_ok == 0)
1259 return;
1260
1261 /* Allocate DMA maps */
1262 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1263 idedma_ctl = 0;
1264 cp = &sc->pciide_channels[channel];
1265 for (drive = 0; drive < 2; drive++) {
1266 drvp = &cp->wdc_channel.ch_drive[drive];
1267 /* If no drive, skip */
1268 if ((drvp->drive_flags & DRIVE) == 0)
1269 continue;
1270 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1271 continue;
1272 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1273 /* Abort DMA setup */
1274 printf("%s:%d:%d: can't allocate DMA maps, "
1275 "using PIO transfers\n",
1276 sc->sc_wdcdev.sc_dev.dv_xname,
1277 channel, drive);
1278 drvp->drive_flags &= ~DRIVE_DMA;
 continue; /* fall back to PIO for this drive */
1279 }
1280 printf("%s:%d:%d: using DMA data transfers\n",
1281 sc->sc_wdcdev.sc_dev.dv_xname,
1282 channel, drive);
1283 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1284 }
1285 if (idedma_ctl != 0) {
1286 /* Add software bits in status register */
1287 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1288 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1289 idedma_ctl);
1290 }
1291 }
1292 }
1293
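/*
 * Intel PIIX/PIIX3/PIIX4/ICH chip_map. These controllers use
 * compatibility-mode channels only; Ultra-DMA is enabled on the PIIX4
 * and later parts.
 */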
1294 void
1295 piix_chip_map(sc, pa)
1296 struct pciide_softc *sc;
1297 struct pci_attach_args *pa;
1298 {
1299 struct pciide_channel *cp;
1300 int channel;
1301 u_int32_t idetim;
1302 bus_size_t cmdsize, ctlsize;
1303
1304 if (pciide_chipen(sc, pa) == 0)
1305 return;
1306
1307 printf("%s: bus-master DMA support present",
1308 sc->sc_wdcdev.sc_dev.dv_xname);
1309 pciide_mapreg_dma(sc, pa);
1310 printf("\n");
1311 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1312 WDC_CAPABILITY_MODE;
1313 if (sc->sc_dma_ok) {
1314 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1315 sc->sc_wdcdev.irqack = pciide_irqack;
1316 switch(sc->sc_pp->ide_product) {
1317 case PCI_PRODUCT_INTEL_82371AB_IDE:
1318 case PCI_PRODUCT_INTEL_82801AA_IDE:
1319 case PCI_PRODUCT_INTEL_82801AB_IDE:
1320 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1321 }
1322 }
1323 sc->sc_wdcdev.PIO_cap = 4;
1324 sc->sc_wdcdev.DMA_cap = 2;
1325 sc->sc_wdcdev.UDMA_cap =
1326 (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) ? 4 : 2;
1327 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1328 sc->sc_wdcdev.set_modes = piix_setup_channel;
1329 else
1330 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1331 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1332 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1333
1334 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1335 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1336 DEBUG_PROBE);
1337 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1338 WDCDEBUG_PRINT((", sidetim=0x%x",
1339 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1340 DEBUG_PROBE);
1341 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1342 WDCDEBUG_PRINT((", udamreg 0x%x",
1343 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1344 DEBUG_PROBE);
1345 }
1346 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1347 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1348 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1349 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1350 DEBUG_PROBE);
1351 }
1352
1353 }
1354 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1355
1356 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1357 cp = &sc->pciide_channels[channel];
1358 /* PIIX is compat-only */
1359 if (pciide_chansetup(sc, channel, 0) == 0)
1360 continue;
1361 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1362 if ((PIIX_IDETIM_READ(idetim, channel) &
1363 PIIX_IDETIM_IDE) == 0) {
1364 printf("%s: %s channel ignored (disabled)\n",
1365 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1366 continue;
1367 }
1368 /* PIIX are compat-only pciide devices */
1369 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1370 if (cp->hw_ok == 0)
1371 continue;
1372 if (pciide_chan_candisable(cp)) {
1373 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1374 channel);
1375 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1376 idetim);
1377 }
1378 pciide_map_compat_intr(pa, cp, channel, 0);
1379 if (cp->hw_ok == 0)
1380 continue;
1381 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1382 }
1383
1384 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1385 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1386 DEBUG_PROBE);
1387 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1388 WDCDEBUG_PRINT((", sidetim=0x%x",
1389 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1390 DEBUG_PROBE);
1391 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1392 WDCDEBUG_PRINT((", udamreg 0x%x",
1393 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1394 DEBUG_PROBE);
1395 }
1396 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1397 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1398 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1399 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1400 DEBUG_PROBE);
1401 }
1402 }
1403 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1404 }
1405
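/*
 * Per-channel mode setup for the original PIIX (82371FB), which has only
 * one set of timings per channel, shared by both drives.
 */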
1406 void
1407 piix_setup_channel(chp)
1408 struct channel_softc *chp;
1409 {
1410 u_int8_t mode[2], drive;
1411 u_int32_t oidetim, idetim, idedma_ctl;
1412 struct pciide_channel *cp = (struct pciide_channel*)chp;
1413 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1414 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1415
1416 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1417 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1418 idedma_ctl = 0;
1419
1420 /* set up new idetim: Enable IDE registers decode */
1421 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1422 chp->channel);
1423
1424 /* setup DMA */
1425 pciide_channel_dma_setup(cp);
1426
1427 /*
1428 * Here we have to mess with the drives' modes: the PIIX can't have
1429 * different timings for master and slave drives.
1430 * We need to find the best combination.
1431 */
1432
1433 /* If both drives support DMA, take the lower mode */
1434 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1435 (drvp[1].drive_flags & DRIVE_DMA)) {
1436 mode[0] = mode[1] =
1437 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1438 drvp[0].DMA_mode = mode[0];
1439 drvp[1].DMA_mode = mode[1];
1440 goto ok;
1441 }
1442 /*
1443 * If only one drive supports DMA, use its mode, and
1444 * put the other one in PIO mode 0 if its mode is not compatible
1445 */
1446 if (drvp[0].drive_flags & DRIVE_DMA) {
1447 mode[0] = drvp[0].DMA_mode;
1448 mode[1] = drvp[1].PIO_mode;
1449 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1450 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1451 mode[1] = drvp[1].PIO_mode = 0;
1452 goto ok;
1453 }
1454 if (drvp[1].drive_flags & DRIVE_DMA) {
1455 mode[1] = drvp[1].DMA_mode;
1456 mode[0] = drvp[0].PIO_mode;
1457 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1458 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1459 mode[0] = drvp[0].PIO_mode = 0;
1460 goto ok;
1461 }
1462 /*
1463 * If neither drive uses DMA, take the lower mode, unless
1464 * one of them is in PIO mode < 2
1465 */
1466 if (drvp[0].PIO_mode < 2) {
1467 mode[0] = drvp[0].PIO_mode = 0;
1468 mode[1] = drvp[1].PIO_mode;
1469 } else if (drvp[1].PIO_mode < 2) {
1470 mode[1] = drvp[1].PIO_mode = 0;
1471 mode[0] = drvp[0].PIO_mode;
1472 } else {
1473 mode[0] = mode[1] =
1474 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1475 drvp[0].PIO_mode = mode[0];
1476 drvp[1].PIO_mode = mode[1];
1477 }
1478 ok: /* The modes are set up */
1479 for (drive = 0; drive < 2; drive++) {
1480 if (drvp[drive].drive_flags & DRIVE_DMA) {
1481 idetim |= piix_setup_idetim_timings(
1482 mode[drive], 1, chp->channel);
1483 goto end;
1484 }
1485 }
1486 /* If we get here, neither drive uses DMA */
1487 if (mode[0] >= 2)
1488 idetim |= piix_setup_idetim_timings(
1489 mode[0], 0, chp->channel);
1490 else
1491 idetim |= piix_setup_idetim_timings(
1492 mode[1], 0, chp->channel);
1493 end: /*
1494 * timing mode is now set up in the controller. Enable
1495 * it per-drive
1496 */
1497 for (drive = 0; drive < 2; drive++) {
1498 /* If no drive, skip */
1499 if ((drvp[drive].drive_flags & DRIVE) == 0)
1500 continue;
1501 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1502 if (drvp[drive].drive_flags & DRIVE_DMA)
1503 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1504 }
1505 if (idedma_ctl != 0) {
1506 /* Add software bits in status register */
1507 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1508 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1509 idedma_ctl);
1510 }
1511 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1512 pciide_print_modes(cp);
1513 }
1514
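/*
 * Per-channel mode setup for PIIX3/PIIX4/ICH: these also have slave
 * timing (SIDETIM) and Ultra-DMA control registers, so the two drives
 * can be programmed independently.
 */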
1515 void
1516 piix3_4_setup_channel(chp)
1517 struct channel_softc *chp;
1518 {
1519 struct ata_drive_datas *drvp;
1520 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1521 struct pciide_channel *cp = (struct pciide_channel*)chp;
1522 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1523 int drive;
1524 int channel = chp->channel;
1525
1526 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1527 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1528 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1529 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1530 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1531 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1532 PIIX_SIDETIM_RTC_MASK(channel));
1533
1534 idedma_ctl = 0;
1535 /* If channel disabled, no need to go further */
1536 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1537 return;
1538 /* set up new idetim: Enable IDE registers decode */
1539 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1540
1541 /* setup DMA if needed */
1542 pciide_channel_dma_setup(cp);
1543
1544 for (drive = 0; drive < 2; drive++) {
1545 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1546 PIIX_UDMATIM_SET(0x3, channel, drive));
1547 drvp = &chp->ch_drive[drive];
1548 /* If no drive, skip */
1549 if ((drvp->drive_flags & DRIVE) == 0)
1550 continue;
1551 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1552 (drvp->drive_flags & DRIVE_UDMA) == 0))
1553 goto pio;
1554
1555 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1556 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE) {
1557 ideconf |= PIIX_CONFIG_PINGPONG;
1558 }
1559 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1560 /* setup Ultra/66 */
1561 if (drvp->UDMA_mode > 2 &&
1562 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1563 drvp->UDMA_mode = 2;
1564 if (drvp->UDMA_mode > 2)
1565 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1566 else
1567 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1568 }
1569 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1570 (drvp->drive_flags & DRIVE_UDMA)) {
1571 /* use Ultra/DMA */
1572 drvp->drive_flags &= ~DRIVE_DMA;
1573 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
1574 udmareg |= PIIX_UDMATIM_SET(
1575 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1576 } else {
1577 /* use Multiword DMA */
1578 drvp->drive_flags &= ~DRIVE_UDMA;
1579 if (drive == 0) {
1580 idetim |= piix_setup_idetim_timings(
1581 drvp->DMA_mode, 1, channel);
1582 } else {
1583 sidetim |= piix_setup_sidetim_timings(
1584 drvp->DMA_mode, 1, channel);
1585 idetim =PIIX_IDETIM_SET(idetim,
1586 PIIX_IDETIM_SITRE, channel);
1587 }
1588 }
1589 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1590
1591 pio: /* use PIO mode */
1592 idetim |= piix_setup_idetim_drvs(drvp);
1593 if (drive == 0) {
1594 idetim |= piix_setup_idetim_timings(
1595 drvp->PIO_mode, 0, channel);
1596 } else {
1597 sidetim |= piix_setup_sidetim_timings(
1598 drvp->PIO_mode, 0, channel);
1599 idetim =PIIX_IDETIM_SET(idetim,
1600 PIIX_IDETIM_SITRE, channel);
1601 }
1602 }
1603 if (idedma_ctl != 0) {
1604 /* Add software bits in status register */
1605 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1606 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1607 idedma_ctl);
1608 }
1609 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1610 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1611 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1612 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1613 pciide_print_modes(cp);
1614 }
1615
1616
1617 /* setup ISP and RTC fields, based on mode */
1618 static u_int32_t
1619 piix_setup_idetim_timings(mode, dma, channel)
1620 u_int8_t mode;
1621 u_int8_t dma;
1622 u_int8_t channel;
1623 {
1624
1625 if (dma)
1626 return PIIX_IDETIM_SET(0,
1627 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1628 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1629 channel);
1630 else
1631 return PIIX_IDETIM_SET(0,
1632 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1633 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1634 channel);
1635 }
1636
1637 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1638 static u_int32_t
1639 piix_setup_idetim_drvs(drvp)
1640 struct ata_drive_datas *drvp;
1641 {
1642 u_int32_t ret = 0;
1643 struct channel_softc *chp = drvp->chnl_softc;
1644 u_int8_t channel = chp->channel;
1645 u_int8_t drive = drvp->drive;
1646
1647 /*
1648 * If the drive is using UDMA, the timing setups are independent,
1649 * so just check DMA and PIO here.
1650 */
1651 if (drvp->drive_flags & DRIVE_DMA) {
1652 /* if mode = DMA mode 0, use compatible timings */
1653 if ((drvp->drive_flags & DRIVE_DMA) &&
1654 drvp->DMA_mode == 0) {
1655 drvp->PIO_mode = 0;
1656 return ret;
1657 }
1658 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1659 /*
1660 * If PIO and DMA timings are the same, use fast timings for PIO
1661 * too, else use compat timings.
1662 */
1663 if ((piix_isp_pio[drvp->PIO_mode] !=
1664 piix_isp_dma[drvp->DMA_mode]) ||
1665 (piix_rtc_pio[drvp->PIO_mode] !=
1666 piix_rtc_dma[drvp->DMA_mode]))
1667 drvp->PIO_mode = 0;
1668 /* if PIO mode <= 2, use compat timings for PIO */
1669 if (drvp->PIO_mode <= 2) {
1670 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1671 channel);
1672 return ret;
1673 }
1674 }
1675
1676 /*
1677 * Now setup PIO modes. If mode < 2, use compat timings.
1678 * Else enable fast timings. Enable IORDY and prefetch/post
1679 * if PIO mode >= 3.
1680 */
1681
1682 if (drvp->PIO_mode < 2)
1683 return ret;
1684
1685 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1686 if (drvp->PIO_mode >= 3) {
1687 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1688 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1689 }
1690 return ret;
1691 }
1692
1693 /* setup values in SIDETIM registers, based on mode */
1694 static u_int32_t
1695 piix_setup_sidetim_timings(mode, dma, channel)
1696 u_int8_t mode;
1697 u_int8_t dma;
1698 u_int8_t channel;
1699 {
1700 if (dma)
1701 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1702 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1703 else
1704 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1705 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1706 }
1707
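/*
 * AMD-756 chip_map: channels are enabled individually via the
 * AMD756_CHANSTATUS_EN register; Ultra-DMA modes up to 4 are supported.
 */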
1708 void
1709 amd756_chip_map(sc, pa)
1710 struct pciide_softc *sc;
1711 struct pci_attach_args *pa;
1712 {
1713 struct pciide_channel *cp;
1714 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1715 int channel;
1716 pcireg_t chanenable;
1717 bus_size_t cmdsize, ctlsize;
1718
1719 if (pciide_chipen(sc, pa) == 0)
1720 return;
1721 printf("%s: bus-master DMA support present",
1722 sc->sc_wdcdev.sc_dev.dv_xname);
1723 pciide_mapreg_dma(sc, pa);
1724 printf("\n");
1725 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1726 WDC_CAPABILITY_MODE;
1727 if (sc->sc_dma_ok) {
1728 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1729 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1730 sc->sc_wdcdev.irqack = pciide_irqack;
1731 }
1732 sc->sc_wdcdev.PIO_cap = 4;
1733 sc->sc_wdcdev.DMA_cap = 2;
1734 sc->sc_wdcdev.UDMA_cap = 4;
1735 sc->sc_wdcdev.set_modes = amd756_setup_channel;
1736 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1737 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1738 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1739
1740 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1741 DEBUG_PROBE);
1742 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1743 cp = &sc->pciide_channels[channel];
1744 if (pciide_chansetup(sc, channel, interface) == 0)
1745 continue;
1746
1747 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1748 printf("%s: %s channel ignored (disabled)\n",
1749 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1750 continue;
1751 }
1752 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1753 pciide_pci_intr);
1754
1755 if (pciide_chan_candisable(cp))
1756 chanenable &= ~AMD756_CHAN_EN(channel);
1757 pciide_map_compat_intr(pa, cp, channel, interface);
1758 if (cp->hw_ok == 0)
1759 continue;
1760
1761 amd756_setup_channel(&cp->wdc_channel);
1762 }
1763 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1764 chanenable);
1765 return;
1766 }
1767
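/*
 * Program the AMD-756 DATATIM and UDMA registers for one channel.  A
 * single DATATIM entry apparently covers both PIO and multiword DMA, so
 * the code below picks mode = min(PIO, DMA + 2): e.g. a drive doing
 * PIO 4 and multiword DMA 2 keeps mode 4, while PIO 4 with DMA 1 is held
 * down to mode 3.  Anything at mode 2 or below drops back to the mode 0
 * table entries.
 */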
1768 void
1769 amd756_setup_channel(chp)
1770 struct channel_softc *chp;
1771 {
1772 u_int32_t udmatim_reg, datatim_reg;
1773 u_int8_t idedma_ctl;
1774 int mode, drive;
1775 struct ata_drive_datas *drvp;
1776 struct pciide_channel *cp = (struct pciide_channel*)chp;
1777 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1778
1779 idedma_ctl = 0;
1780 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1781 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1782 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1783 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1784
1785 /* setup DMA if needed */
1786 pciide_channel_dma_setup(cp);
1787
1788 for (drive = 0; drive < 2; drive++) {
1789 drvp = &chp->ch_drive[drive];
1790 /* If no drive, skip */
1791 if ((drvp->drive_flags & DRIVE) == 0)
1792 continue;
1793 /* add timing values, setup DMA if needed */
1794 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1795 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1796 mode = drvp->PIO_mode;
1797 goto pio;
1798 }
1799 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1800 (drvp->drive_flags & DRIVE_UDMA)) {
1801 /* use Ultra/DMA */
1802 drvp->drive_flags &= ~DRIVE_DMA;
1803 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1804 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1805 AMD756_UDMA_TIME(chp->channel, drive,
1806 amd756_udma_tim[drvp->UDMA_mode]);
1807 /* can use PIO timings, MW DMA unused */
1808 mode = drvp->PIO_mode;
1809 } else {
1810 /* use Multiword DMA */
1811 drvp->drive_flags &= ~DRIVE_UDMA;
1812 /* mode = min(pio, dma+2) */
1813 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1814 mode = drvp->PIO_mode;
1815 else
1816 mode = drvp->DMA_mode + 2;
1817 }
1818 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1819
1820 pio: /* setup PIO mode */
1821 if (mode <= 2) {
1822 drvp->DMA_mode = 0;
1823 drvp->PIO_mode = 0;
1824 mode = 0;
1825 } else {
1826 drvp->PIO_mode = mode;
1827 drvp->DMA_mode = mode - 2;
1828 }
1829 datatim_reg |=
1830 AMD756_DATATIM_PULSE(chp->channel, drive,
1831 amd756_pio_set[mode]) |
1832 AMD756_DATATIM_RECOV(chp->channel, drive,
1833 amd756_pio_rec[mode]);
1834 }
1835 if (idedma_ctl != 0) {
1836 /* Add software bits in status register */
1837 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1838 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1839 idedma_ctl);
1840 }
1841 pciide_print_modes(cp);
1842 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1843 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1844 }
1845
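/*
 * Attach a VIA Apollo IDE controller.  Ultra-DMA (up to mode 2) is only
 * enabled for the VT82C586A variant; the other supported VIA chips are
 * limited to PIO mode 4 and multiword DMA mode 2.
 */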
1846 void
1847 apollo_chip_map(sc, pa)
1848 struct pciide_softc *sc;
1849 struct pci_attach_args *pa;
1850 {
1851 struct pciide_channel *cp;
1852 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1853 int channel;
1854 u_int32_t ideconf;
1855 bus_size_t cmdsize, ctlsize;
1856
1857 if (pciide_chipen(sc, pa) == 0)
1858 return;
1859 printf("%s: bus-master DMA support present",
1860 sc->sc_wdcdev.sc_dev.dv_xname);
1861 pciide_mapreg_dma(sc, pa);
1862 printf("\n");
1863 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1864 WDC_CAPABILITY_MODE;
1865 if (sc->sc_dma_ok) {
1866 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1867 sc->sc_wdcdev.irqack = pciide_irqack;
1868 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE)
1869 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1870 }
1871 sc->sc_wdcdev.PIO_cap = 4;
1872 sc->sc_wdcdev.DMA_cap = 2;
1873 sc->sc_wdcdev.UDMA_cap = 2;
1874 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1875 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1876 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1877
1878 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1879 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1880 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1881 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1882 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1883 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
1884 DEBUG_PROBE);
1885
1886 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1887 cp = &sc->pciide_channels[channel];
1888 if (pciide_chansetup(sc, channel, interface) == 0)
1889 continue;
1890
1891 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
1892 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
1893 printf("%s: %s channel ignored (disabled)\n",
1894 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1895 continue;
1896 }
1897 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1898 pciide_pci_intr);
1899 if (cp->hw_ok == 0)
1900 continue;
1901 if (pciide_chan_candisable(cp)) {
1902 ideconf &= ~APO_IDECONF_EN(channel);
1903 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
1904 ideconf);
1905 }
1906 pciide_map_compat_intr(pa, cp, channel, interface);
1907
1908 if (cp->hw_ok == 0)
1909 continue;
1910 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
1911 }
1912 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1913 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1914 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
1915 }
1916
1917 void
1918 apollo_setup_channel(chp)
1919 struct channel_softc *chp;
1920 {
1921 u_int32_t udmatim_reg, datatim_reg;
1922 u_int8_t idedma_ctl;
1923 int mode, drive;
1924 struct ata_drive_datas *drvp;
1925 struct pciide_channel *cp = (struct pciide_channel*)chp;
1926 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1927
1928 idedma_ctl = 0;
1929 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
1930 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1931 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
1932 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
1933
1934 /* setup DMA if needed */
1935 pciide_channel_dma_setup(cp);
1936
1937 for (drive = 0; drive < 2; drive++) {
1938 drvp = &chp->ch_drive[drive];
1939 /* If no drive, skip */
1940 if ((drvp->drive_flags & DRIVE) == 0)
1941 continue;
1942 /* add timing values, setup DMA if needed */
1943 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1944 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1945 mode = drvp->PIO_mode;
1946 goto pio;
1947 }
1948 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1949 (drvp->drive_flags & DRIVE_UDMA)) {
1950 /* use Ultra/DMA */
1951 drvp->drive_flags &= ~DRIVE_DMA;
1952 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
1953 APO_UDMA_EN_MTH(chp->channel, drive) |
1954 APO_UDMA_TIME(chp->channel, drive,
1955 apollo_udma_tim[drvp->UDMA_mode]);
1956 /* can use PIO timings, MW DMA unused */
1957 mode = drvp->PIO_mode;
1958 } else {
1959 /* use Multiword DMA */
1960 drvp->drive_flags &= ~DRIVE_UDMA;
1961 /* mode = min(pio, dma+2) */
1962 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
1963 mode = drvp->PIO_mode;
1964 else
1965 mode = drvp->DMA_mode + 2;
1966 }
1967 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1968
1969 pio: /* setup PIO mode */
1970 if (mode <= 2) {
1971 drvp->DMA_mode = 0;
1972 drvp->PIO_mode = 0;
1973 mode = 0;
1974 } else {
1975 drvp->PIO_mode = mode;
1976 drvp->DMA_mode = mode - 2;
1977 }
1978 datatim_reg |=
1979 APO_DATATIM_PULSE(chp->channel, drive,
1980 apollo_pio_set[mode]) |
1981 APO_DATATIM_RECOV(chp->channel, drive,
1982 apollo_pio_rec[mode]);
1983 }
1984 if (idedma_ctl != 0) {
1985 /* Add software bits in status register */
1986 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1987 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1988 idedma_ctl);
1989 }
1990 pciide_print_modes(cp);
1991 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
1992 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
1993 }
1994
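/*
 * Map one channel of a CMD 064x controller.  The 0648/0649 may identify
 * as a RAID controller, in which case the interface word is faked from
 * CMD_CONF.  Both channels share a single command queue: channel 0
 * allocates it and channel 1 reuses channel 0's pointer.
 */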
1995 void
1996 cmd_channel_map(pa, sc, channel)
1997 struct pci_attach_args *pa;
1998 struct pciide_softc *sc;
1999 int channel;
2000 {
2001 struct pciide_channel *cp = &sc->pciide_channels[channel];
2002 bus_size_t cmdsize, ctlsize;
2003 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2004 int interface;
2005
2006 /*
2007 * The 0648/0649 can be told to identify as a RAID controller.
2008 	 * In this case, we have to fake the interface.
2009 */
2010 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2011 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2012 PCIIDE_INTERFACE_SETTABLE(1);
2013 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2014 CMD_CONF_DSA1)
2015 interface |= PCIIDE_INTERFACE_PCI(0) |
2016 PCIIDE_INTERFACE_PCI(1);
2017 } else {
2018 interface = PCI_INTERFACE(pa->pa_class);
2019 }
2020
2021 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2022 cp->name = PCIIDE_CHANNEL_NAME(channel);
2023 cp->wdc_channel.channel = channel;
2024 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2025
2026 if (channel > 0) {
2027 cp->wdc_channel.ch_queue =
2028 sc->pciide_channels[0].wdc_channel.ch_queue;
2029 } else {
2030 cp->wdc_channel.ch_queue =
2031 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2032 }
2033 if (cp->wdc_channel.ch_queue == NULL) {
2034 printf("%s %s channel: "
2035 		    "can't allocate memory for command queue\n",
2036 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2037 return;
2038 }
2039
2040 printf("%s: %s channel %s to %s mode\n",
2041 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2042 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2043 "configured" : "wired",
2044 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2045 "native-PCI" : "compatibility");
2046
2047 /*
2048 	 * With a CMD PCI064x, if we get here the first channel is enabled:
2049 	 * there's no way to disable the first channel without disabling
2050 	 * the whole device.
2051 */
2052 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2053 printf("%s: %s channel ignored (disabled)\n",
2054 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2055 return;
2056 }
2057
2058 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2059 if (cp->hw_ok == 0)
2060 return;
2061 if (channel == 1) {
2062 if (pciide_chan_candisable(cp)) {
2063 ctrl &= ~CMD_CTRL_2PORT;
2064 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2065 CMD_CTRL, ctrl);
2066 }
2067 }
2068 pciide_map_compat_intr(pa, cp, channel, interface);
2069 }
2070
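/*
 * PCI interrupt handler for CMD controllers in native mode.  The
 * per-channel interrupt-pending bits live in CMD_CONF for the primary
 * channel and in CMD_ARTTIM23 for the secondary one; only channels with
 * their bit set are handed to wdcintr().
 */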
2071 int
2072 cmd_pci_intr(arg)
2073 void *arg;
2074 {
2075 struct pciide_softc *sc = arg;
2076 struct pciide_channel *cp;
2077 struct channel_softc *wdc_cp;
2078 int i, rv, crv;
2079 u_int32_t priirq, secirq;
2080
2081 rv = 0;
2082 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2083 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2084 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2085 cp = &sc->pciide_channels[i];
2086 wdc_cp = &cp->wdc_channel;
2087 		/* If a compat channel, skip it. */
2088 if (cp->compat)
2089 continue;
2090 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2091 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2092 crv = wdcintr(wdc_cp);
2093 if (crv == 0)
2094 printf("%s:%d: bogus intr\n",
2095 sc->sc_wdcdev.sc_dev.dv_xname, i);
2096 else
2097 rv = 1;
2098 }
2099 }
2100 return rv;
2101 }
2102
2103 void
2104 cmd_chip_map(sc, pa)
2105 struct pciide_softc *sc;
2106 struct pci_attach_args *pa;
2107 {
2108 int channel;
2109
2110 /*
2111 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2112 	 * and the base address registers can be disabled at
2113 	 * the hardware level. In this case, the device is wired
2114 * in compat mode and its first channel is always enabled,
2115 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2116 * In fact, it seems that the first channel of the CMD PCI0640
2117 * can't be disabled.
2118 */
2119
2120 #ifdef PCIIDE_CMD064x_DISABLE
2121 if (pciide_chipen(sc, pa) == 0)
2122 return;
2123 #endif
2124
2125 printf("%s: hardware does not support DMA\n",
2126 sc->sc_wdcdev.sc_dev.dv_xname);
2127 sc->sc_dma_ok = 0;
2128
2129 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2130 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2131 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2132
2133 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2134 cmd_channel_map(pa, sc, channel);
2135 }
2136 }
2137
2138 void
2139 cmd0643_9_chip_map(sc, pa)
2140 struct pciide_softc *sc;
2141 struct pci_attach_args *pa;
2142 {
2143 struct pciide_channel *cp;
2144 int channel;
2145
2146 /*
2147 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2148 	 * and the base address registers can be disabled at
2149 	 * the hardware level. In this case, the device is wired
2150 * in compat mode and its first channel is always enabled,
2151 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2152 * In fact, it seems that the first channel of the CMD PCI0640
2153 * can't be disabled.
2154 */
2155
2156 #ifdef PCIIDE_CMD064x_DISABLE
2157 if (pciide_chipen(sc, pa) == 0)
2158 return;
2159 #endif
2160 printf("%s: bus-master DMA support present",
2161 sc->sc_wdcdev.sc_dev.dv_xname);
2162 pciide_mapreg_dma(sc, pa);
2163 printf("\n");
2164 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2165 WDC_CAPABILITY_MODE;
2166 if (sc->sc_dma_ok) {
2167 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2168 sc->sc_wdcdev.irqack = pciide_irqack;
2169 switch (sc->sc_pp->ide_product) {
2170 case PCI_PRODUCT_CMDTECH_649:
2171 case PCI_PRODUCT_CMDTECH_648:
2172 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2173 sc->sc_wdcdev.UDMA_cap = 4;
2174 }
2175 }
2176
2177 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2178 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2179 sc->sc_wdcdev.PIO_cap = 4;
2180 sc->sc_wdcdev.DMA_cap = 2;
2181 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2182
2183 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2184 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2185 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2186 DEBUG_PROBE);
2187
2188 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2189 cp = &sc->pciide_channels[channel];
2190 cmd_channel_map(pa, sc, channel);
2191 if (cp->hw_ok == 0)
2192 continue;
2193 cmd0643_9_setup_channel(&cp->wdc_channel);
2194 }
2195 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2196 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2197 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2198 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2199 DEBUG_PROBE);
2200 }
2201
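/*
 * Set per-drive timings for the CMD 064x chips handled by
 * cmd0643_9_chip_map().  When Ultra-DMA is available, the CMD_BICSR
 * register appears to report the cable type per channel; if the 80-wire
 * bit is clear, Ultra-DMA is capped at mode 2 before CMD_UDMATIM is
 * programmed.
 */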
2202 void
2203 cmd0643_9_setup_channel(chp)
2204 struct channel_softc *chp;
2205 {
2206 struct ata_drive_datas *drvp;
2207 u_int8_t tim;
2208 u_int32_t idedma_ctl, udma_reg;
2209 int drive;
2210 struct pciide_channel *cp = (struct pciide_channel*)chp;
2211 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2212
2213 idedma_ctl = 0;
2214 /* setup DMA if needed */
2215 pciide_channel_dma_setup(cp);
2216
2217 for (drive = 0; drive < 2; drive++) {
2218 drvp = &chp->ch_drive[drive];
2219 /* If no drive, skip */
2220 if ((drvp->drive_flags & DRIVE) == 0)
2221 continue;
2222 /* add timing values, setup DMA if needed */
2223 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2224 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2225 if (drvp->drive_flags & DRIVE_UDMA) {
2226 /* UltraDMA on a 0648 or 0649 */
2227 udma_reg = pciide_pci_read(sc->sc_pc,
2228 sc->sc_tag, CMD_UDMATIM(chp->channel));
2229 if (drvp->UDMA_mode > 2 &&
2230 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2231 CMD_BICSR) &
2232 CMD_BICSR_80(chp->channel)) == 0)
2233 drvp->UDMA_mode = 2;
2234 if (drvp->UDMA_mode > 2)
2235 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2236 else
2237 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2238 udma_reg |= CMD_UDMATIM_UDMA(drive);
2239 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2240 CMD_UDMATIM_TIM_OFF(drive));
2241 udma_reg |=
2242 (cmd0648_9_tim_udma[drvp->UDMA_mode] <<
2243 CMD_UDMATIM_TIM_OFF(drive));
2244 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2245 CMD_UDMATIM(chp->channel), udma_reg);
2246 } else {
2247 /*
2248 * use Multiword DMA.
2249 * Timings will be used for both PIO and DMA,
2250 * so adjust DMA mode if needed
2251 * if we have a 0648/9, turn off UDMA
2252 */
2253 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2254 udma_reg = pciide_pci_read(sc->sc_pc,
2255 sc->sc_tag,
2256 CMD_UDMATIM(chp->channel));
2257 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2258 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2259 CMD_UDMATIM(chp->channel),
2260 udma_reg);
2261 }
2262 if (drvp->PIO_mode >= 3 &&
2263 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2264 drvp->DMA_mode = drvp->PIO_mode - 2;
2265 }
2266 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2267 }
2268 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2269 }
2270 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2271 CMD_DATA_TIM(chp->channel, drive), tim);
2272 }
2273 if (idedma_ctl != 0) {
2274 /* Add software bits in status register */
2275 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2276 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2277 idedma_ctl);
2278 }
2279 pciide_print_modes(cp);
2280 }
2281
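/*
 * Attach a Cypress 82C693 IDE controller.  The chip exposes one IDE
 * channel per PCI function, so the function number selects the
 * compatibility channel (function 1 is primary, function 2 secondary)
 * and a single wdc channel is attached per instance.
 */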
2282 void
2283 cy693_chip_map(sc, pa)
2284 struct pciide_softc *sc;
2285 struct pci_attach_args *pa;
2286 {
2287 struct pciide_channel *cp;
2288 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2289 bus_size_t cmdsize, ctlsize;
2290
2291 if (pciide_chipen(sc, pa) == 0)
2292 return;
2293 /*
2294 	 * This chip has 2 PCI IDE functions, one for primary and one for
2295 	 * secondary, so we need to call pciide_mapregs_compat() with
2296 	 * the real channel.
2297 */
2298 if (pa->pa_function == 1) {
2299 sc->sc_cy_compatchan = 0;
2300 } else if (pa->pa_function == 2) {
2301 sc->sc_cy_compatchan = 1;
2302 } else {
2303 printf("%s: unexpected PCI function %d\n",
2304 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2305 return;
2306 }
2307 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2308 printf("%s: bus-master DMA support present",
2309 sc->sc_wdcdev.sc_dev.dv_xname);
2310 pciide_mapreg_dma(sc, pa);
2311 } else {
2312 printf("%s: hardware does not support DMA",
2313 sc->sc_wdcdev.sc_dev.dv_xname);
2314 sc->sc_dma_ok = 0;
2315 }
2316 printf("\n");
2317
2318 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2319 if (sc->sc_cy_handle == NULL) {
2320 printf("%s: unable to map hyperCache control registers\n",
2321 sc->sc_wdcdev.sc_dev.dv_xname);
2322 sc->sc_dma_ok = 0;
2323 }
2324
2325 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2326 WDC_CAPABILITY_MODE;
2327 if (sc->sc_dma_ok) {
2328 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2329 sc->sc_wdcdev.irqack = pciide_irqack;
2330 }
2331 sc->sc_wdcdev.PIO_cap = 4;
2332 sc->sc_wdcdev.DMA_cap = 2;
2333 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2334
2335 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2336 sc->sc_wdcdev.nchannels = 1;
2337
2338 /* Only one channel for this chip; if we are here it's enabled */
2339 cp = &sc->pciide_channels[0];
2340 sc->wdc_chanarray[0] = &cp->wdc_channel;
2341 cp->name = PCIIDE_CHANNEL_NAME(0);
2342 cp->wdc_channel.channel = 0;
2343 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2344 cp->wdc_channel.ch_queue =
2345 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2346 if (cp->wdc_channel.ch_queue == NULL) {
2347 printf("%s primary channel: "
2348 		    "can't allocate memory for command queue\n",
2349 sc->sc_wdcdev.sc_dev.dv_xname);
2350 return;
2351 }
2352 printf("%s: primary channel %s to ",
2353 sc->sc_wdcdev.sc_dev.dv_xname,
2354 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2355 "configured" : "wired");
2356 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2357 printf("native-PCI");
2358 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2359 pciide_pci_intr);
2360 } else {
2361 printf("compatibility");
2362 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2363 &cmdsize, &ctlsize);
2364 }
2365 printf(" mode\n");
2366 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2367 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2368 wdcattach(&cp->wdc_channel);
2369 if (pciide_chan_candisable(cp)) {
2370 pci_conf_write(sc->sc_pc, sc->sc_tag,
2371 PCI_COMMAND_STATUS_REG, 0);
2372 }
2373 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2374 if (cp->hw_ok == 0)
2375 return;
2376 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2377 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2378 cy693_setup_channel(&cp->wdc_channel);
2379 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2380 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2381 }
2382
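/*
 * Program the cy693 timings.  PIO pulse/recovery values go into the
 * CY_CMD_CTRL register per drive, but there is only one DMA mode setting
 * per channel (written through the hyperCache registers), so the slowest
 * DMA mode of the two drives is used for both.
 */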
2383 void
2384 cy693_setup_channel(chp)
2385 struct channel_softc *chp;
2386 {
2387 struct ata_drive_datas *drvp;
2388 int drive;
2389 u_int32_t cy_cmd_ctrl;
2390 u_int32_t idedma_ctl;
2391 struct pciide_channel *cp = (struct pciide_channel*)chp;
2392 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2393 int dma_mode = -1;
2394
2395 cy_cmd_ctrl = idedma_ctl = 0;
2396
2397 /* setup DMA if needed */
2398 pciide_channel_dma_setup(cp);
2399
2400 for (drive = 0; drive < 2; drive++) {
2401 drvp = &chp->ch_drive[drive];
2402 /* If no drive, skip */
2403 if ((drvp->drive_flags & DRIVE) == 0)
2404 continue;
2405 /* add timing values, setup DMA if needed */
2406 if (drvp->drive_flags & DRIVE_DMA) {
2407 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2408 /* use Multiword DMA */
2409 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2410 dma_mode = drvp->DMA_mode;
2411 }
2412 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2413 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2414 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2415 CY_CMD_CTRL_IOW_REC_OFF(drive));
2416 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2417 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2418 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2419 CY_CMD_CTRL_IOR_REC_OFF(drive));
2420 }
2421 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2422 chp->ch_drive[0].DMA_mode = dma_mode;
2423 chp->ch_drive[1].DMA_mode = dma_mode;
2424
2425 if (dma_mode == -1)
2426 dma_mode = 0;
2427
2428 if (sc->sc_cy_handle != NULL) {
2429 /* Note: `multiple' is implied. */
2430 cy82c693_write(sc->sc_cy_handle,
2431 (sc->sc_cy_compatchan == 0) ?
2432 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2433 }
2434
2435 pciide_print_modes(cp);
2436
2437 if (idedma_ctl != 0) {
2438 /* Add software bits in status register */
2439 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2440 IDEDMA_CTL, idedma_ctl);
2441 }
2442 }
2443
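/*
 * Attach a SiS IDE controller.  Ultra-DMA (up to mode 2) is only
 * advertised for PCI revisions 0xd0 and newer; the channel enable bits
 * come from SIS_CTRL0, and the timing-select and FIFO bits in SIS_MISC
 * are turned on before the channels are mapped.
 */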
2444 void
2445 sis_chip_map(sc, pa)
2446 struct pciide_softc *sc;
2447 struct pci_attach_args *pa;
2448 {
2449 struct pciide_channel *cp;
2450 int channel;
2451 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2452 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2453 pcireg_t rev = PCI_REVISION(pa->pa_class);
2454 bus_size_t cmdsize, ctlsize;
2455
2456 if (pciide_chipen(sc, pa) == 0)
2457 return;
2458 printf("%s: bus-master DMA support present",
2459 sc->sc_wdcdev.sc_dev.dv_xname);
2460 pciide_mapreg_dma(sc, pa);
2461 printf("\n");
2462 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2463 WDC_CAPABILITY_MODE;
2464 if (sc->sc_dma_ok) {
2465 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2466 sc->sc_wdcdev.irqack = pciide_irqack;
2467 if (rev >= 0xd0)
2468 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2469 }
2470
2471 sc->sc_wdcdev.PIO_cap = 4;
2472 sc->sc_wdcdev.DMA_cap = 2;
2473 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2474 sc->sc_wdcdev.UDMA_cap = 2;
2475 sc->sc_wdcdev.set_modes = sis_setup_channel;
2476
2477 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2478 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2479
2480 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2481 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2482 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2483
2484 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2485 cp = &sc->pciide_channels[channel];
2486 if (pciide_chansetup(sc, channel, interface) == 0)
2487 continue;
2488 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2489 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2490 printf("%s: %s channel ignored (disabled)\n",
2491 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2492 continue;
2493 }
2494 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2495 pciide_pci_intr);
2496 if (cp->hw_ok == 0)
2497 continue;
2498 if (pciide_chan_candisable(cp)) {
2499 if (channel == 0)
2500 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2501 else
2502 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2503 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2504 sis_ctr0);
2505 }
2506 pciide_map_compat_intr(pa, cp, channel, interface);
2507 if (cp->hw_ok == 0)
2508 continue;
2509 sis_setup_channel(&cp->wdc_channel);
2510 }
2511 }
2512
2513 void
2514 sis_setup_channel(chp)
2515 struct channel_softc *chp;
2516 {
2517 struct ata_drive_datas *drvp;
2518 int drive;
2519 u_int32_t sis_tim;
2520 u_int32_t idedma_ctl;
2521 struct pciide_channel *cp = (struct pciide_channel*)chp;
2522 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2523
2524 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2525 "channel %d 0x%x\n", chp->channel,
2526 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2527 DEBUG_PROBE);
2528 sis_tim = 0;
2529 idedma_ctl = 0;
2530 /* setup DMA if needed */
2531 pciide_channel_dma_setup(cp);
2532
2533 for (drive = 0; drive < 2; drive++) {
2534 drvp = &chp->ch_drive[drive];
2535 /* If no drive, skip */
2536 if ((drvp->drive_flags & DRIVE) == 0)
2537 continue;
2538 /* add timing values, setup DMA if needed */
2539 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2540 (drvp->drive_flags & DRIVE_UDMA) == 0)
2541 goto pio;
2542
2543 if (drvp->drive_flags & DRIVE_UDMA) {
2544 /* use Ultra/DMA */
2545 drvp->drive_flags &= ~DRIVE_DMA;
2546 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2547 SIS_TIM_UDMA_TIME_OFF(drive);
2548 sis_tim |= SIS_TIM_UDMA_EN(drive);
2549 } else {
2550 /*
2551 * use Multiword DMA
2552 * Timings will be used for both PIO and DMA,
2553 * so adjust DMA mode if needed
2554 */
2555 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2556 drvp->PIO_mode = drvp->DMA_mode + 2;
2557 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2558 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2559 drvp->PIO_mode - 2 : 0;
2560 if (drvp->DMA_mode == 0)
2561 drvp->PIO_mode = 0;
2562 }
2563 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2564 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2565 SIS_TIM_ACT_OFF(drive);
2566 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2567 SIS_TIM_REC_OFF(drive);
2568 }
2569 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2570 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2571 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2572 if (idedma_ctl != 0) {
2573 /* Add software bits in status register */
2574 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2575 IDEDMA_CTL, idedma_ctl);
2576 }
2577 pciide_print_modes(cp);
2578 }
2579
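/*
 * Attach an Acer Labs (ALi) IDE controller.  The DMA enable bit is set
 * and the FIFO-disable bit cleared in ACER_CDRC, the normally read-only
 * channel status bits are made writable, and the channel-enable bits are
 * turned on in PCI_CLASS_REG; the interface word is then re-read from
 * that register rather than taken from the value cached at attach time.
 */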
2580 void
2581 acer_chip_map(sc, pa)
2582 struct pciide_softc *sc;
2583 struct pci_attach_args *pa;
2584 {
2585 struct pciide_channel *cp;
2586 int channel;
2587 pcireg_t cr, interface;
2588 bus_size_t cmdsize, ctlsize;
2589
2590 if (pciide_chipen(sc, pa) == 0)
2591 return;
2592 printf("%s: bus-master DMA support present",
2593 sc->sc_wdcdev.sc_dev.dv_xname);
2594 pciide_mapreg_dma(sc, pa);
2595 printf("\n");
2596 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2597 WDC_CAPABILITY_MODE;
2598 if (sc->sc_dma_ok) {
2599 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2600 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2601 sc->sc_wdcdev.irqack = pciide_irqack;
2602 }
2603
2604 sc->sc_wdcdev.PIO_cap = 4;
2605 sc->sc_wdcdev.DMA_cap = 2;
2606 sc->sc_wdcdev.UDMA_cap = 2;
2607 sc->sc_wdcdev.set_modes = acer_setup_channel;
2608 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2609 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2610
2611 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2612 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2613 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2614
2615 /* Enable "microsoft register bits" R/W. */
2616 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2617 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2618 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2619 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2620 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2621 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2622 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2623 ~ACER_CHANSTATUSREGS_RO);
2624 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2625 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2626 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2627 /* Don't use cr, re-read the real register content instead */
2628 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2629 PCI_CLASS_REG));
2630
2631 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2632 cp = &sc->pciide_channels[channel];
2633 if (pciide_chansetup(sc, channel, interface) == 0)
2634 continue;
2635 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2636 printf("%s: %s channel ignored (disabled)\n",
2637 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2638 continue;
2639 }
2640 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2641 acer_pci_intr);
2642 if (cp->hw_ok == 0)
2643 continue;
2644 if (pciide_chan_candisable(cp)) {
2645 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2646 pci_conf_write(sc->sc_pc, sc->sc_tag,
2647 PCI_CLASS_REG, cr);
2648 }
2649 pciide_map_compat_intr(pa, cp, channel, interface);
2650 acer_setup_channel(&cp->wdc_channel);
2651 }
2652 }
2653
2654 void
2655 acer_setup_channel(chp)
2656 struct channel_softc *chp;
2657 {
2658 struct ata_drive_datas *drvp;
2659 int drive;
2660 u_int32_t acer_fifo_udma;
2661 u_int32_t idedma_ctl;
2662 struct pciide_channel *cp = (struct pciide_channel*)chp;
2663 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2664
2665 idedma_ctl = 0;
2666 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2667 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2668 acer_fifo_udma), DEBUG_PROBE);
2669 /* setup DMA if needed */
2670 pciide_channel_dma_setup(cp);
2671
2672 for (drive = 0; drive < 2; drive++) {
2673 drvp = &chp->ch_drive[drive];
2674 /* If no drive, skip */
2675 if ((drvp->drive_flags & DRIVE) == 0)
2676 continue;
2677 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2678 "channel %d drive %d 0x%x\n", chp->channel, drive,
2679 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2680 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2681 /* clear FIFO/DMA mode */
2682 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2683 ACER_UDMA_EN(chp->channel, drive) |
2684 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2685
2686 /* add timing values, setup DMA if needed */
2687 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2688 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2689 acer_fifo_udma |=
2690 ACER_FTH_OPL(chp->channel, drive, 0x1);
2691 goto pio;
2692 }
2693
2694 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2695 if (drvp->drive_flags & DRIVE_UDMA) {
2696 /* use Ultra/DMA */
2697 drvp->drive_flags &= ~DRIVE_DMA;
2698 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2699 acer_fifo_udma |=
2700 ACER_UDMA_TIM(chp->channel, drive,
2701 acer_udma[drvp->UDMA_mode]);
2702 } else {
2703 /*
2704 * use Multiword DMA
2705 * Timings will be used for both PIO and DMA,
2706 * so adjust DMA mode if needed
2707 */
2708 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2709 drvp->PIO_mode = drvp->DMA_mode + 2;
2710 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2711 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2712 drvp->PIO_mode - 2 : 0;
2713 if (drvp->DMA_mode == 0)
2714 drvp->PIO_mode = 0;
2715 }
2716 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2717 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2718 ACER_IDETIM(chp->channel, drive),
2719 acer_pio[drvp->PIO_mode]);
2720 }
2721 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2722 acer_fifo_udma), DEBUG_PROBE);
2723 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2724 if (idedma_ctl != 0) {
2725 /* Add software bits in status register */
2726 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2727 IDEDMA_CTL, idedma_ctl);
2728 }
2729 pciide_print_modes(cp);
2730 }
2731
2732 int
2733 acer_pci_intr(arg)
2734 void *arg;
2735 {
2736 struct pciide_softc *sc = arg;
2737 struct pciide_channel *cp;
2738 struct channel_softc *wdc_cp;
2739 int i, rv, crv;
2740 u_int32_t chids;
2741
2742 rv = 0;
2743 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2744 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2745 cp = &sc->pciide_channels[i];
2746 wdc_cp = &cp->wdc_channel;
2747 		/* If a compat channel, skip it. */
2748 if (cp->compat)
2749 continue;
2750 if (chids & ACER_CHIDS_INT(i)) {
2751 crv = wdcintr(wdc_cp);
2752 if (crv == 0)
2753 printf("%s:%d: bogus intr\n",
2754 sc->sc_wdcdev.sc_dev.dv_xname, i);
2755 else
2756 rv = 1;
2757 }
2758 }
2759 return rv;
2760 }
2761
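/*
 * Attach a HighPoint HPT366/HPT370 controller.  The HPT366 shows up as
 * two single-channel PCI functions, so the function number selects the
 * compatibility channel; the HPT370 is a single function with two
 * channels, gated by the HPT370_CTRL1 enable bits.
 */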
2762 void
2763 hpt_chip_map(sc, pa)
2764 struct pciide_softc *sc;
2765 struct pci_attach_args *pa;
2766 {
2767 struct pciide_channel *cp;
2768 int i, compatchan, revision;
2769 pcireg_t interface;
2770 bus_size_t cmdsize, ctlsize;
2771
2772 if (pciide_chipen(sc, pa) == 0)
2773 return;
2774 revision = PCI_REVISION(pa->pa_class);
2775
2776 /*
2777 	 * When the chip is in native mode it identifies itself as a
2778 	 * 'misc mass storage' device. Fake the interface in this case.
2779 */
2780 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
2781 interface = PCI_INTERFACE(pa->pa_class);
2782 } else {
2783 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2784 PCIIDE_INTERFACE_PCI(0);
2785 if (revision == HPT370_REV)
2786 interface |= PCIIDE_INTERFACE_PCI(1);
2787 }
2788
2789 printf("%s: bus-master DMA support present",
2790 sc->sc_wdcdev.sc_dev.dv_xname);
2791 pciide_mapreg_dma(sc, pa);
2792 printf("\n");
2793 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2794 WDC_CAPABILITY_MODE;
2795 if (sc->sc_dma_ok) {
2796 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2797 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2798 sc->sc_wdcdev.irqack = pciide_irqack;
2799 }
2800 sc->sc_wdcdev.PIO_cap = 4;
2801 sc->sc_wdcdev.DMA_cap = 2;
2802 sc->sc_wdcdev.UDMA_cap = 4;
2803
2804 sc->sc_wdcdev.set_modes = hpt_setup_channel;
2805 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2806 sc->sc_wdcdev.nchannels = (revision == HPT366_REV) ? 1 : 2;
2807 if (revision == HPT366_REV) {
2808 /*
2809 * The 366 has 2 PCI IDE functions, one for primary and one
2810 		 * for secondary, so we need to call pciide_mapregs_compat()
2811 		 * with the real channel.
2812 */
2813 if (pa->pa_function == 0) {
2814 compatchan = 0;
2815 } else if (pa->pa_function == 1) {
2816 compatchan = 1;
2817 } else {
2818 printf("%s: unexpected PCI function %d\n",
2819 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2820 return;
2821 }
2822 sc->sc_wdcdev.nchannels = 1;
2823 } else {
2824 sc->sc_wdcdev.nchannels = 2;
2825 }
2826 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2827 		cp = &sc->pciide_channels[i];
2828 if (sc->sc_wdcdev.nchannels > 1) {
2829 compatchan = i;
2830 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
2831 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
2832 printf("%s: %s channel ignored (disabled)\n",
2833 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2834 continue;
2835 }
2836 }
2837 if (pciide_chansetup(sc, i, interface) == 0)
2838 continue;
2839 if (interface & PCIIDE_INTERFACE_PCI(i)) {
2840 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
2841 &ctlsize, hpt_pci_intr);
2842 } else {
2843 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
2844 &cmdsize, &ctlsize);
2845 }
2846 if (cp->hw_ok == 0)
2847 return;
2848 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2849 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2850 wdcattach(&cp->wdc_channel);
2851 hpt_setup_channel(&cp->wdc_channel);
2852 }
2853
2854 return;
2855 }
2856
2857
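/*
 * Set per-drive timings on the HPT36x/370.  The HPT_CSEL register
 * appears to carry a cable-detect bit per channel; when that bit is set,
 * Ultra-DMA is limited to mode 2.  The timing words come from the
 * hpt366_* or hpt370_* tables depending on which chip was attached.
 */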
2858 void
2859 hpt_setup_channel(chp)
2860 struct channel_softc *chp;
2861 {
2862 struct ata_drive_datas *drvp;
2863 int drive;
2864 int cable;
2865 u_int32_t before, after;
2866 u_int32_t idedma_ctl;
2867 struct pciide_channel *cp = (struct pciide_channel*)chp;
2868 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2869
2870 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
2871
2872 /* setup DMA if needed */
2873 pciide_channel_dma_setup(cp);
2874
2875 idedma_ctl = 0;
2876
2877 /* Per drive settings */
2878 for (drive = 0; drive < 2; drive++) {
2879 drvp = &chp->ch_drive[drive];
2880 /* If no drive, skip */
2881 if ((drvp->drive_flags & DRIVE) == 0)
2882 continue;
2883 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
2884 HPT_IDETIM(chp->channel, drive));
2885
2886 /* add timing values, setup DMA if needed */
2887 if (drvp->drive_flags & DRIVE_UDMA) {
2888 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
2889 drvp->UDMA_mode > 2)
2890 drvp->UDMA_mode = 2;
2891 after = (sc->sc_wdcdev.nchannels == 2) ?
2892 hpt370_udma[drvp->UDMA_mode] :
2893 hpt366_udma[drvp->UDMA_mode];
2894 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2895 } else if (drvp->drive_flags & DRIVE_DMA) {
2896 /*
2897 * use Multiword DMA.
2898 * Timings will be used for both PIO and DMA, so adjust
2899 * DMA mode if needed
2900 */
2901 if (drvp->PIO_mode >= 3 &&
2902 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2903 drvp->DMA_mode = drvp->PIO_mode - 2;
2904 }
2905 after = (sc->sc_wdcdev.nchannels == 2) ?
2906 hpt370_dma[drvp->DMA_mode] :
2907 hpt366_dma[drvp->DMA_mode];
2908 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2909 } else {
2910 /* PIO only */
2911 after = (sc->sc_wdcdev.nchannels == 2) ?
2912 hpt370_pio[drvp->PIO_mode] :
2913 hpt366_pio[drvp->PIO_mode];
2914 }
2915 pci_conf_write(sc->sc_pc, sc->sc_tag,
2916 HPT_IDETIM(chp->channel, drive), after);
2917 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
2918 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
2919 after, before), DEBUG_PROBE);
2920 }
2921 if (idedma_ctl != 0) {
2922 /* Add software bits in status register */
2923 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2924 IDEDMA_CTL, idedma_ctl);
2925 }
2926 pciide_print_modes(cp);
2927 }
2928
2929 int
2930 hpt_pci_intr(arg)
2931 void *arg;
2932 {
2933 struct pciide_softc *sc = arg;
2934 struct pciide_channel *cp;
2935 struct channel_softc *wdc_cp;
2936 int rv = 0;
2937 int dmastat, i, crv;
2938
2939 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2940 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2941 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
2942 if((dmastat & IDEDMA_CTL_INTR) == 0)
2943 continue;
2944 cp = &sc->pciide_channels[i];
2945 wdc_cp = &cp->wdc_channel;
2946 crv = wdcintr(wdc_cp);
2947 if (crv == 0) {
2948 printf("%s:%d: bogus intr\n",
2949 sc->sc_wdcdev.sc_dev.dv_xname, i);
2950 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2951 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
2952 } else
2953 rv = 1;
2954 }
2955 return rv;
2956 }
2957
2958
2959 /* A macro to test product */
2960 #define PDC_IS_262(sc) (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66)
2961
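/*
 * Attach a Promise PDC202xx controller.  Because the chip may have been
 * left in RAID mode by the firmware, the interface word is synthesized
 * from the PDC2xx_STATE register rather than taken from PCI_CLASS_REG,
 * and conservative default timings are written for every drive before
 * the channels are probed.
 */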
2962 void
2963 pdc202xx_chip_map(sc, pa)
2964 struct pciide_softc *sc;
2965 struct pci_attach_args *pa;
2966 {
2967 struct pciide_channel *cp;
2968 int channel;
2969 pcireg_t interface, st, mode;
2970 bus_size_t cmdsize, ctlsize;
2971
2972 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
2973 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
2974 DEBUG_PROBE);
2975 if (pciide_chipen(sc, pa) == 0)
2976 return;
2977
2978 /* turn off RAID mode */
2979 st &= ~PDC2xx_STATE_IDERAID;
2980
2981 /*
2982 	 * We can't rely on the PCI_CLASS_REG content if the chip was in RAID
2983 	 * mode. We have to fake the interface.
2984 */
2985 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
2986 if (st & PDC2xx_STATE_NATIVE)
2987 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
2988
2989 printf("%s: bus-master DMA support present",
2990 sc->sc_wdcdev.sc_dev.dv_xname);
2991 pciide_mapreg_dma(sc, pa);
2992 printf("\n");
2993 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2994 WDC_CAPABILITY_MODE;
2995 if (sc->sc_dma_ok) {
2996 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2997 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2998 sc->sc_wdcdev.irqack = pciide_irqack;
2999 }
3000 sc->sc_wdcdev.PIO_cap = 4;
3001 sc->sc_wdcdev.DMA_cap = 2;
3002 if (PDC_IS_262(sc))
3003 sc->sc_wdcdev.UDMA_cap = 4;
3004 else
3005 sc->sc_wdcdev.UDMA_cap = 2;
3006 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3007 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3008 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3009
3010 /* setup failsafe defaults */
3011 mode = 0;
3012 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3013 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3014 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3015 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3016 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3017 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3018 "initial timings 0x%x, now 0x%x\n", channel,
3019 pci_conf_read(sc->sc_pc, sc->sc_tag,
3020 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3021 DEBUG_PROBE);
3022 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3023 mode | PDC2xx_TIM_IORDYp);
3024 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3025 "initial timings 0x%x, now 0x%x\n", channel,
3026 pci_conf_read(sc->sc_pc, sc->sc_tag,
3027 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3028 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3029 mode);
3030 }
3031
3032 mode = PDC2xx_SCR_DMA;
3033 if (PDC_IS_262(sc)) {
3034 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3035 } else {
3036 /* the BIOS set it up this way */
3037 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3038 }
3039 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3040 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3041 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3042 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3043 DEBUG_PROBE);
3044 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3045
3046 /* controller initial state register is OK even without BIOS */
3047 /* Set DMA mode to IDE DMA compatibility */
3048 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3049 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3050 DEBUG_PROBE);
3051 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3052 mode | 0x1);
3053 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3054 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3055 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3056 mode | 0x1);
3057
3058 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3059 cp = &sc->pciide_channels[channel];
3060 if (pciide_chansetup(sc, channel, interface) == 0)
3061 continue;
3062 if ((st & (PDC_IS_262(sc) ?
3063 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3064 printf("%s: %s channel ignored (disabled)\n",
3065 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3066 continue;
3067 }
3068 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3069 pdc202xx_pci_intr);
3070 if (cp->hw_ok == 0)
3071 continue;
3072 if (pciide_chan_candisable(cp))
3073 st &= ~(PDC_IS_262(sc) ?
3074 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3075 pciide_map_compat_intr(pa, cp, channel, interface);
3076 pdc202xx_setup_channel(&cp->wdc_channel);
3077 }
3078 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3079 DEBUG_PROBE);
3080 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3081 return;
3082 }
3083
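/*
 * Program PDC202xx timings for one channel.  On the Ultra/66 parts the
 * PDC262_U66 bit (presumably the 66 MHz Ultra-DMA clock select) is
 * enabled only when a drive actually runs Ultra-DMA mode 3 or higher;
 * both drives are first trimmed back to Ultra-DMA 2 when the channel's
 * PDC262_STATE_80P bit is set or when either drive is already limited to
 * mode 2.  When an ATAPI device is present and one drive uses Ultra-DMA
 * while the other only does multiword DMA, the Ultra-DMA bit in
 * PDC262_ATAPI is cleared.
 */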
3084 void
3085 pdc202xx_setup_channel(chp)
3086 struct channel_softc *chp;
3087 {
3088 struct ata_drive_datas *drvp;
3089 int drive;
3090 pcireg_t mode, st;
3091 u_int32_t idedma_ctl, scr, atapi;
3092 struct pciide_channel *cp = (struct pciide_channel*)chp;
3093 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3094 int channel = chp->channel;
3095
3096 /* setup DMA if needed */
3097 pciide_channel_dma_setup(cp);
3098
3099 idedma_ctl = 0;
3100
3101 /* Per channel settings */
3102 if (PDC_IS_262(sc)) {
3103 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3104 PDC262_U66);
3105 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3106 		/* Trim UDMA mode */
3107 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3108 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3109 chp->ch_drive[0].UDMA_mode <= 2) ||
3110 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3111 chp->ch_drive[1].UDMA_mode <= 2)) {
3112 if (chp->ch_drive[0].UDMA_mode > 2)
3113 chp->ch_drive[0].UDMA_mode = 2;
3114 if (chp->ch_drive[1].UDMA_mode > 2)
3115 chp->ch_drive[1].UDMA_mode = 2;
3116 }
3117 /* Set U66 if needed */
3118 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3119 chp->ch_drive[0].UDMA_mode > 2) ||
3120 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3121 chp->ch_drive[1].UDMA_mode > 2))
3122 scr |= PDC262_U66_EN(channel);
3123 else
3124 scr &= ~PDC262_U66_EN(channel);
3125 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3126 PDC262_U66, scr);
3127 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3128 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3129 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3130 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3131 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3132 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3133 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3134 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3135 atapi = 0;
3136 else
3137 atapi = PDC262_ATAPI_UDMA;
3138 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3139 PDC262_ATAPI(channel), atapi);
3140 }
3141 }
3142 for (drive = 0; drive < 2; drive++) {
3143 drvp = &chp->ch_drive[drive];
3144 /* If no drive, skip */
3145 if ((drvp->drive_flags & DRIVE) == 0)
3146 continue;
3147 mode = 0;
3148 if (drvp->drive_flags & DRIVE_UDMA) {
3149 mode = PDC2xx_TIM_SET_MB(mode,
3150 pdc2xx_udma_mb[drvp->UDMA_mode]);
3151 mode = PDC2xx_TIM_SET_MC(mode,
3152 pdc2xx_udma_mc[drvp->UDMA_mode]);
3153 drvp->drive_flags &= ~DRIVE_DMA;
3154 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3155 } else if (drvp->drive_flags & DRIVE_DMA) {
3156 mode = PDC2xx_TIM_SET_MB(mode,
3157 pdc2xx_dma_mb[drvp->DMA_mode]);
3158 mode = PDC2xx_TIM_SET_MC(mode,
3159 pdc2xx_dma_mc[drvp->DMA_mode]);
3160 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3161 } else {
3162 mode = PDC2xx_TIM_SET_MB(mode,
3163 pdc2xx_dma_mb[0]);
3164 mode = PDC2xx_TIM_SET_MC(mode,
3165 pdc2xx_dma_mc[0]);
3166 }
3167 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3168 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3169 if (drvp->drive_flags & DRIVE_ATA)
3170 mode |= PDC2xx_TIM_PRE;
3171 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3172 if (drvp->PIO_mode >= 3) {
3173 mode |= PDC2xx_TIM_IORDY;
3174 if (drive == 0)
3175 mode |= PDC2xx_TIM_IORDYp;
3176 }
3177 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3178 "timings 0x%x\n",
3179 sc->sc_wdcdev.sc_dev.dv_xname,
3180 chp->channel, drive, mode), DEBUG_PROBE);
3181 pci_conf_write(sc->sc_pc, sc->sc_tag,
3182 PDC2xx_TIM(chp->channel, drive), mode);
3183 }
3184 if (idedma_ctl != 0) {
3185 /* Add software bits in status register */
3186 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3187 IDEDMA_CTL, idedma_ctl);
3188 }
3189 pciide_print_modes(cp);
3190 }
3191
3192 int
3193 pdc202xx_pci_intr(arg)
3194 void *arg;
3195 {
3196 struct pciide_softc *sc = arg;
3197 struct pciide_channel *cp;
3198 struct channel_softc *wdc_cp;
3199 int i, rv, crv;
3200 u_int32_t scr;
3201
3202 rv = 0;
3203 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3204 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3205 cp = &sc->pciide_channels[i];
3206 wdc_cp = &cp->wdc_channel;
3207 		/* If a compat channel, skip it. */
3208 if (cp->compat)
3209 continue;
3210 if (scr & PDC2xx_SCR_INT(i)) {
3211 crv = wdcintr(wdc_cp);
3212 if (crv == 0)
3213 printf("%s:%d: bogus intr\n",
3214 sc->sc_wdcdev.sc_dev.dv_xname, i);
3215 else
3216 rv = 1;
3217 }
3218 }
3219 return rv;
3220 }
3221
3222 void
3223 opti_chip_map(sc, pa)
3224 struct pciide_softc *sc;
3225 struct pci_attach_args *pa;
3226 {
3227 struct pciide_channel *cp;
3228 bus_size_t cmdsize, ctlsize;
3229 pcireg_t interface;
3230 u_int8_t init_ctrl;
3231 int channel;
3232
3233 if (pciide_chipen(sc, pa) == 0)
3234 return;
3235 printf("%s: bus-master DMA support present",
3236 sc->sc_wdcdev.sc_dev.dv_xname);
3237 pciide_mapreg_dma(sc, pa);
3238 printf("\n");
3239
3240 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3241 WDC_CAPABILITY_MODE;
3242 sc->sc_wdcdev.PIO_cap = 4;
3243 if (sc->sc_dma_ok) {
3244 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3245 sc->sc_wdcdev.irqack = pciide_irqack;
3246 sc->sc_wdcdev.DMA_cap = 2;
3247 }
3248 sc->sc_wdcdev.set_modes = opti_setup_channel;
3249
3250 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3251 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3252
3253 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3254 OPTI_REG_INIT_CONTROL);
3255
3256 interface = PCI_INTERFACE(pa->pa_class);
3257
3258 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3259 cp = &sc->pciide_channels[channel];
3260 if (pciide_chansetup(sc, channel, interface) == 0)
3261 continue;
3262 if (channel == 1 &&
3263 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3264 printf("%s: %s channel ignored (disabled)\n",
3265 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3266 continue;
3267 }
3268 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3269 pciide_pci_intr);
3270 if (cp->hw_ok == 0)
3271 continue;
3272 pciide_map_compat_intr(pa, cp, channel, interface);
3273 if (cp->hw_ok == 0)
3274 continue;
3275 opti_setup_channel(&cp->wdc_channel);
3276 }
3277 }
3278
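/*
 * Program the OPTi timings.  The mode index is PIO_mode for PIO-only
 * drives and DMA_mode + 5 for DMA; since the `Address Setup Time' field
 * is shared between both drives on a channel, the faster drive is slowed
 * down to the other drive's mode when their setup times would differ.
 */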
3279 void
3280 opti_setup_channel(chp)
3281 struct channel_softc *chp;
3282 {
3283 struct ata_drive_datas *drvp;
3284 struct pciide_channel *cp = (struct pciide_channel*)chp;
3285 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3286 int drive, spd;
3287 int mode[2];
3288 u_int8_t rv, mr;
3289
3290 /*
3291 * The `Delay' and `Address Setup Time' fields of the
3292 * Miscellaneous Register are always zero initially.
3293 */
3294 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3295 mr &= ~(OPTI_MISC_DELAY_MASK |
3296 OPTI_MISC_ADDR_SETUP_MASK |
3297 OPTI_MISC_INDEX_MASK);
3298
3299 /* Prime the control register before setting timing values */
3300 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3301
3302 	/* Determine the clock rate of the PCI bus the chip is attached to */
3303 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3304 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3305
3306 /* setup DMA if needed */
3307 pciide_channel_dma_setup(cp);
3308
3309 for (drive = 0; drive < 2; drive++) {
3310 drvp = &chp->ch_drive[drive];
3311 /* If no drive, skip */
3312 if ((drvp->drive_flags & DRIVE) == 0) {
3313 mode[drive] = -1;
3314 continue;
3315 }
3316
3317 if ((drvp->drive_flags & DRIVE_DMA)) {
3318 /*
3319 * Timings will be used for both PIO and DMA,
3320 * so adjust DMA mode if needed
3321 */
3322 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3323 drvp->PIO_mode = drvp->DMA_mode + 2;
3324 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3325 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3326 drvp->PIO_mode - 2 : 0;
3327 if (drvp->DMA_mode == 0)
3328 drvp->PIO_mode = 0;
3329
3330 mode[drive] = drvp->DMA_mode + 5;
3331 } else
3332 mode[drive] = drvp->PIO_mode;
3333
3334 if (drive && mode[0] >= 0 &&
3335 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3336 /*
3337 * Can't have two drives using different values
3338 * for `Address Setup Time'.
3339 * Slow down the faster drive to compensate.
3340 */
3341 int d = (opti_tim_as[spd][mode[0]] >
3342 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3343
3344 mode[d] = mode[1-d];
3345 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3346 chp->ch_drive[d].DMA_mode = 0;
3347 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3348 }
3349 }
3350
3351 for (drive = 0; drive < 2; drive++) {
3352 int m;
3353 if ((m = mode[drive]) < 0)
3354 continue;
3355
3356 /* Set the Address Setup Time and select appropriate index */
3357 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3358 rv |= OPTI_MISC_INDEX(drive);
3359 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3360
3361 /* Set the pulse width and recovery timing parameters */
3362 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3363 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3364 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3365 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3366
3367 /* Set the Enhanced Mode register appropriately */
3368 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3369 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3370 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3371 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3372 }
3373
3374 /* Finally, enable the timings */
3375 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3376
3377 pciide_print_modes(cp);
3378 }
3379