/*	$NetBSD: pciide.c,v 1.106 2001/02/18 17:58:59 bouyer Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <uvm/uvm_extern.h>
100
101 #include <machine/endian.h>
102
103 #include <dev/pci/pcireg.h>
104 #include <dev/pci/pcivar.h>
105 #include <dev/pci/pcidevs.h>
106 #include <dev/pci/pciidereg.h>
107 #include <dev/pci/pciidevar.h>
108 #include <dev/pci/pciide_piix_reg.h>
109 #include <dev/pci/pciide_amd_reg.h>
110 #include <dev/pci/pciide_apollo_reg.h>
111 #include <dev/pci/pciide_cmd_reg.h>
112 #include <dev/pci/pciide_cy693_reg.h>
113 #include <dev/pci/pciide_sis_reg.h>
114 #include <dev/pci/pciide_acer_reg.h>
115 #include <dev/pci/pciide_pdc202xx_reg.h>
116 #include <dev/pci/pciide_opti_reg.h>
117 #include <dev/pci/pciide_hpt_reg.h>
118 #include <dev/pci/cy82c693var.h>
119
120 #include "opt_pciide.h"
121
122 /* inlines for reading/writing 8-bit PCI registers */
123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
124 int));
125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
126 int, u_int8_t));
127
128 static __inline u_int8_t
129 pciide_pci_read(pc, pa, reg)
130 pci_chipset_tag_t pc;
131 pcitag_t pa;
132 int reg;
133 {
134
135 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
136 ((reg & 0x03) * 8) & 0xff);
137 }
138
139 static __inline void
140 pciide_pci_write(pc, pa, reg, val)
141 pci_chipset_tag_t pc;
142 pcitag_t pa;
143 int reg;
144 u_int8_t val;
145 {
146 pcireg_t pcival;
147
148 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
149 pcival &= ~(0xff << ((reg & 0x03) * 8));
150 pcival |= (val << ((reg & 0x03) * 8));
151 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
152 }
153
154 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
155
156 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157 void piix_setup_channel __P((struct channel_softc*));
158 void piix3_4_setup_channel __P((struct channel_softc*));
159 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
160 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
161 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162
163 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164 void amd756_setup_channel __P((struct channel_softc*));
165
166 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167 void apollo_setup_channel __P((struct channel_softc*));
168
169 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
170 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_9_setup_channel __P((struct channel_softc*));
172 void cmd_channel_map __P((struct pci_attach_args *,
173 struct pciide_softc *, int));
174 int cmd_pci_intr __P((void *));
175 void cmd646_9_irqack __P((struct channel_softc *));
176
177 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void cy693_setup_channel __P((struct channel_softc*));
179
180 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void sis_setup_channel __P((struct channel_softc*));
182
183 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
184 void acer_setup_channel __P((struct channel_softc*));
185 int acer_pci_intr __P((void *));
186
187 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
188 void pdc202xx_setup_channel __P((struct channel_softc*));
189 int pdc202xx_pci_intr __P((void *));
190
191 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
192 void opti_setup_channel __P((struct channel_softc*));
193
194 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
195 void hpt_setup_channel __P((struct channel_softc*));
196 int hpt_pci_intr __P((void *));
197
198 void pciide_channel_dma_setup __P((struct pciide_channel *));
199 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
200 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
201 void pciide_dma_start __P((void*, int, int));
202 int pciide_dma_finish __P((void*, int, int, int));
203 void pciide_irqack __P((struct channel_softc *));
204 void pciide_print_modes __P((struct pciide_channel *));
205
/*
 * Per-product description: identifies a supported controller and the
 * chip-specific routine used to map and configure it at attach time.
 */
struct pciide_product_desc {
	u_int32_t ide_product;	/* PCI_PRODUCT_* id to match */
	int ide_flags;		/* IDE_* quirk flags, see below */
	const char *ide_name;	/* name printed at attach time */
	/* map and setup chip, probe drives */
	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE	0x0001 /* accept even if class != pciide */
#define IDE_16BIT_IOSPACE	0x0002 /* I/O space BARS ignore upper word */
217
/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
	0,
	0,
	"Generic PCI IDE controller",
	default_chip_map,
};

/* Intel PIIX/ICH family; all use the PIIX chip map except the 82092AA. */
const struct pciide_product_desc pciide_intel_products[] = {
	{ PCI_PRODUCT_INTEL_82092AA,
	  0,
	  "Intel 82092AA IDE controller",
	  default_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371FB_IDE,
	  0,
	  "Intel 82371FB IDE controller (PIIX)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371SB_IDE,
	  0,
	  "Intel 82371SB IDE Interface (PIIX3)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82371AB_IDE,
	  0,
	  "Intel 82371AB IDE controller (PIIX4)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82440MX_IDE,
	  0,
	  "Intel 82440MX IDE controller",
	  piix_chip_map
	},
	{ PCI_PRODUCT_INTEL_82801AA_IDE,
	  0,
	  "Intel 82801AA IDE Controller (ICH)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801AB_IDE,
	  0,
	  "Intel 82801AB IDE Controller (ICH0)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801BA_IDE,
	  0,
	  "Intel 82801BA IDE Controller (ICH2)",
	  piix_chip_map,
	},
	{ PCI_PRODUCT_INTEL_82801BAM_IDE,
	  0,
	  "Intel 82801BAM IDE Controller (ICH2)",
	  piix_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* AMD */
const struct pciide_product_desc pciide_amd_products[] = {
	{ PCI_PRODUCT_AMD_PBC756_IDE,
	  0,
	  "Advanced Micro Devices AMD756 IDE Controller",
	  amd756_chip_map
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* CMD Technology; the 648/649 don't advertise the PCI IDE class code. */
const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  0,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  0,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  0,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  IDE_PCI_CLASS_OVERRIDE,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* VIA Apollo */
const struct pciide_product_desc pciide_via_products[] = {
	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
	  0,
	  "VIA Tech VT82C586 IDE Controller",
	  apollo_chip_map,
	},
	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
	  0,
	  "VIA Tech VT82C586A IDE Controller",
	  apollo_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* Cypress (sold under the Contaq vendor ID); BM-DMA BAR must be below 64K. */
const struct pciide_product_desc pciide_cypress_products[] = {
	{ PCI_PRODUCT_CONTAQ_82C693,
	  IDE_16BIT_IOSPACE,
	  "Cypress 82C693 IDE Controller",
	  cy693_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* SiS */
const struct pciide_product_desc pciide_sis_products[] = {
	{ PCI_PRODUCT_SIS_5597_IDE,
	  0,
	  "Silicon Integrated System 5597/5598 IDE controller",
	  sis_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* Acer Labs (ALi) */
const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,
	  0,
	  "Acer Labs M5229 UDMA IDE Controller",
	  acer_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* Promise; none of these advertise the PCI IDE class code. */
const struct pciide_product_desc pciide_promise_products[] = {
	{ PCI_PRODUCT_PROMISE_ULTRA33,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA66,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ PCI_PRODUCT_PROMISE_ULTRA100X,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
	  pdc202xx_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* OPTi */
const struct pciide_product_desc pciide_opti_products[] = {
	{ PCI_PRODUCT_OPTI_82C621,
	  0,
	  "OPTi 82c621 PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82C568,
	  0,
	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ PCI_PRODUCT_OPTI_82D568,
	  0,
	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
	  opti_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* Triones/HighPoint */
const struct pciide_product_desc pciide_triones_products[] = {
	{ PCI_PRODUCT_TRIONES_HPT366,
	  IDE_PCI_CLASS_OVERRIDE,
	  "Triones/Highpoint HPT366/370 IDE Controller",
	  hpt_chip_map,
	},
	{ 0,	/* sentinel */
	  0,
	  NULL,
	}
};

/* Maps a PCI vendor ID to its (NULL-name-terminated) product table. */
struct pciide_vendor_desc {
	u_int32_t ide_vendor;		/* PCI_VENDOR_* id */
	const struct pciide_product_desc *ide_products;
};

/* Master table searched by pciide_lookup_product(); NULL-terminated. */
const struct pciide_vendor_desc pciide_vendors[] = {
	{ PCI_VENDOR_INTEL, pciide_intel_products },
	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
	{ PCI_VENDOR_VIATECH, pciide_via_products },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
	{ PCI_VENDOR_SIS, pciide_sis_products },
	{ PCI_VENDOR_ALI, pciide_acer_products },
	{ PCI_VENDOR_PROMISE, pciide_promise_products },
	{ PCI_VENDOR_AMD, pciide_amd_products },
	{ PCI_VENDOR_OPTI, pciide_opti_products },
	{ PCI_VENDOR_TRIONES, pciide_triones_products },
	{ 0, NULL }
};
454
/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA 0x01

int pciide_match __P((struct device *, struct cfdata *, void *));
void pciide_attach __P((struct device *, struct device *, void *));

/* autoconf(9) attachment glue: softc size, match and attach entry points */
struct cfattach pciide_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach
};
464 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
465 int pciide_mapregs_compat __P(( struct pci_attach_args *,
466 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
467 int pciide_mapregs_native __P((struct pci_attach_args *,
468 struct pciide_channel *, bus_size_t *, bus_size_t *,
469 int (*pci_intr) __P((void *))));
470 void pciide_mapreg_dma __P((struct pciide_softc *,
471 struct pci_attach_args *));
472 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
473 void pciide_mapchan __P((struct pci_attach_args *,
474 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
475 int (*pci_intr) __P((void *))));
476 int pciide_chan_candisable __P((struct pciide_channel *));
477 void pciide_map_compat_intr __P(( struct pci_attach_args *,
478 struct pciide_channel *, int, int));
479 int pciide_print __P((void *, const char *pnp));
480 int pciide_compat_intr __P((void *));
481 int pciide_pci_intr __P((void *));
482 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
483
484 const struct pciide_product_desc *
485 pciide_lookup_product(id)
486 u_int32_t id;
487 {
488 const struct pciide_product_desc *pp;
489 const struct pciide_vendor_desc *vp;
490
491 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
492 if (PCI_VENDOR(id) == vp->ide_vendor)
493 break;
494
495 if ((pp = vp->ide_products) == NULL)
496 return NULL;
497
498 for (; pp->ide_name != NULL; pp++)
499 if (PCI_PRODUCT(id) == pp->ide_product)
500 break;
501
502 if (pp->ide_name == NULL)
503 return NULL;
504 return pp;
505 }
506
507 int
508 pciide_match(parent, match, aux)
509 struct device *parent;
510 struct cfdata *match;
511 void *aux;
512 {
513 struct pci_attach_args *pa = aux;
514 const struct pciide_product_desc *pp;
515
516 /*
517 * Check the ID register to see that it's a PCI IDE controller.
518 * If it is, we assume that we can deal with it; it _should_
519 * work in a standardized way...
520 */
521 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
522 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
523 return (1);
524 }
525
526 /*
527 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE
528 * controllers. Let see if we can deal with it anyway.
529 */
530 pp = pciide_lookup_product(pa->pa_id);
531 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
532 return (1);
533 }
534
535 return (0);
536 }
537
/*
 * autoconf attach routine: identify the controller, run the
 * chip-specific setup (which probes channels and drives), and finally
 * enable PCI bus mastering if DMA turned out to be usable.
 */
void
pciide_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	struct pciide_softc *sc = (struct pciide_softc *)self;
	pcireg_t csr;
	char devinfo[256];
	const char *displaydev;

	/* Unknown devices fall back to the generic chip map. */
	sc->sc_pp = pciide_lookup_product(pa->pa_id);
	if (sc->sc_pp == NULL) {
		sc->sc_pp = &default_product_desc;
		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
		displaydev = devinfo;
	} else
		displaydev = sc->sc_pp->ide_name;

	printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
#ifdef WDCDEBUG
	if (wdcdebug_pciide_mask & DEBUG_PROBE)
		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
	/* Chip-specific mapping/setup; may set sc_dma_ok as a side effect. */
	sc->sc_pp->chip_map(sc, pa);

	/* Only become a bus master if DMA registers were mapped OK. */
	if (sc->sc_dma_ok) {
		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	}
	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}
577
578 /* tell wether the chip is enabled or not */
579 int
580 pciide_chipen(sc, pa)
581 struct pciide_softc *sc;
582 struct pci_attach_args *pa;
583 {
584 pcireg_t csr;
585 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
586 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
587 PCI_COMMAND_STATUS_REG);
588 printf("%s: device disabled (at %s)\n",
589 sc->sc_wdcdev.sc_dev.dv_xname,
590 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
591 "device" : "bridge");
592 return 0;
593 }
594 return 1;
595 }
596
597 int
598 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
599 struct pci_attach_args *pa;
600 struct pciide_channel *cp;
601 int compatchan;
602 bus_size_t *cmdsizep, *ctlsizep;
603 {
604 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
605 struct channel_softc *wdc_cp = &cp->wdc_channel;
606
607 cp->compat = 1;
608 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
609 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
610
611 wdc_cp->cmd_iot = pa->pa_iot;
612 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
613 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
614 printf("%s: couldn't map %s channel cmd regs\n",
615 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
616 return (0);
617 }
618
619 wdc_cp->ctl_iot = pa->pa_iot;
620 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
621 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
622 printf("%s: couldn't map %s channel ctl regs\n",
623 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
624 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
625 PCIIDE_COMPAT_CMD_SIZE);
626 return (0);
627 }
628
629 return (1);
630 }
631
632 int
633 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
634 struct pci_attach_args * pa;
635 struct pciide_channel *cp;
636 bus_size_t *cmdsizep, *ctlsizep;
637 int (*pci_intr) __P((void *));
638 {
639 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
640 struct channel_softc *wdc_cp = &cp->wdc_channel;
641 const char *intrstr;
642 pci_intr_handle_t intrhandle;
643
644 cp->compat = 0;
645
646 if (sc->sc_pci_ih == NULL) {
647 if (pci_intr_map(pa, &intrhandle) != 0) {
648 printf("%s: couldn't map native-PCI interrupt\n",
649 sc->sc_wdcdev.sc_dev.dv_xname);
650 return 0;
651 }
652 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
653 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
654 intrhandle, IPL_BIO, pci_intr, sc);
655 if (sc->sc_pci_ih != NULL) {
656 printf("%s: using %s for native-PCI interrupt\n",
657 sc->sc_wdcdev.sc_dev.dv_xname,
658 intrstr ? intrstr : "unknown interrupt");
659 } else {
660 printf("%s: couldn't establish native-PCI interrupt",
661 sc->sc_wdcdev.sc_dev.dv_xname);
662 if (intrstr != NULL)
663 printf(" at %s", intrstr);
664 printf("\n");
665 return 0;
666 }
667 }
668 cp->ih = sc->sc_pci_ih;
669 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
670 PCI_MAPREG_TYPE_IO, 0,
671 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
672 printf("%s: couldn't map %s channel cmd regs\n",
673 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
674 return 0;
675 }
676
677 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
678 PCI_MAPREG_TYPE_IO, 0,
679 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
680 printf("%s: couldn't map %s channel ctl regs\n",
681 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
682 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
683 return 0;
684 }
685 /*
686 * In native mode, 4 bytes of I/O space are mapped for the control
687 * register, the control register is at offset 2. Pass the generic
688 * code a handle for only one byte at the rigth offset.
689 */
690 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
691 &wdc_cp->ctl_ioh) != 0) {
692 printf("%s: unable to subregion %s channel ctl regs\n",
693 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
694 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
695 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
696 return 0;
697 }
698 return (1);
699 }
700
/*
 * Map the bus-master DMA registers and, on success, install the DMA
 * entry points in the wdc device.  Sets sc->sc_dma_ok accordingly;
 * on failure it prints a continuation of the attach message line.
 */
void
pciide_mapreg_dma(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	pcireg_t maptype;
	bus_addr_t addr;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done. If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
	 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		/*
		 * For I/O space, first query the BAR address so chips
		 * flagged IDE_16BIT_IOSPACE can be rejected when the
		 * registers sit above 64K (they'd decode only 16 bits).
		 */
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't query registers)");
			break;
		}
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			printf(", but unused (registers at unsafe address %#lx)", (unsigned long)addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		/* Map the registers (I/O or memory space, per maptype). */
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			printf(", but unused (couldn't map registers)");
		} else {
			/* Hook our DMA routines into the generic wdc layer. */
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}
		break;

	default:
		/* 64-bit memory BARs etc. are not supported. */
		sc->sc_dma_ok = 0;
		printf(", but unsupported register maptype (0x%x)", maptype);
	}
}
764
/*
 * Interrupt handler for a compatibility-mode channel: hand the
 * interrupt straight to the generic wdc layer.  Returns wdcintr()'s
 * claim status.
 */
int
pciide_compat_intr(arg)
	void *arg;
{
	struct pciide_channel *cp = arg;

#ifdef DIAGNOSTIC
	/* should only be called for a compat channel */
	if (cp->compat == 0)
		panic("pciide compat intr called for non-compat chan %p\n", cp);
#endif
	return (wdcintr(&cp->wdc_channel));
}
778
779 int
780 pciide_pci_intr(arg)
781 void *arg;
782 {
783 struct pciide_softc *sc = arg;
784 struct pciide_channel *cp;
785 struct channel_softc *wdc_cp;
786 int i, rv, crv;
787
788 rv = 0;
789 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
790 cp = &sc->pciide_channels[i];
791 wdc_cp = &cp->wdc_channel;
792
793 /* If a compat channel skip. */
794 if (cp->compat)
795 continue;
796 /* if this channel not waiting for intr, skip */
797 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
798 continue;
799
800 crv = wdcintr(wdc_cp);
801 if (crv == 0)
802 ; /* leave rv alone */
803 else if (crv == 1)
804 rv = 1; /* claim the intr */
805 else if (rv == 0) /* crv should be -1 in this case */
806 rv = crv; /* if we've done no better, take it */
807 }
808 return (rv);
809 }
810
811 void
812 pciide_channel_dma_setup(cp)
813 struct pciide_channel *cp;
814 {
815 int drive;
816 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
817 struct ata_drive_datas *drvp;
818
819 for (drive = 0; drive < 2; drive++) {
820 drvp = &cp->wdc_channel.ch_drive[drive];
821 /* If no drive, skip */
822 if ((drvp->drive_flags & DRIVE) == 0)
823 continue;
824 /* setup DMA if needed */
825 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
826 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
827 sc->sc_dma_ok == 0) {
828 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
829 continue;
830 }
831 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
832 != 0) {
833 /* Abort DMA setup */
834 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
835 continue;
836 }
837 }
838 }
839
840 int
841 pciide_dma_table_setup(sc, channel, drive)
842 struct pciide_softc *sc;
843 int channel, drive;
844 {
845 bus_dma_segment_t seg;
846 int error, rseg;
847 const bus_size_t dma_table_size =
848 sizeof(struct idedma_table) * NIDEDMA_TABLES;
849 struct pciide_dma_maps *dma_maps =
850 &sc->pciide_channels[channel].dma_maps[drive];
851
852 /* If table was already allocated, just return */
853 if (dma_maps->dma_table)
854 return 0;
855
856 /* Allocate memory for the DMA tables and map it */
857 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
858 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
859 BUS_DMA_NOWAIT)) != 0) {
860 printf("%s:%d: unable to allocate table DMA for "
861 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
862 channel, drive, error);
863 return error;
864 }
865 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
866 dma_table_size,
867 (caddr_t *)&dma_maps->dma_table,
868 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
869 printf("%s:%d: unable to map table DMA for"
870 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
871 channel, drive, error);
872 return error;
873 }
874 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
875 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
876 (unsigned long)seg.ds_addr), DEBUG_PROBE);
877
878 /* Create and load table DMA map for this disk */
879 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
880 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
881 &dma_maps->dmamap_table)) != 0) {
882 printf("%s:%d: unable to create table DMA map for "
883 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
884 channel, drive, error);
885 return error;
886 }
887 if ((error = bus_dmamap_load(sc->sc_dmat,
888 dma_maps->dmamap_table,
889 dma_maps->dma_table,
890 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
891 printf("%s:%d: unable to load table DMA map for "
892 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
893 channel, drive, error);
894 return error;
895 }
896 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
897 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
898 DEBUG_PROBE);
899 /* Create a xfer DMA map for this drive */
900 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
901 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
902 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
903 &dma_maps->dmamap_xfer)) != 0) {
904 printf("%s:%d: unable to create xfer DMA map for "
905 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
906 channel, drive, error);
907 return error;
908 }
909 return 0;
910 }
911
912 int
913 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
914 void *v;
915 int channel, drive;
916 void *databuf;
917 size_t datalen;
918 int flags;
919 {
920 struct pciide_softc *sc = v;
921 int error, seg;
922 struct pciide_dma_maps *dma_maps =
923 &sc->pciide_channels[channel].dma_maps[drive];
924
925 error = bus_dmamap_load(sc->sc_dmat,
926 dma_maps->dmamap_xfer,
927 databuf, datalen, NULL, BUS_DMA_NOWAIT);
928 if (error) {
929 printf("%s:%d: unable to load xfer DMA map for"
930 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
931 channel, drive, error);
932 return error;
933 }
934
935 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
936 dma_maps->dmamap_xfer->dm_mapsize,
937 (flags & WDC_DMA_READ) ?
938 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
939
940 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
941 #ifdef DIAGNOSTIC
942 /* A segment must not cross a 64k boundary */
943 {
944 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
945 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
946 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
947 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
948 printf("pciide_dma: segment %d physical addr 0x%lx"
949 " len 0x%lx not properly aligned\n",
950 seg, phys, len);
951 panic("pciide_dma: buf align");
952 }
953 }
954 #endif
955 dma_maps->dma_table[seg].base_addr =
956 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
957 dma_maps->dma_table[seg].byte_count =
958 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
959 IDEDMA_BYTE_COUNT_MASK);
960 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
961 seg, le32toh(dma_maps->dma_table[seg].byte_count),
962 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
963
964 }
965 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
966 htole32(IDEDMA_BYTE_COUNT_EOT);
967
968 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
969 dma_maps->dmamap_table->dm_mapsize,
970 BUS_DMASYNC_PREWRITE);
971
972 /* Maps are ready. Start DMA function */
973 #ifdef DIAGNOSTIC
974 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
975 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
976 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
977 panic("pciide_dma_init: table align");
978 }
979 #endif
980
981 /* Clear status bits */
982 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
983 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
984 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
985 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
986 /* Write table addr */
987 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
988 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
989 dma_maps->dmamap_table->dm_segs[0].ds_addr);
990 /* set read/write */
991 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
992 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
993 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
994 /* remember flags */
995 dma_maps->dma_flags = flags;
996 return 0;
997 }
998
999 void
1000 pciide_dma_start(v, channel, drive)
1001 void *v;
1002 int channel, drive;
1003 {
1004 struct pciide_softc *sc = v;
1005
1006 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1007 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1008 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1009 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1010 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1011 }
1012
/*
 * Complete (or abort) the DMA transfer on (channel, drive): stop the
 * bus-master engine, unload the data buffer map and decode the status
 * register.  Returns 0 on clean completion, WDC_DMAST_NOIRQ when no
 * interrupt was pending (and "force" is 0), or a mask of WDC_DMAST_*
 * bits describing the errors seen.
 */
int
pciide_dma_finish(v, channel, drive, force)
	void *v;
	int channel, drive;
	int force;
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	/*
	 * Interrupt not posted by this channel and caller isn't forcing
	 * completion: leave the transfer alone.
	 */
	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel (clear the start bit, keep the other bits) */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	/* Controller flagged a bus-master error (e.g. PCI abort). */
	if ((status & IDEDMA_CTL_ERR) != 0) {
		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
		error |= WDC_DMAST_ERR;
	}

	/* Forced completion but the controller never raised an interrupt. */
	if ((status & IDEDMA_CTL_INTR) == 0) {
		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	if ((status & IDEDMA_CTL_ACT) != 0) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}
1065
1066 void
1067 pciide_irqack(chp)
1068 struct channel_softc *chp;
1069 {
1070 struct pciide_channel *cp = (struct pciide_channel*)chp;
1071 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1072
1073 /* clear status bits in IDE DMA registers */
1074 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1075 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1076 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1077 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1078 }
1079
1080 /* some common code used by several chip_map */
1081 int
1082 pciide_chansetup(sc, channel, interface)
1083 struct pciide_softc *sc;
1084 int channel;
1085 pcireg_t interface;
1086 {
1087 struct pciide_channel *cp = &sc->pciide_channels[channel];
1088 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1089 cp->name = PCIIDE_CHANNEL_NAME(channel);
1090 cp->wdc_channel.channel = channel;
1091 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1092 cp->wdc_channel.ch_queue =
1093 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1094 if (cp->wdc_channel.ch_queue == NULL) {
1095 printf("%s %s channel: "
1096 "can't allocate memory for command queue",
1097 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1098 return 0;
1099 }
1100 printf("%s: %s channel %s to %s mode\n",
1101 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1102 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1103 "configured" : "wired",
1104 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1105 "native-PCI" : "compatibility");
1106 return 1;
1107 }
1108
1109 /* some common code used by several chip channel_map */
1110 void
1111 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1112 struct pci_attach_args *pa;
1113 struct pciide_channel *cp;
1114 pcireg_t interface;
1115 bus_size_t *cmdsizep, *ctlsizep;
1116 int (*pci_intr) __P((void *));
1117 {
1118 struct channel_softc *wdc_cp = &cp->wdc_channel;
1119
1120 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1121 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1122 pci_intr);
1123 else
1124 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1125 wdc_cp->channel, cmdsizep, ctlsizep);
1126
1127 if (cp->hw_ok == 0)
1128 return;
1129 wdc_cp->data32iot = wdc_cp->cmd_iot;
1130 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1131 wdcattach(wdc_cp);
1132 }
1133
1134 /*
1135 * Generic code to call to know if a channel can be disabled. Return 1
1136 * if channel can be disabled, 0 if not
1137 */
1138 int
1139 pciide_chan_candisable(cp)
1140 struct pciide_channel *cp;
1141 {
1142 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1143 struct channel_softc *wdc_cp = &cp->wdc_channel;
1144
1145 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1146 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1147 printf("%s: disabling %s channel (no drives)\n",
1148 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1149 cp->hw_ok = 0;
1150 return 1;
1151 }
1152 return 0;
1153 }
1154
1155 /*
1156 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1157 * Set hw_ok=0 on failure
1158 */
1159 void
1160 pciide_map_compat_intr(pa, cp, compatchan, interface)
1161 struct pci_attach_args *pa;
1162 struct pciide_channel *cp;
1163 int compatchan, interface;
1164 {
1165 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1166 struct channel_softc *wdc_cp = &cp->wdc_channel;
1167
1168 if (cp->hw_ok == 0)
1169 return;
1170 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1171 return;
1172
1173 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1174 pa, compatchan, pciide_compat_intr, cp);
1175 if (cp->ih == NULL) {
1176 printf("%s: no compatibility interrupt for use by %s "
1177 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1178 cp->hw_ok = 0;
1179 }
1180 }
1181
/*
 * Report the transfer modes selected for a channel's drives; thin
 * wrapper around the machine-independent wdc_print_modes().
 */
void
pciide_print_modes(cp)
	struct pciide_channel *cp;
{
	wdc_print_modes(&cp->wdc_channel);
}
1188
1189 void
1190 default_chip_map(sc, pa)
1191 struct pciide_softc *sc;
1192 struct pci_attach_args *pa;
1193 {
1194 struct pciide_channel *cp;
1195 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1196 pcireg_t csr;
1197 int channel, drive;
1198 struct ata_drive_datas *drvp;
1199 u_int8_t idedma_ctl;
1200 bus_size_t cmdsize, ctlsize;
1201 char *failreason;
1202
1203 if (pciide_chipen(sc, pa) == 0)
1204 return;
1205
1206 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1207 printf("%s: bus-master DMA support present",
1208 sc->sc_wdcdev.sc_dev.dv_xname);
1209 if (sc->sc_pp == &default_product_desc &&
1210 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1211 PCIIDE_OPTIONS_DMA) == 0) {
1212 printf(", but unused (no driver support)");
1213 sc->sc_dma_ok = 0;
1214 } else {
1215 pciide_mapreg_dma(sc, pa);
1216 if (sc->sc_dma_ok != 0)
1217 printf(", used without full driver "
1218 "support");
1219 }
1220 } else {
1221 printf("%s: hardware does not support DMA",
1222 sc->sc_wdcdev.sc_dev.dv_xname);
1223 sc->sc_dma_ok = 0;
1224 }
1225 printf("\n");
1226 if (sc->sc_dma_ok) {
1227 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1228 sc->sc_wdcdev.irqack = pciide_irqack;
1229 }
1230 sc->sc_wdcdev.PIO_cap = 0;
1231 sc->sc_wdcdev.DMA_cap = 0;
1232
1233 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1234 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1235 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1236
1237 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1238 cp = &sc->pciide_channels[channel];
1239 if (pciide_chansetup(sc, channel, interface) == 0)
1240 continue;
1241 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1242 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1243 &ctlsize, pciide_pci_intr);
1244 } else {
1245 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1246 channel, &cmdsize, &ctlsize);
1247 }
1248 if (cp->hw_ok == 0)
1249 continue;
1250 /*
1251 * Check to see if something appears to be there.
1252 */
1253 failreason = NULL;
1254 if (!wdcprobe(&cp->wdc_channel)) {
1255 failreason = "not responding; disabled or no drives?";
1256 goto next;
1257 }
1258 /*
1259 * Now, make sure it's actually attributable to this PCI IDE
1260 * channel by trying to access the channel again while the
1261 * PCI IDE controller's I/O space is disabled. (If the
1262 * channel no longer appears to be there, it belongs to
1263 * this controller.) YUCK!
1264 */
1265 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1266 PCI_COMMAND_STATUS_REG);
1267 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1268 csr & ~PCI_COMMAND_IO_ENABLE);
1269 if (wdcprobe(&cp->wdc_channel))
1270 failreason = "other hardware responding at addresses";
1271 pci_conf_write(sc->sc_pc, sc->sc_tag,
1272 PCI_COMMAND_STATUS_REG, csr);
1273 next:
1274 if (failreason) {
1275 printf("%s: %s channel ignored (%s)\n",
1276 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1277 failreason);
1278 cp->hw_ok = 0;
1279 bus_space_unmap(cp->wdc_channel.cmd_iot,
1280 cp->wdc_channel.cmd_ioh, cmdsize);
1281 bus_space_unmap(cp->wdc_channel.ctl_iot,
1282 cp->wdc_channel.ctl_ioh, ctlsize);
1283 } else {
1284 pciide_map_compat_intr(pa, cp, channel, interface);
1285 }
1286 if (cp->hw_ok) {
1287 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1288 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1289 wdcattach(&cp->wdc_channel);
1290 }
1291 }
1292
1293 if (sc->sc_dma_ok == 0)
1294 return;
1295
1296 /* Allocate DMA maps */
1297 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1298 idedma_ctl = 0;
1299 cp = &sc->pciide_channels[channel];
1300 for (drive = 0; drive < 2; drive++) {
1301 drvp = &cp->wdc_channel.ch_drive[drive];
1302 /* If no drive, skip */
1303 if ((drvp->drive_flags & DRIVE) == 0)
1304 continue;
1305 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1306 continue;
1307 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1308 /* Abort DMA setup */
1309 printf("%s:%d:%d: can't allocate DMA maps, "
1310 "using PIO transfers\n",
1311 sc->sc_wdcdev.sc_dev.dv_xname,
1312 channel, drive);
1313 drvp->drive_flags &= ~DRIVE_DMA;
1314 }
1315 printf("%s:%d:%d: using DMA data transfers\n",
1316 sc->sc_wdcdev.sc_dev.dv_xname,
1317 channel, drive);
1318 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1319 }
1320 if (idedma_ctl != 0) {
1321 /* Add software bits in status register */
1322 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1323 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1324 idedma_ctl);
1325 }
1326 }
1327 }
1328
/*
 * Chip-dependent map routine for the Intel PIIX/PIIX3/PIIX4/ICH IDE
 * controllers: establishes capabilities (UDMA level depends on the
 * exact product), selects the appropriate set_modes handler, then maps
 * and configures each enabled channel.  PIIX devices are always
 * compatibility-mode, so interface is passed as 0 everywhere.
 */
void
piix_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* Only these products have Ultra-DMA capability. */
		switch(sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82440MX_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
		case PCI_PRODUCT_INTEL_82801BA_IDE:
		case PCI_PRODUCT_INTEL_82801BAM_IDE:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		}
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	/* Maximum UDMA mode depends on the product generation. */
	switch(sc->sc_pp->ide_product) {
	case PCI_PRODUCT_INTEL_82801AA_IDE:
		sc->sc_wdcdev.UDMA_cap = 4;
		break;
	case PCI_PRODUCT_INTEL_82801BA_IDE:
	case PCI_PRODUCT_INTEL_82801BAM_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	default:
		sc->sc_wdcdev.UDMA_cap = 2;
	}
	/* The original PIIX has a single shared timing per channel. */
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* Dump the timing registers before we touch anything. */
	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udamreg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		/* PIIX is compat-only */
		if (pciide_chansetup(sc, channel, 0) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* PIIX are compat-only pciide devices */
		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		/* No drives found: turn the channel's decode off. */
		if (pciide_chan_candisable(cp)) {
			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
		}
		pciide_map_compat_intr(pa, cp, channel, 0);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}

	/* Dump the timing registers again after configuration. */
	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udamreg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}
1456
/*
 * Per-channel mode setup for the original PIIX (82371FB).  The PIIX
 * has only one timing register per channel, shared by master and
 * slave, so the two drives' modes must first be reconciled to a single
 * compatible setting before the hardware is programmed.
 */
void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess up with drives mode: PIIX can't have
	 * different timings for master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives supports DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		    drvp[0].DMA_mode = mode[0];
		    drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if mode not compatible
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		/* ISP/RTC timings must match, else demote PIO drive to 0 */
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If both drives are not DMA, takes the lower mode, unless
	 * one of them is PIO mode < 2
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are setup */
	/* Program DMA timings if any drive is DMA, else PIO timings. */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we are there, none of the drives are DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller. Enable
	 * it per-drive
	 */
	for (drive = 0; drive < 2; drive++) {
		/* If no drive, skip */
		if ((drvp[drive].drive_flags & DRIVE) == 0)
			continue;
		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
		if (drvp[drive].drive_flags & DRIVE_DMA)
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pciide_print_modes(cp);
}
1565
/*
 * Per-channel mode setup for PIIX3/PIIX4/ICH parts, which (unlike the
 * original PIIX) have separate slave timings (SIDETIM) and, on newer
 * products, Ultra-DMA control registers.  Note the DMA/UDMA paths fall
 * through to the "pio:" label, so PIO timings are always programmed.
 */
void
piix3_4_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive;
	int channel = chp->channel;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
	    PIIX_SIDETIM_RTC_MASK(channel));

	idedma_ctl = 0;
	/* If channel disabled, no need to go further */
	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
		return;
	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		/* Start from a clean UDMA state for this drive. */
		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
		    PIIX_UDMATIM_SET(0x3, channel, drive));
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0))
			goto pio;

		/* ICH parts: enable ping-pong buffer. */
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
			ideconf |= PIIX_CONFIG_PINGPONG;
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
			/* setup Ultra/100 */
			/* without an 80-wire cable, limit to UDMA2 */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 4) {
				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
			} else {
				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
				if (drvp->UDMA_mode > 2) {
					ideconf |= PIIX_CONFIG_UDMA66(channel,
					    drive);
				} else {
					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
					    drive);
				}
			}
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
			/* setup Ultra/66 */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 2)
				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
			else
				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
			udmareg |= PIIX_UDMATIM_SET(
			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			if (drive == 0) {
				idetim |= piix_setup_idetim_timings(
				    drvp->DMA_mode, 1, channel);
			} else {
				sidetim |= piix_setup_sidetim_timings(
				    drvp->DMA_mode, 1, channel);
				idetim =PIIX_IDETIM_SET(idetim,
				    PIIX_IDETIM_SITRE, channel);
			}
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* use PIO mode */
		idetim |= piix_setup_idetim_drvs(drvp);
		if (drive == 0) {
			idetim |= piix_setup_idetim_timings(
			    drvp->PIO_mode, 0, channel);
		} else {
			/* slave timings go in SIDETIM; enable SITRE */
			sidetim |= piix_setup_sidetim_timings(
			    drvp->PIO_mode, 0, channel);
			idetim =PIIX_IDETIM_SET(idetim,
			    PIIX_IDETIM_SITRE, channel);
		}
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
	pciide_print_modes(cp);
}
1687
1688
1689 /* setup ISP and RTC fields, based on mode */
1690 static u_int32_t
1691 piix_setup_idetim_timings(mode, dma, channel)
1692 u_int8_t mode;
1693 u_int8_t dma;
1694 u_int8_t channel;
1695 {
1696
1697 if (dma)
1698 return PIIX_IDETIM_SET(0,
1699 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1700 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1701 channel);
1702 else
1703 return PIIX_IDETIM_SET(0,
1704 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1705 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1706 channel);
1707 }
1708
1709 /* setup DTE, PPE, IE and TIME field based on PIO mode */
/* setup DTE, PPE, IE and TIME field based on PIO mode */
static u_int32_t
piix_setup_idetim_drvs(drvp)
	struct ata_drive_datas *drvp;
{
	u_int32_t ret = 0;
	struct channel_softc *chp = drvp->chnl_softc;
	u_int8_t channel = chp->channel;
	u_int8_t drive = drvp->drive;

	/*
	 * If drive is using UDMA, timings setups are independant
	 * So just check DMA and PIO here.
	 */
	if (drvp->drive_flags & DRIVE_DMA) {
		/* if mode = DMA mode 0, use compatible timings */
		/*
		 * NOTE(review): the DRIVE_DMA test below is redundant —
		 * the enclosing if already established it.
		 */
		if ((drvp->drive_flags & DRIVE_DMA) &&
		    drvp->DMA_mode == 0) {
			drvp->PIO_mode = 0;
			return ret;
		}
		/* Enable fast timings for this drive. */
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
		/*
		 * PIO and DMA timings are the same, use fast timings for PIO
		 * too, else use compat timings.
		 */
		if ((piix_isp_pio[drvp->PIO_mode] !=
		    piix_isp_dma[drvp->DMA_mode]) ||
		    (piix_rtc_pio[drvp->PIO_mode] !=
		    piix_rtc_dma[drvp->DMA_mode]))
			drvp->PIO_mode = 0;
		/* if PIO mode <= 2, use compat timings for PIO */
		if (drvp->PIO_mode <= 2) {
			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
			    channel);
			return ret;
		}
	}

	/*
	 * Now setup PIO modes. If mode < 2, use compat timings.
	 * Else enable fast timings. Enable IORDY and prefetch/post
	 * if PIO mode >= 3.
	 */

	if (drvp->PIO_mode < 2)
		return ret;

	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
	if (drvp->PIO_mode >= 3) {
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
	}
	return ret;
}
1764
1765 /* setup values in SIDETIM registers, based on mode */
1766 static u_int32_t
1767 piix_setup_sidetim_timings(mode, dma, channel)
1768 u_int8_t mode;
1769 u_int8_t dma;
1770 u_int8_t channel;
1771 {
1772 if (dma)
1773 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1774 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1775 else
1776 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1777 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1778 }
1779
/*
 * Chip-dependent map routine for the AMD756 IDE controller: sets up
 * the driver capabilities (PIO 4 / MW-DMA 2 / UDMA 4), maps each
 * channel enabled in the AMD756_CHANSTATUS_EN register, programs its
 * timings, and writes back the (possibly updated) channel-enable
 * register.
 */
void
amd756_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;
	pcireg_t chanenable;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 4;
	sc->sc_wdcdev.set_modes = amd756_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);

	WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
	    DEBUG_PROBE);
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		/* Skip channels disabled in the chip's enable register. */
		if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);

		/* No drives: clear the channel's enable bit below. */
		if (pciide_chan_candisable(cp))
			chanenable &= ~AMD756_CHAN_EN(channel);
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;

		amd756_setup_channel(&cp->wdc_channel);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
	    chanenable);
	return;
}
1839
/*
 * Program PIO/MW-DMA/Ultra-DMA timings for both drives of an AMD756
 * channel, then set the per-drive DMA bits in the bus-master status
 * register.  Multi-word DMA is skipped on chip revisions with the
 * known DMA problem unless PCIIDE_AMD756_ENABLEDMA is defined.
 */
void
amd756_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
#ifndef PCIIDE_AMD756_ENABLEDMA
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
#endif

	idedma_ctl = 0;
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
	/* Clear this channel's fields before rebuilding them. */
	datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
			mode = drvp->PIO_mode;
			goto pio;
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
			    AMD756_UDMA_EN_MTH(chp->channel, drive) |
			    AMD756_UDMA_TIME(chp->channel, drive,
				amd756_udma_tim[drvp->UDMA_mode]);
			/* can use PIO timings, MW DMA unused */
			mode = drvp->PIO_mode;
		} else {
			/* use Multiword DMA, but only if revision is OK */
			drvp->drive_flags &= ~DRIVE_UDMA;
#ifndef PCIIDE_AMD756_ENABLEDMA
			/*
			 * The workaround doesn't seem to be necessary
			 * with all drives, so it can be disabled by
			 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
			 * triggered.
			 */
			if (AMD756_CHIPREV_DISABLEDMA(rev)) {
				printf("%s:%d:%d: multi-word DMA disabled due "
				    "to chip revision\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    chp->channel, drive);
				mode = drvp->PIO_mode;
				drvp->drive_flags &= ~DRIVE_DMA;
				goto pio;
			}
#endif
			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* setup PIO mode */
		/* Modes below 3 all use the mode-0 data timings. */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		datatim_reg |=
		    AMD756_DATATIM_PULSE(chp->channel, drive,
			amd756_pio_set[mode]) |
		    AMD756_DATATIM_RECOV(chp->channel, drive,
			amd756_pio_rec[mode]);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
}
1938
1939 void
1940 apollo_chip_map(sc, pa)
1941 struct pciide_softc *sc;
1942 struct pci_attach_args *pa;
1943 {
1944 struct pciide_channel *cp;
1945 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1946 int rev = PCI_REVISION(pa->pa_class);
1947 int channel;
1948 u_int32_t ideconf, udma_conf, old_udma_conf;
1949 bus_size_t cmdsize, ctlsize;
1950
1951 if (pciide_chipen(sc, pa) == 0)
1952 return;
1953 printf("%s: bus-master DMA support present",
1954 sc->sc_wdcdev.sc_dev.dv_xname);
1955 pciide_mapreg_dma(sc, pa);
1956 printf("\n");
1957 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1958 WDC_CAPABILITY_MODE;
1959 if (sc->sc_dma_ok) {
1960 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1961 sc->sc_wdcdev.irqack = pciide_irqack;
1962 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE
1963 && rev >= 6)
1964 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1965 }
1966 sc->sc_wdcdev.PIO_cap = 4;
1967 sc->sc_wdcdev.DMA_cap = 2;
1968 sc->sc_wdcdev.UDMA_cap = 2;
1969 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1970 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1971 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1972
1973 old_udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1974 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1975 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1976 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1977 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1978 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1979 old_udma_conf),
1980 DEBUG_PROBE);
1981 pci_conf_write(sc->sc_pc, sc->sc_tag,
1982 old_udma_conf | (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1983 APO_UDMA_EN_MTH(0, 0) | APO_UDMA_CLK66(0)),
1984 APO_UDMA);
1985 udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1986 WDCDEBUG_PRINT(("apollo_chip_map: APO_UDMA now 0x%x\n", udma_conf),
1987 DEBUG_PROBE);
1988 if ((udma_conf & (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1989 APO_UDMA_EN_MTH(0, 0))) ==
1990 (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1991 APO_UDMA_EN_MTH(0, 0))) {
1992 if ((udma_conf & APO_UDMA_CLK66(0)) ==
1993 APO_UDMA_CLK66(0)) {
1994 printf("%s: Ultra/66 capable\n",
1995 sc->sc_wdcdev.sc_dev.dv_xname);
1996 sc->sc_wdcdev.UDMA_cap = 4;
1997 } else {
1998 printf("%s: Ultra/33 capable\n",
1999 sc->sc_wdcdev.sc_dev.dv_xname);
2000 sc->sc_wdcdev.UDMA_cap = 2;
2001 }
2002 } else {
2003 sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_UDMA;
2004 }
2005 pci_conf_write(sc->sc_pc, sc->sc_tag, old_udma_conf, APO_UDMA);
2006
2007 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2008 cp = &sc->pciide_channels[channel];
2009 if (pciide_chansetup(sc, channel, interface) == 0)
2010 continue;
2011
2012 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2013 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2014 printf("%s: %s channel ignored (disabled)\n",
2015 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2016 continue;
2017 }
2018 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2019 pciide_pci_intr);
2020 if (cp->hw_ok == 0)
2021 continue;
2022 if (pciide_chan_candisable(cp)) {
2023 ideconf &= ~APO_IDECONF_EN(channel);
2024 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2025 ideconf);
2026 }
2027 pciide_map_compat_intr(pa, cp, channel, interface);
2028
2029 if (cp->hw_ok == 0)
2030 continue;
2031 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2032 }
2033 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2034 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2035 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2036 }
2037
/*
 * Program PIO/DMA/UDMA timings for both drives of a VIA Apollo channel.
 * Builds new values for this channel's fields of the APO_DATATIM and
 * APO_UDMA config registers from the negotiated per-drive modes and
 * writes them back.
 */
void
apollo_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	/* start from the current timings, clearing this channel's fields */
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	/*
	 * We can't mix Ultra/33 and Ultra/66 on the same channel, so
	 * downgrade to Ultra/33 if needed
	 */
	if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
	    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
		/* both drives UDMA */
		if (chp->ch_drive[0].UDMA_mode > 2 &&
		    chp->ch_drive[1].UDMA_mode <= 2) {
			/* drive 0 Ultra/66, drive 1 Ultra/33 */
			chp->ch_drive[0].UDMA_mode = 2;
		} else if (chp->ch_drive[1].UDMA_mode > 2 &&
		    chp->ch_drive[0].UDMA_mode <= 2) {
			/* drive 1 Ultra/66, drive 0 Ultra/33 */
			chp->ch_drive[1].UDMA_mode = 2;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
			/* PIO-only drive: no DMA control bit, no UDMA bits */
			mode = drvp->PIO_mode;
			goto pio;
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
			    APO_UDMA_EN_MTH(chp->channel, drive) |
			    APO_UDMA_TIME(chp->channel, drive,
				apollo_udma_tim[drvp->UDMA_mode]);
			/* UDMA modes 3 and up get the 66MHz clock bit */
			if (drvp->UDMA_mode > 2)
				udmatim_reg |=
				    APO_UDMA_CLK66(chp->channel);
			/* can use PIO timings, MW DMA unused */
			mode = drvp->PIO_mode;
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* setup PIO mode */
		if (mode <= 2) {
			/* modes 0-2 share the slowest timing entry */
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		datatim_reg |=
		    APO_DATATIM_PULSE(chp->channel, drive,
			apollo_pio_set[mode]) |
		    APO_DATATIM_RECOV(chp->channel, drive,
			apollo_pio_rec[mode]);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
}
2136
2137 void
2138 cmd_channel_map(pa, sc, channel)
2139 struct pci_attach_args *pa;
2140 struct pciide_softc *sc;
2141 int channel;
2142 {
2143 struct pciide_channel *cp = &sc->pciide_channels[channel];
2144 bus_size_t cmdsize, ctlsize;
2145 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2146 int interface;
2147
2148 /*
2149 * The 0648/0649 can be told to identify as a RAID controller.
2150 * In this case, we have to fake interface
2151 */
2152 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2153 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2154 PCIIDE_INTERFACE_SETTABLE(1);
2155 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2156 CMD_CONF_DSA1)
2157 interface |= PCIIDE_INTERFACE_PCI(0) |
2158 PCIIDE_INTERFACE_PCI(1);
2159 } else {
2160 interface = PCI_INTERFACE(pa->pa_class);
2161 }
2162
2163 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2164 cp->name = PCIIDE_CHANNEL_NAME(channel);
2165 cp->wdc_channel.channel = channel;
2166 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2167
2168 if (channel > 0) {
2169 cp->wdc_channel.ch_queue =
2170 sc->pciide_channels[0].wdc_channel.ch_queue;
2171 } else {
2172 cp->wdc_channel.ch_queue =
2173 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2174 }
2175 if (cp->wdc_channel.ch_queue == NULL) {
2176 printf("%s %s channel: "
2177 "can't allocate memory for command queue",
2178 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2179 return;
2180 }
2181
2182 printf("%s: %s channel %s to %s mode\n",
2183 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2184 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2185 "configured" : "wired",
2186 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2187 "native-PCI" : "compatibility");
2188
2189 /*
2190 * with a CMD PCI64x, if we get here, the first channel is enabled:
2191 * there's no way to disable the first channel without disabling
2192 * the whole device
2193 */
2194 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2195 printf("%s: %s channel ignored (disabled)\n",
2196 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2197 return;
2198 }
2199
2200 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2201 if (cp->hw_ok == 0)
2202 return;
2203 if (channel == 1) {
2204 if (pciide_chan_candisable(cp)) {
2205 ctrl &= ~CMD_CTRL_2PORT;
2206 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2207 CMD_CTRL, ctrl);
2208 }
2209 }
2210 pciide_map_compat_intr(pa, cp, channel, interface);
2211 }
2212
2213 int
2214 cmd_pci_intr(arg)
2215 void *arg;
2216 {
2217 struct pciide_softc *sc = arg;
2218 struct pciide_channel *cp;
2219 struct channel_softc *wdc_cp;
2220 int i, rv, crv;
2221 u_int32_t priirq, secirq;
2222
2223 rv = 0;
2224 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2225 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2226 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2227 cp = &sc->pciide_channels[i];
2228 wdc_cp = &cp->wdc_channel;
2229 /* If a compat channel skip. */
2230 if (cp->compat)
2231 continue;
2232 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2233 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2234 crv = wdcintr(wdc_cp);
2235 if (crv == 0)
2236 printf("%s:%d: bogus intr\n",
2237 sc->sc_wdcdev.sc_dev.dv_xname, i);
2238 else
2239 rv = 1;
2240 }
2241 }
2242 return rv;
2243 }
2244
2245 void
2246 cmd_chip_map(sc, pa)
2247 struct pciide_softc *sc;
2248 struct pci_attach_args *pa;
2249 {
2250 int channel;
2251
2252 /*
2253 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2254 * and base adresses registers can be disabled at
2255 * hardware level. In this case, the device is wired
2256 * in compat mode and its first channel is always enabled,
2257 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2258 * In fact, it seems that the first channel of the CMD PCI0640
2259 * can't be disabled.
2260 */
2261
2262 #ifdef PCIIDE_CMD064x_DISABLE
2263 if (pciide_chipen(sc, pa) == 0)
2264 return;
2265 #endif
2266
2267 printf("%s: hardware does not support DMA\n",
2268 sc->sc_wdcdev.sc_dev.dv_xname);
2269 sc->sc_dma_ok = 0;
2270
2271 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2272 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2273 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2274
2275 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2276 cmd_channel_map(pa, sc, channel);
2277 }
2278 }
2279
/*
 * Map and configure a CMD 0643/0646/0648/0649 bus-master IDE controller.
 * DMA/UDMA capabilities depend on the exact product and, for the 0646,
 * on its revision; both channels are then mapped and programmed.
 */
void
cmd0643_9_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and base adresses registers can be disabled at
	 * hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_CMDTECH_649:
		case PCI_PRODUCT_CMDTECH_648:
			/* 0648/0649 always do UDMA, up to mode 4 */
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			sc->sc_wdcdev.UDMA_cap = 4;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_646:
			/* 0646 UDMA support depends on the chip revision */
			if (rev >= CMD0646U2_REV) {
				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
				sc->sc_wdcdev.UDMA_cap = 2;
			} else if (rev >= CMD0646U_REV) {
			/*
			 * Linux's driver claims that the 646U is broken
			 * with UDMA. Only enable it if we know what we're
			 * doing
			 */
#ifdef PCIIDE_CMD0646U_ENABLEUDMA
				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
				sc->sc_wdcdev.UDMA_cap = 2;
#endif
				/* explicitly disable UDMA */
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(0), 0);
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(1), 0);
			}
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		default:
			sc->sc_wdcdev.irqack = pciide_irqack;
		}
	}

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;

	/* 0x54/0x58 are the raw config offsets of the drive timing regs */
	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
		DEBUG_PROBE);
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		cmd_channel_map(pa, sc, channel);
		if (cp->hw_ok == 0)
			continue;
		cmd0643_9_setup_channel(&cp->wdc_channel);
	}
	/*
	 * note - this also makes sure we clear the irq disable and reset
	 * bits
	 */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
	    DEBUG_PROBE);
}
2374
/*
 * Program per-drive timings for one channel of a CMD 0643/6/8/9.
 * A single data timing register is shared by PIO and DMA for each
 * drive, so the slower of the two requested modes wins; UDMA timing
 * (when the chip supports it) has its own register.
 */
void
cmd0643_9_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int8_t tim;
	u_int32_t idedma_ctl, udma_reg;
	int drive;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
			if (drvp->drive_flags & DRIVE_UDMA) {
				/* UltraDMA on a 646U2, 0648 or 0649 */
				drvp->drive_flags &= ~DRIVE_DMA;
				udma_reg = pciide_pci_read(sc->sc_pc,
				    sc->sc_tag, CMD_UDMATIM(chp->channel));
				/*
				 * Ultra/66 needs an 80-wire cable; drop
				 * to mode 2 if the chip doesn't see one.
				 */
				if (drvp->UDMA_mode > 2 &&
				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
				    CMD_BICSR) &
				    CMD_BICSR_80(chp->channel)) == 0)
					drvp->UDMA_mode = 2;
				if (drvp->UDMA_mode > 2)
					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
				else if (sc->sc_wdcdev.UDMA_cap > 2)
					udma_reg |= CMD_UDMATIM_UDMA33(drive);
				udma_reg |= CMD_UDMATIM_UDMA(drive);
				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
				    CMD_UDMATIM_TIM_OFF(drive));
				udma_reg |=
				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
				    CMD_UDMATIM_TIM_OFF(drive));
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(chp->channel), udma_reg);
			} else {
				/*
				 * use Multiword DMA.
				 * Timings will be used for both PIO and DMA,
				 * so adjust DMA mode if needed
				 * if we have a 0646U2/8/9, turn off UDMA
				 */
				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
					udma_reg = pciide_pci_read(sc->sc_pc,
					    sc->sc_tag,
					    CMD_UDMATIM(chp->channel));
					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
					pciide_pci_write(sc->sc_pc, sc->sc_tag,
					    CMD_UDMATIM(chp->channel),
					    udma_reg);
				}
				if (drvp->PIO_mode >= 3 &&
				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
					drvp->DMA_mode = drvp->PIO_mode - 2;
				}
				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    CMD_DATA_TIM(chp->channel, drive), tim);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
}
2455
2456 void
2457 cmd646_9_irqack(chp)
2458 struct channel_softc *chp;
2459 {
2460 u_int32_t priirq, secirq;
2461 struct pciide_channel *cp = (struct pciide_channel*)chp;
2462 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2463
2464 if (chp->channel == 0) {
2465 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2466 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2467 } else {
2468 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2469 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2470 }
2471 pciide_irqack(chp);
2472 }
2473
2474 void
2475 cy693_chip_map(sc, pa)
2476 struct pciide_softc *sc;
2477 struct pci_attach_args *pa;
2478 {
2479 struct pciide_channel *cp;
2480 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2481 bus_size_t cmdsize, ctlsize;
2482
2483 if (pciide_chipen(sc, pa) == 0)
2484 return;
2485 /*
2486 * this chip has 2 PCI IDE functions, one for primary and one for
2487 * secondary. So we need to call pciide_mapregs_compat() with
2488 * the real channel
2489 */
2490 if (pa->pa_function == 1) {
2491 sc->sc_cy_compatchan = 0;
2492 } else if (pa->pa_function == 2) {
2493 sc->sc_cy_compatchan = 1;
2494 } else {
2495 printf("%s: unexpected PCI function %d\n",
2496 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2497 return;
2498 }
2499 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2500 printf("%s: bus-master DMA support present",
2501 sc->sc_wdcdev.sc_dev.dv_xname);
2502 pciide_mapreg_dma(sc, pa);
2503 } else {
2504 printf("%s: hardware does not support DMA",
2505 sc->sc_wdcdev.sc_dev.dv_xname);
2506 sc->sc_dma_ok = 0;
2507 }
2508 printf("\n");
2509
2510 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2511 if (sc->sc_cy_handle == NULL) {
2512 printf("%s: unable to map hyperCache control registers\n",
2513 sc->sc_wdcdev.sc_dev.dv_xname);
2514 sc->sc_dma_ok = 0;
2515 }
2516
2517 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2518 WDC_CAPABILITY_MODE;
2519 if (sc->sc_dma_ok) {
2520 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2521 sc->sc_wdcdev.irqack = pciide_irqack;
2522 }
2523 sc->sc_wdcdev.PIO_cap = 4;
2524 sc->sc_wdcdev.DMA_cap = 2;
2525 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2526
2527 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2528 sc->sc_wdcdev.nchannels = 1;
2529
2530 /* Only one channel for this chip; if we are here it's enabled */
2531 cp = &sc->pciide_channels[0];
2532 sc->wdc_chanarray[0] = &cp->wdc_channel;
2533 cp->name = PCIIDE_CHANNEL_NAME(0);
2534 cp->wdc_channel.channel = 0;
2535 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2536 cp->wdc_channel.ch_queue =
2537 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2538 if (cp->wdc_channel.ch_queue == NULL) {
2539 printf("%s primary channel: "
2540 "can't allocate memory for command queue",
2541 sc->sc_wdcdev.sc_dev.dv_xname);
2542 return;
2543 }
2544 printf("%s: primary channel %s to ",
2545 sc->sc_wdcdev.sc_dev.dv_xname,
2546 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2547 "configured" : "wired");
2548 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2549 printf("native-PCI");
2550 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2551 pciide_pci_intr);
2552 } else {
2553 printf("compatibility");
2554 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2555 &cmdsize, &ctlsize);
2556 }
2557 printf(" mode\n");
2558 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2559 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2560 wdcattach(&cp->wdc_channel);
2561 if (pciide_chan_candisable(cp)) {
2562 pci_conf_write(sc->sc_pc, sc->sc_tag,
2563 PCI_COMMAND_STATUS_REG, 0);
2564 }
2565 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2566 if (cp->hw_ok == 0)
2567 return;
2568 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2569 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2570 cy693_setup_channel(&cp->wdc_channel);
2571 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2572 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2573 }
2574
/*
 * Program PIO and DMA modes for the (single) CY82C693 channel.
 * PIO pulse/recovery timings go in the CY_CMD_CTRL config register;
 * the DMA mode is a single per-channel setting written through the
 * hyperCache controller, so both drives are forced to the slower of
 * their negotiated DMA modes.
 */
void
cy693_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t cy_cmd_ctrl;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int dma_mode = -1;

	cy_cmd_ctrl = idedma_ctl = 0;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (drvp->drive_flags & DRIVE_DMA) {
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
			/* use Multiword DMA */
			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
				dma_mode = drvp->DMA_mode;
		}
		/* IOW/IOR pulse and recovery timings for this drive's PIO */
		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOW_REC_OFF(drive));
		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
		    CY_CMD_CTRL_IOR_REC_OFF(drive));
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
	/* both drives share the channel-wide DMA mode */
	chp->ch_drive[0].DMA_mode = dma_mode;
	chp->ch_drive[1].DMA_mode = dma_mode;

	if (dma_mode == -1)
		dma_mode = 0;

	if (sc->sc_cy_handle != NULL) {
		/* Note: `multiple' is implied. */
		cy82c693_write(sc->sc_cy_handle,
		    (sc->sc_cy_compatchan == 0) ?
		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
	}

	pciide_print_modes(cp);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
}
2635
/*
 * Map and configure an SiS IDE controller.  UDMA is enabled only for
 * revisions above 0xd0; channels disabled via SIS_CTRL0 are skipped.
 */
void
sis_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* later revisions also support Ultra/DMA */
		if (rev > 0xd0)
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
		sc->sc_wdcdev.UDMA_cap = 2;
	sc->sc_wdcdev.set_modes = sis_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* select the timing register layout and FIFO size we program for */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			/* channel is unused: turn it off in SIS_CTRL0 */
			if (channel == 0)
				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
			else
				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
			    sis_ctr0);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		sis_setup_channel(&cp->wdc_channel);
	}
}
2704
/*
 * Program PIO/DMA/UDMA timings for one channel of an SiS controller.
 * All per-drive timings for the channel are assembled into a single
 * SIS_TIM(channel) config register and written at the end.
 */
void
sis_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t sis_tim;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
	    "channel %d 0x%x\n", chp->channel,
	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
	    DEBUG_PROBE);
	sis_tim = 0;
	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)
			/* PIO only: skip the DMA setup below */
			goto pio;

		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
			    SIS_TIM_UDMA_TIME_OFF(drive);
			sis_tim |= SIS_TIM_UDMA_EN(drive);
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
pio:		sis_tim |= sis_pio_act[drvp->PIO_mode] <<
		    SIS_TIM_ACT_OFF(drive);
		sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
		    SIS_TIM_REC_OFF(drive);
	}
	WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
	    "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}
2771
/*
 * Map and configure an Acer Labs (ALi) IDE controller.  Unlocks the
 * chip's write-protected configuration registers, enables the channel
 * status bits in the class register, and maps each enabled channel.
 */
void
acer_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t cr, interface;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 2;
	sc->sc_wdcdev.set_modes = acer_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* set CD-ROM DMA enable and clear the FIFO-disable bit */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
	    ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

	/* Enable "microsoft register bits" R/W. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
	    ~ACER_CHANSTATUSREGS_RO);
	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
	/* Don't use cr, re-read the real register content instead */
	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_CLASS_REG));

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    acer_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			/* channel unused: clear its enable bit */
			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PCI_CLASS_REG, cr);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		acer_setup_channel(&cp->wdc_channel);
	}
}
2845
/*
 * Program PIO/DMA/UDMA timings for one channel of an ALi controller.
 * FIFO threshold and UDMA enable/timing bits are assembled in the
 * ACER_FTH_UDMA register; PIO timings go in per-drive ACER_IDETIM
 * registers.
 */
void
acer_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t acer_fifo_udma;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
		    "channel %d drive %d 0x%x\n", chp->channel, drive,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
		/* clear FIFO/DMA mode */
		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
		    ACER_UDMA_EN(chp->channel, drive) |
		    ACER_UDMA_TIM(chp->channel, drive, 0x7));

		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
			/* PIO only: FIFO threshold 0x1, no DMA bits */
			acer_fifo_udma |=
			    ACER_FTH_OPL(chp->channel, drive, 0x1);
			goto pio;
		}

		/* DMA/UDMA drive: FIFO threshold 0x2 */
		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
			acer_fifo_udma |=
			    ACER_UDMA_TIM(chp->channel, drive,
				acer_udma[drvp->UDMA_mode]);
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->channel, drive),
		    acer_pio[drvp->PIO_mode]);
	}
	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}
2923
2924 int
2925 acer_pci_intr(arg)
2926 void *arg;
2927 {
2928 struct pciide_softc *sc = arg;
2929 struct pciide_channel *cp;
2930 struct channel_softc *wdc_cp;
2931 int i, rv, crv;
2932 u_int32_t chids;
2933
2934 rv = 0;
2935 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2936 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2937 cp = &sc->pciide_channels[i];
2938 wdc_cp = &cp->wdc_channel;
2939 /* If a compat channel skip. */
2940 if (cp->compat)
2941 continue;
2942 if (chids & ACER_CHIDS_INT(i)) {
2943 crv = wdcintr(wdc_cp);
2944 if (crv == 0)
2945 printf("%s:%d: bogus intr\n",
2946 sc->sc_wdcdev.sc_dev.dv_xname, i);
2947 else
2948 rv = 1;
2949 }
2950 }
2951 return rv;
2952 }
2953
/*
 * Attach glue for the HighPoint HPT366/HPT370 controllers: map the
 * bus-master DMA registers, set capabilities according to the PCI
 * revision (366 vs 370), then map, attach and program each enabled
 * channel.
 */
void
hpt_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int i, compatchan, revision;
	pcireg_t interface;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	/* The 366 and the 370 are told apart by the PCI revision ID. */
	revision = PCI_REVISION(pa->pa_class);

	/*
	 * when the chip is in native mode it identifies itself as a
	 * 'misc mass storage'. Fake interface in this case.
	 */
	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCI_INTERFACE(pa->pa_class);
	} else {
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0);
		if (revision == HPT370_REV)
			interface |= PCIIDE_INTERFACE_PCI(1);
	}

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;

	sc->sc_wdcdev.set_modes = hpt_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	if (revision == HPT366_REV) {
		/* HPT366: one channel per PCI function, UDMA up to mode 4 */
		sc->sc_wdcdev.UDMA_cap = 4;
		/*
		 * The 366 has 2 PCI IDE functions, one for primary and one
		 * for secondary. So we need to call pciide_mapregs_compat()
		 * with the real channel
		 */
		if (pa->pa_function == 0) {
			compatchan = 0;
		} else if (pa->pa_function == 1) {
			compatchan = 1;
		} else {
			printf("%s: unexpected PCI function %d\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
			return;
		}
		sc->sc_wdcdev.nchannels = 1;
	} else {
		/* HPT370: both channels on one function, UDMA up to mode 5 */
		sc->sc_wdcdev.nchannels = 2;
		sc->sc_wdcdev.UDMA_cap = 5;
	}
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		if (sc->sc_wdcdev.nchannels > 1) {
			/* 370 only: honor the per-channel enable bit */
			compatchan = i;
			if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
			   HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
				printf("%s: %s channel ignored (disabled)\n",
				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
				continue;
			}
		}
		if (pciide_chansetup(sc, i, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(i)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, hpt_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
			    &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			return;
		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
		wdcattach(&cp->wdc_channel);
		hpt_setup_channel(&cp->wdc_channel);
	}
	if (revision == HPT370_REV) {
		/*
		 * HPT370_REV has a bit to disable interrupts, make sure
		 * to clear it
		 */
		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
		    ~HPT_CSEL_IRQDIS);
	}
	return;
}
3056
/*
 * Program the per-drive timing registers of a HighPoint channel
 * according to each drive's negotiated PIO/DMA/UDMA mode.  The timing
 * value tables differ between the HPT366 (1 channel per function) and
 * the HPT370 (2 channels), selected via sc_wdcdev.nchannels.
 */
void
hpt_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	int cable;
	u_int32_t before, after;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/* cable-ID bits live in the HPT_CSEL config register */
	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* remember the BIOS-programmed value for the debug printout */
		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
		    HPT_IDETIM(chp->channel, drive));

		/* add timing values, setup DMA if needed */
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			/*
			 * Cap UDMA at mode 2 when the channel's cable-ID
			 * bit is set (presumably indicating a 40-conductor
			 * cable -- confirm against the HPT datasheet).
			 */
			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
			    drvp->UDMA_mode > 2)
				drvp->UDMA_mode = 2;
			after = (sc->sc_wdcdev.nchannels == 2) ?
			    hpt370_udma[drvp->UDMA_mode] :
			    hpt366_udma[drvp->UDMA_mode];
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & DRIVE_DMA) {
			/*
			 * use Multiword DMA.
			 * Timings will be used for both PIO and DMA, so adjust
			 * DMA mode if needed
			 */
			if (drvp->PIO_mode >= 3 &&
			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
				drvp->DMA_mode = drvp->PIO_mode - 2;
			}
			after = (sc->sc_wdcdev.nchannels == 2) ?
			    hpt370_dma[drvp->DMA_mode] :
			    hpt366_dma[drvp->DMA_mode];
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			after = (sc->sc_wdcdev.nchannels == 2) ?
			    hpt370_pio[drvp->PIO_mode] :
			    hpt366_pio[drvp->PIO_mode];
		}
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    HPT_IDETIM(chp->channel, drive), after);
		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
		    "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
		    after, before), DEBUG_PROBE);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL, idedma_ctl);
	}
	pciide_print_modes(cp);
}
3129
3130 int
3131 hpt_pci_intr(arg)
3132 void *arg;
3133 {
3134 struct pciide_softc *sc = arg;
3135 struct pciide_channel *cp;
3136 struct channel_softc *wdc_cp;
3137 int rv = 0;
3138 int dmastat, i, crv;
3139
3140 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3141 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3142 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3143 if((dmastat & IDEDMA_CTL_INTR) == 0)
3144 continue;
3145 cp = &sc->pciide_channels[i];
3146 wdc_cp = &cp->wdc_channel;
3147 crv = wdcintr(wdc_cp);
3148 if (crv == 0) {
3149 printf("%s:%d: bogus intr\n",
3150 sc->sc_wdcdev.sc_dev.dv_xname, i);
3151 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3152 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3153 } else
3154 rv = 1;
3155 }
3156 return rv;
3157 }
3158
3159
/*
 * A macro to test product: true for the newer Promise parts
 * (Ultra66, Ultra100, Ultra100X), false for the older ones.
 */
#define PDC_IS_262(sc) \
	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3165
/*
 * Attach glue for the Promise PDC202xx controllers: take the chip out
 * of RAID mode, program failsafe timings, the system control register
 * and the primary/secondary mode registers, then map and program each
 * enabled channel.
 */
void
pdc202xx_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface, st, mode;
	bus_size_t cmdsize, ctlsize;

	st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
	WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
	    DEBUG_PROBE);
	if (pciide_chipen(sc, pa) == 0)
		return;

	/* turn off RAID mode */
	st &= ~PDC2xx_STATE_IDERAID;

	/*
	 * can't rely on the PCI_CLASS_REG content if the chip was in raid
	 * mode. We have to fake interface
	 */
	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
	if (st & PDC2xx_STATE_NATIVE)
		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);

	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	/* newer (262-class) parts support higher UDMA modes */
	if (PDC_IS_262(sc))
		sc->sc_wdcdev.UDMA_cap = 4;
	else
		sc->sc_wdcdev.UDMA_cap = 2;
	sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* setup failsafe defaults */
	mode = 0;
	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
		    "initial timings 0x%x, now 0x%x\n", channel,
		    pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
		    DEBUG_PROBE);
		/* drive 0 additionally gets the IORDYp bit */
		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
		    mode | PDC2xx_TIM_IORDYp);
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
		    "initial timings 0x%x, now 0x%x\n", channel,
		    pci_conf_read(sc->sc_pc, sc->sc_tag,
		    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
		    mode);
	}

	mode = PDC2xx_SCR_DMA;
	if (PDC_IS_262(sc)) {
		mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
	} else {
		/* the BIOS set it up this way */
		mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
	}
	mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
	mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
	WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
	    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
	    DEBUG_PROBE);
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);

	/* controller initial state register is OK even without BIOS */
	/* Set DMA mode to IDE DMA compatibility */
	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
	    DEBUG_PROBE);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
	    mode | 0x1);
	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
	    mode | 0x1);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		/* channel-enable bits differ between the 262 and 246 parts */
		if ((st & (PDC_IS_262(sc) ?
		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pdc202xx_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp))
			st &= ~(PDC_IS_262(sc) ?
			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
		pciide_map_compat_intr(pa, cp, channel, interface);
		pdc202xx_setup_channel(&cp->wdc_channel);
	}
	WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
	    DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
	return;
}
3287
3288 void
3289 pdc202xx_setup_channel(chp)
3290 struct channel_softc *chp;
3291 {
3292 struct ata_drive_datas *drvp;
3293 int drive;
3294 pcireg_t mode, st;
3295 u_int32_t idedma_ctl, scr, atapi;
3296 struct pciide_channel *cp = (struct pciide_channel*)chp;
3297 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3298 int channel = chp->channel;
3299
3300 /* setup DMA if needed */
3301 pciide_channel_dma_setup(cp);
3302
3303 idedma_ctl = 0;
3304
3305 /* Per channel settings */
3306 if (PDC_IS_262(sc)) {
3307 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3308 PDC262_U66);
3309 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3310 /* Trimm UDMA mode */
3311 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3312 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3313 chp->ch_drive[0].UDMA_mode <= 2) ||
3314 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3315 chp->ch_drive[1].UDMA_mode <= 2)) {
3316 if (chp->ch_drive[0].UDMA_mode > 2)
3317 chp->ch_drive[0].UDMA_mode = 2;
3318 if (chp->ch_drive[1].UDMA_mode > 2)
3319 chp->ch_drive[1].UDMA_mode = 2;
3320 }
3321 /* Set U66 if needed */
3322 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3323 chp->ch_drive[0].UDMA_mode > 2) ||
3324 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3325 chp->ch_drive[1].UDMA_mode > 2))
3326 scr |= PDC262_U66_EN(channel);
3327 else
3328 scr &= ~PDC262_U66_EN(channel);
3329 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3330 PDC262_U66, scr);
3331 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3332 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3333 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3334 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3335 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3336 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3337 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3338 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3339 atapi = 0;
3340 else
3341 atapi = PDC262_ATAPI_UDMA;
3342 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3343 PDC262_ATAPI(channel), atapi);
3344 }
3345 }
3346 for (drive = 0; drive < 2; drive++) {
3347 drvp = &chp->ch_drive[drive];
3348 /* If no drive, skip */
3349 if ((drvp->drive_flags & DRIVE) == 0)
3350 continue;
3351 mode = 0;
3352 if (drvp->drive_flags & DRIVE_UDMA) {
3353 /* use Ultra/DMA */
3354 drvp->drive_flags &= ~DRIVE_DMA;
3355 mode = PDC2xx_TIM_SET_MB(mode,
3356 pdc2xx_udma_mb[drvp->UDMA_mode]);
3357 mode = PDC2xx_TIM_SET_MC(mode,
3358 pdc2xx_udma_mc[drvp->UDMA_mode]);
3359 drvp->drive_flags &= ~DRIVE_DMA;
3360 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3361 } else if (drvp->drive_flags & DRIVE_DMA) {
3362 mode = PDC2xx_TIM_SET_MB(mode,
3363 pdc2xx_dma_mb[drvp->DMA_mode]);
3364 mode = PDC2xx_TIM_SET_MC(mode,
3365 pdc2xx_dma_mc[drvp->DMA_mode]);
3366 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3367 } else {
3368 mode = PDC2xx_TIM_SET_MB(mode,
3369 pdc2xx_dma_mb[0]);
3370 mode = PDC2xx_TIM_SET_MC(mode,
3371 pdc2xx_dma_mc[0]);
3372 }
3373 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3374 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3375 if (drvp->drive_flags & DRIVE_ATA)
3376 mode |= PDC2xx_TIM_PRE;
3377 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3378 if (drvp->PIO_mode >= 3) {
3379 mode |= PDC2xx_TIM_IORDY;
3380 if (drive == 0)
3381 mode |= PDC2xx_TIM_IORDYp;
3382 }
3383 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3384 "timings 0x%x\n",
3385 sc->sc_wdcdev.sc_dev.dv_xname,
3386 chp->channel, drive, mode), DEBUG_PROBE);
3387 pci_conf_write(sc->sc_pc, sc->sc_tag,
3388 PDC2xx_TIM(chp->channel, drive), mode);
3389 }
3390 if (idedma_ctl != 0) {
3391 /* Add software bits in status register */
3392 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3393 IDEDMA_CTL, idedma_ctl);
3394 }
3395 pciide_print_modes(cp);
3396 }
3397
3398 int
3399 pdc202xx_pci_intr(arg)
3400 void *arg;
3401 {
3402 struct pciide_softc *sc = arg;
3403 struct pciide_channel *cp;
3404 struct channel_softc *wdc_cp;
3405 int i, rv, crv;
3406 u_int32_t scr;
3407
3408 rv = 0;
3409 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3410 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3411 cp = &sc->pciide_channels[i];
3412 wdc_cp = &cp->wdc_channel;
3413 /* If a compat channel skip. */
3414 if (cp->compat)
3415 continue;
3416 if (scr & PDC2xx_SCR_INT(i)) {
3417 crv = wdcintr(wdc_cp);
3418 if (crv == 0)
3419 printf("%s:%d: bogus intr\n",
3420 sc->sc_wdcdev.sc_dev.dv_xname, i);
3421 else
3422 rv = 1;
3423 }
3424 }
3425 return rv;
3426 }
3427
/*
 * Attach glue for the OPTi IDE controllers: map the bus-master DMA
 * registers, set capabilities, then map and program each channel that
 * is not disabled in the chip's init-control register.
 */
void
opti_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	u_int8_t init_ctrl;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = opti_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* the init-control register carries the channel-2 disable bit */
	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
	    OPTI_REG_INIT_CONTROL);

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (channel == 1 &&
		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		opti_setup_channel(&cp->wdc_channel);
	}
}
3484
3485 void
3486 opti_setup_channel(chp)
3487 struct channel_softc *chp;
3488 {
3489 struct ata_drive_datas *drvp;
3490 struct pciide_channel *cp = (struct pciide_channel*)chp;
3491 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3492 int drive, spd;
3493 int mode[2];
3494 u_int8_t rv, mr;
3495
3496 /*
3497 * The `Delay' and `Address Setup Time' fields of the
3498 * Miscellaneous Register are always zero initially.
3499 */
3500 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3501 mr &= ~(OPTI_MISC_DELAY_MASK |
3502 OPTI_MISC_ADDR_SETUP_MASK |
3503 OPTI_MISC_INDEX_MASK);
3504
3505 /* Prime the control register before setting timing values */
3506 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3507
3508 /* Determine the clockrate of the PCIbus the chip is attached to */
3509 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3510 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3511
3512 /* setup DMA if needed */
3513 pciide_channel_dma_setup(cp);
3514
3515 for (drive = 0; drive < 2; drive++) {
3516 drvp = &chp->ch_drive[drive];
3517 /* If no drive, skip */
3518 if ((drvp->drive_flags & DRIVE) == 0) {
3519 mode[drive] = -1;
3520 continue;
3521 }
3522
3523 if ((drvp->drive_flags & DRIVE_DMA)) {
3524 /*
3525 * Timings will be used for both PIO and DMA,
3526 * so adjust DMA mode if needed
3527 */
3528 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3529 drvp->PIO_mode = drvp->DMA_mode + 2;
3530 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3531 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3532 drvp->PIO_mode - 2 : 0;
3533 if (drvp->DMA_mode == 0)
3534 drvp->PIO_mode = 0;
3535
3536 mode[drive] = drvp->DMA_mode + 5;
3537 } else
3538 mode[drive] = drvp->PIO_mode;
3539
3540 if (drive && mode[0] >= 0 &&
3541 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3542 /*
3543 * Can't have two drives using different values
3544 * for `Address Setup Time'.
3545 * Slow down the faster drive to compensate.
3546 */
3547 int d = (opti_tim_as[spd][mode[0]] >
3548 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3549
3550 mode[d] = mode[1-d];
3551 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3552 chp->ch_drive[d].DMA_mode = 0;
3553 chp->ch_drive[d].drive_flags &= DRIVE_DMA;
3554 }
3555 }
3556
3557 for (drive = 0; drive < 2; drive++) {
3558 int m;
3559 if ((m = mode[drive]) < 0)
3560 continue;
3561
3562 /* Set the Address Setup Time and select appropriate index */
3563 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3564 rv |= OPTI_MISC_INDEX(drive);
3565 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3566
3567 /* Set the pulse width and recovery timing parameters */
3568 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3569 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3570 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3571 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3572
3573 /* Set the Enhanced Mode register appropriately */
3574 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3575 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3576 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3577 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3578 }
3579
3580 /* Finally, enable the timings */
3581 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3582
3583 pciide_print_modes(cp);
3584 }
3585