/*	$NetBSD: pciide.c,v 1.109 2001/03/20 17:54:39 bouyer Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
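/*
 * The DEBUG_* bits above select which WDCDEBUG_PRINT() messages are
 * emitted; wdcdebug_pciide_mask starts at 0 (silent) and can be set
 * (e.g. patched, or changed from a kernel debugger) to enable
 * probe/DMA/transfer tracing.
 */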
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <uvm/uvm_extern.h>
100
101 #include <machine/endian.h>
102
103 #include <dev/pci/pcireg.h>
104 #include <dev/pci/pcivar.h>
105 #include <dev/pci/pcidevs.h>
106 #include <dev/pci/pciidereg.h>
107 #include <dev/pci/pciidevar.h>
108 #include <dev/pci/pciide_piix_reg.h>
109 #include <dev/pci/pciide_amd_reg.h>
110 #include <dev/pci/pciide_apollo_reg.h>
111 #include <dev/pci/pciide_cmd_reg.h>
112 #include <dev/pci/pciide_cy693_reg.h>
113 #include <dev/pci/pciide_sis_reg.h>
114 #include <dev/pci/pciide_acer_reg.h>
115 #include <dev/pci/pciide_pdc202xx_reg.h>
116 #include <dev/pci/pciide_opti_reg.h>
117 #include <dev/pci/pciide_hpt_reg.h>
118 #include <dev/pci/cy82c693var.h>
119
120 #include "opt_pciide.h"
121
122 /* inlines for reading/writing 8-bit PCI registers */
123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
124 int));
125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
126 int, u_int8_t));
127
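/*
 * PCI configuration space is only accessible as aligned 32-bit words
 * through pci_conf_read()/pci_conf_write(), so these helpers emulate
 * byte-wide accesses: "reg & ~0x03" selects the containing word and
 * "(reg & 0x03) * 8" the byte lane, with a read-modify-write for stores.
 */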
128 static __inline u_int8_t
129 pciide_pci_read(pc, pa, reg)
130 pci_chipset_tag_t pc;
131 pcitag_t pa;
132 int reg;
133 {
134
135 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
136 ((reg & 0x03) * 8) & 0xff);
137 }
138
139 static __inline void
140 pciide_pci_write(pc, pa, reg, val)
141 pci_chipset_tag_t pc;
142 pcitag_t pa;
143 int reg;
144 u_int8_t val;
145 {
146 pcireg_t pcival;
147
148 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
149 pcival &= ~(0xff << ((reg & 0x03) * 8));
150 pcival |= (val << ((reg & 0x03) * 8));
151 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
152 }
153
154 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
155
156 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157 void piix_setup_channel __P((struct channel_softc*));
158 void piix3_4_setup_channel __P((struct channel_softc*));
159 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
160 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
161 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162
163 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164 void amd756_setup_channel __P((struct channel_softc*));
165
166 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167 void apollo_setup_channel __P((struct channel_softc*));
168
169 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
170 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_9_setup_channel __P((struct channel_softc*));
172 void cmd_channel_map __P((struct pci_attach_args *,
173 struct pciide_softc *, int));
174 int cmd_pci_intr __P((void *));
175 void cmd646_9_irqack __P((struct channel_softc *));
176
177 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void cy693_setup_channel __P((struct channel_softc*));
179
180 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void sis_setup_channel __P((struct channel_softc*));
182
183 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
184 void acer_setup_channel __P((struct channel_softc*));
185 int acer_pci_intr __P((void *));
186
187 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
188 void pdc202xx_setup_channel __P((struct channel_softc*));
189 int pdc202xx_pci_intr __P((void *));
190 int pdc20265_pci_intr __P((void *));
191
192 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void opti_setup_channel __P((struct channel_softc*));
194
195 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void hpt_setup_channel __P((struct channel_softc*));
197 int hpt_pci_intr __P((void *));
198
199 void pciide_channel_dma_setup __P((struct pciide_channel *));
200 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
201 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
202 void pciide_dma_start __P((void*, int, int));
203 int pciide_dma_finish __P((void*, int, int, int));
204 void pciide_irqack __P((struct channel_softc *));
205 void pciide_print_modes __P((struct pciide_channel *));
206
207 struct pciide_product_desc {
208 u_int32_t ide_product;
209 int ide_flags;
210 const char *ide_name;
211 /* map and setup chip, probe drives */
212 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
213 };
214
215 /* Flags for ide_flags */
216 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
217 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
218
/* Default product description for devices not known to this driver */
220 const struct pciide_product_desc default_product_desc = {
221 0,
222 0,
223 "Generic PCI IDE controller",
224 default_chip_map,
225 };
226
227 const struct pciide_product_desc pciide_intel_products[] = {
228 { PCI_PRODUCT_INTEL_82092AA,
229 0,
230 "Intel 82092AA IDE controller",
231 default_chip_map,
232 },
233 { PCI_PRODUCT_INTEL_82371FB_IDE,
234 0,
235 "Intel 82371FB IDE controller (PIIX)",
236 piix_chip_map,
237 },
238 { PCI_PRODUCT_INTEL_82371SB_IDE,
239 0,
240 "Intel 82371SB IDE Interface (PIIX3)",
241 piix_chip_map,
242 },
243 { PCI_PRODUCT_INTEL_82371AB_IDE,
244 0,
245 "Intel 82371AB IDE controller (PIIX4)",
246 piix_chip_map,
247 },
248 { PCI_PRODUCT_INTEL_82440MX_IDE,
249 0,
250 "Intel 82440MX IDE controller",
251 piix_chip_map
252 },
253 { PCI_PRODUCT_INTEL_82801AA_IDE,
254 0,
255 "Intel 82801AA IDE Controller (ICH)",
256 piix_chip_map,
257 },
258 { PCI_PRODUCT_INTEL_82801AB_IDE,
259 0,
260 "Intel 82801AB IDE Controller (ICH0)",
261 piix_chip_map,
262 },
263 { PCI_PRODUCT_INTEL_82801BA_IDE,
264 0,
265 "Intel 82801BA IDE Controller (ICH2)",
266 piix_chip_map,
267 },
268 { PCI_PRODUCT_INTEL_82801BAM_IDE,
269 0,
270 "Intel 82801BAM IDE Controller (ICH2)",
271 piix_chip_map,
272 },
273 { 0,
274 0,
275 NULL,
276 }
277 };
278
279 const struct pciide_product_desc pciide_amd_products[] = {
280 { PCI_PRODUCT_AMD_PBC756_IDE,
281 0,
282 "Advanced Micro Devices AMD756 IDE Controller",
283 amd756_chip_map
284 },
285 { 0,
286 0,
287 NULL,
288 }
289 };
290
291 const struct pciide_product_desc pciide_cmd_products[] = {
292 { PCI_PRODUCT_CMDTECH_640,
293 0,
294 "CMD Technology PCI0640",
295 cmd_chip_map
296 },
297 { PCI_PRODUCT_CMDTECH_643,
298 0,
299 "CMD Technology PCI0643",
300 cmd0643_9_chip_map,
301 },
302 { PCI_PRODUCT_CMDTECH_646,
303 0,
304 "CMD Technology PCI0646",
305 cmd0643_9_chip_map,
306 },
307 { PCI_PRODUCT_CMDTECH_648,
308 IDE_PCI_CLASS_OVERRIDE,
309 "CMD Technology PCI0648",
310 cmd0643_9_chip_map,
311 },
312 { PCI_PRODUCT_CMDTECH_649,
313 IDE_PCI_CLASS_OVERRIDE,
314 "CMD Technology PCI0649",
315 cmd0643_9_chip_map,
316 },
317 { 0,
318 0,
319 NULL,
320 }
321 };
322
323 const struct pciide_product_desc pciide_via_products[] = {
324 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
325 0,
326 "VIA Tech VT82C586 IDE Controller",
327 apollo_chip_map,
328 },
329 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
330 0,
331 "VIA Tech VT82C586A IDE Controller",
332 apollo_chip_map,
333 },
334 { 0,
335 0,
336 NULL,
337 }
338 };
339
340 const struct pciide_product_desc pciide_cypress_products[] = {
341 { PCI_PRODUCT_CONTAQ_82C693,
342 IDE_16BIT_IOSPACE,
343 "Cypress 82C693 IDE Controller",
344 cy693_chip_map,
345 },
346 { 0,
347 0,
348 NULL,
349 }
350 };
351
352 const struct pciide_product_desc pciide_sis_products[] = {
353 { PCI_PRODUCT_SIS_5597_IDE,
354 0,
355 "Silicon Integrated System 5597/5598 IDE controller",
356 sis_chip_map,
357 },
358 { 0,
359 0,
360 NULL,
361 }
362 };
363
364 const struct pciide_product_desc pciide_acer_products[] = {
365 { PCI_PRODUCT_ALI_M5229,
366 0,
367 "Acer Labs M5229 UDMA IDE Controller",
368 acer_chip_map,
369 },
370 { 0,
371 0,
372 NULL,
373 }
374 };
375
376 const struct pciide_product_desc pciide_promise_products[] = {
377 { PCI_PRODUCT_PROMISE_ULTRA33,
378 IDE_PCI_CLASS_OVERRIDE,
379 "Promise Ultra33/ATA Bus Master IDE Accelerator",
380 pdc202xx_chip_map,
381 },
382 { PCI_PRODUCT_PROMISE_ULTRA66,
383 IDE_PCI_CLASS_OVERRIDE,
384 "Promise Ultra66/ATA Bus Master IDE Accelerator",
385 pdc202xx_chip_map,
386 },
387 { PCI_PRODUCT_PROMISE_ULTRA100,
388 IDE_PCI_CLASS_OVERRIDE,
389 "Promise Ultra100/ATA Bus Master IDE Accelerator",
390 pdc202xx_chip_map,
391 },
392 { PCI_PRODUCT_PROMISE_ULTRA100X,
393 IDE_PCI_CLASS_OVERRIDE,
394 "Promise Ultra100/ATA Bus Master IDE Accelerator",
395 pdc202xx_chip_map,
396 },
397 { 0,
398 0,
399 NULL,
400 }
401 };
402
403 const struct pciide_product_desc pciide_opti_products[] = {
404 { PCI_PRODUCT_OPTI_82C621,
405 0,
406 "OPTi 82c621 PCI IDE controller",
407 opti_chip_map,
408 },
409 { PCI_PRODUCT_OPTI_82C568,
410 0,
411 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
412 opti_chip_map,
413 },
414 { PCI_PRODUCT_OPTI_82D568,
415 0,
416 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
417 opti_chip_map,
418 },
419 { 0,
420 0,
421 NULL,
422 }
423 };
424
425 const struct pciide_product_desc pciide_triones_products[] = {
426 { PCI_PRODUCT_TRIONES_HPT366,
427 IDE_PCI_CLASS_OVERRIDE,
428 "Triones/Highpoint HPT366/370 IDE Controller",
429 hpt_chip_map,
430 },
431 { 0,
432 0,
433 NULL,
434 }
435 };
436
437 struct pciide_vendor_desc {
438 u_int32_t ide_vendor;
439 const struct pciide_product_desc *ide_products;
440 };
441
442 const struct pciide_vendor_desc pciide_vendors[] = {
443 { PCI_VENDOR_INTEL, pciide_intel_products },
444 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
445 { PCI_VENDOR_VIATECH, pciide_via_products },
446 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
447 { PCI_VENDOR_SIS, pciide_sis_products },
448 { PCI_VENDOR_ALI, pciide_acer_products },
449 { PCI_VENDOR_PROMISE, pciide_promise_products },
450 { PCI_VENDOR_AMD, pciide_amd_products },
451 { PCI_VENDOR_OPTI, pciide_opti_products },
452 { PCI_VENDOR_TRIONES, pciide_triones_products },
453 { 0, NULL }
454 };
455
456 /* options passed via the 'flags' config keyword */
457 #define PCIIDE_OPTIONS_DMA 0x01
458
459 int pciide_match __P((struct device *, struct cfdata *, void *));
460 void pciide_attach __P((struct device *, struct device *, void *));
461
462 struct cfattach pciide_ca = {
463 sizeof(struct pciide_softc), pciide_match, pciide_attach
464 };
465 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
466 int pciide_mapregs_compat __P(( struct pci_attach_args *,
467 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
468 int pciide_mapregs_native __P((struct pci_attach_args *,
469 struct pciide_channel *, bus_size_t *, bus_size_t *,
470 int (*pci_intr) __P((void *))));
471 void pciide_mapreg_dma __P((struct pciide_softc *,
472 struct pci_attach_args *));
473 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
474 void pciide_mapchan __P((struct pci_attach_args *,
475 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
476 int (*pci_intr) __P((void *))));
477 int pciide_chan_candisable __P((struct pciide_channel *));
478 void pciide_map_compat_intr __P(( struct pci_attach_args *,
479 struct pciide_channel *, int, int));
480 int pciide_print __P((void *, const char *pnp));
481 int pciide_compat_intr __P((void *));
482 int pciide_pci_intr __P((void *));
483 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
484
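/*
 * Look up a controller by PCI ID: first find the vendor in
 * pciide_vendors[], then scan that vendor's product table.  Both
 * tables are terminated by a NULL entry; an unknown device falls
 * back to default_product_desc in pciide_attach().
 */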
485 const struct pciide_product_desc *
486 pciide_lookup_product(id)
487 u_int32_t id;
488 {
489 const struct pciide_product_desc *pp;
490 const struct pciide_vendor_desc *vp;
491
492 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
493 if (PCI_VENDOR(id) == vp->ide_vendor)
494 break;
495
496 if ((pp = vp->ide_products) == NULL)
497 return NULL;
498
499 for (; pp->ide_name != NULL; pp++)
500 if (PCI_PRODUCT(id) == pp->ide_product)
501 break;
502
503 if (pp->ide_name == NULL)
504 return NULL;
505 return pp;
506 }
507
508 int
509 pciide_match(parent, match, aux)
510 struct device *parent;
511 struct cfdata *match;
512 void *aux;
513 {
514 struct pci_attach_args *pa = aux;
515 const struct pciide_product_desc *pp;
516
/*
 * Check the class register to see that it's a PCI IDE controller.
 * If it is, we assume that we can deal with it; it _should_
 * work in a standardized way...
 */
522 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
523 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
524 return (1);
525 }
526
/*
 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
 * controllers.  Let's see if we can deal with them anyway.
 */
531 pp = pciide_lookup_product(pa->pa_id);
532 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
533 return (1);
534 }
535
536 return (0);
537 }
538
539 void
540 pciide_attach(parent, self, aux)
541 struct device *parent, *self;
542 void *aux;
543 {
544 struct pci_attach_args *pa = aux;
545 pci_chipset_tag_t pc = pa->pa_pc;
546 pcitag_t tag = pa->pa_tag;
547 struct pciide_softc *sc = (struct pciide_softc *)self;
548 pcireg_t csr;
549 char devinfo[256];
550 const char *displaydev;
551
552 sc->sc_pp = pciide_lookup_product(pa->pa_id);
553 if (sc->sc_pp == NULL) {
554 sc->sc_pp = &default_product_desc;
555 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
556 displaydev = devinfo;
557 } else
558 displaydev = sc->sc_pp->ide_name;
559
560 printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
561
562 sc->sc_pc = pa->pa_pc;
563 sc->sc_tag = pa->pa_tag;
564 #ifdef WDCDEBUG
565 if (wdcdebug_pciide_mask & DEBUG_PROBE)
566 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
567 #endif
568 sc->sc_pp->chip_map(sc, pa);
569
570 if (sc->sc_dma_ok) {
571 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
572 csr |= PCI_COMMAND_MASTER_ENABLE;
573 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
574 }
575 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
576 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
577 }
578
/* tell whether the chip is enabled or not */
580 int
581 pciide_chipen(sc, pa)
582 struct pciide_softc *sc;
583 struct pci_attach_args *pa;
584 {
585 pcireg_t csr;
586 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
587 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
588 PCI_COMMAND_STATUS_REG);
589 printf("%s: device disabled (at %s)\n",
590 sc->sc_wdcdev.sc_dev.dv_xname,
591 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
592 "device" : "bridge");
593 return 0;
594 }
595 return 1;
596 }
597
598 int
599 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
600 struct pci_attach_args *pa;
601 struct pciide_channel *cp;
602 int compatchan;
603 bus_size_t *cmdsizep, *ctlsizep;
604 {
605 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
606 struct channel_softc *wdc_cp = &cp->wdc_channel;
607
608 cp->compat = 1;
609 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
610 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
611
612 wdc_cp->cmd_iot = pa->pa_iot;
613 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
614 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
615 printf("%s: couldn't map %s channel cmd regs\n",
616 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
617 return (0);
618 }
619
620 wdc_cp->ctl_iot = pa->pa_iot;
621 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
622 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
623 printf("%s: couldn't map %s channel ctl regs\n",
624 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
625 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
626 PCIIDE_COMPAT_CMD_SIZE);
627 return (0);
628 }
629
630 return (1);
631 }
632
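/*
 * Map a channel's command and control registers in native-PCI mode,
 * using the channel's BARs.  Both channels share a single PCI
 * interrupt, so sc_pci_ih is established only once and reused for the
 * second channel.
 */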
633 int
634 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
635 struct pci_attach_args * pa;
636 struct pciide_channel *cp;
637 bus_size_t *cmdsizep, *ctlsizep;
638 int (*pci_intr) __P((void *));
639 {
640 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
641 struct channel_softc *wdc_cp = &cp->wdc_channel;
642 const char *intrstr;
643 pci_intr_handle_t intrhandle;
644
645 cp->compat = 0;
646
647 if (sc->sc_pci_ih == NULL) {
648 if (pci_intr_map(pa, &intrhandle) != 0) {
649 printf("%s: couldn't map native-PCI interrupt\n",
650 sc->sc_wdcdev.sc_dev.dv_xname);
651 return 0;
652 }
653 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
654 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
655 intrhandle, IPL_BIO, pci_intr, sc);
656 if (sc->sc_pci_ih != NULL) {
657 printf("%s: using %s for native-PCI interrupt\n",
658 sc->sc_wdcdev.sc_dev.dv_xname,
659 intrstr ? intrstr : "unknown interrupt");
660 } else {
661 printf("%s: couldn't establish native-PCI interrupt",
662 sc->sc_wdcdev.sc_dev.dv_xname);
663 if (intrstr != NULL)
664 printf(" at %s", intrstr);
665 printf("\n");
666 return 0;
667 }
668 }
669 cp->ih = sc->sc_pci_ih;
670 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
671 PCI_MAPREG_TYPE_IO, 0,
672 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
673 printf("%s: couldn't map %s channel cmd regs\n",
674 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
675 return 0;
676 }
677
678 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
679 PCI_MAPREG_TYPE_IO, 0,
680 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
681 printf("%s: couldn't map %s channel ctl regs\n",
682 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
683 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
684 return 0;
685 }
/*
 * In native mode, 4 bytes of I/O space are mapped for the control
 * register; the control register itself is at offset 2.  Pass the
 * generic code a handle for only one byte at the right offset.
 */
691 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
692 &wdc_cp->ctl_ioh) != 0) {
693 printf("%s: unable to subregion %s channel ctl regs\n",
694 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
695 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
697 return 0;
698 }
699 return (1);
700 }
701
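/*
 * Map the bus-master DMA register block (a single BAR covering both
 * channels; per-channel registers are addressed later with
 * IDEDMA_SCH_OFFSET).  On success this sets sc_dma_ok and installs
 * the dma_init/dma_start/dma_finish hooks.
 */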
702 void
703 pciide_mapreg_dma(sc, pa)
704 struct pciide_softc *sc;
705 struct pci_attach_args *pa;
706 {
707 pcireg_t maptype;
708 bus_addr_t addr;
709
710 /*
711 * Map DMA registers
712 *
713 * Note that sc_dma_ok is the right variable to test to see if
714 * DMA can be done. If the interface doesn't support DMA,
715 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
716 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
717 * non-zero if the interface supports DMA and the registers
718 * could be mapped.
719 *
720 * XXX Note that despite the fact that the Bus Master IDE specs
721 * XXX say that "The bus master IDE function uses 16 bytes of IO
722 * XXX space," some controllers (at least the United
723 * XXX Microelectronics UM8886BF) place it in memory space.
724 */
725 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
726 PCIIDE_REG_BUS_MASTER_DMA);
727
728 switch (maptype) {
729 case PCI_MAPREG_TYPE_IO:
730 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
731 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
732 &addr, NULL, NULL) == 0);
733 if (sc->sc_dma_ok == 0) {
734 printf(", but unused (couldn't query registers)");
735 break;
736 }
737 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
738 && addr >= 0x10000) {
739 sc->sc_dma_ok = 0;
740 printf(", but unused (registers at unsafe address %#lx)", (unsigned long)addr);
741 break;
742 }
743 /* FALLTHROUGH */
744
745 case PCI_MAPREG_MEM_TYPE_32BIT:
746 sc->sc_dma_ok = (pci_mapreg_map(pa,
747 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
748 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
749 sc->sc_dmat = pa->pa_dmat;
750 if (sc->sc_dma_ok == 0) {
751 printf(", but unused (couldn't map registers)");
752 } else {
753 sc->sc_wdcdev.dma_arg = sc;
754 sc->sc_wdcdev.dma_init = pciide_dma_init;
755 sc->sc_wdcdev.dma_start = pciide_dma_start;
756 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
757 }
758 break;
759
760 default:
761 sc->sc_dma_ok = 0;
762 printf(", but unsupported register maptype (0x%x)", maptype);
763 }
764 }
765
766 int
767 pciide_compat_intr(arg)
768 void *arg;
769 {
770 struct pciide_channel *cp = arg;
771
772 #ifdef DIAGNOSTIC
773 /* should only be called for a compat channel */
774 if (cp->compat == 0)
775 panic("pciide compat intr called for non-compat chan %p\n", cp);
776 #endif
777 return (wdcintr(&cp->wdc_channel));
778 }
779
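/*
 * Interrupt handler used when channels are mapped in native-PCI mode:
 * the controller's single interrupt is shared by both channels, so
 * poll every non-compat channel that is actually waiting for an
 * interrupt and let wdcintr() decide whether the interrupt was ours.
 */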
780 int
781 pciide_pci_intr(arg)
782 void *arg;
783 {
784 struct pciide_softc *sc = arg;
785 struct pciide_channel *cp;
786 struct channel_softc *wdc_cp;
787 int i, rv, crv;
788
789 rv = 0;
790 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
791 cp = &sc->pciide_channels[i];
792 wdc_cp = &cp->wdc_channel;
793
/* If a compat channel, skip. */
795 if (cp->compat)
796 continue;
/* if this channel isn't waiting for an intr, skip */
798 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
799 continue;
800
801 crv = wdcintr(wdc_cp);
802 if (crv == 0)
803 ; /* leave rv alone */
804 else if (crv == 1)
805 rv = 1; /* claim the intr */
806 else if (rv == 0) /* crv should be -1 in this case */
807 rv = crv; /* if we've done no better, take it */
808 }
809 return (rv);
810 }
811
812 void
813 pciide_channel_dma_setup(cp)
814 struct pciide_channel *cp;
815 {
816 int drive;
817 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
818 struct ata_drive_datas *drvp;
819
820 for (drive = 0; drive < 2; drive++) {
821 drvp = &cp->wdc_channel.ch_drive[drive];
822 /* If no drive, skip */
823 if ((drvp->drive_flags & DRIVE) == 0)
824 continue;
825 /* setup DMA if needed */
826 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
827 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
828 sc->sc_dma_ok == 0) {
829 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
830 continue;
831 }
832 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
833 != 0) {
834 /* Abort DMA setup */
835 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
836 continue;
837 }
838 }
839 }
840
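/*
 * Allocate and map the per-drive descriptor table used by bus-master
 * IDE DMA (one idedma_table entry per transfer segment, aligned to
 * IDEDMA_TBL_ALIGN), load a DMA map for the table itself, and create
 * the map later used for the actual data transfers.
 */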
841 int
842 pciide_dma_table_setup(sc, channel, drive)
843 struct pciide_softc *sc;
844 int channel, drive;
845 {
846 bus_dma_segment_t seg;
847 int error, rseg;
848 const bus_size_t dma_table_size =
849 sizeof(struct idedma_table) * NIDEDMA_TABLES;
850 struct pciide_dma_maps *dma_maps =
851 &sc->pciide_channels[channel].dma_maps[drive];
852
853 /* If table was already allocated, just return */
854 if (dma_maps->dma_table)
855 return 0;
856
857 /* Allocate memory for the DMA tables and map it */
858 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
859 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
860 BUS_DMA_NOWAIT)) != 0) {
861 printf("%s:%d: unable to allocate table DMA for "
862 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
863 channel, drive, error);
864 return error;
865 }
866 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
867 dma_table_size,
868 (caddr_t *)&dma_maps->dma_table,
869 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
printf("%s:%d: unable to map table DMA for "
871 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
872 channel, drive, error);
873 return error;
874 }
875 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
876 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
877 (unsigned long)seg.ds_addr), DEBUG_PROBE);
878
879 /* Create and load table DMA map for this disk */
880 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
881 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
882 &dma_maps->dmamap_table)) != 0) {
883 printf("%s:%d: unable to create table DMA map for "
884 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
885 channel, drive, error);
886 return error;
887 }
888 if ((error = bus_dmamap_load(sc->sc_dmat,
889 dma_maps->dmamap_table,
890 dma_maps->dma_table,
891 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
892 printf("%s:%d: unable to load table DMA map for "
893 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
894 channel, drive, error);
895 return error;
896 }
897 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
898 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
899 DEBUG_PROBE);
900 /* Create a xfer DMA map for this drive */
901 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
902 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
903 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
904 &dma_maps->dmamap_xfer)) != 0) {
905 printf("%s:%d: unable to create xfer DMA map for "
906 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
907 channel, drive, error);
908 return error;
909 }
910 return 0;
911 }
912
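/*
 * Prepare a DMA transfer: load the data buffer into dmamap_xfer, fill
 * the descriptor table with one little-endian base address/byte count
 * pair per segment, flag the last entry with IDEDMA_BYTE_COUNT_EOT,
 * then program the channel's bus-master registers with the table
 * address and transfer direction.  The transfer itself is started
 * later by pciide_dma_start().
 */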
913 int
914 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
915 void *v;
916 int channel, drive;
917 void *databuf;
918 size_t datalen;
919 int flags;
920 {
921 struct pciide_softc *sc = v;
922 int error, seg;
923 struct pciide_dma_maps *dma_maps =
924 &sc->pciide_channels[channel].dma_maps[drive];
925
926 error = bus_dmamap_load(sc->sc_dmat,
927 dma_maps->dmamap_xfer,
928 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
929 if (error) {
printf("%s:%d: unable to load xfer DMA map for "
931 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
932 channel, drive, error);
933 return error;
934 }
935
936 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
937 dma_maps->dmamap_xfer->dm_mapsize,
938 (flags & WDC_DMA_READ) ?
939 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
940
941 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
942 #ifdef DIAGNOSTIC
943 /* A segment must not cross a 64k boundary */
944 {
945 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
946 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
947 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
948 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
949 printf("pciide_dma: segment %d physical addr 0x%lx"
950 " len 0x%lx not properly aligned\n",
951 seg, phys, len);
952 panic("pciide_dma: buf align");
953 }
954 }
955 #endif
956 dma_maps->dma_table[seg].base_addr =
957 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
958 dma_maps->dma_table[seg].byte_count =
959 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
960 IDEDMA_BYTE_COUNT_MASK);
961 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
962 seg, le32toh(dma_maps->dma_table[seg].byte_count),
963 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
964
965 }
966 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
967 htole32(IDEDMA_BYTE_COUNT_EOT);
968
969 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
970 dma_maps->dmamap_table->dm_mapsize,
971 BUS_DMASYNC_PREWRITE);
972
973 /* Maps are ready. Start DMA function */
974 #ifdef DIAGNOSTIC
975 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
976 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
977 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
978 panic("pciide_dma_init: table align");
979 }
980 #endif
981
982 /* Clear status bits */
983 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
984 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
985 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
986 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
987 /* Write table addr */
988 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
989 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
990 dma_maps->dmamap_table->dm_segs[0].ds_addr);
991 /* set read/write */
992 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
993 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
994 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
995 /* remember flags */
996 dma_maps->dma_flags = flags;
997 return 0;
998 }
999
1000 void
1001 pciide_dma_start(v, channel, drive)
1002 void *v;
1003 int channel, drive;
1004 {
1005 struct pciide_softc *sc = v;
1006
1007 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1008 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1009 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1010 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1011 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1012 }
1013
1014 int
1015 pciide_dma_finish(v, channel, drive, force)
1016 void *v;
1017 int channel, drive;
1018 int force;
1019 {
1020 struct pciide_softc *sc = v;
1021 u_int8_t status;
1022 int error = 0;
1023 struct pciide_dma_maps *dma_maps =
1024 &sc->pciide_channels[channel].dma_maps[drive];
1025
1026 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1027 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1028 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1029 DEBUG_XFERS);
1030
1031 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1032 return WDC_DMAST_NOIRQ;
1033
1034 /* stop DMA channel */
1035 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1036 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1037 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1038 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1039
1040 /* Unload the map of the data buffer */
1041 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1042 dma_maps->dmamap_xfer->dm_mapsize,
1043 (dma_maps->dma_flags & WDC_DMA_READ) ?
1044 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1045 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1046
1047 if ((status & IDEDMA_CTL_ERR) != 0) {
1048 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1049 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1050 error |= WDC_DMAST_ERR;
1051 }
1052
1053 if ((status & IDEDMA_CTL_INTR) == 0) {
1054 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1055 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1056 drive, status);
1057 error |= WDC_DMAST_NOIRQ;
1058 }
1059
1060 if ((status & IDEDMA_CTL_ACT) != 0) {
1061 /* data underrun, may be a valid condition for ATAPI */
1062 error |= WDC_DMAST_UNDER;
1063 }
1064 return error;
1065 }
1066
1067 void
1068 pciide_irqack(chp)
1069 struct channel_softc *chp;
1070 {
1071 struct pciide_channel *cp = (struct pciide_channel*)chp;
1072 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1073
1074 /* clear status bits in IDE DMA registers */
1075 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1076 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1077 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1078 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1079 }
1080
/* some common code used by several chip_map functions */
1082 int
1083 pciide_chansetup(sc, channel, interface)
1084 struct pciide_softc *sc;
1085 int channel;
1086 pcireg_t interface;
1087 {
1088 struct pciide_channel *cp = &sc->pciide_channels[channel];
1089 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1090 cp->name = PCIIDE_CHANNEL_NAME(channel);
1091 cp->wdc_channel.channel = channel;
1092 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1093 cp->wdc_channel.ch_queue =
1094 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1095 if (cp->wdc_channel.ch_queue == NULL) {
1096 printf("%s %s channel: "
"can't allocate memory for command queue\n",
1098 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1099 return 0;
1100 }
1101 printf("%s: %s channel %s to %s mode\n",
1102 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1103 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1104 "configured" : "wired",
1105 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1106 "native-PCI" : "compatibility");
1107 return 1;
1108 }
1109
/* some common code used by several chip_map functions to map a channel */
1111 void
1112 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1113 struct pci_attach_args *pa;
1114 struct pciide_channel *cp;
1115 pcireg_t interface;
1116 bus_size_t *cmdsizep, *ctlsizep;
1117 int (*pci_intr) __P((void *));
1118 {
1119 struct channel_softc *wdc_cp = &cp->wdc_channel;
1120
1121 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1122 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1123 pci_intr);
1124 else
1125 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1126 wdc_cp->channel, cmdsizep, ctlsizep);
1127
1128 if (cp->hw_ok == 0)
1129 return;
1130 wdc_cp->data32iot = wdc_cp->cmd_iot;
1131 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1132 wdcattach(wdc_cp);
1133 }
1134
/*
 * Generic code to call to know if a channel can be disabled.  Returns 1
 * if the channel can be disabled (no drives attached), 0 if not.
 */
1139 int
1140 pciide_chan_candisable(cp)
1141 struct pciide_channel *cp;
1142 {
1143 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1144 struct channel_softc *wdc_cp = &cp->wdc_channel;
1145
1146 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1147 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1148 printf("%s: disabling %s channel (no drives)\n",
1149 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1150 cp->hw_ok = 0;
1151 return 1;
1152 }
1153 return 0;
1154 }
1155
1156 /*
1157 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1158 * Set hw_ok=0 on failure
1159 */
1160 void
1161 pciide_map_compat_intr(pa, cp, compatchan, interface)
1162 struct pci_attach_args *pa;
1163 struct pciide_channel *cp;
1164 int compatchan, interface;
1165 {
1166 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1167 struct channel_softc *wdc_cp = &cp->wdc_channel;
1168
1169 if (cp->hw_ok == 0)
1170 return;
1171 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1172 return;
1173
1174 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1175 pa, compatchan, pciide_compat_intr, cp);
1176 if (cp->ih == NULL) {
1177 printf("%s: no compatibility interrupt for use by %s "
1178 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1179 cp->hw_ok = 0;
1180 }
1181 }
1182
1183 void
1184 pciide_print_modes(cp)
1185 struct pciide_channel *cp;
1186 {
1187 wdc_print_modes(&cp->wdc_channel);
1188 }
1189
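/*
 * Fallback chip_map for controllers without a specific driver: map
 * each channel (native or compat, as the interface byte says), probe
 * it, and double-check that the channel really belongs to this
 * controller by re-probing with the controller's I/O decoding turned
 * off.  DMA is only used if explicitly requested with the
 * PCIIDE_OPTIONS_DMA config flag, and even then without full driver
 * support (no chip-specific timing setup).
 */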
1190 void
1191 default_chip_map(sc, pa)
1192 struct pciide_softc *sc;
1193 struct pci_attach_args *pa;
1194 {
1195 struct pciide_channel *cp;
1196 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1197 pcireg_t csr;
1198 int channel, drive;
1199 struct ata_drive_datas *drvp;
1200 u_int8_t idedma_ctl;
1201 bus_size_t cmdsize, ctlsize;
1202 char *failreason;
1203
1204 if (pciide_chipen(sc, pa) == 0)
1205 return;
1206
1207 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1208 printf("%s: bus-master DMA support present",
1209 sc->sc_wdcdev.sc_dev.dv_xname);
1210 if (sc->sc_pp == &default_product_desc &&
1211 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1212 PCIIDE_OPTIONS_DMA) == 0) {
1213 printf(", but unused (no driver support)");
1214 sc->sc_dma_ok = 0;
1215 } else {
1216 pciide_mapreg_dma(sc, pa);
1217 if (sc->sc_dma_ok != 0)
1218 printf(", used without full driver "
1219 "support");
1220 }
1221 } else {
1222 printf("%s: hardware does not support DMA",
1223 sc->sc_wdcdev.sc_dev.dv_xname);
1224 sc->sc_dma_ok = 0;
1225 }
1226 printf("\n");
1227 if (sc->sc_dma_ok) {
1228 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1229 sc->sc_wdcdev.irqack = pciide_irqack;
1230 }
1231 sc->sc_wdcdev.PIO_cap = 0;
1232 sc->sc_wdcdev.DMA_cap = 0;
1233
1234 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1235 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1236 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1237
1238 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1239 cp = &sc->pciide_channels[channel];
1240 if (pciide_chansetup(sc, channel, interface) == 0)
1241 continue;
1242 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1243 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1244 &ctlsize, pciide_pci_intr);
1245 } else {
1246 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1247 channel, &cmdsize, &ctlsize);
1248 }
1249 if (cp->hw_ok == 0)
1250 continue;
1251 /*
1252 * Check to see if something appears to be there.
1253 */
1254 failreason = NULL;
1255 if (!wdcprobe(&cp->wdc_channel)) {
1256 failreason = "not responding; disabled or no drives?";
1257 goto next;
1258 }
1259 /*
1260 * Now, make sure it's actually attributable to this PCI IDE
1261 * channel by trying to access the channel again while the
1262 * PCI IDE controller's I/O space is disabled. (If the
1263 * channel no longer appears to be there, it belongs to
1264 * this controller.) YUCK!
1265 */
1266 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1267 PCI_COMMAND_STATUS_REG);
1268 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1269 csr & ~PCI_COMMAND_IO_ENABLE);
1270 if (wdcprobe(&cp->wdc_channel))
1271 failreason = "other hardware responding at addresses";
1272 pci_conf_write(sc->sc_pc, sc->sc_tag,
1273 PCI_COMMAND_STATUS_REG, csr);
1274 next:
1275 if (failreason) {
1276 printf("%s: %s channel ignored (%s)\n",
1277 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1278 failreason);
1279 cp->hw_ok = 0;
1280 bus_space_unmap(cp->wdc_channel.cmd_iot,
1281 cp->wdc_channel.cmd_ioh, cmdsize);
1282 bus_space_unmap(cp->wdc_channel.ctl_iot,
1283 cp->wdc_channel.ctl_ioh, ctlsize);
1284 } else {
1285 pciide_map_compat_intr(pa, cp, channel, interface);
1286 }
1287 if (cp->hw_ok) {
1288 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1289 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1290 wdcattach(&cp->wdc_channel);
1291 }
1292 }
1293
1294 if (sc->sc_dma_ok == 0)
1295 return;
1296
1297 /* Allocate DMA maps */
1298 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1299 idedma_ctl = 0;
1300 cp = &sc->pciide_channels[channel];
1301 for (drive = 0; drive < 2; drive++) {
1302 drvp = &cp->wdc_channel.ch_drive[drive];
1303 /* If no drive, skip */
1304 if ((drvp->drive_flags & DRIVE) == 0)
1305 continue;
1306 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1307 continue;
1308 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1309 /* Abort DMA setup */
1310 printf("%s:%d:%d: can't allocate DMA maps, "
1311 "using PIO transfers\n",
1312 sc->sc_wdcdev.sc_dev.dv_xname,
1313 channel, drive);
1314 drvp->drive_flags &= ~DRIVE_DMA;
1315 }
1316 printf("%s:%d:%d: using DMA data transfers\n",
1317 sc->sc_wdcdev.sc_dev.dv_xname,
1318 channel, drive);
1319 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1320 }
1321 if (idedma_ctl != 0) {
1322 /* Add software bits in status register */
1323 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1324 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1325 idedma_ctl);
1326 }
1327 }
1328 }
1329
1330 void
1331 piix_chip_map(sc, pa)
1332 struct pciide_softc *sc;
1333 struct pci_attach_args *pa;
1334 {
1335 struct pciide_channel *cp;
1336 int channel;
1337 u_int32_t idetim;
1338 bus_size_t cmdsize, ctlsize;
1339
1340 if (pciide_chipen(sc, pa) == 0)
1341 return;
1342
1343 printf("%s: bus-master DMA support present",
1344 sc->sc_wdcdev.sc_dev.dv_xname);
1345 pciide_mapreg_dma(sc, pa);
1346 printf("\n");
1347 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1348 WDC_CAPABILITY_MODE;
1349 if (sc->sc_dma_ok) {
1350 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1351 sc->sc_wdcdev.irqack = pciide_irqack;
1352 switch(sc->sc_pp->ide_product) {
1353 case PCI_PRODUCT_INTEL_82371AB_IDE:
1354 case PCI_PRODUCT_INTEL_82440MX_IDE:
1355 case PCI_PRODUCT_INTEL_82801AA_IDE:
1356 case PCI_PRODUCT_INTEL_82801AB_IDE:
1357 case PCI_PRODUCT_INTEL_82801BA_IDE:
1358 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1359 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1360 }
1361 }
1362 sc->sc_wdcdev.PIO_cap = 4;
1363 sc->sc_wdcdev.DMA_cap = 2;
1364 switch(sc->sc_pp->ide_product) {
1365 case PCI_PRODUCT_INTEL_82801AA_IDE:
1366 sc->sc_wdcdev.UDMA_cap = 4;
1367 break;
1368 case PCI_PRODUCT_INTEL_82801BA_IDE:
1369 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1370 sc->sc_wdcdev.UDMA_cap = 5;
1371 break;
1372 default:
1373 sc->sc_wdcdev.UDMA_cap = 2;
1374 }
1375 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1376 sc->sc_wdcdev.set_modes = piix_setup_channel;
1377 else
1378 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1379 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1380 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1381
1382 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1383 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1384 DEBUG_PROBE);
1385 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1386 WDCDEBUG_PRINT((", sidetim=0x%x",
1387 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1388 DEBUG_PROBE);
1389 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
WDCDEBUG_PRINT((", udmareg 0x%x",
1391 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1392 DEBUG_PROBE);
1393 }
1394 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1395 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1396 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1397 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1398 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1399 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1400 DEBUG_PROBE);
1401 }
1402
1403 }
1404 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1405
1406 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1407 cp = &sc->pciide_channels[channel];
1408 /* PIIX is compat-only */
1409 if (pciide_chansetup(sc, channel, 0) == 0)
1410 continue;
1411 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1412 if ((PIIX_IDETIM_READ(idetim, channel) &
1413 PIIX_IDETIM_IDE) == 0) {
1414 printf("%s: %s channel ignored (disabled)\n",
1415 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1416 continue;
1417 }
1418 /* PIIX are compat-only pciide devices */
1419 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1420 if (cp->hw_ok == 0)
1421 continue;
1422 if (pciide_chan_candisable(cp)) {
1423 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1424 channel);
1425 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1426 idetim);
1427 }
1428 pciide_map_compat_intr(pa, cp, channel, 0);
1429 if (cp->hw_ok == 0)
1430 continue;
1431 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1432 }
1433
1434 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1435 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1436 DEBUG_PROBE);
1437 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1438 WDCDEBUG_PRINT((", sidetim=0x%x",
1439 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1440 DEBUG_PROBE);
1441 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
WDCDEBUG_PRINT((", udmareg 0x%x",
1443 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1444 DEBUG_PROBE);
1445 }
1446 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1447 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1448 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1449 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1450 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1451 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1452 DEBUG_PROBE);
1453 }
1454 }
1455 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1456 }
1457
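/*
 * Per-channel mode setup for the original PIIX: the chip can't use
 * different timings for the master and slave drives on a channel, so
 * the code below picks one compromise timing for both.  The PIIX3/4
 * and later parts use piix3_4_setup_channel(), which can set separate
 * slave timings via the SIDETIM register.
 */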
1458 void
1459 piix_setup_channel(chp)
1460 struct channel_softc *chp;
1461 {
1462 u_int8_t mode[2], drive;
1463 u_int32_t oidetim, idetim, idedma_ctl;
1464 struct pciide_channel *cp = (struct pciide_channel*)chp;
1465 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1466 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1467
1468 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1469 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1470 idedma_ctl = 0;
1471
1472 /* set up new idetim: Enable IDE registers decode */
1473 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1474 chp->channel);
1475
1476 /* setup DMA */
1477 pciide_channel_dma_setup(cp);
1478
/*
 * Here we have to mess with the drives' modes: the PIIX can't have
 * different timings for the master and slave drives, so we need to
 * find the best combination.
 */
1484
/* If both drives support DMA, take the lower mode */
1486 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1487 (drvp[1].drive_flags & DRIVE_DMA)) {
1488 mode[0] = mode[1] =
1489 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1490 drvp[0].DMA_mode = mode[0];
1491 drvp[1].DMA_mode = mode[1];
1492 goto ok;
1493 }
/*
 * If only one drive supports DMA, use its mode, and
 * put the other one in PIO mode 0 if its mode is not compatible
 */
1498 if (drvp[0].drive_flags & DRIVE_DMA) {
1499 mode[0] = drvp[0].DMA_mode;
1500 mode[1] = drvp[1].PIO_mode;
1501 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1502 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1503 mode[1] = drvp[1].PIO_mode = 0;
1504 goto ok;
1505 }
1506 if (drvp[1].drive_flags & DRIVE_DMA) {
1507 mode[1] = drvp[1].DMA_mode;
1508 mode[0] = drvp[0].PIO_mode;
1509 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1510 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1511 mode[0] = drvp[0].PIO_mode = 0;
1512 goto ok;
1513 }
/*
 * If neither drive is using DMA, take the lower mode, unless
 * one of them is in a PIO mode < 2
 */
1518 if (drvp[0].PIO_mode < 2) {
1519 mode[0] = drvp[0].PIO_mode = 0;
1520 mode[1] = drvp[1].PIO_mode;
1521 } else if (drvp[1].PIO_mode < 2) {
1522 mode[1] = drvp[1].PIO_mode = 0;
1523 mode[0] = drvp[0].PIO_mode;
1524 } else {
1525 mode[0] = mode[1] =
1526 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1527 drvp[0].PIO_mode = mode[0];
1528 drvp[1].PIO_mode = mode[1];
1529 }
1530 ok: /* The modes are setup */
1531 for (drive = 0; drive < 2; drive++) {
1532 if (drvp[drive].drive_flags & DRIVE_DMA) {
1533 idetim |= piix_setup_idetim_timings(
1534 mode[drive], 1, chp->channel);
1535 goto end;
1536 }
1537 }
/* If we get here, neither drive is using DMA */
1539 if (mode[0] >= 2)
1540 idetim |= piix_setup_idetim_timings(
1541 mode[0], 0, chp->channel);
1542 else
1543 idetim |= piix_setup_idetim_timings(
1544 mode[1], 0, chp->channel);
1545 end: /*
1546 * timing mode is now set up in the controller. Enable
1547 * it per-drive
1548 */
1549 for (drive = 0; drive < 2; drive++) {
1550 /* If no drive, skip */
1551 if ((drvp[drive].drive_flags & DRIVE) == 0)
1552 continue;
1553 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1554 if (drvp[drive].drive_flags & DRIVE_DMA)
1555 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1556 }
1557 if (idedma_ctl != 0) {
1558 /* Add software bits in status register */
1559 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1560 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1561 idedma_ctl);
1562 }
1563 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1564 pciide_print_modes(cp);
1565 }
1566
1567 void
1568 piix3_4_setup_channel(chp)
1569 struct channel_softc *chp;
1570 {
1571 struct ata_drive_datas *drvp;
1572 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1573 struct pciide_channel *cp = (struct pciide_channel*)chp;
1574 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1575 int drive;
1576 int channel = chp->channel;
1577
1578 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1579 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1580 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1581 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1582 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1583 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1584 PIIX_SIDETIM_RTC_MASK(channel));
1585
1586 idedma_ctl = 0;
1587 /* If channel disabled, no need to go further */
1588 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1589 return;
1590 /* set up new idetim: Enable IDE registers decode */
1591 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1592
1593 /* setup DMA if needed */
1594 pciide_channel_dma_setup(cp);
1595
1596 for (drive = 0; drive < 2; drive++) {
1597 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1598 PIIX_UDMATIM_SET(0x3, channel, drive));
1599 drvp = &chp->ch_drive[drive];
1600 /* If no drive, skip */
1601 if ((drvp->drive_flags & DRIVE) == 0)
1602 continue;
1603 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1604 (drvp->drive_flags & DRIVE_UDMA) == 0))
1605 goto pio;
1606
1607 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1608 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1609 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1610 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1611 ideconf |= PIIX_CONFIG_PINGPONG;
1612 }
1613 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1614 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1615 /* setup Ultra/100 */
1616 if (drvp->UDMA_mode > 2 &&
1617 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1618 drvp->UDMA_mode = 2;
1619 if (drvp->UDMA_mode > 4) {
1620 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1621 } else {
1622 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1623 if (drvp->UDMA_mode > 2) {
1624 ideconf |= PIIX_CONFIG_UDMA66(channel,
1625 drive);
1626 } else {
1627 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1628 drive);
1629 }
1630 }
1631 }
1632 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1633 /* setup Ultra/66 */
1634 if (drvp->UDMA_mode > 2 &&
1635 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1636 drvp->UDMA_mode = 2;
1637 if (drvp->UDMA_mode > 2)
1638 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1639 else
1640 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1641 }
1642 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1643 (drvp->drive_flags & DRIVE_UDMA)) {
1644 /* use Ultra/DMA */
1645 drvp->drive_flags &= ~DRIVE_DMA;
1646 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
1647 udmareg |= PIIX_UDMATIM_SET(
1648 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1649 } else {
1650 /* use Multiword DMA */
1651 drvp->drive_flags &= ~DRIVE_UDMA;
1652 if (drive == 0) {
1653 idetim |= piix_setup_idetim_timings(
1654 drvp->DMA_mode, 1, channel);
1655 } else {
1656 sidetim |= piix_setup_sidetim_timings(
1657 drvp->DMA_mode, 1, channel);
1658 idetim =PIIX_IDETIM_SET(idetim,
1659 PIIX_IDETIM_SITRE, channel);
1660 }
1661 }
1662 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1663
1664 pio: /* use PIO mode */
1665 idetim |= piix_setup_idetim_drvs(drvp);
1666 if (drive == 0) {
1667 idetim |= piix_setup_idetim_timings(
1668 drvp->PIO_mode, 0, channel);
1669 } else {
1670 sidetim |= piix_setup_sidetim_timings(
1671 drvp->PIO_mode, 0, channel);
1672 idetim =PIIX_IDETIM_SET(idetim,
1673 PIIX_IDETIM_SITRE, channel);
1674 }
1675 }
1676 if (idedma_ctl != 0) {
1677 /* Add software bits in status register */
1678 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1679 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1680 idedma_ctl);
1681 }
1682 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1683 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1684 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1685 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1686 pciide_print_modes(cp);
1687 }
1688
1689
1690 /* setup ISP and RTC fields, based on mode */
1691 static u_int32_t
1692 piix_setup_idetim_timings(mode, dma, channel)
1693 u_int8_t mode;
1694 u_int8_t dma;
1695 u_int8_t channel;
1696 {
1697
1698 if (dma)
1699 return PIIX_IDETIM_SET(0,
1700 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1701 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1702 channel);
1703 else
1704 return PIIX_IDETIM_SET(0,
1705 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1706 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1707 channel);
1708 }
1709
1710 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1711 static u_int32_t
1712 piix_setup_idetim_drvs(drvp)
1713 struct ata_drive_datas *drvp;
1714 {
1715 u_int32_t ret = 0;
1716 struct channel_softc *chp = drvp->chnl_softc;
1717 u_int8_t channel = chp->channel;
1718 u_int8_t drive = drvp->drive;
1719
/*
 * If the drive is using UDMA, the timing setups are independent,
 * so just check DMA and PIO here.
 */
1724 if (drvp->drive_flags & DRIVE_DMA) {
1725 /* if mode = DMA mode 0, use compatible timings */
1726 if ((drvp->drive_flags & DRIVE_DMA) &&
1727 drvp->DMA_mode == 0) {
1728 drvp->PIO_mode = 0;
1729 return ret;
1730 }
1731 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
/*
 * If the PIO and DMA timings are the same, use fast timings for PIO
 * too; otherwise fall back to compat timings (PIO mode 0).
 */
1736 if ((piix_isp_pio[drvp->PIO_mode] !=
1737 piix_isp_dma[drvp->DMA_mode]) ||
1738 (piix_rtc_pio[drvp->PIO_mode] !=
1739 piix_rtc_dma[drvp->DMA_mode]))
1740 drvp->PIO_mode = 0;
1741 /* if PIO mode <= 2, use compat timings for PIO */
1742 if (drvp->PIO_mode <= 2) {
1743 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1744 channel);
1745 return ret;
1746 }
1747 }
1748
1749 /*
1750 * Now setup PIO modes. If mode < 2, use compat timings.
1751 * Else enable fast timings. Enable IORDY and prefetch/post
1752 * if PIO mode >= 3.
1753 */
1754
1755 if (drvp->PIO_mode < 2)
1756 return ret;
1757
1758 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1759 if (drvp->PIO_mode >= 3) {
1760 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1761 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1762 }
1763 return ret;
1764 }
1765
1766 /* setup values in SIDETIM registers, based on mode */
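/*
 * SIDETIM provides separate ISP/RTC values for the slave drive of each
 * channel; it is only honoured when SITRE is set in IDETIM (see above).
 */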
1767 static u_int32_t
1768 piix_setup_sidetim_timings(mode, dma, channel)
1769 u_int8_t mode;
1770 u_int8_t dma;
1771 u_int8_t channel;
1772 {
1773 if (dma)
1774 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1775 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1776 else
1777 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1778 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1779 }
1780
1781 void
1782 amd756_chip_map(sc, pa)
1783 struct pciide_softc *sc;
1784 struct pci_attach_args *pa;
1785 {
1786 struct pciide_channel *cp;
1787 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1788 int channel;
1789 pcireg_t chanenable;
1790 bus_size_t cmdsize, ctlsize;
1791
1792 if (pciide_chipen(sc, pa) == 0)
1793 return;
1794 printf("%s: bus-master DMA support present",
1795 sc->sc_wdcdev.sc_dev.dv_xname);
1796 pciide_mapreg_dma(sc, pa);
1797 printf("\n");
1798 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1799 WDC_CAPABILITY_MODE;
1800 if (sc->sc_dma_ok) {
1801 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1802 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1803 sc->sc_wdcdev.irqack = pciide_irqack;
1804 }
1805 sc->sc_wdcdev.PIO_cap = 4;
1806 sc->sc_wdcdev.DMA_cap = 2;
1807 sc->sc_wdcdev.UDMA_cap = 4;
1808 sc->sc_wdcdev.set_modes = amd756_setup_channel;
1809 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1810 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1811 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1812
1813 WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1814 DEBUG_PROBE);
1815 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1816 cp = &sc->pciide_channels[channel];
1817 if (pciide_chansetup(sc, channel, interface) == 0)
1818 continue;
1819
1820 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1821 printf("%s: %s channel ignored (disabled)\n",
1822 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1823 continue;
1824 }
1825 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1826 pciide_pci_intr);
1827
1828 if (pciide_chan_candisable(cp))
1829 chanenable &= ~AMD756_CHAN_EN(channel);
1830 pciide_map_compat_intr(pa, cp, channel, interface);
1831 if (cp->hw_ok == 0)
1832 continue;
1833
1834 amd756_setup_channel(&cp->wdc_channel);
1835 }
1836 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1837 chanenable);
1838 return;
1839 }
1840
1841 void
1842 amd756_setup_channel(chp)
1843 struct channel_softc *chp;
1844 {
1845 u_int32_t udmatim_reg, datatim_reg;
1846 u_int8_t idedma_ctl;
1847 int mode, drive;
1848 struct ata_drive_datas *drvp;
1849 struct pciide_channel *cp = (struct pciide_channel*)chp;
1850 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1851 #ifndef PCIIDE_AMD756_ENABLEDMA
1852 int rev = PCI_REVISION(
1853 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1854 #endif
1855
1856 idedma_ctl = 0;
1857 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1858 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1859 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1860 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1861
1862 /* setup DMA if needed */
1863 pciide_channel_dma_setup(cp);
1864
1865 for (drive = 0; drive < 2; drive++) {
1866 drvp = &chp->ch_drive[drive];
1867 /* If no drive, skip */
1868 if ((drvp->drive_flags & DRIVE) == 0)
1869 continue;
1870 /* add timing values, setup DMA if needed */
1871 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1872 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1873 mode = drvp->PIO_mode;
1874 goto pio;
1875 }
1876 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1877 (drvp->drive_flags & DRIVE_UDMA)) {
1878 /* use Ultra/DMA */
1879 drvp->drive_flags &= ~DRIVE_DMA;
1880 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1881 AMD756_UDMA_EN_MTH(chp->channel, drive) |
1882 AMD756_UDMA_TIME(chp->channel, drive,
1883 amd756_udma_tim[drvp->UDMA_mode]);
1884 /* can use PIO timings, MW DMA unused */
1885 mode = drvp->PIO_mode;
1886 } else {
1887 /* use Multiword DMA, but only if revision is OK */
1888 drvp->drive_flags &= ~DRIVE_UDMA;
1889 #ifndef PCIIDE_AMD756_ENABLEDMA
1890 /*
1891 * The workaround doesn't seem to be necessary
1892 * with all drives, so it can be disabled by
1893 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
1894 * triggered.
1895 */
1896 if (AMD756_CHIPREV_DISABLEDMA(rev)) {
1897 printf("%s:%d:%d: multi-word DMA disabled due "
1898 "to chip revision\n",
1899 sc->sc_wdcdev.sc_dev.dv_xname,
1900 chp->channel, drive);
1901 mode = drvp->PIO_mode;
1902 drvp->drive_flags &= ~DRIVE_DMA;
1903 goto pio;
1904 }
1905 #endif
1906 /* mode = min(pio, dma+2) */
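			/*
			 * The data timing register is shared by PIO and
			 * multiword DMA; this driver pairs MW DMA mode n
			 * with the PIO mode n+2 timing entry, so pick the
			 * slower of the two requested modes.
			 */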
1907 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1908 mode = drvp->PIO_mode;
1909 else
1910 mode = drvp->DMA_mode + 2;
1911 }
1912 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1913
1914 pio: /* setup PIO mode */
1915 if (mode <= 2) {
1916 drvp->DMA_mode = 0;
1917 drvp->PIO_mode = 0;
1918 mode = 0;
1919 } else {
1920 drvp->PIO_mode = mode;
1921 drvp->DMA_mode = mode - 2;
1922 }
1923 datatim_reg |=
1924 AMD756_DATATIM_PULSE(chp->channel, drive,
1925 amd756_pio_set[mode]) |
1926 AMD756_DATATIM_RECOV(chp->channel, drive,
1927 amd756_pio_rec[mode]);
1928 }
1929 if (idedma_ctl != 0) {
1930 /* Add software bits in status register */
1931 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1932 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1933 idedma_ctl);
1934 }
1935 pciide_print_modes(cp);
1936 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1937 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1938 }
1939
1940 void
1941 apollo_chip_map(sc, pa)
1942 struct pciide_softc *sc;
1943 struct pci_attach_args *pa;
1944 {
1945 struct pciide_channel *cp;
1946 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1947 int rev = PCI_REVISION(pa->pa_class);
1948 int channel;
1949 u_int32_t ideconf, udma_conf, old_udma_conf;
1950 bus_size_t cmdsize, ctlsize;
1951
1952 if (pciide_chipen(sc, pa) == 0)
1953 return;
1954 printf("%s: bus-master DMA support present",
1955 sc->sc_wdcdev.sc_dev.dv_xname);
1956 pciide_mapreg_dma(sc, pa);
1957 printf("\n");
1958 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1959 WDC_CAPABILITY_MODE;
1960 if (sc->sc_dma_ok) {
1961 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1962 sc->sc_wdcdev.irqack = pciide_irqack;
1963 if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE
1964 && rev >= 6)
1965 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1966 }
1967 sc->sc_wdcdev.PIO_cap = 4;
1968 sc->sc_wdcdev.DMA_cap = 2;
1969 sc->sc_wdcdev.UDMA_cap = 2;
1970 sc->sc_wdcdev.set_modes = apollo_setup_channel;
1971 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1972 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1973
1974 old_udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1975 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1976 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1977 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1978 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1979 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1980 old_udma_conf),
1981 DEBUG_PROBE);
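	/*
	 * Probe for UDMA support by writing the enable and clock bits for
	 * channel 0/drive 0 and reading them back: bits that stick indicate
	 * UDMA capability, and a sticking CLK66 bit indicates Ultra/66.
	 * The original register value is restored below.
	 */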
1982 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA,
1983 	    old_udma_conf | (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1984 	    APO_UDMA_EN_MTH(0, 0) |
1985 	    APO_UDMA_CLK66(0)));
1986 udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1987 WDCDEBUG_PRINT(("apollo_chip_map: APO_UDMA now 0x%x\n", udma_conf),
1988 DEBUG_PROBE);
1989 if ((udma_conf & (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1990 APO_UDMA_EN_MTH(0, 0))) ==
1991 (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1992 APO_UDMA_EN_MTH(0, 0))) {
1993 if ((udma_conf & APO_UDMA_CLK66(0)) ==
1994 APO_UDMA_CLK66(0)) {
1995 printf("%s: Ultra/66 capable\n",
1996 sc->sc_wdcdev.sc_dev.dv_xname);
1997 sc->sc_wdcdev.UDMA_cap = 4;
1998 } else {
1999 printf("%s: Ultra/33 capable\n",
2000 sc->sc_wdcdev.sc_dev.dv_xname);
2001 sc->sc_wdcdev.UDMA_cap = 2;
2002 }
2003 } else {
2004 sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_UDMA;
2005 }
2006 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, old_udma_conf);
2007
2008 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2009 cp = &sc->pciide_channels[channel];
2010 if (pciide_chansetup(sc, channel, interface) == 0)
2011 continue;
2012
2013 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2014 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2015 printf("%s: %s channel ignored (disabled)\n",
2016 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2017 continue;
2018 }
2019 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2020 pciide_pci_intr);
2021 if (cp->hw_ok == 0)
2022 continue;
2023 if (pciide_chan_candisable(cp)) {
2024 ideconf &= ~APO_IDECONF_EN(channel);
2025 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2026 ideconf);
2027 }
2028 pciide_map_compat_intr(pa, cp, channel, interface);
2029
2030 if (cp->hw_ok == 0)
2031 continue;
2032 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2033 }
2034 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2035 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2036 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2037 }
2038
2039 void
2040 apollo_setup_channel(chp)
2041 struct channel_softc *chp;
2042 {
2043 u_int32_t udmatim_reg, datatim_reg;
2044 u_int8_t idedma_ctl;
2045 int mode, drive;
2046 struct ata_drive_datas *drvp;
2047 struct pciide_channel *cp = (struct pciide_channel*)chp;
2048 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2049
2050 idedma_ctl = 0;
2051 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2052 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2053 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2054 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2055
2056 /* setup DMA if needed */
2057 pciide_channel_dma_setup(cp);
2058
2059 /*
2060 * We can't mix Ultra/33 and Ultra/66 on the same channel, so
2061 * downgrade to Ultra/33 if needed
2062 */
2063 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2064 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
2065 /* both drives UDMA */
2066 if (chp->ch_drive[0].UDMA_mode > 2 &&
2067 chp->ch_drive[1].UDMA_mode <= 2) {
2068 /* drive 0 Ultra/66, drive 1 Ultra/33 */
2069 chp->ch_drive[0].UDMA_mode = 2;
2070 } else if (chp->ch_drive[1].UDMA_mode > 2 &&
2071 chp->ch_drive[0].UDMA_mode <= 2) {
2072 /* drive 1 Ultra/66, drive 0 Ultra/33 */
2073 chp->ch_drive[1].UDMA_mode = 2;
2074 }
2075 }
2076
2077 for (drive = 0; drive < 2; drive++) {
2078 drvp = &chp->ch_drive[drive];
2079 /* If no drive, skip */
2080 if ((drvp->drive_flags & DRIVE) == 0)
2081 continue;
2082 /* add timing values, setup DMA if needed */
2083 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2084 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2085 mode = drvp->PIO_mode;
2086 goto pio;
2087 }
2088 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2089 (drvp->drive_flags & DRIVE_UDMA)) {
2090 /* use Ultra/DMA */
2091 drvp->drive_flags &= ~DRIVE_DMA;
2092 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2093 APO_UDMA_EN_MTH(chp->channel, drive) |
2094 APO_UDMA_TIME(chp->channel, drive,
2095 apollo_udma_tim[drvp->UDMA_mode]);
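			/*
			 * UDMA modes 3 and up (Ultra/66) need the 66MHz
			 * clock enabled for this channel.
			 */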
2096 if (drvp->UDMA_mode > 2)
2097 udmatim_reg |=
2098 APO_UDMA_CLK66(chp->channel);
2099 /* can use PIO timings, MW DMA unused */
2100 mode = drvp->PIO_mode;
2101 } else {
2102 /* use Multiword DMA */
2103 drvp->drive_flags &= ~DRIVE_UDMA;
2104 /* mode = min(pio, dma+2) */
2105 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2106 mode = drvp->PIO_mode;
2107 else
2108 mode = drvp->DMA_mode + 2;
2109 }
2110 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2111
2112 pio: /* setup PIO mode */
2113 if (mode <= 2) {
2114 drvp->DMA_mode = 0;
2115 drvp->PIO_mode = 0;
2116 mode = 0;
2117 } else {
2118 drvp->PIO_mode = mode;
2119 drvp->DMA_mode = mode - 2;
2120 }
2121 datatim_reg |=
2122 APO_DATATIM_PULSE(chp->channel, drive,
2123 apollo_pio_set[mode]) |
2124 APO_DATATIM_RECOV(chp->channel, drive,
2125 apollo_pio_rec[mode]);
2126 }
2127 if (idedma_ctl != 0) {
2128 /* Add software bits in status register */
2129 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2130 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2131 idedma_ctl);
2132 }
2133 pciide_print_modes(cp);
2134 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2135 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2136 }
2137
2138 void
2139 cmd_channel_map(pa, sc, channel)
2140 struct pci_attach_args *pa;
2141 struct pciide_softc *sc;
2142 int channel;
2143 {
2144 struct pciide_channel *cp = &sc->pciide_channels[channel];
2145 bus_size_t cmdsize, ctlsize;
2146 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2147 int interface;
2148
2149 /*
2150 * The 0648/0649 can be told to identify as a RAID controller.
2151 	 * In this case, we have to fake the interface.
2152 */
2153 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2154 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2155 PCIIDE_INTERFACE_SETTABLE(1);
2156 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2157 CMD_CONF_DSA1)
2158 interface |= PCIIDE_INTERFACE_PCI(0) |
2159 PCIIDE_INTERFACE_PCI(1);
2160 } else {
2161 interface = PCI_INTERFACE(pa->pa_class);
2162 }
2163
2164 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2165 cp->name = PCIIDE_CHANNEL_NAME(channel);
2166 cp->wdc_channel.channel = channel;
2167 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2168
2169 if (channel > 0) {
2170 cp->wdc_channel.ch_queue =
2171 sc->pciide_channels[0].wdc_channel.ch_queue;
2172 } else {
2173 cp->wdc_channel.ch_queue =
2174 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2175 }
2176 if (cp->wdc_channel.ch_queue == NULL) {
2177 		printf("%s %s channel: "
2178 		    "can't allocate memory for command queue\n",
2179 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2180 return;
2181 }
2182
2183 printf("%s: %s channel %s to %s mode\n",
2184 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2185 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2186 "configured" : "wired",
2187 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2188 "native-PCI" : "compatibility");
2189
2190 /*
2191 * with a CMD PCI64x, if we get here, the first channel is enabled:
2192 * there's no way to disable the first channel without disabling
2193 * the whole device
2194 */
2195 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2196 printf("%s: %s channel ignored (disabled)\n",
2197 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2198 return;
2199 }
2200
2201 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2202 if (cp->hw_ok == 0)
2203 return;
2204 if (channel == 1) {
2205 if (pciide_chan_candisable(cp)) {
2206 ctrl &= ~CMD_CTRL_2PORT;
2207 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2208 CMD_CTRL, ctrl);
2209 }
2210 }
2211 pciide_map_compat_intr(pa, cp, channel, interface);
2212 }
2213
2214 int
2215 cmd_pci_intr(arg)
2216 void *arg;
2217 {
2218 struct pciide_softc *sc = arg;
2219 struct pciide_channel *cp;
2220 struct channel_softc *wdc_cp;
2221 int i, rv, crv;
2222 u_int32_t priirq, secirq;
2223
2224 rv = 0;
2225 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2226 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2227 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2228 cp = &sc->pciide_channels[i];
2229 wdc_cp = &cp->wdc_channel;
2230 /* If a compat channel skip. */
2231 if (cp->compat)
2232 continue;
2233 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2234 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2235 crv = wdcintr(wdc_cp);
2236 if (crv == 0)
2237 printf("%s:%d: bogus intr\n",
2238 sc->sc_wdcdev.sc_dev.dv_xname, i);
2239 else
2240 rv = 1;
2241 }
2242 }
2243 return rv;
2244 }
2245
2246 void
2247 cmd_chip_map(sc, pa)
2248 struct pciide_softc *sc;
2249 struct pci_attach_args *pa;
2250 {
2251 int channel;
2252
2253 /*
2254 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2255 	 * and the base address registers can be disabled at
2256 * hardware level. In this case, the device is wired
2257 * in compat mode and its first channel is always enabled,
2258 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2259 * In fact, it seems that the first channel of the CMD PCI0640
2260 * can't be disabled.
2261 */
2262
2263 #ifdef PCIIDE_CMD064x_DISABLE
2264 if (pciide_chipen(sc, pa) == 0)
2265 return;
2266 #endif
2267
2268 printf("%s: hardware does not support DMA\n",
2269 sc->sc_wdcdev.sc_dev.dv_xname);
2270 sc->sc_dma_ok = 0;
2271
2272 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2273 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2274 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2275
2276 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2277 cmd_channel_map(pa, sc, channel);
2278 }
2279 }
2280
2281 void
2282 cmd0643_9_chip_map(sc, pa)
2283 struct pciide_softc *sc;
2284 struct pci_attach_args *pa;
2285 {
2286 struct pciide_channel *cp;
2287 int channel;
2288 int rev = PCI_REVISION(
2289 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2290
2291 /*
2292 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2293 	 * and the base address registers can be disabled at
2294 * hardware level. In this case, the device is wired
2295 * in compat mode and its first channel is always enabled,
2296 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2297 * In fact, it seems that the first channel of the CMD PCI0640
2298 * can't be disabled.
2299 */
2300
2301 #ifdef PCIIDE_CMD064x_DISABLE
2302 if (pciide_chipen(sc, pa) == 0)
2303 return;
2304 #endif
2305 printf("%s: bus-master DMA support present",
2306 sc->sc_wdcdev.sc_dev.dv_xname);
2307 pciide_mapreg_dma(sc, pa);
2308 printf("\n");
2309 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2310 WDC_CAPABILITY_MODE;
2311 if (sc->sc_dma_ok) {
2312 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2313 switch (sc->sc_pp->ide_product) {
2314 case PCI_PRODUCT_CMDTECH_649:
2315 case PCI_PRODUCT_CMDTECH_648:
2316 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2317 sc->sc_wdcdev.UDMA_cap = 4;
2318 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2319 break;
2320 case PCI_PRODUCT_CMDTECH_646:
2321 if (rev >= CMD0646U2_REV) {
2322 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2323 sc->sc_wdcdev.UDMA_cap = 2;
2324 } else if (rev >= CMD0646U_REV) {
2325 /*
2326 * Linux's driver claims that the 646U is broken
2327 * with UDMA. Only enable it if we know what we're
2328 * doing
2329 */
2330 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2331 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2332 sc->sc_wdcdev.UDMA_cap = 2;
2333 #endif
2334 				/* explicitly disable UDMA */
2335 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2336 CMD_UDMATIM(0), 0);
2337 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2338 CMD_UDMATIM(1), 0);
2339 }
2340 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2341 break;
2342 default:
2343 sc->sc_wdcdev.irqack = pciide_irqack;
2344 }
2345 }
2346
2347 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2348 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2349 sc->sc_wdcdev.PIO_cap = 4;
2350 sc->sc_wdcdev.DMA_cap = 2;
2351 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2352
2353 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2354 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2355 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2356 DEBUG_PROBE);
2357
2358 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2359 cp = &sc->pciide_channels[channel];
2360 cmd_channel_map(pa, sc, channel);
2361 if (cp->hw_ok == 0)
2362 continue;
2363 cmd0643_9_setup_channel(&cp->wdc_channel);
2364 }
2365 /*
2366 * note - this also makes sure we clear the irq disable and reset
2367 * bits
2368 */
2369 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2370 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2371 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2372 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2373 DEBUG_PROBE);
2374 }
2375
2376 void
2377 cmd0643_9_setup_channel(chp)
2378 struct channel_softc *chp;
2379 {
2380 struct ata_drive_datas *drvp;
2381 u_int8_t tim;
2382 u_int32_t idedma_ctl, udma_reg;
2383 int drive;
2384 struct pciide_channel *cp = (struct pciide_channel*)chp;
2385 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2386
2387 idedma_ctl = 0;
2388 /* setup DMA if needed */
2389 pciide_channel_dma_setup(cp);
2390
2391 for (drive = 0; drive < 2; drive++) {
2392 drvp = &chp->ch_drive[drive];
2393 /* If no drive, skip */
2394 if ((drvp->drive_flags & DRIVE) == 0)
2395 continue;
2396 /* add timing values, setup DMA if needed */
2397 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2398 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2399 if (drvp->drive_flags & DRIVE_UDMA) {
2400 /* UltraDMA on a 646U2, 0648 or 0649 */
2401 drvp->drive_flags &= ~DRIVE_DMA;
2402 udma_reg = pciide_pci_read(sc->sc_pc,
2403 sc->sc_tag, CMD_UDMATIM(chp->channel));
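				/*
				 * CMD_BICSR_80 apparently reports an
				 * 80-conductor cable on the channel; without
				 * it, cap UDMA at mode 2 (Ultra/33).
				 */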
2404 if (drvp->UDMA_mode > 2 &&
2405 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2406 CMD_BICSR) &
2407 CMD_BICSR_80(chp->channel)) == 0)
2408 drvp->UDMA_mode = 2;
2409 if (drvp->UDMA_mode > 2)
2410 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2411 else if (sc->sc_wdcdev.UDMA_cap > 2)
2412 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2413 udma_reg |= CMD_UDMATIM_UDMA(drive);
2414 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2415 CMD_UDMATIM_TIM_OFF(drive));
2416 udma_reg |=
2417 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2418 CMD_UDMATIM_TIM_OFF(drive));
2419 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2420 CMD_UDMATIM(chp->channel), udma_reg);
2421 } else {
2422 /*
2423 * use Multiword DMA.
2424 * Timings will be used for both PIO and DMA,
2425 				 * so adjust the DMA mode if needed.
2426 				 * If we have a 0646U2/8/9, turn off UDMA.
2427 */
2428 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2429 udma_reg = pciide_pci_read(sc->sc_pc,
2430 sc->sc_tag,
2431 CMD_UDMATIM(chp->channel));
2432 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2433 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2434 CMD_UDMATIM(chp->channel),
2435 udma_reg);
2436 }
2437 if (drvp->PIO_mode >= 3 &&
2438 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2439 drvp->DMA_mode = drvp->PIO_mode - 2;
2440 }
2441 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2442 }
2443 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2444 }
2445 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2446 CMD_DATA_TIM(chp->channel, drive), tim);
2447 }
2448 if (idedma_ctl != 0) {
2449 /* Add software bits in status register */
2450 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2451 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2452 idedma_ctl);
2453 }
2454 pciide_print_modes(cp);
2455 }
2456
2457 void
2458 cmd646_9_irqack(chp)
2459 struct channel_softc *chp;
2460 {
2461 u_int32_t priirq, secirq;
2462 struct pciide_channel *cp = (struct pciide_channel*)chp;
2463 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2464
2465 if (chp->channel == 0) {
2466 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2467 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2468 } else {
2469 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2470 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2471 }
2472 pciide_irqack(chp);
2473 }
2474
2475 void
2476 cy693_chip_map(sc, pa)
2477 struct pciide_softc *sc;
2478 struct pci_attach_args *pa;
2479 {
2480 struct pciide_channel *cp;
2481 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2482 bus_size_t cmdsize, ctlsize;
2483
2484 if (pciide_chipen(sc, pa) == 0)
2485 return;
2486 /*
2487 	 * This chip has 2 PCI IDE functions, one for the primary and one for
2488 	 * the secondary channel, so we need to call pciide_mapregs_compat()
2489 	 * with the real channel.
2490 */
2491 if (pa->pa_function == 1) {
2492 sc->sc_cy_compatchan = 0;
2493 } else if (pa->pa_function == 2) {
2494 sc->sc_cy_compatchan = 1;
2495 } else {
2496 printf("%s: unexpected PCI function %d\n",
2497 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2498 return;
2499 }
2500 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2501 printf("%s: bus-master DMA support present",
2502 sc->sc_wdcdev.sc_dev.dv_xname);
2503 pciide_mapreg_dma(sc, pa);
2504 } else {
2505 printf("%s: hardware does not support DMA",
2506 sc->sc_wdcdev.sc_dev.dv_xname);
2507 sc->sc_dma_ok = 0;
2508 }
2509 printf("\n");
2510
2511 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2512 if (sc->sc_cy_handle == NULL) {
2513 printf("%s: unable to map hyperCache control registers\n",
2514 sc->sc_wdcdev.sc_dev.dv_xname);
2515 sc->sc_dma_ok = 0;
2516 }
2517
2518 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2519 WDC_CAPABILITY_MODE;
2520 if (sc->sc_dma_ok) {
2521 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2522 sc->sc_wdcdev.irqack = pciide_irqack;
2523 }
2524 sc->sc_wdcdev.PIO_cap = 4;
2525 sc->sc_wdcdev.DMA_cap = 2;
2526 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2527
2528 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2529 sc->sc_wdcdev.nchannels = 1;
2530
2531 /* Only one channel for this chip; if we are here it's enabled */
2532 cp = &sc->pciide_channels[0];
2533 sc->wdc_chanarray[0] = &cp->wdc_channel;
2534 cp->name = PCIIDE_CHANNEL_NAME(0);
2535 cp->wdc_channel.channel = 0;
2536 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2537 cp->wdc_channel.ch_queue =
2538 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2539 if (cp->wdc_channel.ch_queue == NULL) {
2540 		printf("%s primary channel: "
2541 		    "can't allocate memory for command queue\n",
2542 sc->sc_wdcdev.sc_dev.dv_xname);
2543 return;
2544 }
2545 printf("%s: primary channel %s to ",
2546 sc->sc_wdcdev.sc_dev.dv_xname,
2547 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2548 "configured" : "wired");
2549 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2550 printf("native-PCI");
2551 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2552 pciide_pci_intr);
2553 } else {
2554 printf("compatibility");
2555 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2556 &cmdsize, &ctlsize);
2557 }
2558 printf(" mode\n");
2559 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2560 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2561 wdcattach(&cp->wdc_channel);
2562 if (pciide_chan_candisable(cp)) {
2563 pci_conf_write(sc->sc_pc, sc->sc_tag,
2564 PCI_COMMAND_STATUS_REG, 0);
2565 }
2566 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2567 if (cp->hw_ok == 0)
2568 return;
2569 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2570 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2571 cy693_setup_channel(&cp->wdc_channel);
2572 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2573 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2574 }
2575
2576 void
2577 cy693_setup_channel(chp)
2578 struct channel_softc *chp;
2579 {
2580 struct ata_drive_datas *drvp;
2581 int drive;
2582 u_int32_t cy_cmd_ctrl;
2583 u_int32_t idedma_ctl;
2584 struct pciide_channel *cp = (struct pciide_channel*)chp;
2585 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2586 int dma_mode = -1;
2587
2588 cy_cmd_ctrl = idedma_ctl = 0;
2589
2590 /* setup DMA if needed */
2591 pciide_channel_dma_setup(cp);
2592
2593 for (drive = 0; drive < 2; drive++) {
2594 drvp = &chp->ch_drive[drive];
2595 /* If no drive, skip */
2596 if ((drvp->drive_flags & DRIVE) == 0)
2597 continue;
2598 /* add timing values, setup DMA if needed */
2599 if (drvp->drive_flags & DRIVE_DMA) {
2600 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2601 /* use Multiword DMA */
2602 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2603 dma_mode = drvp->DMA_mode;
2604 }
2605 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2606 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2607 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2608 CY_CMD_CTRL_IOW_REC_OFF(drive));
2609 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2610 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2611 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2612 CY_CMD_CTRL_IOR_REC_OFF(drive));
2613 }
2614 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2615 chp->ch_drive[0].DMA_mode = dma_mode;
2616 chp->ch_drive[1].DMA_mode = dma_mode;
2617
2618 if (dma_mode == -1)
2619 dma_mode = 0;
2620
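	/*
	 * The cy82c693 has a single DMA timing setting per channel (written
	 * through the hyperCache index registers), so the lowest DMA mode of
	 * the two drives is used for both.
	 */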
2621 if (sc->sc_cy_handle != NULL) {
2622 /* Note: `multiple' is implied. */
2623 cy82c693_write(sc->sc_cy_handle,
2624 (sc->sc_cy_compatchan == 0) ?
2625 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2626 }
2627
2628 pciide_print_modes(cp);
2629
2630 if (idedma_ctl != 0) {
2631 /* Add software bits in status register */
2632 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2633 IDEDMA_CTL, idedma_ctl);
2634 }
2635 }
2636
2637 void
2638 sis_chip_map(sc, pa)
2639 struct pciide_softc *sc;
2640 struct pci_attach_args *pa;
2641 {
2642 struct pciide_channel *cp;
2643 int channel;
2644 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2645 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2646 pcireg_t rev = PCI_REVISION(pa->pa_class);
2647 bus_size_t cmdsize, ctlsize;
2648
2649 if (pciide_chipen(sc, pa) == 0)
2650 return;
2651 printf("%s: bus-master DMA support present",
2652 sc->sc_wdcdev.sc_dev.dv_xname);
2653 pciide_mapreg_dma(sc, pa);
2654 printf("\n");
2655 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2656 WDC_CAPABILITY_MODE;
2657 if (sc->sc_dma_ok) {
2658 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2659 sc->sc_wdcdev.irqack = pciide_irqack;
2660 if (rev > 0xd0)
2661 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2662 }
2663
2664 sc->sc_wdcdev.PIO_cap = 4;
2665 sc->sc_wdcdev.DMA_cap = 2;
2666 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2667 sc->sc_wdcdev.UDMA_cap = 2;
2668 sc->sc_wdcdev.set_modes = sis_setup_channel;
2669
2670 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2671 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2672
2673 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2674 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2675 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2676
2677 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2678 cp = &sc->pciide_channels[channel];
2679 if (pciide_chansetup(sc, channel, interface) == 0)
2680 continue;
2681 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2682 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2683 printf("%s: %s channel ignored (disabled)\n",
2684 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2685 continue;
2686 }
2687 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2688 pciide_pci_intr);
2689 if (cp->hw_ok == 0)
2690 continue;
2691 if (pciide_chan_candisable(cp)) {
2692 if (channel == 0)
2693 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2694 else
2695 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2696 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2697 sis_ctr0);
2698 }
2699 pciide_map_compat_intr(pa, cp, channel, interface);
2700 if (cp->hw_ok == 0)
2701 continue;
2702 sis_setup_channel(&cp->wdc_channel);
2703 }
2704 }
2705
2706 void
2707 sis_setup_channel(chp)
2708 struct channel_softc *chp;
2709 {
2710 struct ata_drive_datas *drvp;
2711 int drive;
2712 u_int32_t sis_tim;
2713 u_int32_t idedma_ctl;
2714 struct pciide_channel *cp = (struct pciide_channel*)chp;
2715 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2716
2717 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2718 "channel %d 0x%x\n", chp->channel,
2719 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2720 DEBUG_PROBE);
2721 sis_tim = 0;
2722 idedma_ctl = 0;
2723 /* setup DMA if needed */
2724 pciide_channel_dma_setup(cp);
2725
2726 for (drive = 0; drive < 2; drive++) {
2727 drvp = &chp->ch_drive[drive];
2728 /* If no drive, skip */
2729 if ((drvp->drive_flags & DRIVE) == 0)
2730 continue;
2731 /* add timing values, setup DMA if needed */
2732 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2733 (drvp->drive_flags & DRIVE_UDMA) == 0)
2734 goto pio;
2735
2736 if (drvp->drive_flags & DRIVE_UDMA) {
2737 /* use Ultra/DMA */
2738 drvp->drive_flags &= ~DRIVE_DMA;
2739 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2740 SIS_TIM_UDMA_TIME_OFF(drive);
2741 sis_tim |= SIS_TIM_UDMA_EN(drive);
2742 } else {
2743 /*
2744 * use Multiword DMA
2745 * Timings will be used for both PIO and DMA,
2746 * so adjust DMA mode if needed
2747 */
2748 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2749 drvp->PIO_mode = drvp->DMA_mode + 2;
2750 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2751 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2752 drvp->PIO_mode - 2 : 0;
2753 if (drvp->DMA_mode == 0)
2754 drvp->PIO_mode = 0;
2755 }
2756 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2757 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2758 SIS_TIM_ACT_OFF(drive);
2759 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2760 SIS_TIM_REC_OFF(drive);
2761 }
2762 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2763 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2764 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2765 if (idedma_ctl != 0) {
2766 /* Add software bits in status register */
2767 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2768 IDEDMA_CTL, idedma_ctl);
2769 }
2770 pciide_print_modes(cp);
2771 }
2772
2773 void
2774 acer_chip_map(sc, pa)
2775 struct pciide_softc *sc;
2776 struct pci_attach_args *pa;
2777 {
2778 struct pciide_channel *cp;
2779 int channel;
2780 pcireg_t cr, interface;
2781 bus_size_t cmdsize, ctlsize;
2782 pcireg_t rev = PCI_REVISION(pa->pa_class);
2783
2784 if (pciide_chipen(sc, pa) == 0)
2785 return;
2786 printf("%s: bus-master DMA support present",
2787 sc->sc_wdcdev.sc_dev.dv_xname);
2788 pciide_mapreg_dma(sc, pa);
2789 printf("\n");
2790 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2791 WDC_CAPABILITY_MODE;
2792 if (sc->sc_dma_ok) {
2793 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2794 if (rev >= 0x20)
2795 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2796 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2797 sc->sc_wdcdev.irqack = pciide_irqack;
2798 }
2799
2800 sc->sc_wdcdev.PIO_cap = 4;
2801 sc->sc_wdcdev.DMA_cap = 2;
2802 sc->sc_wdcdev.UDMA_cap = 2;
2803 sc->sc_wdcdev.set_modes = acer_setup_channel;
2804 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2805 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2806
2807 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2808 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2809 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2810
2811 /* Enable "microsoft register bits" R/W. */
2812 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2813 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2814 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2815 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2816 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2817 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2818 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2819 ~ACER_CHANSTATUSREGS_RO);
2820 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2821 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2822 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2823 /* Don't use cr, re-read the real register content instead */
2824 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2825 PCI_CLASS_REG));
2826
2827 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2828 cp = &sc->pciide_channels[channel];
2829 if (pciide_chansetup(sc, channel, interface) == 0)
2830 continue;
2831 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2832 printf("%s: %s channel ignored (disabled)\n",
2833 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2834 continue;
2835 }
2836 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2837 acer_pci_intr);
2838 if (cp->hw_ok == 0)
2839 continue;
2840 if (pciide_chan_candisable(cp)) {
2841 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2842 pci_conf_write(sc->sc_pc, sc->sc_tag,
2843 PCI_CLASS_REG, cr);
2844 }
2845 pciide_map_compat_intr(pa, cp, channel, interface);
2846 acer_setup_channel(&cp->wdc_channel);
2847 }
2848 }
2849
2850 void
2851 acer_setup_channel(chp)
2852 struct channel_softc *chp;
2853 {
2854 struct ata_drive_datas *drvp;
2855 int drive;
2856 u_int32_t acer_fifo_udma;
2857 u_int32_t idedma_ctl;
2858 struct pciide_channel *cp = (struct pciide_channel*)chp;
2859 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2860
2861 idedma_ctl = 0;
2862 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2863 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2864 acer_fifo_udma), DEBUG_PROBE);
2865 /* setup DMA if needed */
2866 pciide_channel_dma_setup(cp);
2867
2868 for (drive = 0; drive < 2; drive++) {
2869 drvp = &chp->ch_drive[drive];
2870 /* If no drive, skip */
2871 if ((drvp->drive_flags & DRIVE) == 0)
2872 continue;
2873 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2874 "channel %d drive %d 0x%x\n", chp->channel, drive,
2875 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2876 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2877 /* clear FIFO/DMA mode */
2878 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2879 ACER_UDMA_EN(chp->channel, drive) |
2880 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2881
2882 /* add timing values, setup DMA if needed */
2883 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2884 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2885 acer_fifo_udma |=
2886 ACER_FTH_OPL(chp->channel, drive, 0x1);
2887 goto pio;
2888 }
2889
2890 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2891 if (drvp->drive_flags & DRIVE_UDMA) {
2892 /* use Ultra/DMA */
2893 drvp->drive_flags &= ~DRIVE_DMA;
2894 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2895 acer_fifo_udma |=
2896 ACER_UDMA_TIM(chp->channel, drive,
2897 acer_udma[drvp->UDMA_mode]);
2898 } else {
2899 /*
2900 * use Multiword DMA
2901 * Timings will be used for both PIO and DMA,
2902 * so adjust DMA mode if needed
2903 */
2904 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2905 drvp->PIO_mode = drvp->DMA_mode + 2;
2906 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2907 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2908 drvp->PIO_mode - 2 : 0;
2909 if (drvp->DMA_mode == 0)
2910 drvp->PIO_mode = 0;
2911 }
2912 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2913 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2914 ACER_IDETIM(chp->channel, drive),
2915 acer_pio[drvp->PIO_mode]);
2916 }
2917 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2918 acer_fifo_udma), DEBUG_PROBE);
2919 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2920 if (idedma_ctl != 0) {
2921 /* Add software bits in status register */
2922 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2923 IDEDMA_CTL, idedma_ctl);
2924 }
2925 pciide_print_modes(cp);
2926 }
2927
2928 int
2929 acer_pci_intr(arg)
2930 void *arg;
2931 {
2932 struct pciide_softc *sc = arg;
2933 struct pciide_channel *cp;
2934 struct channel_softc *wdc_cp;
2935 int i, rv, crv;
2936 u_int32_t chids;
2937
2938 rv = 0;
2939 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2940 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2941 cp = &sc->pciide_channels[i];
2942 wdc_cp = &cp->wdc_channel;
2943 /* If a compat channel skip. */
2944 if (cp->compat)
2945 continue;
2946 if (chids & ACER_CHIDS_INT(i)) {
2947 crv = wdcintr(wdc_cp);
2948 if (crv == 0)
2949 printf("%s:%d: bogus intr\n",
2950 sc->sc_wdcdev.sc_dev.dv_xname, i);
2951 else
2952 rv = 1;
2953 }
2954 }
2955 return rv;
2956 }
2957
2958 void
2959 hpt_chip_map(sc, pa)
2960 struct pciide_softc *sc;
2961 struct pci_attach_args *pa;
2962 {
2963 struct pciide_channel *cp;
2964 int i, compatchan, revision;
2965 pcireg_t interface;
2966 bus_size_t cmdsize, ctlsize;
2967
2968 if (pciide_chipen(sc, pa) == 0)
2969 return;
2970 revision = PCI_REVISION(pa->pa_class);
2971
2972 /*
2973 	 * When the chip is in native mode it identifies itself as a
2974 	 * 'misc mass storage' device. Fake the interface in this case.
2975 */
2976 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
2977 interface = PCI_INTERFACE(pa->pa_class);
2978 } else {
2979 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2980 PCIIDE_INTERFACE_PCI(0);
2981 if (revision == HPT370_REV)
2982 interface |= PCIIDE_INTERFACE_PCI(1);
2983 }
2984
2985 printf("%s: bus-master DMA support present",
2986 sc->sc_wdcdev.sc_dev.dv_xname);
2987 pciide_mapreg_dma(sc, pa);
2988 printf("\n");
2989 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2990 WDC_CAPABILITY_MODE;
2991 if (sc->sc_dma_ok) {
2992 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2993 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2994 sc->sc_wdcdev.irqack = pciide_irqack;
2995 }
2996 sc->sc_wdcdev.PIO_cap = 4;
2997 sc->sc_wdcdev.DMA_cap = 2;
2998
2999 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3000 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3001 if (revision == HPT366_REV) {
3002 sc->sc_wdcdev.UDMA_cap = 4;
3003 /*
3004 * The 366 has 2 PCI IDE functions, one for primary and one
3005 * for secondary. So we need to call pciide_mapregs_compat()
3006 * with the real channel
3007 */
3008 if (pa->pa_function == 0) {
3009 compatchan = 0;
3010 } else if (pa->pa_function == 1) {
3011 compatchan = 1;
3012 } else {
3013 printf("%s: unexpected PCI function %d\n",
3014 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3015 return;
3016 }
3017 sc->sc_wdcdev.nchannels = 1;
3018 } else {
3019 sc->sc_wdcdev.nchannels = 2;
3020 sc->sc_wdcdev.UDMA_cap = 5;
3021 }
3022 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3023 cp = &sc->pciide_channels[i];
3024 if (sc->sc_wdcdev.nchannels > 1) {
3025 compatchan = i;
3026 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3027 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3028 printf("%s: %s channel ignored (disabled)\n",
3029 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3030 continue;
3031 }
3032 }
3033 if (pciide_chansetup(sc, i, interface) == 0)
3034 continue;
3035 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3036 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3037 &ctlsize, hpt_pci_intr);
3038 } else {
3039 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3040 &cmdsize, &ctlsize);
3041 }
3042 if (cp->hw_ok == 0)
3043 return;
3044 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3045 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3046 wdcattach(&cp->wdc_channel);
3047 hpt_setup_channel(&cp->wdc_channel);
3048 }
3049 if (revision == HPT370_REV) {
3050 /*
3051 * HPT370_REV has a bit to disable interrupts, make sure
3052 * to clear it
3053 */
3054 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3055 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3056 ~HPT_CSEL_IRQDIS);
3057 }
3058 return;
3059 }
3060
3061 void
3062 hpt_setup_channel(chp)
3063 struct channel_softc *chp;
3064 {
3065 struct ata_drive_datas *drvp;
3066 int drive;
3067 int cable;
3068 u_int32_t before, after;
3069 u_int32_t idedma_ctl;
3070 struct pciide_channel *cp = (struct pciide_channel*)chp;
3071 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3072
3073 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3074
3075 /* setup DMA if needed */
3076 pciide_channel_dma_setup(cp);
3077
3078 idedma_ctl = 0;
3079
3080 /* Per drive settings */
3081 for (drive = 0; drive < 2; drive++) {
3082 drvp = &chp->ch_drive[drive];
3083 /* If no drive, skip */
3084 if ((drvp->drive_flags & DRIVE) == 0)
3085 continue;
3086 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3087 HPT_IDETIM(chp->channel, drive));
3088
3089 /* add timing values, setup DMA if needed */
3090 if (drvp->drive_flags & DRIVE_UDMA) {
3091 /* use Ultra/DMA */
3092 drvp->drive_flags &= ~DRIVE_DMA;
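			/*
			 * HPT_CSEL_CBLID set for the channel appears to mean
			 * a 40-conductor cable, so limit UDMA to mode 2
			 * (Ultra/33).
			 */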
3093 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3094 drvp->UDMA_mode > 2)
3095 drvp->UDMA_mode = 2;
3096 after = (sc->sc_wdcdev.nchannels == 2) ?
3097 hpt370_udma[drvp->UDMA_mode] :
3098 hpt366_udma[drvp->UDMA_mode];
3099 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3100 } else if (drvp->drive_flags & DRIVE_DMA) {
3101 /*
3102 * use Multiword DMA.
3103 * Timings will be used for both PIO and DMA, so adjust
3104 * DMA mode if needed
3105 */
3106 if (drvp->PIO_mode >= 3 &&
3107 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3108 drvp->DMA_mode = drvp->PIO_mode - 2;
3109 }
3110 after = (sc->sc_wdcdev.nchannels == 2) ?
3111 hpt370_dma[drvp->DMA_mode] :
3112 hpt366_dma[drvp->DMA_mode];
3113 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3114 } else {
3115 /* PIO only */
3116 after = (sc->sc_wdcdev.nchannels == 2) ?
3117 hpt370_pio[drvp->PIO_mode] :
3118 hpt366_pio[drvp->PIO_mode];
3119 }
3120 pci_conf_write(sc->sc_pc, sc->sc_tag,
3121 HPT_IDETIM(chp->channel, drive), after);
3122 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3123 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3124 after, before), DEBUG_PROBE);
3125 }
3126 if (idedma_ctl != 0) {
3127 /* Add software bits in status register */
3128 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3129 IDEDMA_CTL, idedma_ctl);
3130 }
3131 pciide_print_modes(cp);
3132 }
3133
3134 int
3135 hpt_pci_intr(arg)
3136 void *arg;
3137 {
3138 struct pciide_softc *sc = arg;
3139 struct pciide_channel *cp;
3140 struct channel_softc *wdc_cp;
3141 int rv = 0;
3142 int dmastat, i, crv;
3143
3144 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3145 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3146 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3147 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3148 continue;
3149 cp = &sc->pciide_channels[i];
3150 wdc_cp = &cp->wdc_channel;
3151 crv = wdcintr(wdc_cp);
3152 if (crv == 0) {
3153 printf("%s:%d: bogus intr\n",
3154 sc->sc_wdcdev.sc_dev.dv_xname, i);
3155 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3156 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3157 } else
3158 rv = 1;
3159 }
3160 return rv;
3161 }
3162
3163
3164 /* Macros to test product */
3165 #define PDC_IS_262(sc) \
3166 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3167 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3168 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3169 #define PDC_IS_265(sc) \
3170 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3171 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
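/*
 * PDC_IS_262 is true for the Ultra/66 (PDC20262) and newer chips;
 * PDC_IS_265 only for the Ultra/100 family, which uses
 * pdc20265_pci_intr() below.
 */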
3172
3173 void
3174 pdc202xx_chip_map(sc, pa)
3175 struct pciide_softc *sc;
3176 struct pci_attach_args *pa;
3177 {
3178 struct pciide_channel *cp;
3179 int channel;
3180 pcireg_t interface, st, mode;
3181 bus_size_t cmdsize, ctlsize;
3182
3183 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3184 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3185 DEBUG_PROBE);
3186 if (pciide_chipen(sc, pa) == 0)
3187 return;
3188
3189 /* turn off RAID mode */
3190 st &= ~PDC2xx_STATE_IDERAID;
3191
3192 /*
3193 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3194 	 * RAID mode; we have to fake the interface.
3195 */
3196 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3197 if (st & PDC2xx_STATE_NATIVE)
3198 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3199
3200 printf("%s: bus-master DMA support present",
3201 sc->sc_wdcdev.sc_dev.dv_xname);
3202 pciide_mapreg_dma(sc, pa);
3203 printf("\n");
3204 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3205 WDC_CAPABILITY_MODE;
3206 if (sc->sc_dma_ok) {
3207 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3208 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3209 sc->sc_wdcdev.irqack = pciide_irqack;
3210 }
3211 sc->sc_wdcdev.PIO_cap = 4;
3212 sc->sc_wdcdev.DMA_cap = 2;
3213 if (PDC_IS_265(sc))
3214 sc->sc_wdcdev.UDMA_cap = 5;
3215 else if (PDC_IS_262(sc))
3216 sc->sc_wdcdev.UDMA_cap = 4;
3217 else
3218 sc->sc_wdcdev.UDMA_cap = 2;
3219 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3220 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3221 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3222
3223 /* setup failsafe defaults */
3224 mode = 0;
3225 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3226 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3227 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3228 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3229 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3230 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3231 "initial timings 0x%x, now 0x%x\n", channel,
3232 pci_conf_read(sc->sc_pc, sc->sc_tag,
3233 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3234 DEBUG_PROBE);
3235 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3236 mode | PDC2xx_TIM_IORDYp);
3237 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3238 "initial timings 0x%x, now 0x%x\n", channel,
3239 pci_conf_read(sc->sc_pc, sc->sc_tag,
3240 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3241 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3242 mode);
3243 }
3244
3245 mode = PDC2xx_SCR_DMA;
3246 if (PDC_IS_265(sc)) {
3247 /* the BIOS set it up this way */
3248 mode = PDC2xx_SCR_SET_GEN(mode, 0x3);
3249 mode |= 0x80000000;
3250 } else if (PDC_IS_262(sc)) {
3251 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3252 } else {
3253 /* the BIOS set it up this way */
3254 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3255 }
3256 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3257 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3258 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3259 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3260 DEBUG_PROBE);
3261 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3262
3263 /* controller initial state register is OK even without BIOS */
3264 /* Set DMA mode to IDE DMA compatibility */
3265 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3266 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3267 DEBUG_PROBE);
3268 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3269 mode | 0x1);
3270 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3271 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3272 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3273 mode | 0x1);
3274
3275 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3276 cp = &sc->pciide_channels[channel];
3277 if (pciide_chansetup(sc, channel, interface) == 0)
3278 continue;
3279 if ((st & (PDC_IS_262(sc) ?
3280 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3281 printf("%s: %s channel ignored (disabled)\n",
3282 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3283 continue;
3284 }
3285 if (PDC_IS_265(sc))
3286 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3287 pdc20265_pci_intr);
3288 else
3289 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3290 pdc202xx_pci_intr);
3291 if (cp->hw_ok == 0)
3292 continue;
3293 if (pciide_chan_candisable(cp))
3294 st &= ~(PDC_IS_262(sc) ?
3295 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3296 pciide_map_compat_intr(pa, cp, channel, interface);
3297 pdc202xx_setup_channel(&cp->wdc_channel);
3298 }
3299 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3300 DEBUG_PROBE);
3301 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3302 return;
3303 }
3304
3305 void
3306 pdc202xx_setup_channel(chp)
3307 struct channel_softc *chp;
3308 {
3309 struct ata_drive_datas *drvp;
3310 int drive;
3311 pcireg_t mode, st;
3312 u_int32_t idedma_ctl, scr, atapi;
3313 struct pciide_channel *cp = (struct pciide_channel*)chp;
3314 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3315 int channel = chp->channel;
3316
3317 /* setup DMA if needed */
3318 pciide_channel_dma_setup(cp);
3319
3320 idedma_ctl = 0;
3321 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3322 sc->sc_wdcdev.sc_dev.dv_xname,
3323 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3324 DEBUG_PROBE);
3325
3326 /* Per channel settings */
3327 if (PDC_IS_262(sc)) {
3328 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3329 PDC262_U66);
3330 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3331 		/* Trim UDMA mode */
3332 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3333 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3334 chp->ch_drive[0].UDMA_mode <= 2) ||
3335 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3336 chp->ch_drive[1].UDMA_mode <= 2)) {
3337 if (chp->ch_drive[0].UDMA_mode > 2)
3338 chp->ch_drive[0].UDMA_mode = 2;
3339 if (chp->ch_drive[1].UDMA_mode > 2)
3340 chp->ch_drive[1].UDMA_mode = 2;
3341 }
3342 /* Set U66 if needed */
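		/*
		 * PDC262_U66_EN presumably switches the channel to the 66MHz
		 * base clock needed for UDMA modes 3 and 4.
		 */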
3343 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3344 chp->ch_drive[0].UDMA_mode > 2) ||
3345 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3346 chp->ch_drive[1].UDMA_mode > 2))
3347 scr |= PDC262_U66_EN(channel);
3348 else
3349 scr &= ~PDC262_U66_EN(channel);
3350 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3351 PDC262_U66, scr);
3352 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3353 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3354 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3355 PDC262_ATAPI(channel))), DEBUG_PROBE);
3356 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3357 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3358 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3359 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3360 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3361 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3362 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3363 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3364 atapi = 0;
3365 else
3366 atapi = PDC262_ATAPI_UDMA;
3367 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3368 PDC262_ATAPI(channel), atapi);
3369 }
3370 }
3371 for (drive = 0; drive < 2; drive++) {
3372 drvp = &chp->ch_drive[drive];
3373 /* If no drive, skip */
3374 if ((drvp->drive_flags & DRIVE) == 0)
3375 continue;
3376 mode = 0;
3377 if (drvp->drive_flags & DRIVE_UDMA) {
3378 /* use Ultra/DMA */
3379 drvp->drive_flags &= ~DRIVE_DMA;
3380 mode = PDC2xx_TIM_SET_MB(mode,
3381 pdc2xx_udma_mb[drvp->UDMA_mode]);
3382 mode = PDC2xx_TIM_SET_MC(mode,
3383 pdc2xx_udma_mc[drvp->UDMA_mode]);
3385 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3386 } else if (drvp->drive_flags & DRIVE_DMA) {
3387 mode = PDC2xx_TIM_SET_MB(mode,
3388 pdc2xx_dma_mb[drvp->DMA_mode]);
3389 mode = PDC2xx_TIM_SET_MC(mode,
3390 pdc2xx_dma_mc[drvp->DMA_mode]);
3391 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3392 } else {
3393 mode = PDC2xx_TIM_SET_MB(mode,
3394 pdc2xx_dma_mb[0]);
3395 mode = PDC2xx_TIM_SET_MC(mode,
3396 pdc2xx_dma_mc[0]);
3397 }
3398 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3399 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3400 if (drvp->drive_flags & DRIVE_ATA)
3401 mode |= PDC2xx_TIM_PRE;
3402 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3403 if (drvp->PIO_mode >= 3) {
3404 mode |= PDC2xx_TIM_IORDY;
3405 if (drive == 0)
3406 mode |= PDC2xx_TIM_IORDYp;
3407 }
3408 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3409 "timings 0x%x\n",
3410 sc->sc_wdcdev.sc_dev.dv_xname,
3411 chp->channel, drive, mode), DEBUG_PROBE);
3412 pci_conf_write(sc->sc_pc, sc->sc_tag,
3413 PDC2xx_TIM(chp->channel, drive), mode);
3414 }
3415 if (idedma_ctl != 0) {
3416 /* Add software bits in status register */
3417 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3418 IDEDMA_CTL, idedma_ctl);
3419 }
3420 pciide_print_modes(cp);
3421 }
3422
3423 int
3424 pdc202xx_pci_intr(arg)
3425 void *arg;
3426 {
3427 struct pciide_softc *sc = arg;
3428 struct pciide_channel *cp;
3429 struct channel_softc *wdc_cp;
3430 int i, rv, crv;
3431 u_int32_t scr;
3432
3433 rv = 0;
3434 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3435 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3436 cp = &sc->pciide_channels[i];
3437 wdc_cp = &cp->wdc_channel;
3438 /* If a compat channel skip. */
3439 if (cp->compat)
3440 continue;
3441 if (scr & PDC2xx_SCR_INT(i)) {
3442 crv = wdcintr(wdc_cp);
3443 if (crv == 0)
3444 printf("%s:%d: bogus intr (reg 0x%x)\n",
3445 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3446 else
3447 rv = 1;
3448 }
3449 }
3450 return rv;
3451 }
3452
3453 int
3454 pdc20265_pci_intr(arg)
3455 void *arg;
3456 {
3457 struct pciide_softc *sc = arg;
3458 struct pciide_channel *cp;
3459 struct channel_softc *wdc_cp;
3460 int i, rv, crv;
3461 u_int32_t dmastat;
3462
3463 rv = 0;
3464 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3465 cp = &sc->pciide_channels[i];
3466 wdc_cp = &cp->wdc_channel;
3467 /* If a compat channel skip. */
3468 if (cp->compat)
3469 continue;
3470 /*
3471 	 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
3472 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
3473 * So use it instead (requires 2 reg reads instead of 1,
3474 * but we can't do it another way).
3475 */
3476 dmastat = bus_space_read_1(sc->sc_dma_iot,
3477 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3478 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3479 continue;
3480 crv = wdcintr(wdc_cp);
3481 if (crv == 0)
3482 printf("%s:%d: bogus intr\n",
3483 sc->sc_wdcdev.sc_dev.dv_xname, i);
3484 else
3485 rv = 1;
3486 }
3487 return rv;
3488 }
3489
3490 void
3491 opti_chip_map(sc, pa)
3492 struct pciide_softc *sc;
3493 struct pci_attach_args *pa;
3494 {
3495 struct pciide_channel *cp;
3496 bus_size_t cmdsize, ctlsize;
3497 pcireg_t interface;
3498 u_int8_t init_ctrl;
3499 int channel;
3500
3501 if (pciide_chipen(sc, pa) == 0)
3502 return;
3503 printf("%s: bus-master DMA support present",
3504 sc->sc_wdcdev.sc_dev.dv_xname);
3505 pciide_mapreg_dma(sc, pa);
3506 printf("\n");
3507
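/*
* We advertise PIO mode 4 and, if the bus-master DMA registers were
* mapped successfully, multiword DMA mode 2.
*/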
3508 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3509 WDC_CAPABILITY_MODE;
3510 sc->sc_wdcdev.PIO_cap = 4;
3511 if (sc->sc_dma_ok) {
3512 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3513 sc->sc_wdcdev.irqack = pciide_irqack;
3514 sc->sc_wdcdev.DMA_cap = 2;
3515 }
3516 sc->sc_wdcdev.set_modes = opti_setup_channel;
3517
3518 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3519 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3520
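/*
* The Initialization Control register tells us whether the second
* channel has been disabled; the channel loop below skips it in
* that case.
*/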
3521 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3522 OPTI_REG_INIT_CONTROL);
3523
3524 interface = PCI_INTERFACE(pa->pa_class);
3525
3526 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3527 cp = &sc->pciide_channels[channel];
3528 if (pciide_chansetup(sc, channel, interface) == 0)
3529 continue;
3530 if (channel == 1 &&
3531 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3532 printf("%s: %s channel ignored (disabled)\n",
3533 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3534 continue;
3535 }
3536 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3537 pciide_pci_intr);
3538 if (cp->hw_ok == 0)
3539 continue;
3540 pciide_map_compat_intr(pa, cp, channel, interface);
3541 if (cp->hw_ok == 0)
3542 continue;
3543 opti_setup_channel(&cp->wdc_channel);
3544 }
3545 }
3546
3547 void
3548 opti_setup_channel(chp)
3549 struct channel_softc *chp;
3550 {
3551 struct ata_drive_datas *drvp;
3552 struct pciide_channel *cp = (struct pciide_channel*)chp;
3553 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3554 int drive, spd;
3555 int mode[2];
3556 u_int8_t rv, mr;
3557
3558 /*
3559 * Clear the `Delay' and `Address Setup Time' fields of the
3560 * Miscellaneous Register, as well as the register index bits,
3561 * which are set per-drive below.
3562 */
3563 mr = opti_read_config(chp, OPTI_REG_MISC);
3564 mr &= ~(OPTI_MISC_DELAY_MASK |
3565 OPTI_MISC_ADDR_SETUP_MASK | OPTI_MISC_INDEX_MASK);
3566
3567 /* Prime the control register before setting timing values */
3568 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3569
3570 /* Determine the clock rate of the PCI bus the chip is attached to */
3571 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3572 spd &= OPTI_STRAP_PCI_SPEED_MASK;
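/* 'spd' selects the row of the opti_tim_* timing tables used below. */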
3573
3574 /* setup DMA if needed */
3575 pciide_channel_dma_setup(cp);
3576
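/*
* First pass: pick a timing-table index for each drive (PIO modes
* use entries 0-4, multiword DMA modes are mapped to entries 5 and
* up) and resolve Address Setup Time conflicts between the drives.
*/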
3577 for (drive = 0; drive < 2; drive++) {
3578 drvp = &chp->ch_drive[drive];
3579 /* If no drive, skip */
3580 if ((drvp->drive_flags & DRIVE) == 0) {
3581 mode[drive] = -1;
3582 continue;
3583 }
3584
3585 if ((drvp->drive_flags & DRIVE_DMA)) {
3586 /*
3587 * Timings will be used for both PIO and DMA,
3588 * so adjust DMA mode if needed
3589 */
3590 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3591 drvp->PIO_mode = drvp->DMA_mode + 2;
3592 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3593 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3594 drvp->PIO_mode - 2 : 0;
3595 if (drvp->DMA_mode == 0)
3596 drvp->PIO_mode = 0;
3597
3598 mode[drive] = drvp->DMA_mode + 5;
3599 } else
3600 mode[drive] = drvp->PIO_mode;
3601
3602 if (drive && mode[0] >= 0 &&
3603 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3604 /*
3605 * Can't have two drives using different values
3606 * for `Address Setup Time'.
3607 * Slow down the faster drive to compensate.
3608 */
3609 int d = (opti_tim_as[spd][mode[0]] >
3610 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3611
3612 mode[d] = mode[1-d];
3613 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3614 chp->ch_drive[d].DMA_mode = 0;
3615 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3616 }
3617 }
3618
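/*
* Second pass: program the address setup, pulse width, recovery
* time and enhanced-mode values for each drive that is present.
*/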
3619 for (drive = 0; drive < 2; drive++) {
3620 int m;
3621 if ((m = mode[drive]) < 0)
3622 continue;
3623
3624 /* Set the Address Setup Time and select appropriate index */
3625 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3626 rv |= OPTI_MISC_INDEX(drive);
3627 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3628
3629 /* Set the pulse width and recovery timing parameters */
3630 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3631 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3632 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3633 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3634
3635 /* Set the Enhanced Mode register appropriately */
3636 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3637 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3638 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3639 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3640 }
3641
3642 /* Finally, enable the timings */
3643 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3644
3645 pciide_print_modes(cp);
3646 }
3647