1 /* $NetBSD: pciide.c,v 1.153.2.6 2002/11/01 13:23:06 tron Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.153.2.6 2002/11/01 13:23:06 tron Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
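/*
 * For example, setting wdcdebug_pciide_mask to (DEBUG_PROBE | DEBUG_DMA)
 * (i.e. 0x11), either by editing the initializer above or by patching the
 * variable from ddb or a debugger on a running kernel, traces chip probing
 * and DMA table setup through the WDCDEBUG_PRINT() calls below.
 */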
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
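/*
 * Worked example of the byte-lane arithmetic above: pciide_pci_read(pc, tag,
 * 0x43) fetches the 32-bit configuration dword at 0x40 (0x43 & ~0x03) and
 * returns bits 31:24 ((0x43 & 0x03) * 8 == 24); pciide_pci_write(pc, tag,
 * 0x41, val) read-modify-writes the dword at 0x40, replacing only bits 15:8.
 */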
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 static int sis_hostbr_match __P(( struct pci_attach_args *));
187
188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void acer_setup_channel __P((struct channel_softc*));
190 int acer_pci_intr __P((void *));
191
192 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void pdc202xx_setup_channel __P((struct channel_softc*));
194 void pdc20268_setup_channel __P((struct channel_softc*));
195 int pdc202xx_pci_intr __P((void *));
196 int pdc20265_pci_intr __P((void *));
197
198 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void opti_setup_channel __P((struct channel_softc*));
200
201 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
202 void hpt_setup_channel __P((struct channel_softc*));
203 int hpt_pci_intr __P((void *));
204
205 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void acard_setup_channel __P((struct channel_softc*));
207 int acard_pci_intr __P((void *));
208
209 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void serverworks_setup_channel __P((struct channel_softc*));
211 int serverworks_pci_intr __P((void *));
212
213 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void sl82c105_setup_channel __P((struct channel_softc*));
215
216 void pciide_channel_dma_setup __P((struct pciide_channel *));
217 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
218 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
219 void pciide_dma_start __P((void*, int, int));
220 int pciide_dma_finish __P((void*, int, int, int));
221 void pciide_irqack __P((struct channel_softc *));
222 void pciide_print_modes __P((struct pciide_channel *));
223
224 struct pciide_product_desc {
225 u_int32_t ide_product;
226 int ide_flags;
227 const char *ide_name;
228 /* map and setup chip, probe drives */
229 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
230 };
231
232 /* Flags for ide_flags */
233 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
234 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
235
236 /* Default product description for devices not known to this driver */
237 const struct pciide_product_desc default_product_desc = {
238 0,
239 0,
240 "Generic PCI IDE controller",
241 default_chip_map,
242 };
243
244 const struct pciide_product_desc pciide_intel_products[] = {
245 { PCI_PRODUCT_INTEL_82092AA,
246 0,
247 "Intel 82092AA IDE controller",
248 default_chip_map,
249 },
250 { PCI_PRODUCT_INTEL_82371FB_IDE,
251 0,
252 "Intel 82371FB IDE controller (PIIX)",
253 piix_chip_map,
254 },
255 { PCI_PRODUCT_INTEL_82371SB_IDE,
256 0,
257 "Intel 82371SB IDE Interface (PIIX3)",
258 piix_chip_map,
259 },
260 { PCI_PRODUCT_INTEL_82371AB_IDE,
261 0,
262 "Intel 82371AB IDE controller (PIIX4)",
263 piix_chip_map,
264 },
265 { PCI_PRODUCT_INTEL_82440MX_IDE,
266 0,
267 "Intel 82440MX IDE controller",
268 piix_chip_map
269 },
270 { PCI_PRODUCT_INTEL_82801AA_IDE,
271 0,
272 "Intel 82801AA IDE Controller (ICH)",
273 piix_chip_map,
274 },
275 { PCI_PRODUCT_INTEL_82801AB_IDE,
276 0,
277 "Intel 82801AB IDE Controller (ICH0)",
278 piix_chip_map,
279 },
280 { PCI_PRODUCT_INTEL_82801BA_IDE,
281 0,
282 "Intel 82801BA IDE Controller (ICH2)",
283 piix_chip_map,
284 },
285 { PCI_PRODUCT_INTEL_82801BAM_IDE,
286 0,
287 "Intel 82801BAM IDE Controller (ICH2)",
288 piix_chip_map,
289 },
290 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
291 0,
292 "Intel 82801CA IDE Controller",
293 piix_chip_map,
294 },
295 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
296 0,
297 "Intel 82801CA IDE Controller",
298 piix_chip_map,
299 },
300 { PCI_PRODUCT_INTEL_82801DB_IDE,
301 0,
302 "Intel 82801DB IDE Controller (ICH4)",
303 piix_chip_map,
304 },
305 { 0,
306 0,
307 NULL,
308 NULL
309 }
310 };
311
312 const struct pciide_product_desc pciide_amd_products[] = {
313 { PCI_PRODUCT_AMD_PBC756_IDE,
314 0,
315 "Advanced Micro Devices AMD756 IDE Controller",
316 amd7x6_chip_map
317 },
318 { PCI_PRODUCT_AMD_PBC766_IDE,
319 0,
320 "Advanced Micro Devices AMD766 IDE Controller",
321 amd7x6_chip_map
322 },
323 { PCI_PRODUCT_AMD_PBC768_IDE,
324 0,
325 "Advanced Micro Devices AMD768 IDE Controller",
326 amd7x6_chip_map
327 },
328 { 0,
329 0,
330 NULL,
331 NULL
332 }
333 };
334
335 const struct pciide_product_desc pciide_cmd_products[] = {
336 { PCI_PRODUCT_CMDTECH_640,
337 0,
338 "CMD Technology PCI0640",
339 cmd_chip_map
340 },
341 { PCI_PRODUCT_CMDTECH_643,
342 0,
343 "CMD Technology PCI0643",
344 cmd0643_9_chip_map,
345 },
346 { PCI_PRODUCT_CMDTECH_646,
347 0,
348 "CMD Technology PCI0646",
349 cmd0643_9_chip_map,
350 },
351 { PCI_PRODUCT_CMDTECH_648,
352 IDE_PCI_CLASS_OVERRIDE,
353 "CMD Technology PCI0648",
354 cmd0643_9_chip_map,
355 },
356 { PCI_PRODUCT_CMDTECH_649,
357 IDE_PCI_CLASS_OVERRIDE,
358 "CMD Technology PCI0649",
359 cmd0643_9_chip_map,
360 },
361 { 0,
362 0,
363 NULL,
364 NULL
365 }
366 };
367
368 const struct pciide_product_desc pciide_via_products[] = {
369 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
370 0,
371 NULL,
372 apollo_chip_map,
373 },
374 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
375 0,
376 NULL,
377 apollo_chip_map,
378 },
379 { 0,
380 0,
381 NULL,
382 NULL
383 }
384 };
385
386 const struct pciide_product_desc pciide_cypress_products[] = {
387 { PCI_PRODUCT_CONTAQ_82C693,
388 IDE_16BIT_IOSPACE,
389 "Cypress 82C693 IDE Controller",
390 cy693_chip_map,
391 },
392 { 0,
393 0,
394 NULL,
395 NULL
396 }
397 };
398
399 const struct pciide_product_desc pciide_sis_products[] = {
400 { PCI_PRODUCT_SIS_5597_IDE,
401 0,
402 "Silicon Integrated System 5597/5598 IDE controller",
403 sis_chip_map,
404 },
405 { 0,
406 0,
407 NULL,
408 NULL
409 }
410 };
411
412 const struct pciide_product_desc pciide_acer_products[] = {
413 { PCI_PRODUCT_ALI_M5229,
414 0,
415 "Acer Labs M5229 UDMA IDE Controller",
416 acer_chip_map,
417 },
418 { 0,
419 0,
420 NULL,
421 NULL
422 }
423 };
424
425 const struct pciide_product_desc pciide_promise_products[] = {
426 { PCI_PRODUCT_PROMISE_ULTRA33,
427 IDE_PCI_CLASS_OVERRIDE,
428 "Promise Ultra33/ATA Bus Master IDE Accelerator",
429 pdc202xx_chip_map,
430 },
431 { PCI_PRODUCT_PROMISE_ULTRA66,
432 IDE_PCI_CLASS_OVERRIDE,
433 "Promise Ultra66/ATA Bus Master IDE Accelerator",
434 pdc202xx_chip_map,
435 },
436 { PCI_PRODUCT_PROMISE_ULTRA100,
437 IDE_PCI_CLASS_OVERRIDE,
438 "Promise Ultra100/ATA Bus Master IDE Accelerator",
439 pdc202xx_chip_map,
440 },
441 { PCI_PRODUCT_PROMISE_ULTRA100X,
442 IDE_PCI_CLASS_OVERRIDE,
443 "Promise Ultra100/ATA Bus Master IDE Accelerator",
444 pdc202xx_chip_map,
445 },
446 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
447 IDE_PCI_CLASS_OVERRIDE,
448 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
449 pdc202xx_chip_map,
450 },
451 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
452 IDE_PCI_CLASS_OVERRIDE,
453 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
454 pdc202xx_chip_map,
455 },
456 { PCI_PRODUCT_PROMISE_ULTRA133,
457 IDE_PCI_CLASS_OVERRIDE,
458 "Promise Ultra133/ATA Bus Master IDE Accelerator",
459 pdc202xx_chip_map,
460 },
461 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
462 IDE_PCI_CLASS_OVERRIDE,
463 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
464 pdc202xx_chip_map,
465 },
466 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
467 IDE_PCI_CLASS_OVERRIDE,
468 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
469 pdc202xx_chip_map,
470 },
471 { 0,
472 0,
473 NULL,
474 NULL
475 }
476 };
477
478 const struct pciide_product_desc pciide_opti_products[] = {
479 { PCI_PRODUCT_OPTI_82C621,
480 0,
481 "OPTi 82c621 PCI IDE controller",
482 opti_chip_map,
483 },
484 { PCI_PRODUCT_OPTI_82C568,
485 0,
486 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
487 opti_chip_map,
488 },
489 { PCI_PRODUCT_OPTI_82D568,
490 0,
491 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
492 opti_chip_map,
493 },
494 { 0,
495 0,
496 NULL,
497 NULL
498 }
499 };
500
501 const struct pciide_product_desc pciide_triones_products[] = {
502 { PCI_PRODUCT_TRIONES_HPT366,
503 IDE_PCI_CLASS_OVERRIDE,
504 NULL,
505 hpt_chip_map,
506 },
507 { PCI_PRODUCT_TRIONES_HPT374,
508 IDE_PCI_CLASS_OVERRIDE,
509 NULL,
510 hpt_chip_map
511 },
512 { 0,
513 0,
514 NULL,
515 NULL
516 }
517 };
518
519 const struct pciide_product_desc pciide_acard_products[] = {
520 { PCI_PRODUCT_ACARD_ATP850U,
521 IDE_PCI_CLASS_OVERRIDE,
522 "Acard ATP850U Ultra33 IDE Controller",
523 acard_chip_map,
524 },
525 { PCI_PRODUCT_ACARD_ATP860,
526 IDE_PCI_CLASS_OVERRIDE,
527 "Acard ATP860 Ultra66 IDE Controller",
528 acard_chip_map,
529 },
530 { PCI_PRODUCT_ACARD_ATP860A,
531 IDE_PCI_CLASS_OVERRIDE,
532 "Acard ATP860-A Ultra66 IDE Controller",
533 acard_chip_map,
534 },
535 { 0,
536 0,
537 NULL,
538 NULL
539 }
540 };
541
542 const struct pciide_product_desc pciide_serverworks_products[] = {
543 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
544 0,
545 "ServerWorks OSB4 IDE Controller",
546 serverworks_chip_map,
547 },
548 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
549 0,
550 "ServerWorks CSB5 IDE Controller",
551 serverworks_chip_map,
552 },
553 { 0,
554 0,
555 NULL,
556 }
557 };
558
559 const struct pciide_product_desc pciide_symphony_products[] = {
560 { PCI_PRODUCT_SYMPHONY_82C105,
561 0,
562 "Symphony Labs 82C105 IDE controller",
563 sl82c105_chip_map,
564 },
565 { 0,
566 0,
567 NULL,
568 }
569 };
570
571 const struct pciide_product_desc pciide_winbond_products[] = {
572 { PCI_PRODUCT_WINBOND_W83C553F_1,
573 0,
574 "Winbond W83C553F IDE controller",
575 sl82c105_chip_map,
576 },
577 { 0,
578 0,
579 NULL,
580 }
581 };
582
583 struct pciide_vendor_desc {
584 u_int32_t ide_vendor;
585 const struct pciide_product_desc *ide_products;
586 };
587
588 const struct pciide_vendor_desc pciide_vendors[] = {
589 { PCI_VENDOR_INTEL, pciide_intel_products },
590 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
591 { PCI_VENDOR_VIATECH, pciide_via_products },
592 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
593 { PCI_VENDOR_SIS, pciide_sis_products },
594 { PCI_VENDOR_ALI, pciide_acer_products },
595 { PCI_VENDOR_PROMISE, pciide_promise_products },
596 { PCI_VENDOR_AMD, pciide_amd_products },
597 { PCI_VENDOR_OPTI, pciide_opti_products },
598 { PCI_VENDOR_TRIONES, pciide_triones_products },
599 { PCI_VENDOR_ACARD, pciide_acard_products },
600 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
601 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
602 { PCI_VENDOR_WINBOND, pciide_winbond_products },
603 { 0, NULL }
604 };
605
606 /* options passed via the 'flags' config keyword */
607 #define PCIIDE_OPTIONS_DMA 0x01
608 #define PCIIDE_OPTIONS_NODMA 0x02
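/*
 * For example, a kernel config line such as
 *	pciide* at pci? dev ? function ? flags 0x0002
 * forces DMA off (PCIIDE_OPTIONS_NODMA), while flags 0x0001
 * (PCIIDE_OPTIONS_DMA) lets default_chip_map() try bus-master DMA on a
 * controller this driver does not otherwise know about.
 */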
609
610 int pciide_match __P((struct device *, struct cfdata *, void *));
611 void pciide_attach __P((struct device *, struct device *, void *));
612
613 struct cfattach pciide_ca = {
614 sizeof(struct pciide_softc), pciide_match, pciide_attach
615 };
616 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
617 int pciide_mapregs_compat __P(( struct pci_attach_args *,
618 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
619 int pciide_mapregs_native __P((struct pci_attach_args *,
620 struct pciide_channel *, bus_size_t *, bus_size_t *,
621 int (*pci_intr) __P((void *))));
622 void pciide_mapreg_dma __P((struct pciide_softc *,
623 struct pci_attach_args *));
624 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
625 void pciide_mapchan __P((struct pci_attach_args *,
626 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
627 int (*pci_intr) __P((void *))));
628 int pciide_chan_candisable __P((struct pciide_channel *));
629 void pciide_map_compat_intr __P(( struct pci_attach_args *,
630 struct pciide_channel *, int, int));
631 int pciide_compat_intr __P((void *));
632 int pciide_pci_intr __P((void *));
633 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
634
635 const struct pciide_product_desc *
636 pciide_lookup_product(id)
637 u_int32_t id;
638 {
639 const struct pciide_product_desc *pp;
640 const struct pciide_vendor_desc *vp;
641
642 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
643 if (PCI_VENDOR(id) == vp->ide_vendor)
644 break;
645
646 if ((pp = vp->ide_products) == NULL)
647 return NULL;
648
649 for (; pp->chip_map != NULL; pp++)
650 if (PCI_PRODUCT(id) == pp->ide_product)
651 break;
652
653 if (pp->chip_map == NULL)
654 return NULL;
655 return pp;
656 }
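/*
 * For example, an id with vendor PCI_VENDOR_INTEL and product
 * PCI_PRODUCT_INTEL_82371AB_IDE resolves to the PIIX4 entry above and thus
 * to piix_chip_map(); an id from a vendor not listed in pciide_vendors[]
 * returns NULL, and pciide_attach() falls back to default_product_desc.
 */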
657
658 int
659 pciide_match(parent, match, aux)
660 struct device *parent;
661 struct cfdata *match;
662 void *aux;
663 {
664 struct pci_attach_args *pa = aux;
665 const struct pciide_product_desc *pp;
666
667 /*
668 * Check the ID register to see that it's a PCI IDE controller.
669 * If it is, we assume that we can deal with it; it _should_
670 * work in a standardized way...
671 */
672 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
673 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
674 return (1);
675 }
676
677 /*
678 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
679 * controllers. Let's see if we can deal with them anyway.
680 */
681 pp = pciide_lookup_product(pa->pa_id);
682 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
683 return (1);
684 }
685
686 return (0);
687 }
688
689 void
690 pciide_attach(parent, self, aux)
691 struct device *parent, *self;
692 void *aux;
693 {
694 struct pci_attach_args *pa = aux;
695 pci_chipset_tag_t pc = pa->pa_pc;
696 pcitag_t tag = pa->pa_tag;
697 struct pciide_softc *sc = (struct pciide_softc *)self;
698 pcireg_t csr;
699 char devinfo[256];
700 const char *displaydev;
701
702 sc->sc_pp = pciide_lookup_product(pa->pa_id);
703 if (sc->sc_pp == NULL) {
704 sc->sc_pp = &default_product_desc;
705 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
706 displaydev = devinfo;
707 } else
708 displaydev = sc->sc_pp->ide_name;
709
710 /* if displaydev == NULL, printf is done in chip-specific map */
711 if (displaydev)
712 printf(": %s (rev. 0x%02x)\n", displaydev,
713 PCI_REVISION(pa->pa_class));
714
715 sc->sc_pc = pa->pa_pc;
716 sc->sc_tag = pa->pa_tag;
717 #ifdef WDCDEBUG
718 if (wdcdebug_pciide_mask & DEBUG_PROBE)
719 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
720 #endif
721 sc->sc_pp->chip_map(sc, pa);
722
723 if (sc->sc_dma_ok) {
724 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
725 csr |= PCI_COMMAND_MASTER_ENABLE;
726 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
727 }
728 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
729 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
730 }
731
732 /* tell whether the chip is enabled or not */
733 int
734 pciide_chipen(sc, pa)
735 struct pciide_softc *sc;
736 struct pci_attach_args *pa;
737 {
738 pcireg_t csr;
739 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
740 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
741 PCI_COMMAND_STATUS_REG);
742 printf("%s: device disabled (at %s)\n",
743 sc->sc_wdcdev.sc_dev.dv_xname,
744 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
745 "device" : "bridge");
746 return 0;
747 }
748 return 1;
749 }
750
751 int
752 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
753 struct pci_attach_args *pa;
754 struct pciide_channel *cp;
755 int compatchan;
756 bus_size_t *cmdsizep, *ctlsizep;
757 {
758 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
759 struct channel_softc *wdc_cp = &cp->wdc_channel;
760
761 cp->compat = 1;
762 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
763 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
764
765 wdc_cp->cmd_iot = pa->pa_iot;
766 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
767 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
768 printf("%s: couldn't map %s channel cmd regs\n",
769 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
770 return (0);
771 }
772
773 wdc_cp->ctl_iot = pa->pa_iot;
774 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
775 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
776 printf("%s: couldn't map %s channel ctl regs\n",
777 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
778 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
779 PCIIDE_COMPAT_CMD_SIZE);
780 return (0);
781 }
782
783 return (1);
784 }
785
786 int
787 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
788 struct pci_attach_args * pa;
789 struct pciide_channel *cp;
790 bus_size_t *cmdsizep, *ctlsizep;
791 int (*pci_intr) __P((void *));
792 {
793 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
794 struct channel_softc *wdc_cp = &cp->wdc_channel;
795 const char *intrstr;
796 pci_intr_handle_t intrhandle;
797
798 cp->compat = 0;
799
800 if (sc->sc_pci_ih == NULL) {
801 if (pci_intr_map(pa, &intrhandle) != 0) {
802 printf("%s: couldn't map native-PCI interrupt\n",
803 sc->sc_wdcdev.sc_dev.dv_xname);
804 return 0;
805 }
806 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
807 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
808 intrhandle, IPL_BIO, pci_intr, sc);
809 if (sc->sc_pci_ih != NULL) {
810 printf("%s: using %s for native-PCI interrupt\n",
811 sc->sc_wdcdev.sc_dev.dv_xname,
812 intrstr ? intrstr : "unknown interrupt");
813 } else {
814 printf("%s: couldn't establish native-PCI interrupt",
815 sc->sc_wdcdev.sc_dev.dv_xname);
816 if (intrstr != NULL)
817 printf(" at %s", intrstr);
818 printf("\n");
819 return 0;
820 }
821 }
822 cp->ih = sc->sc_pci_ih;
823 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
824 PCI_MAPREG_TYPE_IO, 0,
825 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
826 printf("%s: couldn't map %s channel cmd regs\n",
827 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
828 return 0;
829 }
830
831 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
832 PCI_MAPREG_TYPE_IO, 0,
833 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
834 printf("%s: couldn't map %s channel ctl regs\n",
835 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
836 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
837 return 0;
838 }
839 /*
840 * In native mode, 4 bytes of I/O space are mapped for the control
841 * register; the control register itself is at offset 2. Pass the generic
842 * code a handle for only one byte at the right offset.
843 */
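/*
 * Sketch of the control BAR layout assumed here (per the PCI IDE spec):
 * 4 bytes of I/O space are decoded, and the alternate-status/device-control
 * register is the byte at offset 2; the other bytes are unused by this
 * driver, so a 1-byte subregion at offset 2 is all the generic wdc code
 * needs.
 */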
844 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
845 &wdc_cp->ctl_ioh) != 0) {
846 printf("%s: unable to subregion %s channel ctl regs\n",
847 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
848 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
849 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
850 return 0;
851 }
852 return (1);
853 }
854
855 void
856 pciide_mapreg_dma(sc, pa)
857 struct pciide_softc *sc;
858 struct pci_attach_args *pa;
859 {
860 pcireg_t maptype;
861 bus_addr_t addr;
862
863 /*
864 * Map DMA registers
865 *
866 * Note that sc_dma_ok is the right variable to test to see if
867 * DMA can be done. If the interface doesn't support DMA,
868 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
869 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
870 * non-zero if the interface supports DMA and the registers
871 * could be mapped.
872 *
873 * XXX Note that despite the fact that the Bus Master IDE specs
874 * XXX say that "The bus master IDE function uses 16 bytes of IO
875 * XXX space," some controllers (at least the United
876 * XXX Microelectronics UM8886BF) place it in memory space.
877 */
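/*
 * Rough layout of the 16-byte bus-master register block mapped below (per
 * the Bus Master IDE spec): the primary channel uses offsets 0x0-0x7 and the
 * secondary channel the same registers at +IDEDMA_SCH_OFFSET (0x8); within a
 * channel, the command register is at +0 (IDEDMA_CMD), the status register
 * at +2 (IDEDMA_CTL) and the 32-bit descriptor table pointer at +4
 * (IDEDMA_TBL).
 */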
878 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
879 PCIIDE_REG_BUS_MASTER_DMA);
880
881 switch (maptype) {
882 case PCI_MAPREG_TYPE_IO:
883 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
884 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
885 &addr, NULL, NULL) == 0);
886 if (sc->sc_dma_ok == 0) {
887 printf(", but unused (couldn't query registers)");
888 break;
889 }
890 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
891 && addr >= 0x10000) {
892 sc->sc_dma_ok = 0;
893 printf(", but unused (registers at unsafe address "
894 "%#lx)", (unsigned long)addr);
895 break;
896 }
897 /* FALLTHROUGH */
898
899 case PCI_MAPREG_MEM_TYPE_32BIT:
900 sc->sc_dma_ok = (pci_mapreg_map(pa,
901 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
902 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
903 sc->sc_dmat = pa->pa_dmat;
904 if (sc->sc_dma_ok == 0) {
905 printf(", but unused (couldn't map registers)");
906 } else {
907 sc->sc_wdcdev.dma_arg = sc;
908 sc->sc_wdcdev.dma_init = pciide_dma_init;
909 sc->sc_wdcdev.dma_start = pciide_dma_start;
910 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
911 }
912
913 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
914 PCIIDE_OPTIONS_NODMA) {
915 printf(", but unused (forced off by config file)");
916 sc->sc_dma_ok = 0;
917 }
918 break;
919
920 default:
921 sc->sc_dma_ok = 0;
922 printf(", but unsupported register maptype (0x%x)", maptype);
923 }
924 }
925
926 int
927 pciide_compat_intr(arg)
928 void *arg;
929 {
930 struct pciide_channel *cp = arg;
931
932 #ifdef DIAGNOSTIC
933 /* should only be called for a compat channel */
934 if (cp->compat == 0)
935 panic("pciide compat intr called for non-compat chan %p", cp);
936 #endif
937 return (wdcintr(&cp->wdc_channel));
938 }
939
940 int
941 pciide_pci_intr(arg)
942 void *arg;
943 {
944 struct pciide_softc *sc = arg;
945 struct pciide_channel *cp;
946 struct channel_softc *wdc_cp;
947 int i, rv, crv;
948
949 rv = 0;
950 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
951 cp = &sc->pciide_channels[i];
952 wdc_cp = &cp->wdc_channel;
953
954 /* If this is a compat channel, skip. */
955 if (cp->compat)
956 continue;
957 /* if this channel is not waiting for an interrupt, skip */
958 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
959 continue;
960
961 crv = wdcintr(wdc_cp);
962 if (crv == 0)
963 ; /* leave rv alone */
964 else if (crv == 1)
965 rv = 1; /* claim the intr */
966 else if (rv == 0) /* crv should be -1 in this case */
967 rv = crv; /* if we've done no better, take it */
968 }
969 return (rv);
970 }
971
972 void
973 pciide_channel_dma_setup(cp)
974 struct pciide_channel *cp;
975 {
976 int drive;
977 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
978 struct ata_drive_datas *drvp;
979
980 for (drive = 0; drive < 2; drive++) {
981 drvp = &cp->wdc_channel.ch_drive[drive];
982 /* If no drive, skip */
983 if ((drvp->drive_flags & DRIVE) == 0)
984 continue;
985 /* setup DMA if needed */
986 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
987 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
988 sc->sc_dma_ok == 0) {
989 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
990 continue;
991 }
992 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
993 != 0) {
994 /* Abort DMA setup */
995 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
996 continue;
997 }
998 }
999 }
1000
1001 int
1002 pciide_dma_table_setup(sc, channel, drive)
1003 struct pciide_softc *sc;
1004 int channel, drive;
1005 {
1006 bus_dma_segment_t seg;
1007 int error, rseg;
1008 const bus_size_t dma_table_size =
1009 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1010 struct pciide_dma_maps *dma_maps =
1011 &sc->pciide_channels[channel].dma_maps[drive];
1012
1013 /* If table was already allocated, just return */
1014 if (dma_maps->dma_table)
1015 return 0;
1016
1017 /* Allocate memory for the DMA tables and map it */
1018 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1019 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1020 BUS_DMA_NOWAIT)) != 0) {
1021 printf("%s:%d: unable to allocate table DMA for "
1022 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1023 channel, drive, error);
1024 return error;
1025 }
1026 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1027 dma_table_size,
1028 (caddr_t *)&dma_maps->dma_table,
1029 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1030 printf("%s:%d: unable to map table DMA for"
1031 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1032 channel, drive, error);
1033 return error;
1034 }
1035 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1036 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1037 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1038
1039 /* Create and load table DMA map for this disk */
1040 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1041 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1042 &dma_maps->dmamap_table)) != 0) {
1043 printf("%s:%d: unable to create table DMA map for "
1044 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1045 channel, drive, error);
1046 return error;
1047 }
1048 if ((error = bus_dmamap_load(sc->sc_dmat,
1049 dma_maps->dmamap_table,
1050 dma_maps->dma_table,
1051 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1052 printf("%s:%d: unable to load table DMA map for "
1053 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1054 channel, drive, error);
1055 return error;
1056 }
1057 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1058 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1059 DEBUG_PROBE);
1060 /* Create a xfer DMA map for this drive */
1061 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1062 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1063 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1064 &dma_maps->dmamap_xfer)) != 0) {
1065 printf("%s:%d: unable to create xfer DMA map for "
1066 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1067 channel, drive, error);
1068 return error;
1069 }
1070 return 0;
1071 }
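/*
 * Each struct idedma_table entry filled in by pciide_dma_init() below is a
 * physical region descriptor as described in the Bus Master IDE spec: a
 * 32-bit little-endian physical base address followed by a byte count in
 * the low 16 bits of the second word (0 meaning 64KB), with bit 31 of the
 * last entry (IDEDMA_BYTE_COUNT_EOT) marking the end of the table. No
 * descriptor may describe a region crossing a 64KB boundary, which is what
 * the DIAGNOSTIC check in pciide_dma_init() enforces.
 */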
1072
1073 int
1074 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1075 void *v;
1076 int channel, drive;
1077 void *databuf;
1078 size_t datalen;
1079 int flags;
1080 {
1081 struct pciide_softc *sc = v;
1082 int error, seg;
1083 struct pciide_dma_maps *dma_maps =
1084 &sc->pciide_channels[channel].dma_maps[drive];
1085
1086 error = bus_dmamap_load(sc->sc_dmat,
1087 dma_maps->dmamap_xfer,
1088 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1089 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1090 if (error) {
1091 printf("%s:%d: unable to load xfer DMA map for"
1092 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1093 channel, drive, error);
1094 return error;
1095 }
1096
1097 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1098 dma_maps->dmamap_xfer->dm_mapsize,
1099 (flags & WDC_DMA_READ) ?
1100 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1101
1102 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1103 #ifdef DIAGNOSTIC
1104 /* A segment must not cross a 64k boundary */
1105 {
1106 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1107 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1108 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1109 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1110 printf("pciide_dma: segment %d physical addr 0x%lx"
1111 " len 0x%lx not properly aligned\n",
1112 seg, phys, len);
1113 panic("pciide_dma: buf align");
1114 }
1115 }
1116 #endif
1117 dma_maps->dma_table[seg].base_addr =
1118 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1119 dma_maps->dma_table[seg].byte_count =
1120 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1121 IDEDMA_BYTE_COUNT_MASK);
1122 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1123 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1124 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1125
1126 }
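/* Flag the last descriptor as end-of-table so the controller stops there */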
1127 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1128 htole32(IDEDMA_BYTE_COUNT_EOT);
1129
1130 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1131 dma_maps->dmamap_table->dm_mapsize,
1132 BUS_DMASYNC_PREWRITE);
1133
1134 /* Maps are ready. Start DMA function */
1135 #ifdef DIAGNOSTIC
1136 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1137 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1138 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1139 panic("pciide_dma_init: table align");
1140 }
1141 #endif
1142
1143 /* Clear status bits */
1144 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1145 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1146 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1147 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1148 /* Write table addr */
1149 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1150 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1151 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1152 /* set read/write */
1153 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1154 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1155 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1156 /* remember flags */
1157 dma_maps->dma_flags = flags;
1158 return 0;
1159 }
1160
1161 void
1162 pciide_dma_start(v, channel, drive)
1163 void *v;
1164 int channel, drive;
1165 {
1166 struct pciide_softc *sc = v;
1167
1168 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1169 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1170 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1171 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1172 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1173 }
1174
1175 int
1176 pciide_dma_finish(v, channel, drive, force)
1177 void *v;
1178 int channel, drive;
1179 int force;
1180 {
1181 struct pciide_softc *sc = v;
1182 u_int8_t status;
1183 int error = 0;
1184 struct pciide_dma_maps *dma_maps =
1185 &sc->pciide_channels[channel].dma_maps[drive];
1186
1187 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1188 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1189 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1190 DEBUG_XFERS);
1191
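/*
 * Status bits checked below: IDEDMA_CTL_INTR means the channel raised an
 * interrupt, IDEDMA_CTL_ERR flags a bus-master error, and IDEDMA_CTL_ACT
 * still being set means the transfer ended before the PRD table was
 * exhausted (a possible underrun, which can be legitimate for ATAPI).
 */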
1192 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1193 return WDC_DMAST_NOIRQ;
1194
1195 /* stop DMA channel */
1196 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1197 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1198 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1199 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1200
1201 /* Unload the map of the data buffer */
1202 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1203 dma_maps->dmamap_xfer->dm_mapsize,
1204 (dma_maps->dma_flags & WDC_DMA_READ) ?
1205 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1206 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1207
1208 if ((status & IDEDMA_CTL_ERR) != 0) {
1209 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1210 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1211 error |= WDC_DMAST_ERR;
1212 }
1213
1214 if ((status & IDEDMA_CTL_INTR) == 0) {
1215 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1216 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1217 drive, status);
1218 error |= WDC_DMAST_NOIRQ;
1219 }
1220
1221 if ((status & IDEDMA_CTL_ACT) != 0) {
1222 /* data underrun, may be a valid condition for ATAPI */
1223 error |= WDC_DMAST_UNDER;
1224 }
1225 return error;
1226 }
1227
1228 void
1229 pciide_irqack(chp)
1230 struct channel_softc *chp;
1231 {
1232 struct pciide_channel *cp = (struct pciide_channel*)chp;
1233 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1234
1235 /* clear status bits in IDE DMA registers */
1236 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1237 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1238 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1239 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1240 }
1241
1242 /* some common code used by several chip_map */
1243 int
1244 pciide_chansetup(sc, channel, interface)
1245 struct pciide_softc *sc;
1246 int channel;
1247 pcireg_t interface;
1248 {
1249 struct pciide_channel *cp = &sc->pciide_channels[channel];
1250 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1251 cp->name = PCIIDE_CHANNEL_NAME(channel);
1252 cp->wdc_channel.channel = channel;
1253 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1254 cp->wdc_channel.ch_queue =
1255 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1256 if (cp->wdc_channel.ch_queue == NULL) {
1257 printf("%s %s channel: "
1258 "can't allocate memory for command queue",
1259 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1260 return 0;
1261 }
1262 printf("%s: %s channel %s to %s mode\n",
1263 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1264 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1265 "configured" : "wired",
1266 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1267 "native-PCI" : "compatibility");
1268 return 1;
1269 }
1270
1271 /* some common code used by several chip channel_map */
1272 void
1273 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1274 struct pci_attach_args *pa;
1275 struct pciide_channel *cp;
1276 pcireg_t interface;
1277 bus_size_t *cmdsizep, *ctlsizep;
1278 int (*pci_intr) __P((void *));
1279 {
1280 struct channel_softc *wdc_cp = &cp->wdc_channel;
1281
1282 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1283 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1284 pci_intr);
1285 else
1286 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1287 wdc_cp->channel, cmdsizep, ctlsizep);
1288
1289 if (cp->hw_ok == 0)
1290 return;
1291 wdc_cp->data32iot = wdc_cp->cmd_iot;
1292 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1293 wdcattach(wdc_cp);
1294 }
1295
1296 /*
1297 * Generic code to determine whether a channel can be disabled. Returns 1
1298 * if the channel can be disabled, 0 if not.
1299 */
1300 int
1301 pciide_chan_candisable(cp)
1302 struct pciide_channel *cp;
1303 {
1304 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1305 struct channel_softc *wdc_cp = &cp->wdc_channel;
1306
1307 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1308 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1309 printf("%s: disabling %s channel (no drives)\n",
1310 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1311 cp->hw_ok = 0;
1312 return 1;
1313 }
1314 return 0;
1315 }
1316
1317 /*
1318 * Generic code to map the compat interrupt if hw_ok=1 and this is a compat
1319 * channel. Sets hw_ok=0 on failure.
1320 */
1321 void
1322 pciide_map_compat_intr(pa, cp, compatchan, interface)
1323 struct pci_attach_args *pa;
1324 struct pciide_channel *cp;
1325 int compatchan, interface;
1326 {
1327 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1328 struct channel_softc *wdc_cp = &cp->wdc_channel;
1329
1330 if (cp->hw_ok == 0)
1331 return;
1332 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1333 return;
1334
1335 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1336 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1337 pa, compatchan, pciide_compat_intr, cp);
1338 if (cp->ih == NULL) {
1339 #endif
1340 printf("%s: no compatibility interrupt for use by %s "
1341 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1342 cp->hw_ok = 0;
1343 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1344 }
1345 #endif
1346 }
1347
1348 void
1349 pciide_print_modes(cp)
1350 struct pciide_channel *cp;
1351 {
1352 wdc_print_modes(&cp->wdc_channel);
1353 }
1354
1355 void
1356 default_chip_map(sc, pa)
1357 struct pciide_softc *sc;
1358 struct pci_attach_args *pa;
1359 {
1360 struct pciide_channel *cp;
1361 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1362 pcireg_t csr;
1363 int channel, drive;
1364 struct ata_drive_datas *drvp;
1365 u_int8_t idedma_ctl;
1366 bus_size_t cmdsize, ctlsize;
1367 char *failreason;
1368
1369 if (pciide_chipen(sc, pa) == 0)
1370 return;
1371
1372 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1373 printf("%s: bus-master DMA support present",
1374 sc->sc_wdcdev.sc_dev.dv_xname);
1375 if (sc->sc_pp == &default_product_desc &&
1376 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1377 PCIIDE_OPTIONS_DMA) == 0) {
1378 printf(", but unused (no driver support)");
1379 sc->sc_dma_ok = 0;
1380 } else {
1381 pciide_mapreg_dma(sc, pa);
1382 if (sc->sc_dma_ok != 0)
1383 printf(", used without full driver "
1384 "support");
1385 }
1386 } else {
1387 printf("%s: hardware does not support DMA",
1388 sc->sc_wdcdev.sc_dev.dv_xname);
1389 sc->sc_dma_ok = 0;
1390 }
1391 printf("\n");
1392 if (sc->sc_dma_ok) {
1393 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1394 sc->sc_wdcdev.irqack = pciide_irqack;
1395 }
1396 sc->sc_wdcdev.PIO_cap = 0;
1397 sc->sc_wdcdev.DMA_cap = 0;
1398
1399 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1400 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1401 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1402
1403 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1404 cp = &sc->pciide_channels[channel];
1405 if (pciide_chansetup(sc, channel, interface) == 0)
1406 continue;
1407 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1408 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1409 &ctlsize, pciide_pci_intr);
1410 } else {
1411 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1412 channel, &cmdsize, &ctlsize);
1413 }
1414 if (cp->hw_ok == 0)
1415 continue;
1416 /*
1417 * Check to see if something appears to be there.
1418 */
1419 failreason = NULL;
1420 if (!wdcprobe(&cp->wdc_channel)) {
1421 failreason = "not responding; disabled or no drives?";
1422 goto next;
1423 }
1424 /*
1425 * Now, make sure it's actually attributable to this PCI IDE
1426 * channel by trying to access the channel again while the
1427 * PCI IDE controller's I/O space is disabled. (If the
1428 * channel no longer appears to be there, it belongs to
1429 * this controller.) YUCK!
1430 */
1431 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1432 PCI_COMMAND_STATUS_REG);
1433 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1434 csr & ~PCI_COMMAND_IO_ENABLE);
1435 if (wdcprobe(&cp->wdc_channel))
1436 failreason = "other hardware responding at addresses";
1437 pci_conf_write(sc->sc_pc, sc->sc_tag,
1438 PCI_COMMAND_STATUS_REG, csr);
1439 next:
1440 if (failreason) {
1441 printf("%s: %s channel ignored (%s)\n",
1442 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1443 failreason);
1444 cp->hw_ok = 0;
1445 bus_space_unmap(cp->wdc_channel.cmd_iot,
1446 cp->wdc_channel.cmd_ioh, cmdsize);
1447 if (interface & PCIIDE_INTERFACE_PCI(channel))
1448 bus_space_unmap(cp->wdc_channel.ctl_iot,
1449 cp->ctl_baseioh, ctlsize);
1450 else
1451 bus_space_unmap(cp->wdc_channel.ctl_iot,
1452 cp->wdc_channel.ctl_ioh, ctlsize);
1453 } else {
1454 pciide_map_compat_intr(pa, cp, channel, interface);
1455 }
1456 if (cp->hw_ok) {
1457 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1458 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1459 wdcattach(&cp->wdc_channel);
1460 }
1461 }
1462
1463 if (sc->sc_dma_ok == 0)
1464 return;
1465
1466 /* Allocate DMA maps */
1467 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1468 idedma_ctl = 0;
1469 cp = &sc->pciide_channels[channel];
1470 for (drive = 0; drive < 2; drive++) {
1471 drvp = &cp->wdc_channel.ch_drive[drive];
1472 /* If no drive, skip */
1473 if ((drvp->drive_flags & DRIVE) == 0)
1474 continue;
1475 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1476 continue;
1477 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1478 /* Abort DMA setup */
1479 printf("%s:%d:%d: can't allocate DMA maps, "
1480 "using PIO transfers\n",
1481 sc->sc_wdcdev.sc_dev.dv_xname,
1482 channel, drive);
1483 drvp->drive_flags &= ~DRIVE_DMA;
1484 }
1485 printf("%s:%d:%d: using DMA data transfers\n",
1486 sc->sc_wdcdev.sc_dev.dv_xname,
1487 channel, drive);
1488 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1489 }
1490 if (idedma_ctl != 0) {
1491 /* Add software bits in status register */
1492 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1493 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1494 idedma_ctl);
1495 }
1496 }
1497 }
1498
1499 void
1500 piix_chip_map(sc, pa)
1501 struct pciide_softc *sc;
1502 struct pci_attach_args *pa;
1503 {
1504 struct pciide_channel *cp;
1505 int channel;
1506 u_int32_t idetim;
1507 bus_size_t cmdsize, ctlsize;
1508
1509 if (pciide_chipen(sc, pa) == 0)
1510 return;
1511
1512 printf("%s: bus-master DMA support present",
1513 sc->sc_wdcdev.sc_dev.dv_xname);
1514 pciide_mapreg_dma(sc, pa);
1515 printf("\n");
1516 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1517 WDC_CAPABILITY_MODE;
1518 if (sc->sc_dma_ok) {
1519 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1520 sc->sc_wdcdev.irqack = pciide_irqack;
1521 switch(sc->sc_pp->ide_product) {
1522 case PCI_PRODUCT_INTEL_82371AB_IDE:
1523 case PCI_PRODUCT_INTEL_82440MX_IDE:
1524 case PCI_PRODUCT_INTEL_82801AA_IDE:
1525 case PCI_PRODUCT_INTEL_82801AB_IDE:
1526 case PCI_PRODUCT_INTEL_82801BA_IDE:
1527 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1528 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1529 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1530 case PCI_PRODUCT_INTEL_82801DB_IDE:
1531 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1532 }
1533 }
1534 sc->sc_wdcdev.PIO_cap = 4;
1535 sc->sc_wdcdev.DMA_cap = 2;
1536 switch(sc->sc_pp->ide_product) {
1537 case PCI_PRODUCT_INTEL_82801AA_IDE:
1538 sc->sc_wdcdev.UDMA_cap = 4;
1539 break;
1540 case PCI_PRODUCT_INTEL_82801BA_IDE:
1541 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1542 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1543 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1544 case PCI_PRODUCT_INTEL_82801DB_IDE:
1545 sc->sc_wdcdev.UDMA_cap = 5;
1546 break;
1547 default:
1548 sc->sc_wdcdev.UDMA_cap = 2;
1549 }
1550 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1551 sc->sc_wdcdev.set_modes = piix_setup_channel;
1552 else
1553 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1554 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1555 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1556
1557 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1558 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1559 DEBUG_PROBE);
1560 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1561 WDCDEBUG_PRINT((", sidetim=0x%x",
1562 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1563 DEBUG_PROBE);
1564 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1565 WDCDEBUG_PRINT((", udamreg 0x%x",
1566 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1567 DEBUG_PROBE);
1568 }
1569 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1570 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1571 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1572 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1573 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1574 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1575 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1576 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1577 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1578 DEBUG_PROBE);
1579 }
1580
1581 }
1582 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1583
1584 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1585 cp = &sc->pciide_channels[channel];
1586 /* PIIX is compat-only */
1587 if (pciide_chansetup(sc, channel, 0) == 0)
1588 continue;
1589 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1590 if ((PIIX_IDETIM_READ(idetim, channel) &
1591 PIIX_IDETIM_IDE) == 0) {
1592 printf("%s: %s channel ignored (disabled)\n",
1593 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1594 continue;
1595 }
1596 /* PIIX are compat-only pciide devices */
1597 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1598 if (cp->hw_ok == 0)
1599 continue;
1600 if (pciide_chan_candisable(cp)) {
1601 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1602 channel);
1603 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1604 idetim);
1605 }
1606 pciide_map_compat_intr(pa, cp, channel, 0);
1607 if (cp->hw_ok == 0)
1608 continue;
1609 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1610 }
1611
1612 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1613 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1614 DEBUG_PROBE);
1615 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1616 WDCDEBUG_PRINT((", sidetim=0x%x",
1617 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1618 DEBUG_PROBE);
1619 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1620 WDCDEBUG_PRINT((", udamreg 0x%x",
1621 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1622 DEBUG_PROBE);
1623 }
1624 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1625 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1626 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1627 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1628 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1629 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1630 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1631 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1632 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1633 DEBUG_PROBE);
1634 }
1635 }
1636 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1637 }
1638
1639 void
1640 piix_setup_channel(chp)
1641 struct channel_softc *chp;
1642 {
1643 u_int8_t mode[2], drive;
1644 u_int32_t oidetim, idetim, idedma_ctl;
1645 struct pciide_channel *cp = (struct pciide_channel*)chp;
1646 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1647 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1648
1649 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1650 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1651 idedma_ctl = 0;
1652
1653 /* set up new idetim: Enable IDE registers decode */
1654 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1655 chp->channel);
1656
1657 /* setup DMA */
1658 pciide_channel_dma_setup(cp);
1659
1660 /*
1661 * Here we have to mess with the drive modes: the PIIX can't use
1662 * different timings for the master and slave drives on a channel,
1663 * so we need to find the best combination.
1664 */
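/*
 * For example, if the master negotiated multiword DMA mode 2 and the slave
 * only mode 1, both end up programmed for mode 1 (the min() below); a DMA
 * drive paired with a PIO-only drive keeps its DMA mode, and the PIO drive
 * may be forced down to PIO mode 0 if its timings are incompatible.
 */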
1665
1666 /* If both drives support DMA, take the lower mode */
1667 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1668 (drvp[1].drive_flags & DRIVE_DMA)) {
1669 mode[0] = mode[1] =
1670 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1671 drvp[0].DMA_mode = mode[0];
1672 drvp[1].DMA_mode = mode[1];
1673 goto ok;
1674 }
1675 /*
1676 * If only one drive supports DMA, use its mode, and
1677 * put the other one in PIO mode 0 if its mode is not compatible
1678 */
1679 if (drvp[0].drive_flags & DRIVE_DMA) {
1680 mode[0] = drvp[0].DMA_mode;
1681 mode[1] = drvp[1].PIO_mode;
1682 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1683 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1684 mode[1] = drvp[1].PIO_mode = 0;
1685 goto ok;
1686 }
1687 if (drvp[1].drive_flags & DRIVE_DMA) {
1688 mode[1] = drvp[1].DMA_mode;
1689 mode[0] = drvp[0].PIO_mode;
1690 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1691 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1692 mode[0] = drvp[0].PIO_mode = 0;
1693 goto ok;
1694 }
1695 /*
1696 * If neither drive is using DMA, take the lower mode, unless
1697 * one of them is below PIO mode 2
1698 */
1699 if (drvp[0].PIO_mode < 2) {
1700 mode[0] = drvp[0].PIO_mode = 0;
1701 mode[1] = drvp[1].PIO_mode;
1702 } else if (drvp[1].PIO_mode < 2) {
1703 mode[1] = drvp[1].PIO_mode = 0;
1704 mode[0] = drvp[0].PIO_mode;
1705 } else {
1706 mode[0] = mode[1] =
1707 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1708 drvp[0].PIO_mode = mode[0];
1709 drvp[1].PIO_mode = mode[1];
1710 }
1711 ok: /* The modes are setup */
1712 for (drive = 0; drive < 2; drive++) {
1713 if (drvp[drive].drive_flags & DRIVE_DMA) {
1714 idetim |= piix_setup_idetim_timings(
1715 mode[drive], 1, chp->channel);
1716 goto end;
1717 }
1718 }
1719 /* If we get here, neither drive is using DMA */
1720 if (mode[0] >= 2)
1721 idetim |= piix_setup_idetim_timings(
1722 mode[0], 0, chp->channel);
1723 else
1724 idetim |= piix_setup_idetim_timings(
1725 mode[1], 0, chp->channel);
1726 end: /*
1727 * timing mode is now set up in the controller. Enable
1728 * it per-drive
1729 */
1730 for (drive = 0; drive < 2; drive++) {
1731 /* If no drive, skip */
1732 if ((drvp[drive].drive_flags & DRIVE) == 0)
1733 continue;
1734 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1735 if (drvp[drive].drive_flags & DRIVE_DMA)
1736 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1737 }
1738 if (idedma_ctl != 0) {
1739 /* Add software bits in status register */
1740 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1741 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1742 idedma_ctl);
1743 }
1744 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1745 pciide_print_modes(cp);
1746 }
1747
1748 void
1749 piix3_4_setup_channel(chp)
1750 struct channel_softc *chp;
1751 {
1752 struct ata_drive_datas *drvp;
1753 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1754 struct pciide_channel *cp = (struct pciide_channel*)chp;
1755 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1756 int drive;
1757 int channel = chp->channel;
1758
1759 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1760 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1761 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1762 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1763 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1764 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1765 PIIX_SIDETIM_RTC_MASK(channel));
1766
1767 idedma_ctl = 0;
1768 /* If channel disabled, no need to go further */
1769 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1770 return;
1771 /* set up new idetim: Enable IDE registers decode */
1772 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1773
1774 /* setup DMA if needed */
1775 pciide_channel_dma_setup(cp);
1776
1777 for (drive = 0; drive < 2; drive++) {
1778 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1779 PIIX_UDMATIM_SET(0x3, channel, drive));
1780 drvp = &chp->ch_drive[drive];
1781 /* If no drive, skip */
1782 if ((drvp->drive_flags & DRIVE) == 0)
1783 continue;
1784 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1785 (drvp->drive_flags & DRIVE_UDMA) == 0))
1786 goto pio;
1787
1788 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1789 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1790 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1791 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1792 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1793 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1794 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1795 ideconf |= PIIX_CONFIG_PINGPONG;
1796 }
1797 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1798 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1799 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1800 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1801 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1802 /* setup Ultra/100 */
1803 if (drvp->UDMA_mode > 2 &&
1804 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1805 drvp->UDMA_mode = 2;
1806 if (drvp->UDMA_mode > 4) {
1807 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1808 } else {
1809 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1810 if (drvp->UDMA_mode > 2) {
1811 ideconf |= PIIX_CONFIG_UDMA66(channel,
1812 drive);
1813 } else {
1814 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1815 drive);
1816 }
1817 }
1818 }
1819 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1820 /* setup Ultra/66 */
1821 if (drvp->UDMA_mode > 2 &&
1822 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1823 drvp->UDMA_mode = 2;
1824 if (drvp->UDMA_mode > 2)
1825 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1826 else
1827 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1828 }
1829 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1830 (drvp->drive_flags & DRIVE_UDMA)) {
1831 /* use Ultra/DMA */
1832 drvp->drive_flags &= ~DRIVE_DMA;
1833 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1834 udmareg |= PIIX_UDMATIM_SET(
1835 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1836 } else {
1837 /* use Multiword DMA */
1838 drvp->drive_flags &= ~DRIVE_UDMA;
1839 if (drive == 0) {
1840 idetim |= piix_setup_idetim_timings(
1841 drvp->DMA_mode, 1, channel);
1842 } else {
1843 sidetim |= piix_setup_sidetim_timings(
1844 drvp->DMA_mode, 1, channel);
1845 				idetim = PIIX_IDETIM_SET(idetim,
1846 				    PIIX_IDETIM_SITRE, channel);
1847 }
1848 }
1849 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1850
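		/*
		 * The DMA/UDMA cases above fall through here, so PIO
		 * timings are always programmed for the drive as well.
		 */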
1851 pio: /* use PIO mode */
1852 idetim |= piix_setup_idetim_drvs(drvp);
1853 if (drive == 0) {
1854 idetim |= piix_setup_idetim_timings(
1855 drvp->PIO_mode, 0, channel);
1856 } else {
1857 sidetim |= piix_setup_sidetim_timings(
1858 drvp->PIO_mode, 0, channel);
1859 			idetim = PIIX_IDETIM_SET(idetim,
1860 			    PIIX_IDETIM_SITRE, channel);
1861 }
1862 }
1863 if (idedma_ctl != 0) {
1864 /* Add software bits in status register */
1865 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1866 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1867 idedma_ctl);
1868 }
1869 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1870 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1871 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1872 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1873 pciide_print_modes(cp);
1874 }
1875
1876
1877 /* setup ISP and RTC fields, based on mode */
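/* (ISP = IORDY sample point, RTC = recovery time, per the PIIX datasheets) */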
1878 static u_int32_t
1879 piix_setup_idetim_timings(mode, dma, channel)
1880 u_int8_t mode;
1881 u_int8_t dma;
1882 u_int8_t channel;
1883 {
1884
1885 if (dma)
1886 return PIIX_IDETIM_SET(0,
1887 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1888 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1889 channel);
1890 else
1891 return PIIX_IDETIM_SET(0,
1892 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1893 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1894 channel);
1895 }
1896
1897 /* setup DTE, PPE, IE and TIME field based on PIO mode */
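/*
 * (In the PIIX IDETIM register, TIME enables the fast timings, IE enables
 * IORDY sampling, PPE enables prefetch/posting and DTE restricts the fast
 * timings to DMA only -- see the Intel PIIX/PIIX4 datasheets.)
 */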
1898 static u_int32_t
1899 piix_setup_idetim_drvs(drvp)
1900 struct ata_drive_datas *drvp;
1901 {
1902 u_int32_t ret = 0;
1903 struct channel_softc *chp = drvp->chnl_softc;
1904 u_int8_t channel = chp->channel;
1905 u_int8_t drive = drvp->drive;
1906
1907 /*
1908 	 * If the drive is using UDMA, timing setups are independent,
1909 	 * so just check DMA and PIO here.
1910 */
1911 if (drvp->drive_flags & DRIVE_DMA) {
1912 /* if mode = DMA mode 0, use compatible timings */
1913 if ((drvp->drive_flags & DRIVE_DMA) &&
1914 drvp->DMA_mode == 0) {
1915 drvp->PIO_mode = 0;
1916 return ret;
1917 }
1918 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1919 /*
1920 		 * If PIO and DMA timings are the same, use fast timings for
1921 		 * PIO too; otherwise fall back to compatible PIO timings.
1922 */
1923 if ((piix_isp_pio[drvp->PIO_mode] !=
1924 piix_isp_dma[drvp->DMA_mode]) ||
1925 (piix_rtc_pio[drvp->PIO_mode] !=
1926 piix_rtc_dma[drvp->DMA_mode]))
1927 drvp->PIO_mode = 0;
1928 /* if PIO mode <= 2, use compat timings for PIO */
1929 if (drvp->PIO_mode <= 2) {
1930 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1931 channel);
1932 return ret;
1933 }
1934 }
1935
1936 /*
1937 * Now setup PIO modes. If mode < 2, use compat timings.
1938 * Else enable fast timings. Enable IORDY and prefetch/post
1939 * if PIO mode >= 3.
1940 */
1941
1942 if (drvp->PIO_mode < 2)
1943 return ret;
1944
1945 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1946 if (drvp->PIO_mode >= 3) {
1947 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1948 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1949 }
1950 return ret;
1951 }
1952
1953 /* setup values in SIDETIM registers, based on mode */
1954 static u_int32_t
1955 piix_setup_sidetim_timings(mode, dma, channel)
1956 u_int8_t mode;
1957 u_int8_t dma;
1958 u_int8_t channel;
1959 {
1960 if (dma)
1961 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1962 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1963 else
1964 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1965 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1966 }
1967
1968 void
1969 amd7x6_chip_map(sc, pa)
1970 struct pciide_softc *sc;
1971 struct pci_attach_args *pa;
1972 {
1973 struct pciide_channel *cp;
1974 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1975 int channel;
1976 pcireg_t chanenable;
1977 bus_size_t cmdsize, ctlsize;
1978
1979 if (pciide_chipen(sc, pa) == 0)
1980 return;
1981 printf("%s: bus-master DMA support present",
1982 sc->sc_wdcdev.sc_dev.dv_xname);
1983 pciide_mapreg_dma(sc, pa);
1984 printf("\n");
1985 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1986 WDC_CAPABILITY_MODE;
1987 if (sc->sc_dma_ok) {
1988 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1989 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1990 sc->sc_wdcdev.irqack = pciide_irqack;
1991 }
1992 sc->sc_wdcdev.PIO_cap = 4;
1993 sc->sc_wdcdev.DMA_cap = 2;
1994
1995 switch (sc->sc_pp->ide_product) {
1996 case PCI_PRODUCT_AMD_PBC766_IDE:
1997 case PCI_PRODUCT_AMD_PBC768_IDE:
1998 sc->sc_wdcdev.UDMA_cap = 5;
1999 break;
2000 default:
2001 sc->sc_wdcdev.UDMA_cap = 4;
2002 }
2003 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2004 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2005 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2006 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
2007
2008 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2009 DEBUG_PROBE);
2010 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2011 cp = &sc->pciide_channels[channel];
2012 if (pciide_chansetup(sc, channel, interface) == 0)
2013 continue;
2014
2015 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2016 printf("%s: %s channel ignored (disabled)\n",
2017 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2018 continue;
2019 }
2020 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2021 pciide_pci_intr);
2022
2023 if (pciide_chan_candisable(cp))
2024 chanenable &= ~AMD7X6_CHAN_EN(channel);
2025 pciide_map_compat_intr(pa, cp, channel, interface);
2026 if (cp->hw_ok == 0)
2027 continue;
2028
2029 amd7x6_setup_channel(&cp->wdc_channel);
2030 }
2031 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2032 chanenable);
2033 return;
2034 }
2035
2036 void
2037 amd7x6_setup_channel(chp)
2038 struct channel_softc *chp;
2039 {
2040 u_int32_t udmatim_reg, datatim_reg;
2041 u_int8_t idedma_ctl;
2042 int mode, drive;
2043 struct ata_drive_datas *drvp;
2044 struct pciide_channel *cp = (struct pciide_channel*)chp;
2045 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2046 #ifndef PCIIDE_AMD756_ENABLEDMA
2047 int rev = PCI_REVISION(
2048 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2049 #endif
2050
2051 idedma_ctl = 0;
2052 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2053 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2054 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2055 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2056
2057 /* setup DMA if needed */
2058 pciide_channel_dma_setup(cp);
2059
2060 for (drive = 0; drive < 2; drive++) {
2061 drvp = &chp->ch_drive[drive];
2062 /* If no drive, skip */
2063 if ((drvp->drive_flags & DRIVE) == 0)
2064 continue;
2065 /* add timing values, setup DMA if needed */
2066 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2067 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2068 mode = drvp->PIO_mode;
2069 goto pio;
2070 }
2071 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2072 (drvp->drive_flags & DRIVE_UDMA)) {
2073 /* use Ultra/DMA */
2074 drvp->drive_flags &= ~DRIVE_DMA;
2075 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2076 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2077 AMD7X6_UDMA_TIME(chp->channel, drive,
2078 amd7x6_udma_tim[drvp->UDMA_mode]);
2079 /* can use PIO timings, MW DMA unused */
2080 mode = drvp->PIO_mode;
2081 } else {
2082 /* use Multiword DMA, but only if revision is OK */
2083 drvp->drive_flags &= ~DRIVE_UDMA;
2084 #ifndef PCIIDE_AMD756_ENABLEDMA
2085 /*
2086 			 * The workaround doesn't seem to be necessary
2087 			 * with all drives, so it can be disabled with
2088 			 * PCIIDE_AMD756_ENABLEDMA. The chip bug causes a
2089 			 * hard hang if triggered.
2090 */
2091 if (sc->sc_pp->ide_product ==
2092 PCI_PRODUCT_AMD_PBC756_IDE &&
2093 AMD756_CHIPREV_DISABLEDMA(rev)) {
2094 printf("%s:%d:%d: multi-word DMA disabled due "
2095 "to chip revision\n",
2096 sc->sc_wdcdev.sc_dev.dv_xname,
2097 chp->channel, drive);
2098 mode = drvp->PIO_mode;
2099 drvp->drive_flags &= ~DRIVE_DMA;
2100 goto pio;
2101 }
2102 #endif
2103 /* mode = min(pio, dma+2) */
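			/*
			 * e.g. PIO 4 with MW DMA 2 keeps mode 4, while
			 * PIO 4 with MW DMA 1 is clamped down to mode 3.
			 */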
2104 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2105 mode = drvp->PIO_mode;
2106 else
2107 mode = drvp->DMA_mode + 2;
2108 }
2109 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2110
2111 pio: /* setup PIO mode */
2112 if (mode <= 2) {
2113 drvp->DMA_mode = 0;
2114 drvp->PIO_mode = 0;
2115 mode = 0;
2116 } else {
2117 drvp->PIO_mode = mode;
2118 drvp->DMA_mode = mode - 2;
2119 }
2120 datatim_reg |=
2121 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2122 amd7x6_pio_set[mode]) |
2123 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2124 amd7x6_pio_rec[mode]);
2125 }
2126 if (idedma_ctl != 0) {
2127 /* Add software bits in status register */
2128 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2129 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2130 idedma_ctl);
2131 }
2132 pciide_print_modes(cp);
2133 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2134 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2135 }
2136
2137 void
2138 apollo_chip_map(sc, pa)
2139 struct pciide_softc *sc;
2140 struct pci_attach_args *pa;
2141 {
2142 struct pciide_channel *cp;
2143 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2144 int channel;
2145 u_int32_t ideconf;
2146 bus_size_t cmdsize, ctlsize;
2147 pcitag_t pcib_tag;
2148 pcireg_t pcib_id, pcib_class;
2149
2150 if (pciide_chipen(sc, pa) == 0)
2151 return;
2152 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2153 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2154 /* and read ID and rev of the ISA bridge */
2155 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2156 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2157 printf(": VIA Technologies ");
2158 switch (PCI_PRODUCT(pcib_id)) {
2159 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2160 printf("VT82C586 (Apollo VP) ");
2161 if(PCI_REVISION(pcib_class) >= 0x02) {
2162 printf("ATA33 controller\n");
2163 sc->sc_wdcdev.UDMA_cap = 2;
2164 } else {
2165 printf("controller\n");
2166 sc->sc_wdcdev.UDMA_cap = 0;
2167 }
2168 break;
2169 case PCI_PRODUCT_VIATECH_VT82C596A:
2170 printf("VT82C596A (Apollo Pro) ");
2171 if (PCI_REVISION(pcib_class) >= 0x12) {
2172 printf("ATA66 controller\n");
2173 sc->sc_wdcdev.UDMA_cap = 4;
2174 } else {
2175 printf("ATA33 controller\n");
2176 sc->sc_wdcdev.UDMA_cap = 2;
2177 }
2178 break;
2179 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2180 printf("VT82C686A (Apollo KX133) ");
2181 if (PCI_REVISION(pcib_class) >= 0x40) {
2182 printf("ATA100 controller\n");
2183 sc->sc_wdcdev.UDMA_cap = 5;
2184 } else {
2185 printf("ATA66 controller\n");
2186 sc->sc_wdcdev.UDMA_cap = 4;
2187 }
2188 break;
2189 case PCI_PRODUCT_VIATECH_VT8231:
2190 printf("VT8231 ATA100 controller\n");
2191 sc->sc_wdcdev.UDMA_cap = 5;
2192 break;
2193 case PCI_PRODUCT_VIATECH_VT8233:
2194 printf("VT8233 ATA100 controller\n");
2195 sc->sc_wdcdev.UDMA_cap = 5;
2196 break;
2197 case PCI_PRODUCT_VIATECH_VT8233A:
2198 printf("VT8233A ATA133 controller\n");
2199 		/* XXX use ATA100 until ATA133 is supported */
2200 sc->sc_wdcdev.UDMA_cap = 5;
2201 break;
2202 default:
2203 printf("unknown ATA controller\n");
2204 sc->sc_wdcdev.UDMA_cap = 0;
2205 }
2206
2207 printf("%s: bus-master DMA support present",
2208 sc->sc_wdcdev.sc_dev.dv_xname);
2209 pciide_mapreg_dma(sc, pa);
2210 printf("\n");
2211 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2212 WDC_CAPABILITY_MODE;
2213 if (sc->sc_dma_ok) {
2214 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2215 sc->sc_wdcdev.irqack = pciide_irqack;
2216 if (sc->sc_wdcdev.UDMA_cap > 0)
2217 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2218 }
2219 sc->sc_wdcdev.PIO_cap = 4;
2220 sc->sc_wdcdev.DMA_cap = 2;
2221 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2222 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2223 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2224
2225 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2226 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2227 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2228 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2229 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2230 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2231 DEBUG_PROBE);
2232
2233 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2234 cp = &sc->pciide_channels[channel];
2235 if (pciide_chansetup(sc, channel, interface) == 0)
2236 continue;
2237
2238 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2239 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2240 printf("%s: %s channel ignored (disabled)\n",
2241 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2242 continue;
2243 }
2244 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2245 pciide_pci_intr);
2246 if (cp->hw_ok == 0)
2247 continue;
2248 if (pciide_chan_candisable(cp)) {
2249 ideconf &= ~APO_IDECONF_EN(channel);
2250 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2251 ideconf);
2252 }
2253 pciide_map_compat_intr(pa, cp, channel, interface);
2254
2255 if (cp->hw_ok == 0)
2256 continue;
2257 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2258 }
2259 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2260 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2261 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2262 }
2263
2264 void
2265 apollo_setup_channel(chp)
2266 struct channel_softc *chp;
2267 {
2268 u_int32_t udmatim_reg, datatim_reg;
2269 u_int8_t idedma_ctl;
2270 int mode, drive;
2271 struct ata_drive_datas *drvp;
2272 struct pciide_channel *cp = (struct pciide_channel*)chp;
2273 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2274
2275 idedma_ctl = 0;
2276 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2277 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2278 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2279 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2280
2281 /* setup DMA if needed */
2282 pciide_channel_dma_setup(cp);
2283
2284 for (drive = 0; drive < 2; drive++) {
2285 drvp = &chp->ch_drive[drive];
2286 /* If no drive, skip */
2287 if ((drvp->drive_flags & DRIVE) == 0)
2288 continue;
2289 /* add timing values, setup DMA if needed */
2290 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2291 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2292 mode = drvp->PIO_mode;
2293 goto pio;
2294 }
2295 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2296 (drvp->drive_flags & DRIVE_UDMA)) {
2297 /* use Ultra/DMA */
2298 drvp->drive_flags &= ~DRIVE_DMA;
2299 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2300 APO_UDMA_EN_MTH(chp->channel, drive);
2301 if (sc->sc_wdcdev.UDMA_cap == 5) {
2302 /* 686b */
2303 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2304 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2305 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2306 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2307 /* 596b or 686a */
2308 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2309 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2310 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2311 } else {
2312 /* 596a or 586b */
2313 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2314 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2315 }
2316 /* can use PIO timings, MW DMA unused */
2317 mode = drvp->PIO_mode;
2318 } else {
2319 /* use Multiword DMA */
2320 drvp->drive_flags &= ~DRIVE_UDMA;
2321 /* mode = min(pio, dma+2) */
2322 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2323 mode = drvp->PIO_mode;
2324 else
2325 mode = drvp->DMA_mode + 2;
2326 }
2327 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2328
2329 pio: /* setup PIO mode */
2330 if (mode <= 2) {
2331 drvp->DMA_mode = 0;
2332 drvp->PIO_mode = 0;
2333 mode = 0;
2334 } else {
2335 drvp->PIO_mode = mode;
2336 drvp->DMA_mode = mode - 2;
2337 }
2338 datatim_reg |=
2339 APO_DATATIM_PULSE(chp->channel, drive,
2340 apollo_pio_set[mode]) |
2341 APO_DATATIM_RECOV(chp->channel, drive,
2342 apollo_pio_rec[mode]);
2343 }
2344 if (idedma_ctl != 0) {
2345 /* Add software bits in status register */
2346 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2347 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2348 idedma_ctl);
2349 }
2350 pciide_print_modes(cp);
2351 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2352 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2353 }
2354
2355 void
2356 cmd_channel_map(pa, sc, channel)
2357 struct pci_attach_args *pa;
2358 struct pciide_softc *sc;
2359 int channel;
2360 {
2361 struct pciide_channel *cp = &sc->pciide_channels[channel];
2362 bus_size_t cmdsize, ctlsize;
2363 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2364 int interface, one_channel;
2365
2366 /*
2367 * The 0648/0649 can be told to identify as a RAID controller.
2368 	 * In this case, we have to fake the interface
2369 */
2370 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2371 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2372 PCIIDE_INTERFACE_SETTABLE(1);
2373 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2374 CMD_CONF_DSA1)
2375 interface |= PCIIDE_INTERFACE_PCI(0) |
2376 PCIIDE_INTERFACE_PCI(1);
2377 } else {
2378 interface = PCI_INTERFACE(pa->pa_class);
2379 }
2380
2381 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2382 cp->name = PCIIDE_CHANNEL_NAME(channel);
2383 cp->wdc_channel.channel = channel;
2384 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2385
2386 /*
2387 	 * Older CMD64X chips don't have independent channels
2388 */
2389 switch (sc->sc_pp->ide_product) {
2390 case PCI_PRODUCT_CMDTECH_649:
2391 one_channel = 0;
2392 break;
2393 default:
2394 one_channel = 1;
2395 break;
2396 }
2397
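	/*
	 * When the channels are not independent, the secondary channel
	 * shares the primary channel's command queue, so requests for
	 * both channels go through a single queue.
	 */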
2398 if (channel > 0 && one_channel) {
2399 cp->wdc_channel.ch_queue =
2400 sc->pciide_channels[0].wdc_channel.ch_queue;
2401 } else {
2402 cp->wdc_channel.ch_queue =
2403 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2404 }
2405 if (cp->wdc_channel.ch_queue == NULL) {
2406 printf("%s %s channel: "
2407 		    "can't allocate memory for command queue\n",
2408 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2409 return;
2410 }
2411
2412 printf("%s: %s channel %s to %s mode\n",
2413 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2414 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2415 "configured" : "wired",
2416 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2417 "native-PCI" : "compatibility");
2418
2419 /*
2420 * with a CMD PCI64x, if we get here, the first channel is enabled:
2421 * there's no way to disable the first channel without disabling
2422 * the whole device
2423 */
2424 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2425 printf("%s: %s channel ignored (disabled)\n",
2426 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2427 return;
2428 }
2429
2430 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2431 if (cp->hw_ok == 0)
2432 return;
2433 if (channel == 1) {
2434 if (pciide_chan_candisable(cp)) {
2435 ctrl &= ~CMD_CTRL_2PORT;
2436 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2437 CMD_CTRL, ctrl);
2438 }
2439 }
2440 pciide_map_compat_intr(pa, cp, channel, interface);
2441 }
2442
2443 int
2444 cmd_pci_intr(arg)
2445 void *arg;
2446 {
2447 struct pciide_softc *sc = arg;
2448 struct pciide_channel *cp;
2449 struct channel_softc *wdc_cp;
2450 int i, rv, crv;
2451 u_int32_t priirq, secirq;
2452
2453 rv = 0;
2454 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2455 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2456 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2457 cp = &sc->pciide_channels[i];
2458 wdc_cp = &cp->wdc_channel;
2459 		/* If a compat channel, skip. */
2460 if (cp->compat)
2461 continue;
2462 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2463 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2464 crv = wdcintr(wdc_cp);
2465 if (crv == 0)
2466 printf("%s:%d: bogus intr\n",
2467 sc->sc_wdcdev.sc_dev.dv_xname, i);
2468 else
2469 rv = 1;
2470 }
2471 }
2472 return rv;
2473 }
2474
2475 void
2476 cmd_chip_map(sc, pa)
2477 struct pciide_softc *sc;
2478 struct pci_attach_args *pa;
2479 {
2480 int channel;
2481
2482 /*
2483 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2484 	 * and the base address registers can be disabled at the
2485 	 * hardware level. In this case, the device is wired
2486 * in compat mode and its first channel is always enabled,
2487 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2488 * In fact, it seems that the first channel of the CMD PCI0640
2489 * can't be disabled.
2490 */
2491
2492 #ifdef PCIIDE_CMD064x_DISABLE
2493 if (pciide_chipen(sc, pa) == 0)
2494 return;
2495 #endif
2496
2497 printf("%s: hardware does not support DMA\n",
2498 sc->sc_wdcdev.sc_dev.dv_xname);
2499 sc->sc_dma_ok = 0;
2500
2501 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2502 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2503 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2504
2505 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2506 cmd_channel_map(pa, sc, channel);
2507 }
2508 }
2509
2510 void
2511 cmd0643_9_chip_map(sc, pa)
2512 struct pciide_softc *sc;
2513 struct pci_attach_args *pa;
2514 {
2515 struct pciide_channel *cp;
2516 int channel;
2517 pcireg_t rev = PCI_REVISION(pa->pa_class);
2518
2519 /*
2520 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2521 	 * and the base address registers can be disabled at the
2522 	 * hardware level. In this case, the device is wired
2523 * in compat mode and its first channel is always enabled,
2524 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2525 * In fact, it seems that the first channel of the CMD PCI0640
2526 * can't be disabled.
2527 */
2528
2529 #ifdef PCIIDE_CMD064x_DISABLE
2530 if (pciide_chipen(sc, pa) == 0)
2531 return;
2532 #endif
2533 printf("%s: bus-master DMA support present",
2534 sc->sc_wdcdev.sc_dev.dv_xname);
2535 pciide_mapreg_dma(sc, pa);
2536 printf("\n");
2537 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2538 WDC_CAPABILITY_MODE;
2539 if (sc->sc_dma_ok) {
2540 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2541 switch (sc->sc_pp->ide_product) {
2542 case PCI_PRODUCT_CMDTECH_649:
2543 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2544 sc->sc_wdcdev.UDMA_cap = 5;
2545 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2546 break;
2547 case PCI_PRODUCT_CMDTECH_648:
2548 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2549 sc->sc_wdcdev.UDMA_cap = 4;
2550 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2551 break;
2552 case PCI_PRODUCT_CMDTECH_646:
2553 if (rev >= CMD0646U2_REV) {
2554 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2555 sc->sc_wdcdev.UDMA_cap = 2;
2556 } else if (rev >= CMD0646U_REV) {
2557 /*
2558 * Linux's driver claims that the 646U is broken
2559 * with UDMA. Only enable it if we know what we're
2560 * doing
2561 */
2562 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2563 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2564 sc->sc_wdcdev.UDMA_cap = 2;
2565 #endif
2566 /* explicitly disable UDMA */
2567 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2568 CMD_UDMATIM(0), 0);
2569 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2570 CMD_UDMATIM(1), 0);
2571 }
2572 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2573 break;
2574 default:
2575 sc->sc_wdcdev.irqack = pciide_irqack;
2576 }
2577 }
2578
2579 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2580 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2581 sc->sc_wdcdev.PIO_cap = 4;
2582 sc->sc_wdcdev.DMA_cap = 2;
2583 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2584
2585 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2586 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2587 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2588 DEBUG_PROBE);
2589
2590 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2591 cp = &sc->pciide_channels[channel];
2592 cmd_channel_map(pa, sc, channel);
2593 if (cp->hw_ok == 0)
2594 continue;
2595 cmd0643_9_setup_channel(&cp->wdc_channel);
2596 }
2597 /*
2598 * note - this also makes sure we clear the irq disable and reset
2599 * bits
2600 */
2601 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2602 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2603 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2604 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2605 DEBUG_PROBE);
2606 }
2607
2608 void
2609 cmd0643_9_setup_channel(chp)
2610 struct channel_softc *chp;
2611 {
2612 struct ata_drive_datas *drvp;
2613 u_int8_t tim;
2614 u_int32_t idedma_ctl, udma_reg;
2615 int drive;
2616 struct pciide_channel *cp = (struct pciide_channel*)chp;
2617 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2618
2619 idedma_ctl = 0;
2620 /* setup DMA if needed */
2621 pciide_channel_dma_setup(cp);
2622
2623 for (drive = 0; drive < 2; drive++) {
2624 drvp = &chp->ch_drive[drive];
2625 /* If no drive, skip */
2626 if ((drvp->drive_flags & DRIVE) == 0)
2627 continue;
2628 /* add timing values, setup DMA if needed */
2629 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2630 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2631 if (drvp->drive_flags & DRIVE_UDMA) {
2632 /* UltraDMA on a 646U2, 0648 or 0649 */
2633 drvp->drive_flags &= ~DRIVE_DMA;
2634 udma_reg = pciide_pci_read(sc->sc_pc,
2635 sc->sc_tag, CMD_UDMATIM(chp->channel));
2636 if (drvp->UDMA_mode > 2 &&
2637 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2638 CMD_BICSR) &
2639 CMD_BICSR_80(chp->channel)) == 0)
2640 drvp->UDMA_mode = 2;
2641 if (drvp->UDMA_mode > 2)
2642 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2643 else if (sc->sc_wdcdev.UDMA_cap > 2)
2644 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2645 udma_reg |= CMD_UDMATIM_UDMA(drive);
2646 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2647 CMD_UDMATIM_TIM_OFF(drive));
2648 udma_reg |=
2649 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2650 CMD_UDMATIM_TIM_OFF(drive));
2651 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2652 CMD_UDMATIM(chp->channel), udma_reg);
2653 } else {
2654 /*
2655 * use Multiword DMA.
2656 * Timings will be used for both PIO and DMA,
2657 				 * so adjust the DMA mode if needed.
2658 				 * If we have a 0646U2/8/9, turn off UDMA
2659 */
2660 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2661 udma_reg = pciide_pci_read(sc->sc_pc,
2662 sc->sc_tag,
2663 CMD_UDMATIM(chp->channel));
2664 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2665 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2666 CMD_UDMATIM(chp->channel),
2667 udma_reg);
2668 }
2669 if (drvp->PIO_mode >= 3 &&
2670 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2671 drvp->DMA_mode = drvp->PIO_mode - 2;
2672 }
2673 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2674 }
2675 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2676 }
2677 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2678 CMD_DATA_TIM(chp->channel, drive), tim);
2679 }
2680 if (idedma_ctl != 0) {
2681 /* Add software bits in status register */
2682 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2683 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2684 idedma_ctl);
2685 }
2686 pciide_print_modes(cp);
2687 }
2688
2689 void
2690 cmd646_9_irqack(chp)
2691 struct channel_softc *chp;
2692 {
2693 u_int32_t priirq, secirq;
2694 struct pciide_channel *cp = (struct pciide_channel*)chp;
2695 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2696
2697 if (chp->channel == 0) {
2698 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2699 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2700 } else {
2701 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2702 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2703 }
2704 pciide_irqack(chp);
2705 }
2706
2707 void
2708 cy693_chip_map(sc, pa)
2709 struct pciide_softc *sc;
2710 struct pci_attach_args *pa;
2711 {
2712 struct pciide_channel *cp;
2713 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2714 bus_size_t cmdsize, ctlsize;
2715
2716 if (pciide_chipen(sc, pa) == 0)
2717 return;
2718 /*
2719 * this chip has 2 PCI IDE functions, one for primary and one for
2720 * secondary. So we need to call pciide_mapregs_compat() with
2721 * the real channel
2722 */
2723 if (pa->pa_function == 1) {
2724 sc->sc_cy_compatchan = 0;
2725 } else if (pa->pa_function == 2) {
2726 sc->sc_cy_compatchan = 1;
2727 } else {
2728 printf("%s: unexpected PCI function %d\n",
2729 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2730 return;
2731 }
2732 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2733 printf("%s: bus-master DMA support present",
2734 sc->sc_wdcdev.sc_dev.dv_xname);
2735 pciide_mapreg_dma(sc, pa);
2736 } else {
2737 printf("%s: hardware does not support DMA",
2738 sc->sc_wdcdev.sc_dev.dv_xname);
2739 sc->sc_dma_ok = 0;
2740 }
2741 printf("\n");
2742
2743 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2744 if (sc->sc_cy_handle == NULL) {
2745 printf("%s: unable to map hyperCache control registers\n",
2746 sc->sc_wdcdev.sc_dev.dv_xname);
2747 sc->sc_dma_ok = 0;
2748 }
2749
2750 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2751 WDC_CAPABILITY_MODE;
2752 if (sc->sc_dma_ok) {
2753 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2754 sc->sc_wdcdev.irqack = pciide_irqack;
2755 }
2756 sc->sc_wdcdev.PIO_cap = 4;
2757 sc->sc_wdcdev.DMA_cap = 2;
2758 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2759
2760 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2761 sc->sc_wdcdev.nchannels = 1;
2762
2763 /* Only one channel for this chip; if we are here it's enabled */
2764 cp = &sc->pciide_channels[0];
2765 sc->wdc_chanarray[0] = &cp->wdc_channel;
2766 cp->name = PCIIDE_CHANNEL_NAME(0);
2767 cp->wdc_channel.channel = 0;
2768 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2769 cp->wdc_channel.ch_queue =
2770 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2771 if (cp->wdc_channel.ch_queue == NULL) {
2772 printf("%s primary channel: "
2773 		    "can't allocate memory for command queue\n",
2774 sc->sc_wdcdev.sc_dev.dv_xname);
2775 return;
2776 }
2777 printf("%s: primary channel %s to ",
2778 sc->sc_wdcdev.sc_dev.dv_xname,
2779 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2780 "configured" : "wired");
2781 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2782 printf("native-PCI");
2783 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2784 pciide_pci_intr);
2785 } else {
2786 printf("compatibility");
2787 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2788 &cmdsize, &ctlsize);
2789 }
2790 printf(" mode\n");
2791 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2792 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2793 wdcattach(&cp->wdc_channel);
2794 if (pciide_chan_candisable(cp)) {
2795 pci_conf_write(sc->sc_pc, sc->sc_tag,
2796 PCI_COMMAND_STATUS_REG, 0);
2797 }
2798 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2799 if (cp->hw_ok == 0)
2800 return;
2801 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2802 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2803 cy693_setup_channel(&cp->wdc_channel);
2804 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2805 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2806 }
2807
2808 void
2809 cy693_setup_channel(chp)
2810 struct channel_softc *chp;
2811 {
2812 struct ata_drive_datas *drvp;
2813 int drive;
2814 u_int32_t cy_cmd_ctrl;
2815 u_int32_t idedma_ctl;
2816 struct pciide_channel *cp = (struct pciide_channel*)chp;
2817 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2818 int dma_mode = -1;
2819
2820 cy_cmd_ctrl = idedma_ctl = 0;
2821
2822 /* setup DMA if needed */
2823 pciide_channel_dma_setup(cp);
2824
2825 for (drive = 0; drive < 2; drive++) {
2826 drvp = &chp->ch_drive[drive];
2827 /* If no drive, skip */
2828 if ((drvp->drive_flags & DRIVE) == 0)
2829 continue;
2830 /* add timing values, setup DMA if needed */
2831 if (drvp->drive_flags & DRIVE_DMA) {
2832 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2833 /* use Multiword DMA */
2834 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2835 dma_mode = drvp->DMA_mode;
2836 }
2837 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2838 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2839 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2840 CY_CMD_CTRL_IOW_REC_OFF(drive));
2841 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2842 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2843 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2844 CY_CMD_CTRL_IOR_REC_OFF(drive));
2845 }
2846 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2847 chp->ch_drive[0].DMA_mode = dma_mode;
2848 chp->ch_drive[1].DMA_mode = dma_mode;
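	/*
	 * The cy82c693 has a single DMA timing setting per channel, so
	 * both drives are programmed with the lowest DMA mode found above.
	 */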
2849
2850 if (dma_mode == -1)
2851 dma_mode = 0;
2852
2853 if (sc->sc_cy_handle != NULL) {
2854 /* Note: `multiple' is implied. */
2855 cy82c693_write(sc->sc_cy_handle,
2856 (sc->sc_cy_compatchan == 0) ?
2857 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2858 }
2859
2860 pciide_print_modes(cp);
2861
2862 if (idedma_ctl != 0) {
2863 /* Add software bits in status register */
2864 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2865 IDEDMA_CTL, idedma_ctl);
2866 }
2867 }
2868
2869 static int
2870 sis_hostbr_match(pa)
2871 struct pci_attach_args *pa;
2872 {
2873 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2874 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2875 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2876 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2877 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2878 }
2879
2880 void
2881 sis_chip_map(sc, pa)
2882 struct pciide_softc *sc;
2883 struct pci_attach_args *pa;
2884 {
2885 struct pciide_channel *cp;
2886 int channel;
2887 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2888 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2889 pcireg_t rev = PCI_REVISION(pa->pa_class);
2890 bus_size_t cmdsize, ctlsize;
2891 pcitag_t pchb_tag;
2892 pcireg_t pchb_id, pchb_class;
2893
2894 if (pciide_chipen(sc, pa) == 0)
2895 return;
2896 printf("%s: bus-master DMA support present",
2897 sc->sc_wdcdev.sc_dev.dv_xname);
2898 pciide_mapreg_dma(sc, pa);
2899 printf("\n");
2900
2901 /* get a PCI tag for the host bridge (function 0 of the same device) */
2902 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2903 	/* and read ID and rev of the host bridge */
2904 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2905 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2906
2907 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2908 WDC_CAPABILITY_MODE;
2909 if (sc->sc_dma_ok) {
2910 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2911 sc->sc_wdcdev.irqack = pciide_irqack;
2912 /*
2913 		 * controllers associated with a rev 0x2 530 Host to PCI Bridge
2914 * have problems with UDMA (info provided by Christos)
2915 */
2916 if (rev >= 0xd0 &&
2917 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2918 PCI_REVISION(pchb_class) >= 0x03))
2919 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2920 }
2921
2922 sc->sc_wdcdev.PIO_cap = 4;
2923 sc->sc_wdcdev.DMA_cap = 2;
2924 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2925 /*
2926 		 * Use UDMA/100 if a SiS 645/650/730/735 host bridge is
2927 		 * present, UDMA/33 otherwise.
2928 */
2929 sc->sc_wdcdev.UDMA_cap =
2930 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2931 sc->sc_wdcdev.set_modes = sis_setup_channel;
2932
2933 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2934 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2935
2936 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2937 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2938 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2939
2940 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2941 cp = &sc->pciide_channels[channel];
2942 if (pciide_chansetup(sc, channel, interface) == 0)
2943 continue;
2944 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2945 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2946 printf("%s: %s channel ignored (disabled)\n",
2947 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2948 continue;
2949 }
2950 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2951 pciide_pci_intr);
2952 if (cp->hw_ok == 0)
2953 continue;
2954 if (pciide_chan_candisable(cp)) {
2955 if (channel == 0)
2956 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2957 else
2958 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2959 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2960 sis_ctr0);
2961 }
2962 pciide_map_compat_intr(pa, cp, channel, interface);
2963 if (cp->hw_ok == 0)
2964 continue;
2965 sis_setup_channel(&cp->wdc_channel);
2966 }
2967 }
2968
2969 void
2970 sis_setup_channel(chp)
2971 struct channel_softc *chp;
2972 {
2973 struct ata_drive_datas *drvp;
2974 int drive;
2975 u_int32_t sis_tim;
2976 u_int32_t idedma_ctl;
2977 struct pciide_channel *cp = (struct pciide_channel*)chp;
2978 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2979
2980 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2981 "channel %d 0x%x\n", chp->channel,
2982 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2983 DEBUG_PROBE);
2984 sis_tim = 0;
2985 idedma_ctl = 0;
2986 /* setup DMA if needed */
2987 pciide_channel_dma_setup(cp);
2988
2989 for (drive = 0; drive < 2; drive++) {
2990 drvp = &chp->ch_drive[drive];
2991 /* If no drive, skip */
2992 if ((drvp->drive_flags & DRIVE) == 0)
2993 continue;
2994 /* add timing values, setup DMA if needed */
2995 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2996 (drvp->drive_flags & DRIVE_UDMA) == 0)
2997 goto pio;
2998
2999 if (drvp->drive_flags & DRIVE_UDMA) {
3000 /* use Ultra/DMA */
3001 drvp->drive_flags &= ~DRIVE_DMA;
3002 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3003 SIS_TIM_UDMA_TIME_OFF(drive);
3004 sis_tim |= SIS_TIM_UDMA_EN(drive);
3005 } else {
3006 /*
3007 * use Multiword DMA
3008 * Timings will be used for both PIO and DMA,
3009 * so adjust DMA mode if needed
3010 */
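			/*
			 * e.g. PIO 4 with MW DMA 1 ends up as PIO 3 and
			 * MW DMA 1, so one timing value fits both.
			 */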
3011 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3012 drvp->PIO_mode = drvp->DMA_mode + 2;
3013 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3014 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3015 drvp->PIO_mode - 2 : 0;
3016 if (drvp->DMA_mode == 0)
3017 drvp->PIO_mode = 0;
3018 }
3019 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3020 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3021 SIS_TIM_ACT_OFF(drive);
3022 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3023 SIS_TIM_REC_OFF(drive);
3024 }
3025 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3026 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3027 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3028 if (idedma_ctl != 0) {
3029 /* Add software bits in status register */
3030 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3031 IDEDMA_CTL, idedma_ctl);
3032 }
3033 pciide_print_modes(cp);
3034 }
3035
3036 void
3037 acer_chip_map(sc, pa)
3038 struct pciide_softc *sc;
3039 struct pci_attach_args *pa;
3040 {
3041 struct pciide_channel *cp;
3042 int channel;
3043 pcireg_t cr, interface;
3044 bus_size_t cmdsize, ctlsize;
3045 pcireg_t rev = PCI_REVISION(pa->pa_class);
3046
3047 if (pciide_chipen(sc, pa) == 0)
3048 return;
3049 printf("%s: bus-master DMA support present",
3050 sc->sc_wdcdev.sc_dev.dv_xname);
3051 pciide_mapreg_dma(sc, pa);
3052 printf("\n");
3053 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3054 WDC_CAPABILITY_MODE;
3055 if (sc->sc_dma_ok) {
3056 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3057 if (rev >= 0x20) {
3058 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3059 if (rev >= 0xC4)
3060 sc->sc_wdcdev.UDMA_cap = 5;
3061 else if (rev >= 0xC2)
3062 sc->sc_wdcdev.UDMA_cap = 4;
3063 else
3064 sc->sc_wdcdev.UDMA_cap = 2;
3065 }
3066 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3067 sc->sc_wdcdev.irqack = pciide_irqack;
3068 }
3069
3070 sc->sc_wdcdev.PIO_cap = 4;
3071 sc->sc_wdcdev.DMA_cap = 2;
3072 sc->sc_wdcdev.set_modes = acer_setup_channel;
3073 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3074 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3075
3076 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3077 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3078 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3079
3080 /* Enable "microsoft register bits" R/W. */
3081 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3082 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3083 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3084 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3085 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3086 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3087 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3088 ~ACER_CHANSTATUSREGS_RO);
3089 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3090 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3091 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3092 /* Don't use cr, re-read the real register content instead */
3093 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3094 PCI_CLASS_REG));
3095
3096 /* From linux: enable "Cable Detection" */
3097 if (rev >= 0xC2) {
3098 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3099 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3100 | ACER_0x4B_CDETECT);
3101 }
3102
3103 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3104 cp = &sc->pciide_channels[channel];
3105 if (pciide_chansetup(sc, channel, interface) == 0)
3106 continue;
3107 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3108 printf("%s: %s channel ignored (disabled)\n",
3109 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3110 continue;
3111 }
3112 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3113 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3114 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3115 if (cp->hw_ok == 0)
3116 continue;
3117 if (pciide_chan_candisable(cp)) {
3118 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3119 pci_conf_write(sc->sc_pc, sc->sc_tag,
3120 PCI_CLASS_REG, cr);
3121 }
3122 pciide_map_compat_intr(pa, cp, channel, interface);
3123 acer_setup_channel(&cp->wdc_channel);
3124 }
3125 }
3126
3127 void
3128 acer_setup_channel(chp)
3129 struct channel_softc *chp;
3130 {
3131 struct ata_drive_datas *drvp;
3132 int drive;
3133 u_int32_t acer_fifo_udma;
3134 u_int32_t idedma_ctl;
3135 struct pciide_channel *cp = (struct pciide_channel*)chp;
3136 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3137
3138 idedma_ctl = 0;
3139 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3140 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3141 acer_fifo_udma), DEBUG_PROBE);
3142 /* setup DMA if needed */
3143 pciide_channel_dma_setup(cp);
3144
3145 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3146 DRIVE_UDMA) { /* check 80 pins cable */
3147 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3148 ACER_0x4A_80PIN(chp->channel)) {
3149 if (chp->ch_drive[0].UDMA_mode > 2)
3150 chp->ch_drive[0].UDMA_mode = 2;
3151 if (chp->ch_drive[1].UDMA_mode > 2)
3152 chp->ch_drive[1].UDMA_mode = 2;
3153 }
3154 }
3155
3156 for (drive = 0; drive < 2; drive++) {
3157 drvp = &chp->ch_drive[drive];
3158 /* If no drive, skip */
3159 if ((drvp->drive_flags & DRIVE) == 0)
3160 continue;
3161 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3162 "channel %d drive %d 0x%x\n", chp->channel, drive,
3163 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3164 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3165 /* clear FIFO/DMA mode */
3166 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3167 ACER_UDMA_EN(chp->channel, drive) |
3168 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3169
3170 /* add timing values, setup DMA if needed */
3171 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3172 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3173 acer_fifo_udma |=
3174 ACER_FTH_OPL(chp->channel, drive, 0x1);
3175 goto pio;
3176 }
3177
3178 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3179 if (drvp->drive_flags & DRIVE_UDMA) {
3180 /* use Ultra/DMA */
3181 drvp->drive_flags &= ~DRIVE_DMA;
3182 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3183 acer_fifo_udma |=
3184 ACER_UDMA_TIM(chp->channel, drive,
3185 acer_udma[drvp->UDMA_mode]);
3186 /* XXX disable if one drive < UDMA3 ? */
3187 if (drvp->UDMA_mode >= 3) {
3188 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3189 ACER_0x4B,
3190 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3191 ACER_0x4B) | ACER_0x4B_UDMA66);
3192 }
3193 } else {
3194 /*
3195 * use Multiword DMA
3196 * Timings will be used for both PIO and DMA,
3197 * so adjust DMA mode if needed
3198 */
3199 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3200 drvp->PIO_mode = drvp->DMA_mode + 2;
3201 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3202 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3203 drvp->PIO_mode - 2 : 0;
3204 if (drvp->DMA_mode == 0)
3205 drvp->PIO_mode = 0;
3206 }
3207 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3208 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3209 ACER_IDETIM(chp->channel, drive),
3210 acer_pio[drvp->PIO_mode]);
3211 }
3212 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3213 acer_fifo_udma), DEBUG_PROBE);
3214 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3215 if (idedma_ctl != 0) {
3216 /* Add software bits in status register */
3217 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3218 IDEDMA_CTL, idedma_ctl);
3219 }
3220 pciide_print_modes(cp);
3221 }
3222
3223 int
3224 acer_pci_intr(arg)
3225 void *arg;
3226 {
3227 struct pciide_softc *sc = arg;
3228 struct pciide_channel *cp;
3229 struct channel_softc *wdc_cp;
3230 int i, rv, crv;
3231 u_int32_t chids;
3232
3233 rv = 0;
3234 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3235 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3236 cp = &sc->pciide_channels[i];
3237 wdc_cp = &cp->wdc_channel;
3238 		/* If a compat channel, skip. */
3239 if (cp->compat)
3240 continue;
3241 if (chids & ACER_CHIDS_INT(i)) {
3242 crv = wdcintr(wdc_cp);
3243 if (crv == 0)
3244 printf("%s:%d: bogus intr\n",
3245 sc->sc_wdcdev.sc_dev.dv_xname, i);
3246 else
3247 rv = 1;
3248 }
3249 }
3250 return rv;
3251 }
3252
3253 void
3254 hpt_chip_map(sc, pa)
3255 struct pciide_softc *sc;
3256 struct pci_attach_args *pa;
3257 {
3258 struct pciide_channel *cp;
3259 int i, compatchan, revision;
3260 pcireg_t interface;
3261 bus_size_t cmdsize, ctlsize;
3262
3263 if (pciide_chipen(sc, pa) == 0)
3264 return;
3265 revision = PCI_REVISION(pa->pa_class);
3266 printf(": Triones/Highpoint ");
3267 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3268 printf("HPT374 IDE Controller\n");
3269 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3270 if (revision == HPT370_REV)
3271 printf("HPT370 IDE Controller\n");
3272 else if (revision == HPT370A_REV)
3273 printf("HPT370A IDE Controller\n");
3274 else if (revision == HPT366_REV)
3275 printf("HPT366 IDE Controller\n");
3276 else
3277 printf("unknown HPT IDE controller rev %d\n", revision);
3278 } else
3279 printf("unknown HPT IDE controller 0x%x\n",
3280 sc->sc_pp->ide_product);
3281
3282 /*
3283 * when the chip is in native mode it identifies itself as a
3284 	 * 'misc mass storage' device. Fake the interface in this case.
3285 */
3286 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3287 interface = PCI_INTERFACE(pa->pa_class);
3288 } else {
3289 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3290 PCIIDE_INTERFACE_PCI(0);
3291 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3292 (revision == HPT370_REV || revision == HPT370A_REV)) ||
3293 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3294 interface |= PCIIDE_INTERFACE_PCI(1);
3295 }
3296
3297 printf("%s: bus-master DMA support present",
3298 sc->sc_wdcdev.sc_dev.dv_xname);
3299 pciide_mapreg_dma(sc, pa);
3300 printf("\n");
3301 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3302 WDC_CAPABILITY_MODE;
3303 if (sc->sc_dma_ok) {
3304 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3305 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3306 sc->sc_wdcdev.irqack = pciide_irqack;
3307 }
3308 sc->sc_wdcdev.PIO_cap = 4;
3309 sc->sc_wdcdev.DMA_cap = 2;
3310
3311 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3312 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3313 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3314 revision == HPT366_REV) {
3315 sc->sc_wdcdev.UDMA_cap = 4;
3316 /*
3317 * The 366 has 2 PCI IDE functions, one for primary and one
3318 * for secondary. So we need to call pciide_mapregs_compat()
3319 * with the real channel
3320 */
3321 if (pa->pa_function == 0) {
3322 compatchan = 0;
3323 } else if (pa->pa_function == 1) {
3324 compatchan = 1;
3325 } else {
3326 printf("%s: unexpected PCI function %d\n",
3327 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3328 return;
3329 }
3330 sc->sc_wdcdev.nchannels = 1;
3331 } else {
3332 sc->sc_wdcdev.nchannels = 2;
3333 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3334 sc->sc_wdcdev.UDMA_cap = 6;
3335 else
3336 sc->sc_wdcdev.UDMA_cap = 5;
3337 }
3338 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3339 cp = &sc->pciide_channels[i];
3340 if (sc->sc_wdcdev.nchannels > 1) {
3341 compatchan = i;
3342 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3343 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3344 printf("%s: %s channel ignored (disabled)\n",
3345 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3346 continue;
3347 }
3348 }
3349 if (pciide_chansetup(sc, i, interface) == 0)
3350 continue;
3351 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3352 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3353 &ctlsize, hpt_pci_intr);
3354 } else {
3355 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3356 &cmdsize, &ctlsize);
3357 }
3358 if (cp->hw_ok == 0)
3359 return;
3360 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3361 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3362 wdcattach(&cp->wdc_channel);
3363 hpt_setup_channel(&cp->wdc_channel);
3364 }
3365 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3366 (revision == HPT370_REV || revision == HPT370A_REV)) ||
3367 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3368 /*
3369 		 * HPT370_REV and higher have a bit to disable interrupts;
3370 		 * make sure to clear it
3371 */
3372 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3373 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3374 ~HPT_CSEL_IRQDIS);
3375 }
3376 /* set clocks, etc (mandatory on 374, optional otherwise) */
3377 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3378 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3379 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3380 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3381 return;
3382 }
3383
3384 void
3385 hpt_setup_channel(chp)
3386 struct channel_softc *chp;
3387 {
3388 struct ata_drive_datas *drvp;
3389 int drive;
3390 int cable;
3391 u_int32_t before, after;
3392 u_int32_t idedma_ctl;
3393 struct pciide_channel *cp = (struct pciide_channel*)chp;
3394 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3395
3396 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3397
3398 /* setup DMA if needed */
3399 pciide_channel_dma_setup(cp);
3400
3401 idedma_ctl = 0;
3402
3403 /* Per drive settings */
3404 for (drive = 0; drive < 2; drive++) {
3405 drvp = &chp->ch_drive[drive];
3406 /* If no drive, skip */
3407 if ((drvp->drive_flags & DRIVE) == 0)
3408 continue;
3409 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3410 HPT_IDETIM(chp->channel, drive));
3411
3412 /* add timing values, setup DMA if needed */
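		/*
		 * The timing table is selected by controller type: the
		 * single-channel HPT366 uses the hpt366 tables, two-channel
		 * chips use the hpt374 tables when UDMA_cap is 6 and the
		 * hpt370 tables otherwise.
		 */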
3413 if (drvp->drive_flags & DRIVE_UDMA) {
3414 /* use Ultra/DMA */
3415 drvp->drive_flags &= ~DRIVE_DMA;
3416 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3417 drvp->UDMA_mode > 2)
3418 drvp->UDMA_mode = 2;
3419 after = (sc->sc_wdcdev.nchannels == 2) ?
3420 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3421 hpt374_udma[drvp->UDMA_mode] :
3422 hpt370_udma[drvp->UDMA_mode]) :
3423 hpt366_udma[drvp->UDMA_mode];
3424 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3425 } else if (drvp->drive_flags & DRIVE_DMA) {
3426 /*
3427 * use Multiword DMA.
3428 * Timings will be used for both PIO and DMA, so adjust
3429 * DMA mode if needed
3430 */
3431 if (drvp->PIO_mode >= 3 &&
3432 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3433 drvp->DMA_mode = drvp->PIO_mode - 2;
3434 }
3435 after = (sc->sc_wdcdev.nchannels == 2) ?
3436 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3437 hpt374_dma[drvp->DMA_mode] :
3438 hpt370_dma[drvp->DMA_mode]) :
3439 hpt366_dma[drvp->DMA_mode];
3440 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3441 } else {
3442 /* PIO only */
3443 after = (sc->sc_wdcdev.nchannels == 2) ?
3444 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3445 hpt374_pio[drvp->PIO_mode] :
3446 hpt370_pio[drvp->PIO_mode]) :
3447 hpt366_pio[drvp->PIO_mode];
3448 }
3449 pci_conf_write(sc->sc_pc, sc->sc_tag,
3450 HPT_IDETIM(chp->channel, drive), after);
3451 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3452 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3453 after, before), DEBUG_PROBE);
3454 }
3455 if (idedma_ctl != 0) {
3456 /* Add software bits in status register */
3457 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3458 IDEDMA_CTL, idedma_ctl);
3459 }
3460 pciide_print_modes(cp);
3461 }
3462
3463 int
3464 hpt_pci_intr(arg)
3465 void *arg;
3466 {
3467 struct pciide_softc *sc = arg;
3468 struct pciide_channel *cp;
3469 struct channel_softc *wdc_cp;
3470 int rv = 0;
3471 int dmastat, i, crv;
3472
3473 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3474 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3475 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3476 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3477 IDEDMA_CTL_INTR)
3478 continue;
3479 cp = &sc->pciide_channels[i];
3480 wdc_cp = &cp->wdc_channel;
3481 crv = wdcintr(wdc_cp);
3482 if (crv == 0) {
3483 printf("%s:%d: bogus intr\n",
3484 sc->sc_wdcdev.sc_dev.dv_xname, i);
3485 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3486 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3487 } else
3488 rv = 1;
3489 }
3490 return rv;
3491 }
3492
3493
3494 /* Macros to test product */
3495 #define PDC_IS_262(sc) \
3496 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3497 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3498 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3499 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3500 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3501 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3502 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3503 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3504 #define PDC_IS_265(sc) \
3505 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3506 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3507 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3508 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3509 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3510 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3511 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3512 #define PDC_IS_268(sc) \
3513 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3514 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3515 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3516 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3517 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3518
3519 void
3520 pdc202xx_chip_map(sc, pa)
3521 struct pciide_softc *sc;
3522 struct pci_attach_args *pa;
3523 {
3524 struct pciide_channel *cp;
3525 int channel;
3526 pcireg_t interface, st, mode;
3527 bus_size_t cmdsize, ctlsize;
3528
3529 if (!PDC_IS_268(sc)) {
3530 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3531 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3532 st), DEBUG_PROBE);
3533 }
3534 if (pciide_chipen(sc, pa) == 0)
3535 return;
3536
3537 /* turn off RAID mode */
3538 if (!PDC_IS_268(sc))
3539 st &= ~PDC2xx_STATE_IDERAID;
3540
3541 /*
3542 * We can't rely on the PCI_CLASS_REG contents if the chip was left
3543 * in RAID mode, so fake the programming interface.
3544 */
3545 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3546 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3547 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3548
3549 printf("%s: bus-master DMA support present",
3550 sc->sc_wdcdev.sc_dev.dv_xname);
3551 pciide_mapreg_dma(sc, pa);
3552 printf("\n");
3553 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3554 WDC_CAPABILITY_MODE;
3555 if (sc->sc_dma_ok) {
3556 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3557 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3558 sc->sc_wdcdev.irqack = pciide_irqack;
3559 }
3560 sc->sc_wdcdev.PIO_cap = 4;
3561 sc->sc_wdcdev.DMA_cap = 2;
3562 if (PDC_IS_265(sc))
3563 sc->sc_wdcdev.UDMA_cap = 5;
3564 else if (PDC_IS_262(sc))
3565 sc->sc_wdcdev.UDMA_cap = 4;
3566 else
3567 sc->sc_wdcdev.UDMA_cap = 2;
3568 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3569 pdc20268_setup_channel : pdc202xx_setup_channel;
3570 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3571 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3572
3573 if (!PDC_IS_268(sc)) {
3574 /* setup failsafe defaults */
3575 mode = 0;
3576 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3577 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3578 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3579 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
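/*
 * Program conservative default timings for both drives of each
 * channel; drive 0 additionally gets the IORDYp bit.
 */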
3580 for (channel = 0;
3581 channel < sc->sc_wdcdev.nchannels;
3582 channel++) {
3583 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3584 "drive 0 initial timings 0x%x, now 0x%x\n",
3585 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3586 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3587 DEBUG_PROBE);
3588 pci_conf_write(sc->sc_pc, sc->sc_tag,
3589 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3590 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3591 "drive 1 initial timings 0x%x, now 0x%x\n",
3592 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3593 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3594 pci_conf_write(sc->sc_pc, sc->sc_tag,
3595 PDC2xx_TIM(channel, 1), mode);
3596 }
3597
3598 mode = PDC2xx_SCR_DMA;
3599 if (PDC_IS_262(sc)) {
3600 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3601 } else {
3602 /* the BIOS set it up this way */
3603 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3604 }
3605 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3606 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3607 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3608 "now 0x%x\n",
3609 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3610 PDC2xx_SCR),
3611 mode), DEBUG_PROBE);
3612 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3613 PDC2xx_SCR, mode);
3614
3615 /* controller initial state register is OK even without BIOS */
3616 /* Set DMA mode to IDE DMA compatibility */
3617 mode =
3618 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3619 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3620 DEBUG_PROBE);
3621 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3622 mode | 0x1);
3623 mode =
3624 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3625 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3626 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3627 mode | 0x1);
3628 }
3629
3630 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3631 cp = &sc->pciide_channels[channel];
3632 if (pciide_chansetup(sc, channel, interface) == 0)
3633 continue;
3634 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3635 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3636 printf("%s: %s channel ignored (disabled)\n",
3637 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3638 continue;
3639 }
3640 if (PDC_IS_265(sc))
3641 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3642 pdc20265_pci_intr);
3643 else
3644 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3645 pdc202xx_pci_intr);
3646 if (cp->hw_ok == 0)
3647 continue;
3648 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3649 st &= ~(PDC_IS_262(sc) ?
3650 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3651 pciide_map_compat_intr(pa, cp, channel, interface);
3652 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3653 }
3654 if (!PDC_IS_268(sc)) {
3655 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3656 "0x%x\n", st), DEBUG_PROBE);
3657 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3658 }
3659 return;
3660 }
3661
3662 void
3663 pdc202xx_setup_channel(chp)
3664 struct channel_softc *chp;
3665 {
3666 struct ata_drive_datas *drvp;
3667 int drive;
3668 pcireg_t mode, st;
3669 u_int32_t idedma_ctl, scr, atapi;
3670 struct pciide_channel *cp = (struct pciide_channel*)chp;
3671 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3672 int channel = chp->channel;
3673
3674 /* setup DMA if needed */
3675 pciide_channel_dma_setup(cp);
3676
3677 idedma_ctl = 0;
3678 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3679 sc->sc_wdcdev.sc_dev.dv_xname,
3680 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3681 DEBUG_PROBE);
3682
3683 /* Per channel settings */
3684 if (PDC_IS_262(sc)) {
3685 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3686 PDC262_U66);
3687 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3688 /* Trim UDMA mode */
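/*
 * If the 80P status bit is set for this channel, or either drive is
 * doing Ultra/DMA at mode 2 or below, keep both drives at UDMA mode 2
 * or below.
 */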
3689 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3690 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3691 chp->ch_drive[0].UDMA_mode <= 2) ||
3692 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3693 chp->ch_drive[1].UDMA_mode <= 2)) {
3694 if (chp->ch_drive[0].UDMA_mode > 2)
3695 chp->ch_drive[0].UDMA_mode = 2;
3696 if (chp->ch_drive[1].UDMA_mode > 2)
3697 chp->ch_drive[1].UDMA_mode = 2;
3698 }
3699 /* Set U66 if needed */
3700 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3701 chp->ch_drive[0].UDMA_mode > 2) ||
3702 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3703 chp->ch_drive[1].UDMA_mode > 2))
3704 scr |= PDC262_U66_EN(channel);
3705 else
3706 scr &= ~PDC262_U66_EN(channel);
3707 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3708 PDC262_U66, scr);
3709 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3710 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3711 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3712 PDC262_ATAPI(channel))), DEBUG_PROBE);
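/*
 * With an ATAPI device on the channel, enable ATAPI UDMA in the
 * per-channel ATAPI register unless one drive runs Ultra/DMA while
 * the other runs multiword DMA.
 */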
3713 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3714 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3715 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3716 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3717 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3718 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3719 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3720 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3721 atapi = 0;
3722 else
3723 atapi = PDC262_ATAPI_UDMA;
3724 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3725 PDC262_ATAPI(channel), atapi);
3726 }
3727 }
3728 for (drive = 0; drive < 2; drive++) {
3729 drvp = &chp->ch_drive[drive];
3730 /* If no drive, skip */
3731 if ((drvp->drive_flags & DRIVE) == 0)
3732 continue;
3733 mode = 0;
3734 if (drvp->drive_flags & DRIVE_UDMA) {
3735 /* use Ultra/DMA */
3736 drvp->drive_flags &= ~DRIVE_DMA;
3737 mode = PDC2xx_TIM_SET_MB(mode,
3738 pdc2xx_udma_mb[drvp->UDMA_mode]);
3739 mode = PDC2xx_TIM_SET_MC(mode,
3740 pdc2xx_udma_mc[drvp->UDMA_mode]);
3741 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3742 } else if (drvp->drive_flags & DRIVE_DMA) {
3743 mode = PDC2xx_TIM_SET_MB(mode,
3744 pdc2xx_dma_mb[drvp->DMA_mode]);
3745 mode = PDC2xx_TIM_SET_MC(mode,
3746 pdc2xx_dma_mc[drvp->DMA_mode]);
3747 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3748 } else {
3749 mode = PDC2xx_TIM_SET_MB(mode,
3750 pdc2xx_dma_mb[0]);
3751 mode = PDC2xx_TIM_SET_MC(mode,
3752 pdc2xx_dma_mc[0]);
3753 }
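/* The PA/PB (PIO) fields are always taken from the drive's PIO mode. */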
3754 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3755 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3756 if (drvp->drive_flags & DRIVE_ATA)
3757 mode |= PDC2xx_TIM_PRE;
3758 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3759 if (drvp->PIO_mode >= 3) {
3760 mode |= PDC2xx_TIM_IORDY;
3761 if (drive == 0)
3762 mode |= PDC2xx_TIM_IORDYp;
3763 }
3764 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3765 "timings 0x%x\n",
3766 sc->sc_wdcdev.sc_dev.dv_xname,
3767 chp->channel, drive, mode), DEBUG_PROBE);
3768 pci_conf_write(sc->sc_pc, sc->sc_tag,
3769 PDC2xx_TIM(chp->channel, drive), mode);
3770 }
3771 if (idedma_ctl != 0) {
3772 /* Add software bits in status register */
3773 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3774 IDEDMA_CTL, idedma_ctl);
3775 }
3776 pciide_print_modes(cp);
3777 }
3778
3779 void
3780 pdc20268_setup_channel(chp)
3781 struct channel_softc *chp;
3782 {
3783 struct ata_drive_datas *drvp;
3784 int drive;
3785 u_int32_t idedma_ctl;
3786 struct pciide_channel *cp = (struct pciide_channel*)chp;
3787 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3788 int u100;
3789
3790 /* setup DMA if needed */
3791 pciide_channel_dma_setup(cp);
3792
3793 idedma_ctl = 0;
3794
3795 /* I don't know what this write is for; FreeBSD does it ... */
3796 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3797 IDEDMA_CMD + 0x1, 0x0b);
3798
3799 /*
3800 * I don't know what this bit is for; FreeBSD checks it too, but it
3801 * is not cable-type detection.
3802 */
3803 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3804 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
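/* When that bit is set (u100 == 0), UDMA modes above 2 are clamped below. */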
3805
3806 for (drive = 0; drive < 2; drive++) {
3807 drvp = &chp->ch_drive[drive];
3808 /* If no drive, skip */
3809 if ((drvp->drive_flags & DRIVE) == 0)
3810 continue;
3811 if (drvp->drive_flags & DRIVE_UDMA) {
3812 /* use Ultra/DMA */
3813 drvp->drive_flags &= ~DRIVE_DMA;
3814 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3815 if (drvp->UDMA_mode > 2 && u100 == 0)
3816 drvp->UDMA_mode = 2;
3817 } else if (drvp->drive_flags & DRIVE_DMA) {
3818 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3819 }
3820 }
3821 /* Nothing to do to set up modes; the controller snoops the SET_FEATURES command. */
3822 if (idedma_ctl != 0) {
3823 /* Add software bits in status register */
3824 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3825 IDEDMA_CTL, idedma_ctl);
3826 }
3827 pciide_print_modes(cp);
3828 }
3829
3830 int
3831 pdc202xx_pci_intr(arg)
3832 void *arg;
3833 {
3834 struct pciide_softc *sc = arg;
3835 struct pciide_channel *cp;
3836 struct channel_softc *wdc_cp;
3837 int i, rv, crv;
3838 u_int32_t scr;
3839
3840 rv = 0;
3841 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3842 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3843 cp = &sc->pciide_channels[i];
3844 wdc_cp = &cp->wdc_channel;
3845 /* Skip compatibility channels. */
3846 if (cp->compat)
3847 continue;
3848 if (scr & PDC2xx_SCR_INT(i)) {
3849 crv = wdcintr(wdc_cp);
3850 if (crv == 0)
3851 printf("%s:%d: bogus intr (reg 0x%x)\n",
3852 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3853 else
3854 rv = 1;
3855 }
3856 }
3857 return rv;
3858 }
3859
3860 int
3861 pdc20265_pci_intr(arg)
3862 void *arg;
3863 {
3864 struct pciide_softc *sc = arg;
3865 struct pciide_channel *cp;
3866 struct channel_softc *wdc_cp;
3867 int i, rv, crv;
3868 u_int32_t dmastat;
3869
3870 rv = 0;
3871 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3872 cp = &sc->pciide_channels[i];
3873 wdc_cp = &cp->wdc_channel;
3874 /* Skip compatibility channels. */
3875 if (cp->compat)
3876 continue;
3877 /*
3878 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
3879 * however, it asserts INTR in IDEDMA_CTL even for non-DMA ops,
3880 * so use that instead (it costs two register reads instead of
3881 * one, but there is no other way to do it).
3882 */
3883 dmastat = bus_space_read_1(sc->sc_dma_iot,
3884 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3885 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3886 continue;
3887 crv = wdcintr(wdc_cp);
3888 if (crv == 0)
3889 printf("%s:%d: bogus intr\n",
3890 sc->sc_wdcdev.sc_dev.dv_xname, i);
3891 else
3892 rv = 1;
3893 }
3894 return rv;
3895 }
3896
3897 void
3898 opti_chip_map(sc, pa)
3899 struct pciide_softc *sc;
3900 struct pci_attach_args *pa;
3901 {
3902 struct pciide_channel *cp;
3903 bus_size_t cmdsize, ctlsize;
3904 pcireg_t interface;
3905 u_int8_t init_ctrl;
3906 int channel;
3907
3908 if (pciide_chipen(sc, pa) == 0)
3909 return;
3910 printf("%s: bus-master DMA support present",
3911 sc->sc_wdcdev.sc_dev.dv_xname);
3912
3913 /*
3914 * XXXSCW:
3915 * There seem to be a couple of buggy revisions/implementations
3916 * of the OPTi pciide chipset. This kludge seems to fix one of
3917 * the reported problems (PR/11644) but still fails for the
3918 * other (PR/13151), although the latter may be due to other
3919 * issues too...
3920 */
3921 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3922 printf(" but disabled due to chip rev. <= 0x12");
3923 sc->sc_dma_ok = 0;
3924 } else
3925 pciide_mapreg_dma(sc, pa);
3926
3927 printf("\n");
3928
3929 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
3930 WDC_CAPABILITY_MODE;
3931 sc->sc_wdcdev.PIO_cap = 4;
3932 if (sc->sc_dma_ok) {
3933 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3934 sc->sc_wdcdev.irqack = pciide_irqack;
3935 sc->sc_wdcdev.DMA_cap = 2;
3936 }
3937 sc->sc_wdcdev.set_modes = opti_setup_channel;
3938
3939 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3940 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3941
3942 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3943 OPTI_REG_INIT_CONTROL);
3944
3945 interface = PCI_INTERFACE(pa->pa_class);
3946
3947 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3948 cp = &sc->pciide_channels[channel];
3949 if (pciide_chansetup(sc, channel, interface) == 0)
3950 continue;
3951 if (channel == 1 &&
3952 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3953 printf("%s: %s channel ignored (disabled)\n",
3954 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3955 continue;
3956 }
3957 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3958 pciide_pci_intr);
3959 if (cp->hw_ok == 0)
3960 continue;
3961 pciide_map_compat_intr(pa, cp, channel, interface);
3962 if (cp->hw_ok == 0)
3963 continue;
3964 opti_setup_channel(&cp->wdc_channel);
3965 }
3966 }
3967
3968 void
3969 opti_setup_channel(chp)
3970 struct channel_softc *chp;
3971 {
3972 struct ata_drive_datas *drvp;
3973 struct pciide_channel *cp = (struct pciide_channel*)chp;
3974 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3975 int drive, spd;
3976 int mode[2];
3977 u_int8_t rv, mr;
3978
3979 /*
3980 * The `Delay' and `Address Setup Time' fields of the
3981 * Miscellaneous Register are always zero initially.
3982 */
3983 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3984 mr &= ~(OPTI_MISC_DELAY_MASK |
3985 OPTI_MISC_ADDR_SETUP_MASK |
3986 OPTI_MISC_INDEX_MASK);
3987
3988 /* Prime the control register before setting timing values */
3989 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3990
3991 /* Determine the clockrate of the PCIbus the chip is attached to */
3992 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3993 spd &= OPTI_STRAP_PCI_SPEED_MASK;
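/*
 * spd now selects the row of the opti_tim_* tables that matches the
 * strapped PCI bus speed.
 */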
3994
3995 /* setup DMA if needed */
3996 pciide_channel_dma_setup(cp);
3997
3998 for (drive = 0; drive < 2; drive++) {
3999 drvp = &chp->ch_drive[drive];
4000 /* If no drive, skip */
4001 if ((drvp->drive_flags & DRIVE) == 0) {
4002 mode[drive] = -1;
4003 continue;
4004 }
4005
4006 if ((drvp->drive_flags & DRIVE_DMA)) {
4007 /*
4008 * Timings will be used for both PIO and DMA,
4009 * so adjust DMA mode if needed
4010 */
4011 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4012 drvp->PIO_mode = drvp->DMA_mode + 2;
4013 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4014 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4015 drvp->PIO_mode - 2 : 0;
4016 if (drvp->DMA_mode == 0)
4017 drvp->PIO_mode = 0;
4018
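/*
 * DMA entries presumably follow the five PIO entries (modes 0-4) in
 * the opti_tim_* tables, hence the +5 offset.
 */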
4019 mode[drive] = drvp->DMA_mode + 5;
4020 } else
4021 mode[drive] = drvp->PIO_mode;
4022
4023 if (drive && mode[0] >= 0 &&
4024 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4025 /*
4026 * Can't have two drives using different values
4027 * for `Address Setup Time'.
4028 * Slow down the faster drive to compensate.
4029 */
4030 int d = (opti_tim_as[spd][mode[0]] >
4031 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4032
4033 mode[d] = mode[1-d];
4034 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4035 chp->ch_drive[d].DMA_mode = 0;
4036 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4037 }
4038 }
4039
4040 for (drive = 0; drive < 2; drive++) {
4041 int m;
4042 if ((m = mode[drive]) < 0)
4043 continue;
4044
4045 /* Set the Address Setup Time and select appropriate index */
4046 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4047 rv |= OPTI_MISC_INDEX(drive);
4048 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4049
4050 /* Set the pulse width and recovery timing parameters */
4051 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4052 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4053 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4054 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4055
4056 /* Set the Enhanced Mode register appropriately */
4057 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4058 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4059 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4060 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4061 }
4062
4063 /* Finally, enable the timings */
4064 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4065
4066 pciide_print_modes(cp);
4067 }
4068
4069 #define ACARD_IS_850(sc) \
4070 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4071
4072 void
4073 acard_chip_map(sc, pa)
4074 struct pciide_softc *sc;
4075 struct pci_attach_args *pa;
4076 {
4077 struct pciide_channel *cp;
4078 int i;
4079 pcireg_t interface;
4080 bus_size_t cmdsize, ctlsize;
4081
4082 if (pciide_chipen(sc, pa) == 0)
4083 return;
4084
4085 /*
4086 * When the chip is in native mode it identifies itself as 'misc
4087 * mass storage' rather than IDE, so fake the interface in that case.
4088 */
4089 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4090 interface = PCI_INTERFACE(pa->pa_class);
4091 } else {
4092 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4093 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4094 }
4095
4096 printf("%s: bus-master DMA support present",
4097 sc->sc_wdcdev.sc_dev.dv_xname);
4098 pciide_mapreg_dma(sc, pa);
4099 printf("\n");
4100 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4101 WDC_CAPABILITY_MODE;
4102
4103 if (sc->sc_dma_ok) {
4104 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4105 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4106 sc->sc_wdcdev.irqack = pciide_irqack;
4107 }
4108 sc->sc_wdcdev.PIO_cap = 4;
4109 sc->sc_wdcdev.DMA_cap = 2;
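/*
 * The ATP850 tops out at Ultra/33 (UDMA mode 2); the ATP86x parts
 * handled here go up to mode 4.
 */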
4110 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4111
4112 sc->sc_wdcdev.set_modes = acard_setup_channel;
4113 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4114 sc->sc_wdcdev.nchannels = 2;
4115
4116 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4117 cp = &sc->pciide_channels[i];
4118 if (pciide_chansetup(sc, i, interface) == 0)
4119 continue;
4120 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4121 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4122 &ctlsize, pciide_pci_intr);
4123 } else {
4124 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4125 &cmdsize, &ctlsize);
4126 }
4127 if (cp->hw_ok == 0)
4128 return;
4129 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4130 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4131 wdcattach(&cp->wdc_channel);
4132 acard_setup_channel(&cp->wdc_channel);
4133 }
4134 if (!ACARD_IS_850(sc)) {
4135 u_int32_t reg;
4136 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4137 reg &= ~ATP860_CTRL_INT;
4138 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4139 }
4140 }
4141
4142 void
4143 acard_setup_channel(chp)
4144 struct channel_softc *chp;
4145 {
4146 struct ata_drive_datas *drvp;
4147 struct pciide_channel *cp = (struct pciide_channel*)chp;
4148 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4149 int channel = chp->channel;
4150 int drive;
4151 u_int32_t idetime, udma_mode;
4152 u_int32_t idedma_ctl;
4153
4154 /* setup DMA if needed */
4155 pciide_channel_dma_setup(cp);
4156
4157 if (ACARD_IS_850(sc)) {
4158 idetime = 0;
4159 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4160 udma_mode &= ~ATP850_UDMA_MASK(channel);
4161 } else {
4162 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4163 idetime &= ~ATP860_SETTIME_MASK(channel);
4164 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4165 udma_mode &= ~ATP860_UDMA_MASK(channel);
4166
4167 /* Check the 80-conductor cable status; cap UDMA at mode 2 if flagged. */
4168 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4169 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4170 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4171 & ATP860_CTRL_80P(chp->channel)) {
4172 if (chp->ch_drive[0].UDMA_mode > 2)
4173 chp->ch_drive[0].UDMA_mode = 2;
4174 if (chp->ch_drive[1].UDMA_mode > 2)
4175 chp->ch_drive[1].UDMA_mode = 2;
4176 }
4177 }
4178 }
4179
4180 idedma_ctl = 0;
4181
4182 /* Per drive settings */
4183 for (drive = 0; drive < 2; drive++) {
4184 drvp = &chp->ch_drive[drive];
4185 /* If no drive, skip */
4186 if ((drvp->drive_flags & DRIVE) == 0)
4187 continue;
4188 /* add timing values, setup DMA if needed */
4189 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4190 (drvp->drive_flags & DRIVE_UDMA)) {
4191 /* use Ultra/DMA */
4192 if (ACARD_IS_850(sc)) {
4193 idetime |= ATP850_SETTIME(drive,
4194 acard_act_udma[drvp->UDMA_mode],
4195 acard_rec_udma[drvp->UDMA_mode]);
4196 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4197 acard_udma_conf[drvp->UDMA_mode]);
4198 } else {
4199 idetime |= ATP860_SETTIME(channel, drive,
4200 acard_act_udma[drvp->UDMA_mode],
4201 acard_rec_udma[drvp->UDMA_mode]);
4202 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4203 acard_udma_conf[drvp->UDMA_mode]);
4204 }
4205 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4206 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4207 (drvp->drive_flags & DRIVE_DMA)) {
4208 /* use Multiword DMA */
4209 drvp->drive_flags &= ~DRIVE_UDMA;
4210 if (ACARD_IS_850(sc)) {
4211 idetime |= ATP850_SETTIME(drive,
4212 acard_act_dma[drvp->DMA_mode],
4213 acard_rec_dma[drvp->DMA_mode]);
4214 } else {
4215 idetime |= ATP860_SETTIME(channel, drive,
4216 acard_act_dma[drvp->DMA_mode],
4217 acard_rec_dma[drvp->DMA_mode]);
4218 }
4219 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4220 } else {
4221 /* PIO only */
4222 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4223 if (ACARD_IS_850(sc)) {
4224 idetime |= ATP850_SETTIME(drive,
4225 acard_act_pio[drvp->PIO_mode],
4226 acard_rec_pio[drvp->PIO_mode]);
4227 } else {
4228 idetime |= ATP860_SETTIME(channel, drive,
4229 acard_act_pio[drvp->PIO_mode],
4230 acard_rec_pio[drvp->PIO_mode]);
4231 }
4232 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4233 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4234 | ATP8x0_CTRL_EN(channel));
4235 }
4236 }
4237
4238 if (idedma_ctl != 0) {
4239 /* Add software bits in status register */
4240 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4241 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4242 }
4243 pciide_print_modes(cp);
4244
4245 if (ACARD_IS_850(sc)) {
4246 pci_conf_write(sc->sc_pc, sc->sc_tag,
4247 ATP850_IDETIME(channel), idetime);
4248 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4249 } else {
4250 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4251 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4252 }
4253 }
4254
4255 int
4256 acard_pci_intr(arg)
4257 void *arg;
4258 {
4259 struct pciide_softc *sc = arg;
4260 struct pciide_channel *cp;
4261 struct channel_softc *wdc_cp;
4262 int rv = 0;
4263 int dmastat, i, crv;
4264
4265 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4266 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4267 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4268 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4269 continue;
4270 cp = &sc->pciide_channels[i];
4271 wdc_cp = &cp->wdc_channel;
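/*
 * If the channel isn't waiting for an interrupt, run wdcintr()
 * anyway and clear the DMA status so the interrupt doesn't stay
 * asserted.
 */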
4272 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4273 (void)wdcintr(wdc_cp);
4274 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4275 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4276 continue;
4277 }
4278 crv = wdcintr(wdc_cp);
4279 if (crv == 0)
4280 printf("%s:%d: bogus intr\n",
4281 sc->sc_wdcdev.sc_dev.dv_xname, i);
4282 else if (crv == 1)
4283 rv = 1;
4284 else if (rv == 0)
4285 rv = crv;
4286 }
4287 return rv;
4288 }
4289
4290 static int
4291 sl82c105_bugchk(struct pci_attach_args *pa)
4292 {
4293
4294 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4295 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4296 return (0);
4297
4298 if (PCI_REVISION(pa->pa_class) <= 0x05)
4299 return (1);
4300
4301 return (0);
4302 }
4303
4304 void
4305 sl82c105_chip_map(sc, pa)
4306 struct pciide_softc *sc;
4307 struct pci_attach_args *pa;
4308 {
4309 struct pciide_channel *cp;
4310 bus_size_t cmdsize, ctlsize;
4311 pcireg_t interface, idecr;
4312 int channel;
4313
4314 if (pciide_chipen(sc, pa) == 0)
4315 return;
4316
4317 printf("%s: bus-master DMA support present",
4318 sc->sc_wdcdev.sc_dev.dv_xname);
4319
4320 /*
4321 * Check to see if we're part of the Winbond 83c553 Southbridge.
4322 * If so, we need to disable DMA on rev. <= 5 of that chip.
4323 */
4324 if (pci_find_device(pa, sl82c105_bugchk)) {
4325 printf(" but disabled due to 83c553 rev. <= 0x05");
4326 sc->sc_dma_ok = 0;
4327 } else
4328 pciide_mapreg_dma(sc, pa);
4329 printf("\n");
4330
4331 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4332 WDC_CAPABILITY_MODE;
4333 sc->sc_wdcdev.PIO_cap = 4;
4334 if (sc->sc_dma_ok) {
4335 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4336 sc->sc_wdcdev.irqack = pciide_irqack;
4337 sc->sc_wdcdev.DMA_cap = 2;
4338 }
4339 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4340
4341 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4342 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4343
4344 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4345
4346 interface = PCI_INTERFACE(pa->pa_class);
4347
4348 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4349 cp = &sc->pciide_channels[channel];
4350 if (pciide_chansetup(sc, channel, interface) == 0)
4351 continue;
4352 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4353 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4354 printf("%s: %s channel ignored (disabled)\n",
4355 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4356 continue;
4357 }
4358 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4359 pciide_pci_intr);
4360 if (cp->hw_ok == 0)
4361 continue;
4362 pciide_map_compat_intr(pa, cp, channel, interface);
4363 if (cp->hw_ok == 0)
4364 continue;
4365 sl82c105_setup_channel(&cp->wdc_channel);
4366 }
4367 }
4368
4369 void
4370 sl82c105_setup_channel(chp)
4371 struct channel_softc *chp;
4372 {
4373 struct ata_drive_datas *drvp;
4374 struct pciide_channel *cp = (struct pciide_channel*)chp;
4375 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4376 int pxdx_reg, drive;
4377 pcireg_t pxdx;
4378
4379 /* Set up DMA if needed. */
4380 pciide_channel_dma_setup(cp);
4381
4382 for (drive = 0; drive < 2; drive++) {
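/*
 * Each drive has its own timing register: SYMPH_P0D0CR/SYMPH_P1D0CR
 * for drive 0; drive 1's register presumably sits 4 bytes above it.
 */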
4383 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4384 : SYMPH_P1D0CR) + (drive * 4);
4385
4386 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4387
4388 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4389 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4390
4391 drvp = &chp->ch_drive[drive];
4392 /* If no drive, skip. */
4393 if ((drvp->drive_flags & DRIVE) == 0) {
4394 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4395 continue;
4396 }
4397
4398 if (drvp->drive_flags & DRIVE_DMA) {
4399 /*
4400 * Timings will be used for both PIO and DMA,
4401 * so adjust DMA mode if needed.
4402 */
4403 if (drvp->PIO_mode >= 3) {
4404 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4405 drvp->DMA_mode = drvp->PIO_mode - 2;
4406 if (drvp->DMA_mode < 1) {
4407 /*
4408 * Can't mix both PIO and DMA.
4409 * Disable DMA.
4410 */
4411 drvp->drive_flags &= ~DRIVE_DMA;
4412 }
4413 } else {
4414 /*
4415 * Can't mix both PIO and DMA. Disable
4416 * DMA.
4417 */
4418 drvp->drive_flags &= ~DRIVE_DMA;
4419 }
4420 }
4421
4422 if (drvp->drive_flags & DRIVE_DMA) {
4423 /* Use multi-word DMA. */
4424 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4425 PxDx_CMD_ON_SHIFT;
4426 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4427 } else {
4428 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4429 PxDx_CMD_ON_SHIFT;
4430 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4431 }
4432
4433 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4434
4435 /* ...and set the mode for this drive. */
4436 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4437 }
4438
4439 pciide_print_modes(cp);
4440 }
4441
4442 void
4443 serverworks_chip_map(sc, pa)
4444 struct pciide_softc *sc;
4445 struct pci_attach_args *pa;
4446 {
4447 struct pciide_channel *cp;
4448 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4449 pcitag_t pcib_tag;
4450 int channel;
4451 bus_size_t cmdsize, ctlsize;
4452
4453 if (pciide_chipen(sc, pa) == 0)
4454 return;
4455
4456 printf("%s: bus-master DMA support present",
4457 sc->sc_wdcdev.sc_dev.dv_xname);
4458 pciide_mapreg_dma(sc, pa);
4459 printf("\n");
4460 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4461 WDC_CAPABILITY_MODE;
4462
4463 if (sc->sc_dma_ok) {
4464 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4465 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4466 sc->sc_wdcdev.irqack = pciide_irqack;
4467 }
4468 sc->sc_wdcdev.PIO_cap = 4;
4469 sc->sc_wdcdev.DMA_cap = 2;
4470 switch (sc->sc_pp->ide_product) {
4471 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4472 sc->sc_wdcdev.UDMA_cap = 2;
4473 break;
4474 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4475 if (PCI_REVISION(pa->pa_class) < 0x92)
4476 sc->sc_wdcdev.UDMA_cap = 4;
4477 else
4478 sc->sc_wdcdev.UDMA_cap = 5;
4479 break;
4480 }
4481
4482 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4483 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4484 sc->sc_wdcdev.nchannels = 2;
4485
4486 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4487 cp = &sc->pciide_channels[channel];
4488 if (pciide_chansetup(sc, channel, interface) == 0)
4489 continue;
4490 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4491 serverworks_pci_intr);
4492 if (cp->hw_ok == 0)
4493 return;
4494 pciide_map_compat_intr(pa, cp, channel, interface);
4495 if (cp->hw_ok == 0)
4496 return;
4497 serverworks_setup_channel(&cp->wdc_channel);
4498 }
4499
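/*
 * Finally flip bits in config register 0x64 of PCI function 0 of this
 * device (clear 0x2000, set 0x4000); the meaning of these bits is not
 * documented here.
 */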
4500 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4501 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4502 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4503 }
4504
4505 void
4506 serverworks_setup_channel(chp)
4507 struct channel_softc *chp;
4508 {
4509 struct ata_drive_datas *drvp;
4510 struct pciide_channel *cp = (struct pciide_channel*)chp;
4511 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4512 int channel = chp->channel;
4513 int drive, unit;
4514 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4515 u_int32_t idedma_ctl;
4516 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4517 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4518
4519 /* setup DMA if needed */
4520 pciide_channel_dma_setup(cp);
4521
4522 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4523 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4524 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4525 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
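/*
 * 0x40/0x44 hold the per-drive PIO and multiword-DMA timings, 0x48
 * the PIO mode numbers and 0x54 the Ultra/DMA modes and enable bits;
 * clear this channel's fields before reprogramming them below.
 */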
4526
4527 pio_time &= ~(0xffff << (16 * channel));
4528 dma_time &= ~(0xffff << (16 * channel));
4529 pio_mode &= ~(0xff << (8 * channel + 16));
4530 udma_mode &= ~(0xff << (8 * channel + 16));
4531 udma_mode &= ~(3 << (2 * channel));
4532
4533 idedma_ctl = 0;
4534
4535 /* Per drive settings */
4536 for (drive = 0; drive < 2; drive++) {
4537 drvp = &chp->ch_drive[drive];
4538 /* If no drive, skip */
4539 if ((drvp->drive_flags & DRIVE) == 0)
4540 continue;
4541 unit = drive + 2 * channel;
4542 /* add timing values, setup DMA if needed */
4543 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4544 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4545 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4546 (drvp->drive_flags & DRIVE_UDMA)) {
4547 /* use Ultra/DMA, check for 80-pin cable */
4548 if (drvp->UDMA_mode > 2 &&
4549 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4550 drvp->UDMA_mode = 2;
4551 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4552 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4553 udma_mode |= 1 << unit;
4554 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4555 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4556 (drvp->drive_flags & DRIVE_DMA)) {
4557 /* use Multiword DMA */
4558 drvp->drive_flags &= ~DRIVE_UDMA;
4559 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4560 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4561 } else {
4562 /* PIO only */
4563 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4564 }
4565 }
4566
4567 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4568 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4569 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4570 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4571 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4572
4573 if (idedma_ctl != 0) {
4574 /* Add software bits in status register */
4575 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4576 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4577 }
4578 pciide_print_modes(cp);
4579 }
4580
4581 int
4582 serverworks_pci_intr(arg)
4583 void *arg;
4584 {
4585 struct pciide_softc *sc = arg;
4586 struct pciide_channel *cp;
4587 struct channel_softc *wdc_cp;
4588 int rv = 0;
4589 int dmastat, i, crv;
4590
4591 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4592 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4593 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4594 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4595 IDEDMA_CTL_INTR)
4596 continue;
4597 cp = &sc->pciide_channels[i];
4598 wdc_cp = &cp->wdc_channel;
4599 crv = wdcintr(wdc_cp);
4600 if (crv == 0) {
4601 printf("%s:%d: bogus intr\n",
4602 sc->sc_wdcdev.sc_dev.dv_xname, i);
4603 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4604 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4605 } else
4606 rv = 1;
4607 }
4608 return rv;
4609 }
4610