1 /* $NetBSD: pciide.c,v 1.175 2002/11/20 19:20:24 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.175 2002/11/20 19:20:24 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
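/*
 * Bitmask of the DEBUG_xxx values above; WDCDEBUG_PRINT only emits
 * output for bits set here.
 */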
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
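	/*
	 * PCI configuration space is accessed 32 bits at a time, so read
	 * the aligned dword, splice the new byte into place and write the
	 * dword back.
	 */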
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd680_setup_channel __P((struct channel_softc*));
182 void cmd680_channel_map __P((struct pci_attach_args *,
183 struct pciide_softc *, int));
184
185 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void cy693_setup_channel __P((struct channel_softc*));
187
188 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void sis_setup_channel __P((struct channel_softc*));
190 static int sis_hostbr_match __P(( struct pci_attach_args *));
191
192 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void acer_setup_channel __P((struct channel_softc*));
194 int acer_pci_intr __P((void *));
195
196 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void pdc202xx_setup_channel __P((struct channel_softc*));
198 void pdc20268_setup_channel __P((struct channel_softc*));
199 int pdc202xx_pci_intr __P((void *));
200 int pdc20265_pci_intr __P((void *));
201
202 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void opti_setup_channel __P((struct channel_softc*));
204
205 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void hpt_setup_channel __P((struct channel_softc*));
207 int hpt_pci_intr __P((void *));
208
209 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void acard_setup_channel __P((struct channel_softc*));
211 int acard_pci_intr __P((void *));
212
213 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void serverworks_setup_channel __P((struct channel_softc*));
215 int serverworks_pci_intr __P((void *));
216
217 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void sl82c105_setup_channel __P((struct channel_softc*));
219
220 void pciide_channel_dma_setup __P((struct pciide_channel *));
221 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
222 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
223 void pciide_dma_start __P((void*, int, int));
224 int pciide_dma_finish __P((void*, int, int, int));
225 void pciide_irqack __P((struct channel_softc *));
226 void pciide_print_modes __P((struct pciide_channel *));
227
228 struct pciide_product_desc {
229 u_int32_t ide_product;
230 int ide_flags;
231 const char *ide_name;
232 /* map and setup chip, probe drives */
233 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
234 };
235
236 /* Flags for ide_flags */
237 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
238 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
239
240 /* Default product description for devices not specifically known to this driver */
241 const struct pciide_product_desc default_product_desc = {
242 0,
243 0,
244 "Generic PCI IDE controller",
245 default_chip_map,
246 };
247
248 const struct pciide_product_desc pciide_intel_products[] = {
249 { PCI_PRODUCT_INTEL_82092AA,
250 0,
251 "Intel 82092AA IDE controller",
252 default_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82371FB_IDE,
255 0,
256 "Intel 82371FB IDE controller (PIIX)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82371SB_IDE,
260 0,
261 "Intel 82371SB IDE Interface (PIIX3)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82371AB_IDE,
265 0,
266 "Intel 82371AB IDE controller (PIIX4)",
267 piix_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82440MX_IDE,
270 0,
271 "Intel 82440MX IDE controller",
272 piix_chip_map
273 },
274 { PCI_PRODUCT_INTEL_82801AA_IDE,
275 0,
276 "Intel 82801AA IDE Controller (ICH)",
277 piix_chip_map,
278 },
279 { PCI_PRODUCT_INTEL_82801AB_IDE,
280 0,
281 "Intel 82801AB IDE Controller (ICH0)",
282 piix_chip_map,
283 },
284 { PCI_PRODUCT_INTEL_82801BA_IDE,
285 0,
286 "Intel 82801BA IDE Controller (ICH2)",
287 piix_chip_map,
288 },
289 { PCI_PRODUCT_INTEL_82801BAM_IDE,
290 0,
291 "Intel 82801BAM IDE Controller (ICH2)",
292 piix_chip_map,
293 },
294 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
295 0,
296 "Intel 82801CA IDE Controller",
297 piix_chip_map,
298 },
299 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
300 0,
301 "Intel 82801CA IDE Controller",
302 piix_chip_map,
303 },
304 { PCI_PRODUCT_INTEL_82801DB_IDE,
305 0,
306 "Intel 82801DB IDE Controller (ICH4)",
307 piix_chip_map,
308 },
309 { 0,
310 0,
311 NULL,
312 NULL
313 }
314 };
315
316 const struct pciide_product_desc pciide_amd_products[] = {
317 { PCI_PRODUCT_AMD_PBC756_IDE,
318 0,
319 "Advanced Micro Devices AMD756 IDE Controller",
320 amd7x6_chip_map
321 },
322 { PCI_PRODUCT_AMD_PBC766_IDE,
323 0,
324 "Advanced Micro Devices AMD766 IDE Controller",
325 amd7x6_chip_map
326 },
327 { PCI_PRODUCT_AMD_PBC768_IDE,
328 0,
329 "Advanced Micro Devices AMD768 IDE Controller",
330 amd7x6_chip_map
331 },
332 { PCI_PRODUCT_AMD_PBC8111_IDE,
333 0,
334 "Advanced Micro Devices AMD8111 IDE Controller",
335 amd7x6_chip_map
336 },
337 { 0,
338 0,
339 NULL,
340 NULL
341 }
342 };
343
344 const struct pciide_product_desc pciide_cmd_products[] = {
345 { PCI_PRODUCT_CMDTECH_640,
346 0,
347 "CMD Technology PCI0640",
348 cmd_chip_map
349 },
350 { PCI_PRODUCT_CMDTECH_643,
351 0,
352 "CMD Technology PCI0643",
353 cmd0643_9_chip_map,
354 },
355 { PCI_PRODUCT_CMDTECH_646,
356 0,
357 "CMD Technology PCI0646",
358 cmd0643_9_chip_map,
359 },
360 { PCI_PRODUCT_CMDTECH_648,
361 IDE_PCI_CLASS_OVERRIDE,
362 "CMD Technology PCI0648",
363 cmd0643_9_chip_map,
364 },
365 { PCI_PRODUCT_CMDTECH_649,
366 IDE_PCI_CLASS_OVERRIDE,
367 "CMD Technology PCI0649",
368 cmd0643_9_chip_map,
369 },
370 { PCI_PRODUCT_CMDTECH_680,
371 IDE_PCI_CLASS_OVERRIDE,
372 "Silicon Image 0680",
373 cmd680_chip_map,
374 },
375 { 0,
376 0,
377 NULL,
378 NULL
379 }
380 };
381
382 const struct pciide_product_desc pciide_via_products[] = {
383 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
384 0,
385 NULL,
386 apollo_chip_map,
387 },
388 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
389 0,
390 NULL,
391 apollo_chip_map,
392 },
393 { 0,
394 0,
395 NULL,
396 NULL
397 }
398 };
399
400 const struct pciide_product_desc pciide_cypress_products[] = {
401 { PCI_PRODUCT_CONTAQ_82C693,
402 IDE_16BIT_IOSPACE,
403 "Cypress 82C693 IDE Controller",
404 cy693_chip_map,
405 },
406 { 0,
407 0,
408 NULL,
409 NULL
410 }
411 };
412
413 const struct pciide_product_desc pciide_sis_products[] = {
414 { PCI_PRODUCT_SIS_5597_IDE,
415 0,
416 "Silicon Integrated System 5597/5598 IDE controller",
417 sis_chip_map,
418 },
419 { 0,
420 0,
421 NULL,
422 NULL
423 }
424 };
425
426 const struct pciide_product_desc pciide_acer_products[] = {
427 { PCI_PRODUCT_ALI_M5229,
428 0,
429 "Acer Labs M5229 UDMA IDE Controller",
430 acer_chip_map,
431 },
432 { 0,
433 0,
434 NULL,
435 NULL
436 }
437 };
438
439 const struct pciide_product_desc pciide_promise_products[] = {
440 { PCI_PRODUCT_PROMISE_ULTRA33,
441 IDE_PCI_CLASS_OVERRIDE,
442 "Promise Ultra33/ATA Bus Master IDE Accelerator",
443 pdc202xx_chip_map,
444 },
445 { PCI_PRODUCT_PROMISE_ULTRA66,
446 IDE_PCI_CLASS_OVERRIDE,
447 "Promise Ultra66/ATA Bus Master IDE Accelerator",
448 pdc202xx_chip_map,
449 },
450 { PCI_PRODUCT_PROMISE_ULTRA100,
451 IDE_PCI_CLASS_OVERRIDE,
452 "Promise Ultra100/ATA Bus Master IDE Accelerator",
453 pdc202xx_chip_map,
454 },
455 { PCI_PRODUCT_PROMISE_ULTRA100X,
456 IDE_PCI_CLASS_OVERRIDE,
457 "Promise Ultra100/ATA Bus Master IDE Accelerator",
458 pdc202xx_chip_map,
459 },
460 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
461 IDE_PCI_CLASS_OVERRIDE,
462 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
463 pdc202xx_chip_map,
464 },
465 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
466 IDE_PCI_CLASS_OVERRIDE,
467 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
468 pdc202xx_chip_map,
469 },
470 { PCI_PRODUCT_PROMISE_ULTRA133,
471 IDE_PCI_CLASS_OVERRIDE,
472 "Promise Ultra133/ATA Bus Master IDE Accelerator",
473 pdc202xx_chip_map,
474 },
475 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
476 IDE_PCI_CLASS_OVERRIDE,
477 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
478 pdc202xx_chip_map,
479 },
480 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
481 IDE_PCI_CLASS_OVERRIDE,
482 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
483 pdc202xx_chip_map,
484 },
485 { 0,
486 0,
487 NULL,
488 NULL
489 }
490 };
491
492 const struct pciide_product_desc pciide_opti_products[] = {
493 { PCI_PRODUCT_OPTI_82C621,
494 0,
495 "OPTi 82c621 PCI IDE controller",
496 opti_chip_map,
497 },
498 { PCI_PRODUCT_OPTI_82C568,
499 0,
500 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
501 opti_chip_map,
502 },
503 { PCI_PRODUCT_OPTI_82D568,
504 0,
505 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
506 opti_chip_map,
507 },
508 { 0,
509 0,
510 NULL,
511 NULL
512 }
513 };
514
515 const struct pciide_product_desc pciide_triones_products[] = {
516 { PCI_PRODUCT_TRIONES_HPT366,
517 IDE_PCI_CLASS_OVERRIDE,
518 NULL,
519 hpt_chip_map,
520 },
521 { PCI_PRODUCT_TRIONES_HPT372,
522 IDE_PCI_CLASS_OVERRIDE,
523 NULL,
524 hpt_chip_map
525 },
526 { PCI_PRODUCT_TRIONES_HPT374,
527 IDE_PCI_CLASS_OVERRIDE,
528 NULL,
529 hpt_chip_map
530 },
531 { 0,
532 0,
533 NULL,
534 NULL
535 }
536 };
537
538 const struct pciide_product_desc pciide_acard_products[] = {
539 { PCI_PRODUCT_ACARD_ATP850U,
540 IDE_PCI_CLASS_OVERRIDE,
541 "Acard ATP850U Ultra33 IDE Controller",
542 acard_chip_map,
543 },
544 { PCI_PRODUCT_ACARD_ATP860,
545 IDE_PCI_CLASS_OVERRIDE,
546 "Acard ATP860 Ultra66 IDE Controller",
547 acard_chip_map,
548 },
549 { PCI_PRODUCT_ACARD_ATP860A,
550 IDE_PCI_CLASS_OVERRIDE,
551 "Acard ATP860-A Ultra66 IDE Controller",
552 acard_chip_map,
553 },
554 { 0,
555 0,
556 NULL,
557 NULL
558 }
559 };
560
561 const struct pciide_product_desc pciide_serverworks_products[] = {
562 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
563 0,
564 "ServerWorks OSB4 IDE Controller",
565 serverworks_chip_map,
566 },
567 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
568 0,
569 "ServerWorks CSB5 IDE Controller",
570 serverworks_chip_map,
571 },
572 { 0,
573 0,
574 NULL,
575 }
576 };
577
578 const struct pciide_product_desc pciide_symphony_products[] = {
579 { PCI_PRODUCT_SYMPHONY_82C105,
580 0,
581 "Symphony Labs 82C105 IDE controller",
582 sl82c105_chip_map,
583 },
584 { 0,
585 0,
586 NULL,
587 }
588 };
589
590 const struct pciide_product_desc pciide_winbond_products[] = {
591 { PCI_PRODUCT_WINBOND_W83C553F_1,
592 0,
593 "Winbond W83C553F IDE controller",
594 sl82c105_chip_map,
595 },
596 { 0,
597 0,
598 NULL,
599 }
600 };
601
602 struct pciide_vendor_desc {
603 u_int32_t ide_vendor;
604 const struct pciide_product_desc *ide_products;
605 };
606
607 const struct pciide_vendor_desc pciide_vendors[] = {
608 { PCI_VENDOR_INTEL, pciide_intel_products },
609 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
610 { PCI_VENDOR_VIATECH, pciide_via_products },
611 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
612 { PCI_VENDOR_SIS, pciide_sis_products },
613 { PCI_VENDOR_ALI, pciide_acer_products },
614 { PCI_VENDOR_PROMISE, pciide_promise_products },
615 { PCI_VENDOR_AMD, pciide_amd_products },
616 { PCI_VENDOR_OPTI, pciide_opti_products },
617 { PCI_VENDOR_TRIONES, pciide_triones_products },
618 { PCI_VENDOR_ACARD, pciide_acard_products },
619 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
620 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
621 { PCI_VENDOR_WINBOND, pciide_winbond_products },
622 { 0, NULL }
623 };
624
625 /* options passed via the 'flags' config keyword */
626 #define PCIIDE_OPTIONS_DMA 0x01
627 #define PCIIDE_OPTIONS_NODMA 0x02
628
629 int pciide_match __P((struct device *, struct cfdata *, void *));
630 void pciide_attach __P((struct device *, struct device *, void *));
631
632 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
633 pciide_match, pciide_attach, NULL, NULL);
634
635 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
636 int pciide_mapregs_compat __P(( struct pci_attach_args *,
637 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
638 int pciide_mapregs_native __P((struct pci_attach_args *,
639 struct pciide_channel *, bus_size_t *, bus_size_t *,
640 int (*pci_intr) __P((void *))));
641 void pciide_mapreg_dma __P((struct pciide_softc *,
642 struct pci_attach_args *));
643 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
644 void pciide_mapchan __P((struct pci_attach_args *,
645 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
646 int (*pci_intr) __P((void *))));
647 int pciide_chan_candisable __P((struct pciide_channel *));
648 void pciide_map_compat_intr __P(( struct pci_attach_args *,
649 struct pciide_channel *, int, int));
650 int pciide_compat_intr __P((void *));
651 int pciide_pci_intr __P((void *));
652 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
653
654 const struct pciide_product_desc *
655 pciide_lookup_product(id)
656 u_int32_t id;
657 {
658 const struct pciide_product_desc *pp;
659 const struct pciide_vendor_desc *vp;
660
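	/*
	 * Both tables end with a sentinel entry: the vendor list with a
	 * NULL ide_products pointer, each product list with a NULL
	 * chip_map pointer.
	 */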
661 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
662 if (PCI_VENDOR(id) == vp->ide_vendor)
663 break;
664
665 if ((pp = vp->ide_products) == NULL)
666 return NULL;
667
668 for (; pp->chip_map != NULL; pp++)
669 if (PCI_PRODUCT(id) == pp->ide_product)
670 break;
671
672 if (pp->chip_map == NULL)
673 return NULL;
674 return pp;
675 }
676
677 int
678 pciide_match(parent, match, aux)
679 struct device *parent;
680 struct cfdata *match;
681 void *aux;
682 {
683 struct pci_attach_args *pa = aux;
684 const struct pciide_product_desc *pp;
685
686 /*
687 * Check the ID register to see that it's a PCI IDE controller.
688 * If it is, we assume that we can deal with it; it _should_
689 * work in a standardized way...
690 */
691 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
692 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
693 return (1);
694 }
695
696 /*
697 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
698 * controllers. Let's see if we can deal with them anyway.
699 */
700 pp = pciide_lookup_product(pa->pa_id);
701 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
702 return (1);
703 }
704
705 return (0);
706 }
707
708 void
709 pciide_attach(parent, self, aux)
710 struct device *parent, *self;
711 void *aux;
712 {
713 struct pci_attach_args *pa = aux;
714 pci_chipset_tag_t pc = pa->pa_pc;
715 pcitag_t tag = pa->pa_tag;
716 struct pciide_softc *sc = (struct pciide_softc *)self;
717 pcireg_t csr;
718 char devinfo[256];
719 const char *displaydev;
720
721 sc->sc_pp = pciide_lookup_product(pa->pa_id);
722 if (sc->sc_pp == NULL) {
723 sc->sc_pp = &default_product_desc;
724 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
725 displaydev = devinfo;
726 } else
727 displaydev = sc->sc_pp->ide_name;
728
729 /* if displaydev == NULL, printf is done in chip-specific map */
730 if (displaydev)
731 printf(": %s (rev. 0x%02x)\n", displaydev,
732 PCI_REVISION(pa->pa_class));
733
734 sc->sc_pc = pa->pa_pc;
735 sc->sc_tag = pa->pa_tag;
736 #ifdef WDCDEBUG
737 if (wdcdebug_pciide_mask & DEBUG_PROBE)
738 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
739 #endif
740 sc->sc_pp->chip_map(sc, pa);
741
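	/* Enable PCI bus mastering if the chip-specific map set up DMA. */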
742 if (sc->sc_dma_ok) {
743 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
744 csr |= PCI_COMMAND_MASTER_ENABLE;
745 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
746 }
747 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
748 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
749 }
750
751 /* tell whether the chip is enabled or not */
752 int
753 pciide_chipen(sc, pa)
754 struct pciide_softc *sc;
755 struct pci_attach_args *pa;
756 {
757 pcireg_t csr;
758 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
759 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
760 PCI_COMMAND_STATUS_REG);
761 printf("%s: device disabled (at %s)\n",
762 sc->sc_wdcdev.sc_dev.dv_xname,
763 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
764 "device" : "bridge");
765 return 0;
766 }
767 return 1;
768 }
769
770 int
771 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
772 struct pci_attach_args *pa;
773 struct pciide_channel *cp;
774 int compatchan;
775 bus_size_t *cmdsizep, *ctlsizep;
776 {
777 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
778 struct channel_softc *wdc_cp = &cp->wdc_channel;
779
780 cp->compat = 1;
781 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
782 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
783
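	/*
	 * Compatibility channels decode the fixed legacy addresses given
	 * by PCIIDE_COMPAT_CMD_BASE/PCIIDE_COMPAT_CTL_BASE, so map them
	 * through the parent's I/O tag instead of a PCI BAR.
	 */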
784 wdc_cp->cmd_iot = pa->pa_iot;
785 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
786 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
787 printf("%s: couldn't map %s channel cmd regs\n",
788 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
789 return (0);
790 }
791
792 wdc_cp->ctl_iot = pa->pa_iot;
793 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
794 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
795 printf("%s: couldn't map %s channel ctl regs\n",
796 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
797 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
798 PCIIDE_COMPAT_CMD_SIZE);
799 return (0);
800 }
801
802 return (1);
803 }
804
805 int
806 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
807 struct pci_attach_args * pa;
808 struct pciide_channel *cp;
809 bus_size_t *cmdsizep, *ctlsizep;
810 int (*pci_intr) __P((void *));
811 {
812 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
813 struct channel_softc *wdc_cp = &cp->wdc_channel;
814 const char *intrstr;
815 pci_intr_handle_t intrhandle;
816
817 cp->compat = 0;
818
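	/*
	 * Both channels of a native-mode controller share a single PCI
	 * interrupt, so establish the handler only once and reuse it for
	 * the other channel.
	 */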
819 if (sc->sc_pci_ih == NULL) {
820 if (pci_intr_map(pa, &intrhandle) != 0) {
821 printf("%s: couldn't map native-PCI interrupt\n",
822 sc->sc_wdcdev.sc_dev.dv_xname);
823 return 0;
824 }
825 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
826 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
827 intrhandle, IPL_BIO, pci_intr, sc);
828 if (sc->sc_pci_ih != NULL) {
829 printf("%s: using %s for native-PCI interrupt\n",
830 sc->sc_wdcdev.sc_dev.dv_xname,
831 intrstr ? intrstr : "unknown interrupt");
832 } else {
833 printf("%s: couldn't establish native-PCI interrupt",
834 sc->sc_wdcdev.sc_dev.dv_xname);
835 if (intrstr != NULL)
836 printf(" at %s", intrstr);
837 printf("\n");
838 return 0;
839 }
840 }
841 cp->ih = sc->sc_pci_ih;
842 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
843 PCI_MAPREG_TYPE_IO, 0,
844 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
845 printf("%s: couldn't map %s channel cmd regs\n",
846 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
847 return 0;
848 }
849
850 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
851 PCI_MAPREG_TYPE_IO, 0,
852 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
853 printf("%s: couldn't map %s channel ctl regs\n",
854 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
855 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
856 return 0;
857 }
858 /*
859 * In native mode, 4 bytes of I/O space are mapped for the control
860 * register, the control register is at offset 2. Pass the generic
861 * code a handle for only one byte at the right offset.
862 */
863 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
864 &wdc_cp->ctl_ioh) != 0) {
865 printf("%s: unable to subregion %s channel ctl regs\n",
866 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
867 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
868 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
869 return 0;
870 }
871 return (1);
872 }
873
874 void
875 pciide_mapreg_dma(sc, pa)
876 struct pciide_softc *sc;
877 struct pci_attach_args *pa;
878 {
879 pcireg_t maptype;
880 bus_addr_t addr;
881
882 /*
883 * Map DMA registers
884 *
885 * Note that sc_dma_ok is the right variable to test to see if
886 * DMA can be done. If the interface doesn't support DMA,
887 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
888 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
889 * non-zero if the interface supports DMA and the registers
890 * could be mapped.
891 *
892 * XXX Note that despite the fact that the Bus Master IDE specs
893 * XXX say that "The bus master IDE function uses 16 bytes of IO
894 * XXX space," some controllers (at least the United
895 * XXX Microelectronics UM8886BF) place it in memory space.
896 */
897 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
898 PCIIDE_REG_BUS_MASTER_DMA);
899
900 switch (maptype) {
901 case PCI_MAPREG_TYPE_IO:
902 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
903 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
904 &addr, NULL, NULL) == 0);
905 if (sc->sc_dma_ok == 0) {
906 printf(", but unused (couldn't query registers)");
907 break;
908 }
909 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
910 && addr >= 0x10000) {
911 sc->sc_dma_ok = 0;
912 printf(", but unused (registers at unsafe address "
913 "%#lx)", (unsigned long)addr);
914 break;
915 }
916 /* FALLTHROUGH */
917
918 case PCI_MAPREG_MEM_TYPE_32BIT:
919 sc->sc_dma_ok = (pci_mapreg_map(pa,
920 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
921 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
922 sc->sc_dmat = pa->pa_dmat;
923 if (sc->sc_dma_ok == 0) {
924 printf(", but unused (couldn't map registers)");
925 } else {
926 sc->sc_wdcdev.dma_arg = sc;
927 sc->sc_wdcdev.dma_init = pciide_dma_init;
928 sc->sc_wdcdev.dma_start = pciide_dma_start;
929 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
930 }
931
932 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
933 PCIIDE_OPTIONS_NODMA) {
934 printf(", but unused (forced off by config file)");
935 sc->sc_dma_ok = 0;
936 }
937 break;
938
939 default:
940 sc->sc_dma_ok = 0;
941 printf(", but unsupported register maptype (0x%x)", maptype);
942 }
943 }
944
945 int
946 pciide_compat_intr(arg)
947 void *arg;
948 {
949 struct pciide_channel *cp = arg;
950
951 #ifdef DIAGNOSTIC
952 /* should only be called for a compat channel */
953 if (cp->compat == 0)
954 panic("pciide compat intr called for non-compat chan %p", cp);
955 #endif
956 return (wdcintr(&cp->wdc_channel));
957 }
958
959 int
960 pciide_pci_intr(arg)
961 void *arg;
962 {
963 struct pciide_softc *sc = arg;
964 struct pciide_channel *cp;
965 struct channel_softc *wdc_cp;
966 int i, rv, crv;
967
968 rv = 0;
969 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
970 cp = &sc->pciide_channels[i];
971 wdc_cp = &cp->wdc_channel;
972
973 /* If a compat channel, skip. */
974 if (cp->compat)
975 continue;
976 /* if this channel isn't waiting for an intr, skip */
977 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
978 continue;
979
980 crv = wdcintr(wdc_cp);
981 if (crv == 0)
982 ; /* leave rv alone */
983 else if (crv == 1)
984 rv = 1; /* claim the intr */
985 else if (rv == 0) /* crv should be -1 in this case */
986 rv = crv; /* if we've done no better, take it */
987 }
988 return (rv);
989 }
990
991 void
992 pciide_channel_dma_setup(cp)
993 struct pciide_channel *cp;
994 {
995 int drive;
996 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
997 struct ata_drive_datas *drvp;
998
999 for (drive = 0; drive < 2; drive++) {
1000 drvp = &cp->wdc_channel.ch_drive[drive];
1001 /* If no drive, skip */
1002 if ((drvp->drive_flags & DRIVE) == 0)
1003 continue;
1004 /* setup DMA if needed */
1005 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1006 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1007 sc->sc_dma_ok == 0) {
1008 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1009 continue;
1010 }
1011 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1012 != 0) {
1013 /* Abort DMA setup */
1014 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1015 continue;
1016 }
1017 }
1018 }
1019
1020 int
1021 pciide_dma_table_setup(sc, channel, drive)
1022 struct pciide_softc *sc;
1023 int channel, drive;
1024 {
1025 bus_dma_segment_t seg;
1026 int error, rseg;
1027 const bus_size_t dma_table_size =
1028 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1029 struct pciide_dma_maps *dma_maps =
1030 &sc->pciide_channels[channel].dma_maps[drive];
1031
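	/*
	 * dma_table is the bus-master physical region descriptor list:
	 * one base address/byte count pair per transfer segment (filled
	 * in by pciide_dma_init), kept within the boundary imposed by
	 * IDEDMA_TBL_ALIGN.
	 */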
1032 /* If table was already allocated, just return */
1033 if (dma_maps->dma_table)
1034 return 0;
1035
1036 /* Allocate memory for the DMA tables and map it */
1037 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1038 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1039 BUS_DMA_NOWAIT)) != 0) {
1040 printf("%s:%d: unable to allocate table DMA for "
1041 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1042 channel, drive, error);
1043 return error;
1044 }
1045 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1046 dma_table_size,
1047 (caddr_t *)&dma_maps->dma_table,
1048 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1049 printf("%s:%d: unable to map table DMA for "
1050 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1051 channel, drive, error);
1052 return error;
1053 }
1054 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1055 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1056 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1057
1058 /* Create and load table DMA map for this disk */
1059 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1060 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1061 &dma_maps->dmamap_table)) != 0) {
1062 printf("%s:%d: unable to create table DMA map for "
1063 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1064 channel, drive, error);
1065 return error;
1066 }
1067 if ((error = bus_dmamap_load(sc->sc_dmat,
1068 dma_maps->dmamap_table,
1069 dma_maps->dma_table,
1070 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1071 printf("%s:%d: unable to load table DMA map for "
1072 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1073 channel, drive, error);
1074 return error;
1075 }
1076 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1077 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1078 DEBUG_PROBE);
1079 /* Create a xfer DMA map for this drive */
1080 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1081 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1082 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1083 &dma_maps->dmamap_xfer)) != 0) {
1084 printf("%s:%d: unable to create xfer DMA map for "
1085 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1086 channel, drive, error);
1087 return error;
1088 }
1089 return 0;
1090 }
1091
1092 int
1093 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1094 void *v;
1095 int channel, drive;
1096 void *databuf;
1097 size_t datalen;
1098 int flags;
1099 {
1100 struct pciide_softc *sc = v;
1101 int error, seg;
1102 struct pciide_dma_maps *dma_maps =
1103 &sc->pciide_channels[channel].dma_maps[drive];
1104
1105 error = bus_dmamap_load(sc->sc_dmat,
1106 dma_maps->dmamap_xfer,
1107 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1108 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1109 if (error) {
1110 printf("%s:%d: unable to load xfer DMA map for "
1111 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1112 channel, drive, error);
1113 return error;
1114 }
1115
1116 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1117 dma_maps->dmamap_xfer->dm_mapsize,
1118 (flags & WDC_DMA_READ) ?
1119 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1120
1121 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1122 #ifdef DIAGNOSTIC
1123 /* A segment must not cross a 64k boundary */
1124 {
1125 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1126 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1127 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1128 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1129 printf("pciide_dma: segment %d physical addr 0x%lx"
1130 " len 0x%lx not properly aligned\n",
1131 seg, phys, len);
1132 panic("pciide_dma: buf align");
1133 }
1134 }
1135 #endif
1136 dma_maps->dma_table[seg].base_addr =
1137 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1138 dma_maps->dma_table[seg].byte_count =
1139 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1140 IDEDMA_BYTE_COUNT_MASK);
1141 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1142 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1143 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1144
1145 }
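	/* Mark the last segment as end-of-table */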
1146 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
1147 htole32(IDEDMA_BYTE_COUNT_EOT);
1148
1149 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1150 dma_maps->dmamap_table->dm_mapsize,
1151 BUS_DMASYNC_PREWRITE);
1152
1153 /* Maps are ready. Start DMA function */
1154 #ifdef DIAGNOSTIC
1155 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1156 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1157 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1158 panic("pciide_dma_init: table align");
1159 }
1160 #endif
1161
1162 /* Clear status bits */
1163 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1164 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1165 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1166 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1167 /* Write table addr */
1168 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1169 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1170 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1171 /* set read/write */
1172 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1173 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1174 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0);
1175 /* remember flags */
1176 dma_maps->dma_flags = flags;
1177 return 0;
1178 }
1179
1180 void
1181 pciide_dma_start(v, channel, drive)
1182 void *v;
1183 int channel, drive;
1184 {
1185 struct pciide_softc *sc = v;
1186
1187 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1188 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1189 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1190 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1191 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1192 }
1193
1194 int
1195 pciide_dma_finish(v, channel, drive, force)
1196 void *v;
1197 int channel, drive;
1198 int force;
1199 {
1200 struct pciide_softc *sc = v;
1201 u_int8_t status;
1202 int error = 0;
1203 struct pciide_dma_maps *dma_maps =
1204 &sc->pciide_channels[channel].dma_maps[drive];
1205
1206 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1207 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1208 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1209 DEBUG_XFERS);
1210
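	/*
	 * If this is not a forced completion and the controller did not
	 * flag an interrupt, this interrupt was not for us; let the
	 * caller know.
	 */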
1211 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1212 return WDC_DMAST_NOIRQ;
1213
1214 /* stop DMA channel */
1215 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1216 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1217 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1218 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1219
1220 /* Unload the map of the data buffer */
1221 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1222 dma_maps->dmamap_xfer->dm_mapsize,
1223 (dma_maps->dma_flags & WDC_DMA_READ) ?
1224 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1225 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1226
1227 if ((status & IDEDMA_CTL_ERR) != 0) {
1228 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1229 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1230 error |= WDC_DMAST_ERR;
1231 }
1232
1233 if ((status & IDEDMA_CTL_INTR) == 0) {
1234 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1235 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1236 drive, status);
1237 error |= WDC_DMAST_NOIRQ;
1238 }
1239
1240 if ((status & IDEDMA_CTL_ACT) != 0) {
1241 /* data underrun, may be a valid condition for ATAPI */
1242 error |= WDC_DMAST_UNDER;
1243 }
1244 return error;
1245 }
1246
1247 void
1248 pciide_irqack(chp)
1249 struct channel_softc *chp;
1250 {
1251 struct pciide_channel *cp = (struct pciide_channel*)chp;
1252 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1253
1254 /* clear status bits in IDE DMA registers */
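	/*
	 * The interrupt and error bits are write-one-to-clear, so writing
	 * back the value just read acknowledges a pending interrupt.
	 */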
1255 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1256 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1257 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1258 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1259 }
1260
1261 /* some common code used by several chip_map */
1262 int
1263 pciide_chansetup(sc, channel, interface)
1264 struct pciide_softc *sc;
1265 int channel;
1266 pcireg_t interface;
1267 {
1268 struct pciide_channel *cp = &sc->pciide_channels[channel];
1269 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1270 cp->name = PCIIDE_CHANNEL_NAME(channel);
1271 cp->wdc_channel.channel = channel;
1272 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1273 cp->wdc_channel.ch_queue =
1274 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1275 if (cp->wdc_channel.ch_queue == NULL) {
1276 printf("%s: %s channel: "
1277 "can't allocate memory for command queue\n",
1278 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1279 return 0;
1280 }
1281 printf("%s: %s channel %s to %s mode\n",
1282 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1283 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1284 "configured" : "wired",
1285 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1286 "native-PCI" : "compatibility");
1287 return 1;
1288 }
1289
1290 /* some common code used by several chip_map functions to map a channel */
1291 void
1292 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1293 struct pci_attach_args *pa;
1294 struct pciide_channel *cp;
1295 pcireg_t interface;
1296 bus_size_t *cmdsizep, *ctlsizep;
1297 int (*pci_intr) __P((void *));
1298 {
1299 struct channel_softc *wdc_cp = &cp->wdc_channel;
1300
1301 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1302 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1303 pci_intr);
1304 else
1305 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1306 wdc_cp->channel, cmdsizep, ctlsizep);
1307
1308 if (cp->hw_ok == 0)
1309 return;
1310 wdc_cp->data32iot = wdc_cp->cmd_iot;
1311 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1312 wdcattach(wdc_cp);
1313 }
1314
1315 /*
1316 * Generic code to determine whether a channel can be disabled. Returns 1
1317 * if the channel can be disabled, 0 if not.
1318 */
1319 int
1320 pciide_chan_candisable(cp)
1321 struct pciide_channel *cp;
1322 {
1323 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1324 struct channel_softc *wdc_cp = &cp->wdc_channel;
1325
1326 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1327 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1328 printf("%s: disabling %s channel (no drives)\n",
1329 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1330 cp->hw_ok = 0;
1331 return 1;
1332 }
1333 return 0;
1334 }
1335
1336 /*
1337 * Generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1338 * Sets hw_ok=0 on failure.
1339 */
1340 void
1341 pciide_map_compat_intr(pa, cp, compatchan, interface)
1342 struct pci_attach_args *pa;
1343 struct pciide_channel *cp;
1344 int compatchan, interface;
1345 {
1346 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1347 struct channel_softc *wdc_cp = &cp->wdc_channel;
1348
1349 if (cp->hw_ok == 0)
1350 return;
1351 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1352 return;
1353
1354 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1355 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1356 pa, compatchan, pciide_compat_intr, cp);
1357 if (cp->ih == NULL) {
1358 #endif
1359 printf("%s: no compatibility interrupt for use by %s "
1360 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1361 cp->hw_ok = 0;
1362 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1363 }
1364 #endif
1365 }
1366
1367 void
1368 pciide_print_modes(cp)
1369 struct pciide_channel *cp;
1370 {
1371 wdc_print_modes(&cp->wdc_channel);
1372 }
1373
1374 void
1375 default_chip_map(sc, pa)
1376 struct pciide_softc *sc;
1377 struct pci_attach_args *pa;
1378 {
1379 struct pciide_channel *cp;
1380 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1381 pcireg_t csr;
1382 int channel, drive;
1383 struct ata_drive_datas *drvp;
1384 u_int8_t idedma_ctl;
1385 bus_size_t cmdsize, ctlsize;
1386 char *failreason;
1387
1388 if (pciide_chipen(sc, pa) == 0)
1389 return;
1390
1391 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1392 printf("%s: bus-master DMA support present",
1393 sc->sc_wdcdev.sc_dev.dv_xname);
1394 if (sc->sc_pp == &default_product_desc &&
1395 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1396 PCIIDE_OPTIONS_DMA) == 0) {
1397 printf(", but unused (no driver support)");
1398 sc->sc_dma_ok = 0;
1399 } else {
1400 pciide_mapreg_dma(sc, pa);
1401 if (sc->sc_dma_ok != 0)
1402 printf(", used without full driver "
1403 "support");
1404 }
1405 } else {
1406 printf("%s: hardware does not support DMA",
1407 sc->sc_wdcdev.sc_dev.dv_xname);
1408 sc->sc_dma_ok = 0;
1409 }
1410 printf("\n");
1411 if (sc->sc_dma_ok) {
1412 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1413 sc->sc_wdcdev.irqack = pciide_irqack;
1414 }
1415 sc->sc_wdcdev.PIO_cap = 0;
1416 sc->sc_wdcdev.DMA_cap = 0;
1417
1418 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1419 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1420 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1421
1422 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1423 cp = &sc->pciide_channels[channel];
1424 if (pciide_chansetup(sc, channel, interface) == 0)
1425 continue;
1426 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1427 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1428 &ctlsize, pciide_pci_intr);
1429 } else {
1430 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1431 channel, &cmdsize, &ctlsize);
1432 }
1433 if (cp->hw_ok == 0)
1434 continue;
1435 /*
1436 * Check to see if something appears to be there.
1437 */
1438 failreason = NULL;
1439 if (!wdcprobe(&cp->wdc_channel)) {
1440 failreason = "not responding; disabled or no drives?";
1441 goto next;
1442 }
1443 /*
1444 * Now, make sure it's actually attributable to this PCI IDE
1445 * channel by trying to access the channel again while the
1446 * PCI IDE controller's I/O space is disabled. (If the
1447 * channel no longer appears to be there, it belongs to
1448 * this controller.) YUCK!
1449 */
1450 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1451 PCI_COMMAND_STATUS_REG);
1452 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1453 csr & ~PCI_COMMAND_IO_ENABLE);
1454 if (wdcprobe(&cp->wdc_channel))
1455 failreason = "other hardware responding at addresses";
1456 pci_conf_write(sc->sc_pc, sc->sc_tag,
1457 PCI_COMMAND_STATUS_REG, csr);
1458 next:
1459 if (failreason) {
1460 printf("%s: %s channel ignored (%s)\n",
1461 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1462 failreason);
1463 cp->hw_ok = 0;
1464 bus_space_unmap(cp->wdc_channel.cmd_iot,
1465 cp->wdc_channel.cmd_ioh, cmdsize);
1466 if (interface & PCIIDE_INTERFACE_PCI(channel))
1467 bus_space_unmap(cp->wdc_channel.ctl_iot,
1468 cp->ctl_baseioh, ctlsize);
1469 else
1470 bus_space_unmap(cp->wdc_channel.ctl_iot,
1471 cp->wdc_channel.ctl_ioh, ctlsize);
1472 } else {
1473 pciide_map_compat_intr(pa, cp, channel, interface);
1474 }
1475 if (cp->hw_ok) {
1476 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1477 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1478 wdcattach(&cp->wdc_channel);
1479 }
1480 }
1481
1482 if (sc->sc_dma_ok == 0)
1483 return;
1484
1485 /* Allocate DMA maps */
1486 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1487 idedma_ctl = 0;
1488 cp = &sc->pciide_channels[channel];
1489 for (drive = 0; drive < 2; drive++) {
1490 drvp = &cp->wdc_channel.ch_drive[drive];
1491 /* If no drive, skip */
1492 if ((drvp->drive_flags & DRIVE) == 0)
1493 continue;
1494 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1495 continue;
1496 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1497 /* Abort DMA setup */
1498 printf("%s:%d:%d: can't allocate DMA maps, "
1499 "using PIO transfers\n",
1500 sc->sc_wdcdev.sc_dev.dv_xname,
1501 channel, drive);
1502 drvp->drive_flags &= ~DRIVE_DMA;
	continue;
1503 }
1504 printf("%s:%d:%d: using DMA data transfers\n",
1505 sc->sc_wdcdev.sc_dev.dv_xname,
1506 channel, drive);
1507 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1508 }
1509 if (idedma_ctl != 0) {
1510 /* Add software bits in status register */
1511 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1512 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1513 idedma_ctl);
1514 }
1515 }
1516 }
1517
1518 void
1519 piix_chip_map(sc, pa)
1520 struct pciide_softc *sc;
1521 struct pci_attach_args *pa;
1522 {
1523 struct pciide_channel *cp;
1524 int channel;
1525 u_int32_t idetim;
1526 bus_size_t cmdsize, ctlsize;
1527
1528 if (pciide_chipen(sc, pa) == 0)
1529 return;
1530
1531 printf("%s: bus-master DMA support present",
1532 sc->sc_wdcdev.sc_dev.dv_xname);
1533 pciide_mapreg_dma(sc, pa);
1534 printf("\n");
1535 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1536 WDC_CAPABILITY_MODE;
1537 if (sc->sc_dma_ok) {
1538 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1539 sc->sc_wdcdev.irqack = pciide_irqack;
1540 switch(sc->sc_pp->ide_product) {
1541 case PCI_PRODUCT_INTEL_82371AB_IDE:
1542 case PCI_PRODUCT_INTEL_82440MX_IDE:
1543 case PCI_PRODUCT_INTEL_82801AA_IDE:
1544 case PCI_PRODUCT_INTEL_82801AB_IDE:
1545 case PCI_PRODUCT_INTEL_82801BA_IDE:
1546 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1547 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1548 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1549 case PCI_PRODUCT_INTEL_82801DB_IDE:
1550 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1551 }
1552 }
1553 sc->sc_wdcdev.PIO_cap = 4;
1554 sc->sc_wdcdev.DMA_cap = 2;
1555 switch(sc->sc_pp->ide_product) {
1556 case PCI_PRODUCT_INTEL_82801AA_IDE:
1557 sc->sc_wdcdev.UDMA_cap = 4;
1558 break;
1559 case PCI_PRODUCT_INTEL_82801BA_IDE:
1560 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1561 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1562 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1563 case PCI_PRODUCT_INTEL_82801DB_IDE:
1564 sc->sc_wdcdev.UDMA_cap = 5;
1565 break;
1566 default:
1567 sc->sc_wdcdev.UDMA_cap = 2;
1568 }
1569 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1570 sc->sc_wdcdev.set_modes = piix_setup_channel;
1571 else
1572 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1573 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1574 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1575
1576 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1577 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1578 DEBUG_PROBE);
1579 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1580 WDCDEBUG_PRINT((", sidetim=0x%x",
1581 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1582 DEBUG_PROBE);
1583 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1584 WDCDEBUG_PRINT((", udmareg 0x%x",
1585 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1586 DEBUG_PROBE);
1587 }
1588 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1589 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1590 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1591 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1592 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1593 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1594 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1595 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1596 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1597 DEBUG_PROBE);
1598 }
1599
1600 }
1601 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1602
1603 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1604 cp = &sc->pciide_channels[channel];
1605 /* PIIX is compat-only */
1606 if (pciide_chansetup(sc, channel, 0) == 0)
1607 continue;
1608 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1609 if ((PIIX_IDETIM_READ(idetim, channel) &
1610 PIIX_IDETIM_IDE) == 0) {
1611 printf("%s: %s channel ignored (disabled)\n",
1612 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1613 continue;
1614 }
1615 /* PIIX are compat-only pciide devices */
1616 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1617 if (cp->hw_ok == 0)
1618 continue;
1619 if (pciide_chan_candisable(cp)) {
1620 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1621 channel);
1622 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1623 idetim);
1624 }
1625 pciide_map_compat_intr(pa, cp, channel, 0);
1626 if (cp->hw_ok == 0)
1627 continue;
1628 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1629 }
1630
1631 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1632 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1633 DEBUG_PROBE);
1634 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1635 WDCDEBUG_PRINT((", sidetim=0x%x",
1636 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1637 DEBUG_PROBE);
1638 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1639 WDCDEBUG_PRINT((", udmareg 0x%x",
1640 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1641 DEBUG_PROBE);
1642 }
1643 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1644 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1645 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1646 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1647 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1648 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1649 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1650 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1651 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1652 DEBUG_PROBE);
1653 }
1654 }
1655 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1656 }
1657
1658 void
1659 piix_setup_channel(chp)
1660 struct channel_softc *chp;
1661 {
1662 u_int8_t mode[2], drive;
1663 u_int32_t oidetim, idetim, idedma_ctl;
1664 struct pciide_channel *cp = (struct pciide_channel*)chp;
1665 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1666 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1667
1668 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1669 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1670 idedma_ctl = 0;
1671
1672 /* set up new idetim: Enable IDE registers decode */
1673 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1674 chp->channel);
1675
1676 /* setup DMA */
1677 pciide_channel_dma_setup(cp);
1678
1679 /*
1680 * Here we have to mess with the drives' modes: the PIIX can't have
1681 * different timings for master and slave drives.
1682 * We need to find the best combination.
1683 */
1684
1685 /* If both drives support DMA, take the lower mode */
1686 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1687 (drvp[1].drive_flags & DRIVE_DMA)) {
1688 mode[0] = mode[1] =
1689 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1690 drvp[0].DMA_mode = mode[0];
1691 drvp[1].DMA_mode = mode[1];
1692 goto ok;
1693 }
1694 /*
1695 * If only one drive supports DMA, use its mode, and
1696 * put the other one in PIO mode 0 if its mode is not compatible
1697 */
1698 if (drvp[0].drive_flags & DRIVE_DMA) {
1699 mode[0] = drvp[0].DMA_mode;
1700 mode[1] = drvp[1].PIO_mode;
1701 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1702 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1703 mode[1] = drvp[1].PIO_mode = 0;
1704 goto ok;
1705 }
1706 if (drvp[1].drive_flags & DRIVE_DMA) {
1707 mode[1] = drvp[1].DMA_mode;
1708 mode[0] = drvp[0].PIO_mode;
1709 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1710 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1711 mode[0] = drvp[0].PIO_mode = 0;
1712 goto ok;
1713 }
1714 /*
1715 * If neither drive uses DMA, take the lower mode, unless
1716 * one of them is below PIO mode 2
1717 */
1718 if (drvp[0].PIO_mode < 2) {
1719 mode[0] = drvp[0].PIO_mode = 0;
1720 mode[1] = drvp[1].PIO_mode;
1721 } else if (drvp[1].PIO_mode < 2) {
1722 mode[1] = drvp[1].PIO_mode = 0;
1723 mode[0] = drvp[0].PIO_mode;
1724 } else {
1725 mode[0] = mode[1] =
1726 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1727 drvp[0].PIO_mode = mode[0];
1728 drvp[1].PIO_mode = mode[1];
1729 }
1730 ok: /* The modes are setup */
1731 for (drive = 0; drive < 2; drive++) {
1732 if (drvp[drive].drive_flags & DRIVE_DMA) {
1733 idetim |= piix_setup_idetim_timings(
1734 mode[drive], 1, chp->channel);
1735 goto end;
1736 }
1737 }
1738 /* If we get here, neither drive is using DMA */
1739 if (mode[0] >= 2)
1740 idetim |= piix_setup_idetim_timings(
1741 mode[0], 0, chp->channel);
1742 else
1743 idetim |= piix_setup_idetim_timings(
1744 mode[1], 0, chp->channel);
1745 end: /*
1746 * The timing mode is now set up in the controller. Enable
1747 * it per-drive.
1748 */
1749 for (drive = 0; drive < 2; drive++) {
1750 /* If no drive, skip */
1751 if ((drvp[drive].drive_flags & DRIVE) == 0)
1752 continue;
1753 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1754 if (drvp[drive].drive_flags & DRIVE_DMA)
1755 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1756 }
1757 if (idedma_ctl != 0) {
1758 /* Add software bits in status register */
1759 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1760 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1761 idedma_ctl);
1762 }
1763 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1764 pciide_print_modes(cp);
1765 }
1766
1767 void
1768 piix3_4_setup_channel(chp)
1769 struct channel_softc *chp;
1770 {
1771 struct ata_drive_datas *drvp;
1772 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1773 struct pciide_channel *cp = (struct pciide_channel*)chp;
1774 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1775 int drive;
1776 int channel = chp->channel;
1777
1778 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1779 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1780 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1781 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1782 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1783 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1784 PIIX_SIDETIM_RTC_MASK(channel));
1785
1786 idedma_ctl = 0;
1787 /* If channel disabled, no need to go further */
1788 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1789 return;
1790 	/* set up new idetim: enable IDE register decoding */
1791 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1792
1793 /* setup DMA if needed */
1794 pciide_channel_dma_setup(cp);
1795
1796 for (drive = 0; drive < 2; drive++) {
1797 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1798 PIIX_UDMATIM_SET(0x3, channel, drive));
1799 drvp = &chp->ch_drive[drive];
1800 /* If no drive, skip */
1801 if ((drvp->drive_flags & DRIVE) == 0)
1802 continue;
1803 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1804 (drvp->drive_flags & DRIVE_UDMA) == 0))
1805 goto pio;
1806
1807 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1808 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1809 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1810 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1811 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1812 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1813 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1814 ideconf |= PIIX_CONFIG_PINGPONG;
1815 }
1816 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1817 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1818 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1819 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1820 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1821 /* setup Ultra/100 */
1822 if (drvp->UDMA_mode > 2 &&
1823 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1824 drvp->UDMA_mode = 2;
1825 if (drvp->UDMA_mode > 4) {
1826 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1827 } else {
1828 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1829 if (drvp->UDMA_mode > 2) {
1830 ideconf |= PIIX_CONFIG_UDMA66(channel,
1831 drive);
1832 } else {
1833 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1834 drive);
1835 }
1836 }
1837 }
1838 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1839 /* setup Ultra/66 */
1840 if (drvp->UDMA_mode > 2 &&
1841 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1842 drvp->UDMA_mode = 2;
1843 if (drvp->UDMA_mode > 2)
1844 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1845 else
1846 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1847 }
1848 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1849 (drvp->drive_flags & DRIVE_UDMA)) {
1850 /* use Ultra/DMA */
1851 drvp->drive_flags &= ~DRIVE_DMA;
1852 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1853 udmareg |= PIIX_UDMATIM_SET(
1854 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1855 } else {
1856 /* use Multiword DMA */
1857 drvp->drive_flags &= ~DRIVE_UDMA;
1858 if (drive == 0) {
1859 idetim |= piix_setup_idetim_timings(
1860 drvp->DMA_mode, 1, channel);
1861 } else {
1862 sidetim |= piix_setup_sidetim_timings(
1863 drvp->DMA_mode, 1, channel);
1864 				idetim = PIIX_IDETIM_SET(idetim,
1865 PIIX_IDETIM_SITRE, channel);
1866 }
1867 }
1868 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1869
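		/*
		 * Note that DMA and UDMA drives fall through to the PIO
		 * setup below as well, so PIO timings are always programmed.
		 */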
1870 pio: /* use PIO mode */
1871 idetim |= piix_setup_idetim_drvs(drvp);
1872 if (drive == 0) {
1873 idetim |= piix_setup_idetim_timings(
1874 drvp->PIO_mode, 0, channel);
1875 } else {
1876 sidetim |= piix_setup_sidetim_timings(
1877 drvp->PIO_mode, 0, channel);
1878 			idetim = PIIX_IDETIM_SET(idetim,
1879 PIIX_IDETIM_SITRE, channel);
1880 }
1881 }
1882 if (idedma_ctl != 0) {
1883 /* Add software bits in status register */
1884 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1885 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1886 idedma_ctl);
1887 }
1888 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1889 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1890 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1891 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1892 pciide_print_modes(cp);
1893 }
1894
1895
1896 /* setup ISP and RTC fields, based on mode */
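/*
 * ISP is the IORDY sample point and RTC the recovery time; the
 * piix_isp_* and piix_rtc_* tables map ATA PIO/DMA modes to the encoded
 * register values.
 */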
1897 static u_int32_t
1898 piix_setup_idetim_timings(mode, dma, channel)
1899 u_int8_t mode;
1900 u_int8_t dma;
1901 u_int8_t channel;
1902 {
1903
1904 if (dma)
1905 return PIIX_IDETIM_SET(0,
1906 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1907 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1908 channel);
1909 else
1910 return PIIX_IDETIM_SET(0,
1911 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1912 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1913 channel);
1914 }
1915
1916 /* setup DTE, PPE, IE and TIME field based on PIO mode */
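/*
 * (Roughly: DTE restricts the fast timings to DMA only, PPE enables
 * prefetch/posting, IE enables IORDY sampling and TIME enables the fast
 * timing bank for the drive.)
 */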
1917 static u_int32_t
1918 piix_setup_idetim_drvs(drvp)
1919 struct ata_drive_datas *drvp;
1920 {
1921 u_int32_t ret = 0;
1922 struct channel_softc *chp = drvp->chnl_softc;
1923 u_int8_t channel = chp->channel;
1924 u_int8_t drive = drvp->drive;
1925
1926 /*
1927 	 * If the drive is using UDMA, the timing setups are independent,
1928 	 * so just check DMA and PIO here.
1929 */
1930 if (drvp->drive_flags & DRIVE_DMA) {
1931 /* if mode = DMA mode 0, use compatible timings */
1932 		if (drvp->DMA_mode == 0) {
1934 drvp->PIO_mode = 0;
1935 return ret;
1936 }
1937 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1938 /*
1939 		 * If the PIO and DMA timings are the same, use fast timings
1940 		 * for PIO too; otherwise use compat timings.
1941 */
1942 if ((piix_isp_pio[drvp->PIO_mode] !=
1943 piix_isp_dma[drvp->DMA_mode]) ||
1944 (piix_rtc_pio[drvp->PIO_mode] !=
1945 piix_rtc_dma[drvp->DMA_mode]))
1946 drvp->PIO_mode = 0;
1947 /* if PIO mode <= 2, use compat timings for PIO */
1948 if (drvp->PIO_mode <= 2) {
1949 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1950 channel);
1951 return ret;
1952 }
1953 }
1954
1955 /*
1956 * Now setup PIO modes. If mode < 2, use compat timings.
1957 * Else enable fast timings. Enable IORDY and prefetch/post
1958 * if PIO mode >= 3.
1959 */
1960
1961 if (drvp->PIO_mode < 2)
1962 return ret;
1963
1964 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1965 if (drvp->PIO_mode >= 3) {
1966 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1967 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1968 }
1969 return ret;
1970 }
1971
1972 /* setup values in SIDETIM registers, based on mode */
1973 static u_int32_t
1974 piix_setup_sidetim_timings(mode, dma, channel)
1975 u_int8_t mode;
1976 u_int8_t dma;
1977 u_int8_t channel;
1978 {
1979 if (dma)
1980 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1981 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1982 else
1983 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1984 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1985 }
1986
1987 void
1988 amd7x6_chip_map(sc, pa)
1989 struct pciide_softc *sc;
1990 struct pci_attach_args *pa;
1991 {
1992 struct pciide_channel *cp;
1993 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1994 int channel;
1995 pcireg_t chanenable;
1996 bus_size_t cmdsize, ctlsize;
1997
1998 if (pciide_chipen(sc, pa) == 0)
1999 return;
2000 printf("%s: bus-master DMA support present",
2001 sc->sc_wdcdev.sc_dev.dv_xname);
2002 pciide_mapreg_dma(sc, pa);
2003 printf("\n");
2004 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2005 WDC_CAPABILITY_MODE;
2006 if (sc->sc_dma_ok) {
2007 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2008 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2009 sc->sc_wdcdev.irqack = pciide_irqack;
2010 }
2011 sc->sc_wdcdev.PIO_cap = 4;
2012 sc->sc_wdcdev.DMA_cap = 2;
2013
2014 switch (sc->sc_pp->ide_product) {
2015 case PCI_PRODUCT_AMD_PBC766_IDE:
2016 case PCI_PRODUCT_AMD_PBC768_IDE:
2017 case PCI_PRODUCT_AMD_PBC8111_IDE:
2018 sc->sc_wdcdev.UDMA_cap = 5;
2019 break;
2020 default:
2021 sc->sc_wdcdev.UDMA_cap = 4;
2022 }
2023 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2024 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2025 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2026 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
2027
2028 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2029 DEBUG_PROBE);
2030 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2031 cp = &sc->pciide_channels[channel];
2032 if (pciide_chansetup(sc, channel, interface) == 0)
2033 continue;
2034
2035 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2036 printf("%s: %s channel ignored (disabled)\n",
2037 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2038 continue;
2039 }
2040 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2041 pciide_pci_intr);
2042
2043 if (pciide_chan_candisable(cp))
2044 chanenable &= ~AMD7X6_CHAN_EN(channel);
2045 pciide_map_compat_intr(pa, cp, channel, interface);
2046 if (cp->hw_ok == 0)
2047 continue;
2048
2049 amd7x6_setup_channel(&cp->wdc_channel);
2050 }
2051 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2052 chanenable);
2053 return;
2054 }
2055
2056 void
2057 amd7x6_setup_channel(chp)
2058 struct channel_softc *chp;
2059 {
2060 u_int32_t udmatim_reg, datatim_reg;
2061 u_int8_t idedma_ctl;
2062 int mode, drive;
2063 struct ata_drive_datas *drvp;
2064 struct pciide_channel *cp = (struct pciide_channel*)chp;
2065 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2066 #ifndef PCIIDE_AMD756_ENABLEDMA
2067 int rev = PCI_REVISION(
2068 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2069 #endif
2070
2071 idedma_ctl = 0;
2072 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2073 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2074 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2075 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2076
2077 /* setup DMA if needed */
2078 pciide_channel_dma_setup(cp);
2079
2080 for (drive = 0; drive < 2; drive++) {
2081 drvp = &chp->ch_drive[drive];
2082 /* If no drive, skip */
2083 if ((drvp->drive_flags & DRIVE) == 0)
2084 continue;
2085 /* add timing values, setup DMA if needed */
2086 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2087 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2088 mode = drvp->PIO_mode;
2089 goto pio;
2090 }
2091 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2092 (drvp->drive_flags & DRIVE_UDMA)) {
2093 /* use Ultra/DMA */
2094 drvp->drive_flags &= ~DRIVE_DMA;
2095 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2096 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2097 AMD7X6_UDMA_TIME(chp->channel, drive,
2098 amd7x6_udma_tim[drvp->UDMA_mode]);
2099 /* can use PIO timings, MW DMA unused */
2100 mode = drvp->PIO_mode;
2101 } else {
2102 /* use Multiword DMA, but only if revision is OK */
2103 drvp->drive_flags &= ~DRIVE_UDMA;
2104 #ifndef PCIIDE_AMD756_ENABLEDMA
2105 /*
2106 * The workaround doesn't seem to be necessary
2107 * with all drives, so it can be disabled by
2108 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
2109 * triggered.
2110 */
2111 if (sc->sc_pp->ide_product ==
2112 PCI_PRODUCT_AMD_PBC756_IDE &&
2113 AMD756_CHIPREV_DISABLEDMA(rev)) {
2114 printf("%s:%d:%d: multi-word DMA disabled due "
2115 "to chip revision\n",
2116 sc->sc_wdcdev.sc_dev.dv_xname,
2117 chp->channel, drive);
2118 mode = drvp->PIO_mode;
2119 drvp->drive_flags &= ~DRIVE_DMA;
2120 goto pio;
2121 }
2122 #endif
2123 /* mode = min(pio, dma+2) */
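			/*
			 * The same timing entry is used for both PIO and
			 * multiword DMA (DMA mode N is derived as entry N+2
			 * below), so cap the index at the slower of the two.
			 */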
2124 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2125 mode = drvp->PIO_mode;
2126 else
2127 mode = drvp->DMA_mode + 2;
2128 }
2129 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2130
2131 pio: /* setup PIO mode */
2132 if (mode <= 2) {
2133 drvp->DMA_mode = 0;
2134 drvp->PIO_mode = 0;
2135 mode = 0;
2136 } else {
2137 drvp->PIO_mode = mode;
2138 drvp->DMA_mode = mode - 2;
2139 }
2140 datatim_reg |=
2141 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2142 amd7x6_pio_set[mode]) |
2143 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2144 amd7x6_pio_rec[mode]);
2145 }
2146 if (idedma_ctl != 0) {
2147 /* Add software bits in status register */
2148 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2149 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2150 idedma_ctl);
2151 }
2152 pciide_print_modes(cp);
2153 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2154 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2155 }
2156
2157 void
2158 apollo_chip_map(sc, pa)
2159 struct pciide_softc *sc;
2160 struct pci_attach_args *pa;
2161 {
2162 struct pciide_channel *cp;
2163 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2164 int channel;
2165 u_int32_t ideconf;
2166 bus_size_t cmdsize, ctlsize;
2167 pcitag_t pcib_tag;
2168 pcireg_t pcib_id, pcib_class;
2169
2170 if (pciide_chipen(sc, pa) == 0)
2171 return;
2172 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2173 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2174 /* and read ID and rev of the ISA bridge */
2175 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2176 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2177 printf(": VIA Technologies ");
2178 switch (PCI_PRODUCT(pcib_id)) {
2179 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2180 printf("VT82C586 (Apollo VP) ");
2181 		if (PCI_REVISION(pcib_class) >= 0x02) {
2182 printf("ATA33 controller\n");
2183 sc->sc_wdcdev.UDMA_cap = 2;
2184 } else {
2185 printf("controller\n");
2186 sc->sc_wdcdev.UDMA_cap = 0;
2187 }
2188 break;
2189 case PCI_PRODUCT_VIATECH_VT82C596A:
2190 printf("VT82C596A (Apollo Pro) ");
2191 if (PCI_REVISION(pcib_class) >= 0x12) {
2192 printf("ATA66 controller\n");
2193 sc->sc_wdcdev.UDMA_cap = 4;
2194 } else {
2195 printf("ATA33 controller\n");
2196 sc->sc_wdcdev.UDMA_cap = 2;
2197 }
2198 break;
2199 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2200 printf("VT82C686A (Apollo KX133) ");
2201 if (PCI_REVISION(pcib_class) >= 0x40) {
2202 printf("ATA100 controller\n");
2203 sc->sc_wdcdev.UDMA_cap = 5;
2204 } else {
2205 printf("ATA66 controller\n");
2206 sc->sc_wdcdev.UDMA_cap = 4;
2207 }
2208 break;
2209 case PCI_PRODUCT_VIATECH_VT8231:
2210 printf("VT8231 ATA100 controller\n");
2211 sc->sc_wdcdev.UDMA_cap = 5;
2212 break;
2213 case PCI_PRODUCT_VIATECH_VT8233:
2214 printf("VT8233 ATA100 controller\n");
2215 sc->sc_wdcdev.UDMA_cap = 5;
2216 break;
2217 case PCI_PRODUCT_VIATECH_VT8233A:
2218 printf("VT8233A ATA133 controller\n");
2219 sc->sc_wdcdev.UDMA_cap = 6;
2220 break;
2221 case PCI_PRODUCT_VIATECH_VT8235:
2222 printf("VT8235 ATA133 controller\n");
2223 sc->sc_wdcdev.UDMA_cap = 6;
2224 break;
2225 default:
2226 printf("unknown ATA controller\n");
2227 sc->sc_wdcdev.UDMA_cap = 0;
2228 }
2229
2230 printf("%s: bus-master DMA support present",
2231 sc->sc_wdcdev.sc_dev.dv_xname);
2232 pciide_mapreg_dma(sc, pa);
2233 printf("\n");
2234 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2235 WDC_CAPABILITY_MODE;
2236 if (sc->sc_dma_ok) {
2237 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2238 sc->sc_wdcdev.irqack = pciide_irqack;
2239 if (sc->sc_wdcdev.UDMA_cap > 0)
2240 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2241 }
2242 sc->sc_wdcdev.PIO_cap = 4;
2243 sc->sc_wdcdev.DMA_cap = 2;
2244 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2245 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2246 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2247
2248 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2249 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2250 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2251 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2252 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2253 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2254 DEBUG_PROBE);
2255
2256 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2257 cp = &sc->pciide_channels[channel];
2258 if (pciide_chansetup(sc, channel, interface) == 0)
2259 continue;
2260
2261 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2262 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2263 printf("%s: %s channel ignored (disabled)\n",
2264 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2265 continue;
2266 }
2267 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2268 pciide_pci_intr);
2269 if (cp->hw_ok == 0)
2270 continue;
2271 if (pciide_chan_candisable(cp)) {
2272 ideconf &= ~APO_IDECONF_EN(channel);
2273 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2274 ideconf);
2275 }
2276 pciide_map_compat_intr(pa, cp, channel, interface);
2277
2278 if (cp->hw_ok == 0)
2279 continue;
2280 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2281 }
2282 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2283 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2284 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2285 }
2286
2287 void
2288 apollo_setup_channel(chp)
2289 struct channel_softc *chp;
2290 {
2291 u_int32_t udmatim_reg, datatim_reg;
2292 u_int8_t idedma_ctl;
2293 int mode, drive;
2294 struct ata_drive_datas *drvp;
2295 struct pciide_channel *cp = (struct pciide_channel*)chp;
2296 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2297
2298 idedma_ctl = 0;
2299 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2300 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2301 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2302 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2303
2304 /* setup DMA if needed */
2305 pciide_channel_dma_setup(cp);
2306
2307 for (drive = 0; drive < 2; drive++) {
2308 drvp = &chp->ch_drive[drive];
2309 /* If no drive, skip */
2310 if ((drvp->drive_flags & DRIVE) == 0)
2311 continue;
2312 /* add timing values, setup DMA if needed */
2313 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2314 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2315 mode = drvp->PIO_mode;
2316 goto pio;
2317 }
2318 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2319 (drvp->drive_flags & DRIVE_UDMA)) {
2320 /* use Ultra/DMA */
2321 drvp->drive_flags &= ~DRIVE_DMA;
2322 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2323 APO_UDMA_EN_MTH(chp->channel, drive);
2324 if (sc->sc_wdcdev.UDMA_cap == 6) {
2325 /* 8233a */
2326 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2327 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2328 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2329 /* 686b */
2330 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2331 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2332 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2333 /* 596b or 686a */
2334 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2335 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2336 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2337 } else {
2338 /* 596a or 586b */
2339 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2340 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2341 }
2342 /* can use PIO timings, MW DMA unused */
2343 mode = drvp->PIO_mode;
2344 } else {
2345 /* use Multiword DMA */
2346 drvp->drive_flags &= ~DRIVE_UDMA;
2347 /* mode = min(pio, dma+2) */
2348 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2349 mode = drvp->PIO_mode;
2350 else
2351 mode = drvp->DMA_mode + 2;
2352 }
2353 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2354
2355 pio: /* setup PIO mode */
2356 if (mode <= 2) {
2357 drvp->DMA_mode = 0;
2358 drvp->PIO_mode = 0;
2359 mode = 0;
2360 } else {
2361 drvp->PIO_mode = mode;
2362 drvp->DMA_mode = mode - 2;
2363 }
2364 datatim_reg |=
2365 APO_DATATIM_PULSE(chp->channel, drive,
2366 apollo_pio_set[mode]) |
2367 APO_DATATIM_RECOV(chp->channel, drive,
2368 apollo_pio_rec[mode]);
2369 }
2370 if (idedma_ctl != 0) {
2371 /* Add software bits in status register */
2372 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2373 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2374 idedma_ctl);
2375 }
2376 pciide_print_modes(cp);
2377 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2378 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2379 }
2380
2381 void
2382 cmd_channel_map(pa, sc, channel)
2383 struct pci_attach_args *pa;
2384 struct pciide_softc *sc;
2385 int channel;
2386 {
2387 struct pciide_channel *cp = &sc->pciide_channels[channel];
2388 bus_size_t cmdsize, ctlsize;
2389 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2390 int interface, one_channel;
2391
2392 /*
2393 * The 0648/0649 can be told to identify as a RAID controller.
2394 	 * In this case, we have to fake the interface value.
2395 */
2396 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2397 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2398 PCIIDE_INTERFACE_SETTABLE(1);
2399 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2400 CMD_CONF_DSA1)
2401 interface |= PCIIDE_INTERFACE_PCI(0) |
2402 PCIIDE_INTERFACE_PCI(1);
2403 } else {
2404 interface = PCI_INTERFACE(pa->pa_class);
2405 }
2406
2407 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2408 cp->name = PCIIDE_CHANNEL_NAME(channel);
2409 cp->wdc_channel.channel = channel;
2410 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2411
2412 /*
2413 	 * Older CMD64X chips don't have independent channels
2414 */
2415 switch (sc->sc_pp->ide_product) {
2416 case PCI_PRODUCT_CMDTECH_649:
2417 one_channel = 0;
2418 break;
2419 default:
2420 one_channel = 1;
2421 break;
2422 }
2423
2424 if (channel > 0 && one_channel) {
2425 cp->wdc_channel.ch_queue =
2426 sc->pciide_channels[0].wdc_channel.ch_queue;
2427 } else {
2428 cp->wdc_channel.ch_queue =
2429 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2430 }
2431 if (cp->wdc_channel.ch_queue == NULL) {
2432 printf("%s %s channel: "
2433 		    "can't allocate memory for command queue\n",
2434 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2435 return;
2436 }
2437
2438 printf("%s: %s channel %s to %s mode\n",
2439 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2440 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2441 "configured" : "wired",
2442 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2443 "native-PCI" : "compatibility");
2444
2445 /*
2446 * with a CMD PCI64x, if we get here, the first channel is enabled:
2447 * there's no way to disable the first channel without disabling
2448 * the whole device
2449 */
2450 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2451 printf("%s: %s channel ignored (disabled)\n",
2452 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2453 return;
2454 }
2455
2456 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2457 if (cp->hw_ok == 0)
2458 return;
2459 if (channel == 1) {
2460 if (pciide_chan_candisable(cp)) {
2461 ctrl &= ~CMD_CTRL_2PORT;
2462 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2463 CMD_CTRL, ctrl);
2464 }
2465 }
2466 pciide_map_compat_intr(pa, cp, channel, interface);
2467 }
2468
2469 int
2470 cmd_pci_intr(arg)
2471 void *arg;
2472 {
2473 struct pciide_softc *sc = arg;
2474 struct pciide_channel *cp;
2475 struct channel_softc *wdc_cp;
2476 int i, rv, crv;
2477 u_int32_t priirq, secirq;
2478
2479 rv = 0;
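	/*
	 * Pending drive interrupts are reported in the CONF register for
	 * the primary channel and in ARTTIM23 for the secondary one.
	 */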
2480 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2481 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2482 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2483 cp = &sc->pciide_channels[i];
2484 wdc_cp = &cp->wdc_channel;
2485 /* If a compat channel skip. */
2486 if (cp->compat)
2487 continue;
2488 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2489 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2490 crv = wdcintr(wdc_cp);
2491 if (crv == 0)
2492 printf("%s:%d: bogus intr\n",
2493 sc->sc_wdcdev.sc_dev.dv_xname, i);
2494 else
2495 rv = 1;
2496 }
2497 }
2498 return rv;
2499 }
2500
2501 void
2502 cmd_chip_map(sc, pa)
2503 struct pciide_softc *sc;
2504 struct pci_attach_args *pa;
2505 {
2506 int channel;
2507
2508 /*
2509 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2510 	 * and the base address registers can be disabled at the
2511 	 * hardware level. In this case, the device is wired
2512 * in compat mode and its first channel is always enabled,
2513 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2514 * In fact, it seems that the first channel of the CMD PCI0640
2515 * can't be disabled.
2516 */
2517
2518 #ifdef PCIIDE_CMD064x_DISABLE
2519 if (pciide_chipen(sc, pa) == 0)
2520 return;
2521 #endif
2522
2523 printf("%s: hardware does not support DMA\n",
2524 sc->sc_wdcdev.sc_dev.dv_xname);
2525 sc->sc_dma_ok = 0;
2526
2527 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2528 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2529 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2530
2531 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2532 cmd_channel_map(pa, sc, channel);
2533 }
2534 }
2535
2536 void
2537 cmd0643_9_chip_map(sc, pa)
2538 struct pciide_softc *sc;
2539 struct pci_attach_args *pa;
2540 {
2541 struct pciide_channel *cp;
2542 int channel;
2543 pcireg_t rev = PCI_REVISION(pa->pa_class);
2544
2545 /*
2546 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2547 	 * and the base address registers can be disabled at the
2548 	 * hardware level. In this case, the device is wired
2549 * in compat mode and its first channel is always enabled,
2550 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2551 * In fact, it seems that the first channel of the CMD PCI0640
2552 * can't be disabled.
2553 */
2554
2555 #ifdef PCIIDE_CMD064x_DISABLE
2556 if (pciide_chipen(sc, pa) == 0)
2557 return;
2558 #endif
2559 printf("%s: bus-master DMA support present",
2560 sc->sc_wdcdev.sc_dev.dv_xname);
2561 pciide_mapreg_dma(sc, pa);
2562 printf("\n");
2563 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2564 WDC_CAPABILITY_MODE;
2565 if (sc->sc_dma_ok) {
2566 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2567 switch (sc->sc_pp->ide_product) {
2568 case PCI_PRODUCT_CMDTECH_649:
2569 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2570 sc->sc_wdcdev.UDMA_cap = 5;
2571 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2572 break;
2573 case PCI_PRODUCT_CMDTECH_648:
2574 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2575 sc->sc_wdcdev.UDMA_cap = 4;
2576 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2577 break;
2578 case PCI_PRODUCT_CMDTECH_646:
2579 if (rev >= CMD0646U2_REV) {
2580 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2581 sc->sc_wdcdev.UDMA_cap = 2;
2582 } else if (rev >= CMD0646U_REV) {
2583 /*
2584 * Linux's driver claims that the 646U is broken
2585 * with UDMA. Only enable it if we know what we're
2586 * doing
2587 */
2588 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2589 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2590 sc->sc_wdcdev.UDMA_cap = 2;
2591 #endif
2592 /* explicitly disable UDMA */
2593 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2594 CMD_UDMATIM(0), 0);
2595 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2596 CMD_UDMATIM(1), 0);
2597 }
2598 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2599 break;
2600 default:
2601 sc->sc_wdcdev.irqack = pciide_irqack;
2602 }
2603 }
2604
2605 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2606 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2607 sc->sc_wdcdev.PIO_cap = 4;
2608 sc->sc_wdcdev.DMA_cap = 2;
2609 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2610
2611 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2612 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2613 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2614 DEBUG_PROBE);
2615
2616 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2617 cp = &sc->pciide_channels[channel];
2618 cmd_channel_map(pa, sc, channel);
2619 if (cp->hw_ok == 0)
2620 continue;
2621 cmd0643_9_setup_channel(&cp->wdc_channel);
2622 }
2623 /*
2624 	 * Note: this also makes sure we clear the IRQ disable and reset
2625 	 * bits.
2626 */
2627 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2628 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2629 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2630 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2631 DEBUG_PROBE);
2632 }
2633
2634 void
2635 cmd0643_9_setup_channel(chp)
2636 struct channel_softc *chp;
2637 {
2638 struct ata_drive_datas *drvp;
2639 u_int8_t tim;
2640 u_int32_t idedma_ctl, udma_reg;
2641 int drive;
2642 struct pciide_channel *cp = (struct pciide_channel*)chp;
2643 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2644
2645 idedma_ctl = 0;
2646 /* setup DMA if needed */
2647 pciide_channel_dma_setup(cp);
2648
2649 for (drive = 0; drive < 2; drive++) {
2650 drvp = &chp->ch_drive[drive];
2651 /* If no drive, skip */
2652 if ((drvp->drive_flags & DRIVE) == 0)
2653 continue;
2654 /* add timing values, setup DMA if needed */
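		/*
		 * Start from the PIO timing value; it is overridden below if
		 * the drive ends up using multiword DMA.
		 */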
2655 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2656 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2657 if (drvp->drive_flags & DRIVE_UDMA) {
2658 /* UltraDMA on a 646U2, 0648 or 0649 */
2659 drvp->drive_flags &= ~DRIVE_DMA;
2660 udma_reg = pciide_pci_read(sc->sc_pc,
2661 sc->sc_tag, CMD_UDMATIM(chp->channel));
2662 if (drvp->UDMA_mode > 2 &&
2663 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2664 CMD_BICSR) &
2665 CMD_BICSR_80(chp->channel)) == 0)
2666 drvp->UDMA_mode = 2;
2667 if (drvp->UDMA_mode > 2)
2668 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2669 else if (sc->sc_wdcdev.UDMA_cap > 2)
2670 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2671 udma_reg |= CMD_UDMATIM_UDMA(drive);
2672 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2673 CMD_UDMATIM_TIM_OFF(drive));
2674 udma_reg |=
2675 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2676 CMD_UDMATIM_TIM_OFF(drive));
2677 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2678 CMD_UDMATIM(chp->channel), udma_reg);
2679 } else {
2680 /*
2681 				 * Use Multiword DMA.
2682 				 * Timings will be used for both PIO and DMA,
2683 				 * so adjust the DMA mode if needed.
2684 				 * If we have a 0646U2/8/9, turn off UDMA.
2685 */
2686 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2687 udma_reg = pciide_pci_read(sc->sc_pc,
2688 sc->sc_tag,
2689 CMD_UDMATIM(chp->channel));
2690 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2691 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2692 CMD_UDMATIM(chp->channel),
2693 udma_reg);
2694 }
2695 if (drvp->PIO_mode >= 3 &&
2696 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2697 drvp->DMA_mode = drvp->PIO_mode - 2;
2698 }
2699 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2700 }
2701 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2702 }
2703 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2704 CMD_DATA_TIM(chp->channel, drive), tim);
2705 }
2706 if (idedma_ctl != 0) {
2707 /* Add software bits in status register */
2708 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2709 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2710 idedma_ctl);
2711 }
2712 pciide_print_modes(cp);
2713 }
2714
2715 void
2716 cmd646_9_irqack(chp)
2717 struct channel_softc *chp;
2718 {
2719 u_int32_t priirq, secirq;
2720 struct pciide_channel *cp = (struct pciide_channel*)chp;
2721 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2722
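	/*
	 * The pending-interrupt bits live in CMD_CONF (primary) and
	 * CMD_ARTTIM23 (secondary); writing the just-read value back
	 * apparently clears them, acknowledging the interrupt at the chip
	 * before the generic pciide ack runs.
	 */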
2723 if (chp->channel == 0) {
2724 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2725 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2726 } else {
2727 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2728 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2729 }
2730 pciide_irqack(chp);
2731 }
2732
2733 void
2734 cmd680_chip_map(sc, pa)
2735 struct pciide_softc *sc;
2736 struct pci_attach_args *pa;
2737 {
2738 struct pciide_channel *cp;
2739 int channel;
2740
2741 if (pciide_chipen(sc, pa) == 0)
2742 return;
2743 printf("%s: bus-master DMA support present",
2744 sc->sc_wdcdev.sc_dev.dv_xname);
2745 pciide_mapreg_dma(sc, pa);
2746 printf("\n");
2747 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2748 WDC_CAPABILITY_MODE;
2749 if (sc->sc_dma_ok) {
2750 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2751 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2752 sc->sc_wdcdev.UDMA_cap = 6;
2753 sc->sc_wdcdev.irqack = pciide_irqack;
2754 }
2755
2756 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2757 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2758 sc->sc_wdcdev.PIO_cap = 4;
2759 sc->sc_wdcdev.DMA_cap = 2;
2760 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2761
2762 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2763 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2764 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2765 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2766 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2767 cp = &sc->pciide_channels[channel];
2768 cmd680_channel_map(pa, sc, channel);
2769 if (cp->hw_ok == 0)
2770 continue;
2771 cmd680_setup_channel(&cp->wdc_channel);
2772 }
2773 }
2774
2775 void
2776 cmd680_channel_map(pa, sc, channel)
2777 struct pci_attach_args *pa;
2778 struct pciide_softc *sc;
2779 int channel;
2780 {
2781 struct pciide_channel *cp = &sc->pciide_channels[channel];
2782 bus_size_t cmdsize, ctlsize;
2783 int interface, i, reg;
2784 static const u_int8_t init_val[] =
2785 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2786 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2787
2788 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2789 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2790 PCIIDE_INTERFACE_SETTABLE(1);
2791 interface |= PCIIDE_INTERFACE_PCI(0) |
2792 PCIIDE_INTERFACE_PCI(1);
2793 } else {
2794 interface = PCI_INTERFACE(pa->pa_class);
2795 }
2796
2797 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2798 cp->name = PCIIDE_CHANNEL_NAME(channel);
2799 cp->wdc_channel.channel = channel;
2800 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2801
2802 cp->wdc_channel.ch_queue =
2803 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2804 if (cp->wdc_channel.ch_queue == NULL) {
2805 printf("%s %s channel: "
2806 		    "can't allocate memory for command queue\n",
2807 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2808 return;
2809 }
2810
2811 /* XXX */
2812 reg = 0xa2 + channel * 16;
2813 for (i = 0; i < sizeof(init_val); i++)
2814 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2815
2816 printf("%s: %s channel %s to %s mode\n",
2817 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2818 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2819 "configured" : "wired",
2820 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2821 "native-PCI" : "compatibility");
2822
2823 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2824 if (cp->hw_ok == 0)
2825 return;
2826 pciide_map_compat_intr(pa, cp, channel, interface);
2827 }
2828
2829 void
2830 cmd680_setup_channel(chp)
2831 struct channel_softc *chp;
2832 {
2833 struct ata_drive_datas *drvp;
2834 u_int8_t mode, off, scsc;
2835 u_int16_t val;
2836 u_int32_t idedma_ctl;
2837 int drive;
2838 struct pciide_channel *cp = (struct pciide_channel*)chp;
2839 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2840 pci_chipset_tag_t pc = sc->sc_pc;
2841 pcitag_t pa = sc->sc_tag;
2842 static const u_int8_t udma2_tbl[] =
2843 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2844 static const u_int8_t udma_tbl[] =
2845 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2846 static const u_int16_t dma_tbl[] =
2847 { 0x2208, 0x10c2, 0x10c1 };
2848 static const u_int16_t pio_tbl[] =
2849 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2850
2851 idedma_ctl = 0;
2852 pciide_channel_dma_setup(cp);
2853 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
2854
2855 for (drive = 0; drive < 2; drive++) {
2856 drvp = &chp->ch_drive[drive];
2857 /* If no drive, skip */
2858 if ((drvp->drive_flags & DRIVE) == 0)
2859 continue;
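		/*
		 * Each drive has a 2-bit transfer-class field in the
		 * 0x80/0x84 config register: 01 = PIO, 10 = multiword DMA,
		 * 11 = UDMA.
		 */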
2860 mode &= ~(0x03 << (drive * 4));
2861 if (drvp->drive_flags & DRIVE_UDMA) {
2862 drvp->drive_flags &= ~DRIVE_DMA;
2863 off = 0xa0 + chp->channel * 16;
2864 if (drvp->UDMA_mode > 2 &&
2865 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
2866 drvp->UDMA_mode = 2;
2867 scsc = pciide_pci_read(pc, pa, 0x8a);
2868 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
2869 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
2870 scsc = pciide_pci_read(pc, pa, 0x8a);
2871 if ((scsc & 0x30) == 0)
2872 drvp->UDMA_mode = 5;
2873 }
2874 mode |= 0x03 << (drive * 4);
2875 off = 0xac + chp->channel * 16 + drive * 2;
2876 val = pciide_pci_read(pc, pa, off) & ~0x3f;
2877 if (scsc & 0x30)
2878 val |= udma2_tbl[drvp->UDMA_mode];
2879 else
2880 val |= udma_tbl[drvp->UDMA_mode];
2881 pciide_pci_write(pc, pa, off, val);
2882 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2883 } else if (drvp->drive_flags & DRIVE_DMA) {
2884 mode |= 0x02 << (drive * 4);
2885 off = 0xa8 + chp->channel * 16 + drive * 2;
2886 val = dma_tbl[drvp->DMA_mode];
2887 pciide_pci_write(pc, pa, off, val & 0xff);
2888 			pciide_pci_write(pc, pa, off + 1, val >> 8);
2889 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2890 } else {
2891 mode |= 0x01 << (drive * 4);
2892 off = 0xa4 + chp->channel * 16 + drive * 2;
2893 val = pio_tbl[drvp->PIO_mode];
2894 pciide_pci_write(pc, pa, off, val & 0xff);
2895 			pciide_pci_write(pc, pa, off + 1, val >> 8);
2896 }
2897 }
2898
2899 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
2900 if (idedma_ctl != 0) {
2901 /* Add software bits in status register */
2902 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2903 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2904 idedma_ctl);
2905 }
2906 pciide_print_modes(cp);
2907 }
2908
2909 void
2910 cy693_chip_map(sc, pa)
2911 struct pciide_softc *sc;
2912 struct pci_attach_args *pa;
2913 {
2914 struct pciide_channel *cp;
2915 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2916 bus_size_t cmdsize, ctlsize;
2917
2918 if (pciide_chipen(sc, pa) == 0)
2919 return;
2920 /*
2921 * this chip has 2 PCI IDE functions, one for primary and one for
2922 * secondary. So we need to call pciide_mapregs_compat() with
2923 * the real channel
2924 */
2925 if (pa->pa_function == 1) {
2926 sc->sc_cy_compatchan = 0;
2927 } else if (pa->pa_function == 2) {
2928 sc->sc_cy_compatchan = 1;
2929 } else {
2930 printf("%s: unexpected PCI function %d\n",
2931 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2932 return;
2933 }
2934 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2935 printf("%s: bus-master DMA support present",
2936 sc->sc_wdcdev.sc_dev.dv_xname);
2937 pciide_mapreg_dma(sc, pa);
2938 } else {
2939 printf("%s: hardware does not support DMA",
2940 sc->sc_wdcdev.sc_dev.dv_xname);
2941 sc->sc_dma_ok = 0;
2942 }
2943 printf("\n");
2944
2945 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2946 if (sc->sc_cy_handle == NULL) {
2947 printf("%s: unable to map hyperCache control registers\n",
2948 sc->sc_wdcdev.sc_dev.dv_xname);
2949 sc->sc_dma_ok = 0;
2950 }
2951
2952 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2953 WDC_CAPABILITY_MODE;
2954 if (sc->sc_dma_ok) {
2955 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2956 sc->sc_wdcdev.irqack = pciide_irqack;
2957 }
2958 sc->sc_wdcdev.PIO_cap = 4;
2959 sc->sc_wdcdev.DMA_cap = 2;
2960 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2961
2962 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2963 sc->sc_wdcdev.nchannels = 1;
2964
2965 /* Only one channel for this chip; if we are here it's enabled */
2966 cp = &sc->pciide_channels[0];
2967 sc->wdc_chanarray[0] = &cp->wdc_channel;
2968 cp->name = PCIIDE_CHANNEL_NAME(0);
2969 cp->wdc_channel.channel = 0;
2970 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2971 cp->wdc_channel.ch_queue =
2972 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2973 if (cp->wdc_channel.ch_queue == NULL) {
2974 printf("%s primary channel: "
2975 		    "can't allocate memory for command queue\n",
2976 sc->sc_wdcdev.sc_dev.dv_xname);
2977 return;
2978 }
2979 printf("%s: primary channel %s to ",
2980 sc->sc_wdcdev.sc_dev.dv_xname,
2981 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2982 "configured" : "wired");
2983 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2984 printf("native-PCI");
2985 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2986 pciide_pci_intr);
2987 } else {
2988 printf("compatibility");
2989 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2990 &cmdsize, &ctlsize);
2991 }
2992 printf(" mode\n");
2993 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2994 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2995 wdcattach(&cp->wdc_channel);
2996 if (pciide_chan_candisable(cp)) {
2997 pci_conf_write(sc->sc_pc, sc->sc_tag,
2998 PCI_COMMAND_STATUS_REG, 0);
2999 }
3000 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3001 if (cp->hw_ok == 0)
3002 return;
3003 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3004 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3005 cy693_setup_channel(&cp->wdc_channel);
3006 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3007 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3008 }
3009
3010 void
3011 cy693_setup_channel(chp)
3012 struct channel_softc *chp;
3013 {
3014 struct ata_drive_datas *drvp;
3015 int drive;
3016 u_int32_t cy_cmd_ctrl;
3017 u_int32_t idedma_ctl;
3018 struct pciide_channel *cp = (struct pciide_channel*)chp;
3019 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3020 int dma_mode = -1;
3021
3022 cy_cmd_ctrl = idedma_ctl = 0;
3023
3024 /* setup DMA if needed */
3025 pciide_channel_dma_setup(cp);
3026
3027 for (drive = 0; drive < 2; drive++) {
3028 drvp = &chp->ch_drive[drive];
3029 /* If no drive, skip */
3030 if ((drvp->drive_flags & DRIVE) == 0)
3031 continue;
3032 /* add timing values, setup DMA if needed */
3033 if (drvp->drive_flags & DRIVE_DMA) {
3034 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3035 /* use Multiword DMA */
3036 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3037 dma_mode = drvp->DMA_mode;
3038 }
3039 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3040 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3041 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3042 CY_CMD_CTRL_IOW_REC_OFF(drive));
3043 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3044 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3045 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3046 CY_CMD_CTRL_IOR_REC_OFF(drive));
3047 }
3048 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
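	/*
	 * There is only one DMA timing setting per channel, so both drives
	 * are given the lowest DMA mode found above.
	 */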
3049 chp->ch_drive[0].DMA_mode = dma_mode;
3050 chp->ch_drive[1].DMA_mode = dma_mode;
3051
3052 if (dma_mode == -1)
3053 dma_mode = 0;
3054
3055 if (sc->sc_cy_handle != NULL) {
3056 /* Note: `multiple' is implied. */
3057 cy82c693_write(sc->sc_cy_handle,
3058 (sc->sc_cy_compatchan == 0) ?
3059 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3060 }
3061
3062 pciide_print_modes(cp);
3063
3064 if (idedma_ctl != 0) {
3065 /* Add software bits in status register */
3066 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3067 IDEDMA_CTL, idedma_ctl);
3068 }
3069 }
3070
3071 static int
3072 sis_hostbr_match(pa)
3073 struct pci_attach_args *pa;
3074 {
3075 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
3076 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
3077 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
3078 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
3079 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735) ||
3080 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_745)));
3081 }
3082
3083 void
3084 sis_chip_map(sc, pa)
3085 struct pciide_softc *sc;
3086 struct pci_attach_args *pa;
3087 {
3088 struct pciide_channel *cp;
3089 int channel;
3090 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3091 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3092 pcireg_t rev = PCI_REVISION(pa->pa_class);
3093 bus_size_t cmdsize, ctlsize;
3094 pcitag_t pchb_tag;
3095 pcireg_t pchb_id, pchb_class;
3096
3097 if (pciide_chipen(sc, pa) == 0)
3098 return;
3099 printf("%s: bus-master DMA support present",
3100 sc->sc_wdcdev.sc_dev.dv_xname);
3101 pciide_mapreg_dma(sc, pa);
3102 printf("\n");
3103
3104 /* get a PCI tag for the host bridge (function 0 of the same device) */
3105 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
3106 	/* and read ID and rev of the host bridge */
3107 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
3108 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
3109
3110 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3111 WDC_CAPABILITY_MODE;
3112 if (sc->sc_dma_ok) {
3113 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3114 sc->sc_wdcdev.irqack = pciide_irqack;
3115 /*
3116 		 * Controllers associated with a rev 0x2 530 host-to-PCI bridge
3117 		 * have problems with UDMA (info provided by Christos).
3118 */
3119 if (rev >= 0xd0 &&
3120 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
3121 PCI_REVISION(pchb_class) >= 0x03))
3122 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3123 }
3124
3125 sc->sc_wdcdev.PIO_cap = 4;
3126 sc->sc_wdcdev.DMA_cap = 2;
3127 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
3128 /*
3129 		 * Use UDMA/100 with the newer SiS host bridges matched by
3130 		 * sis_hostbr_match() above, and UDMA/33 on other chipsets.
3131 */
3132 sc->sc_wdcdev.UDMA_cap =
3133 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
3134 sc->sc_wdcdev.set_modes = sis_setup_channel;
3135
3136 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3137 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3138
3139 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3140 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3141 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
3142
3143 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3144 cp = &sc->pciide_channels[channel];
3145 if (pciide_chansetup(sc, channel, interface) == 0)
3146 continue;
3147 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3148 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3149 printf("%s: %s channel ignored (disabled)\n",
3150 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3151 continue;
3152 }
3153 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3154 pciide_pci_intr);
3155 if (cp->hw_ok == 0)
3156 continue;
3157 if (pciide_chan_candisable(cp)) {
3158 if (channel == 0)
3159 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3160 else
3161 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3162 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3163 sis_ctr0);
3164 }
3165 pciide_map_compat_intr(pa, cp, channel, interface);
3166 if (cp->hw_ok == 0)
3167 continue;
3168 sis_setup_channel(&cp->wdc_channel);
3169 }
3170 }
3171
3172 void
3173 sis_setup_channel(chp)
3174 struct channel_softc *chp;
3175 {
3176 struct ata_drive_datas *drvp;
3177 int drive;
3178 u_int32_t sis_tim;
3179 u_int32_t idedma_ctl;
3180 struct pciide_channel *cp = (struct pciide_channel*)chp;
3181 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3182
3183 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3184 "channel %d 0x%x\n", chp->channel,
3185 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3186 DEBUG_PROBE);
3187 sis_tim = 0;
3188 idedma_ctl = 0;
3189 /* setup DMA if needed */
3190 pciide_channel_dma_setup(cp);
3191
3192 for (drive = 0; drive < 2; drive++) {
3193 drvp = &chp->ch_drive[drive];
3194 /* If no drive, skip */
3195 if ((drvp->drive_flags & DRIVE) == 0)
3196 continue;
3197 /* add timing values, setup DMA if needed */
3198 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3199 (drvp->drive_flags & DRIVE_UDMA) == 0)
3200 goto pio;
3201
3202 if (drvp->drive_flags & DRIVE_UDMA) {
3203 /* use Ultra/DMA */
3204 drvp->drive_flags &= ~DRIVE_DMA;
3205 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3206 SIS_TIM_UDMA_TIME_OFF(drive);
3207 sis_tim |= SIS_TIM_UDMA_EN(drive);
3208 } else {
3209 /*
3210 			 * Use Multiword DMA.
3211 			 * Timings will be used for both PIO and DMA,
3212 			 * so adjust the DMA mode if needed.
3213 */
3214 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3215 drvp->PIO_mode = drvp->DMA_mode + 2;
3216 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3217 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3218 drvp->PIO_mode - 2 : 0;
3219 if (drvp->DMA_mode == 0)
3220 drvp->PIO_mode = 0;
3221 }
3222 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3223 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3224 SIS_TIM_ACT_OFF(drive);
3225 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3226 SIS_TIM_REC_OFF(drive);
3227 }
3228 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3229 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3230 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3231 if (idedma_ctl != 0) {
3232 /* Add software bits in status register */
3233 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3234 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3235 idedma_ctl);
3236 }
3237 pciide_print_modes(cp);
3238 }
3239
3240 void
3241 acer_chip_map(sc, pa)
3242 struct pciide_softc *sc;
3243 struct pci_attach_args *pa;
3244 {
3245 struct pciide_channel *cp;
3246 int channel;
3247 pcireg_t cr, interface;
3248 bus_size_t cmdsize, ctlsize;
3249 pcireg_t rev = PCI_REVISION(pa->pa_class);
3250
3251 if (pciide_chipen(sc, pa) == 0)
3252 return;
3253 printf("%s: bus-master DMA support present",
3254 sc->sc_wdcdev.sc_dev.dv_xname);
3255 pciide_mapreg_dma(sc, pa);
3256 printf("\n");
3257 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3258 WDC_CAPABILITY_MODE;
3259 if (sc->sc_dma_ok) {
3260 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3261 if (rev >= 0x20) {
3262 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3263 if (rev >= 0xC4)
3264 sc->sc_wdcdev.UDMA_cap = 5;
3265 else if (rev >= 0xC2)
3266 sc->sc_wdcdev.UDMA_cap = 4;
3267 else
3268 sc->sc_wdcdev.UDMA_cap = 2;
3269 }
3270 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3271 sc->sc_wdcdev.irqack = pciide_irqack;
3272 }
3273
3274 sc->sc_wdcdev.PIO_cap = 4;
3275 sc->sc_wdcdev.DMA_cap = 2;
3276 sc->sc_wdcdev.set_modes = acer_setup_channel;
3277 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3278 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3279
3280 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3281 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3282 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3283
3284 /* Enable "microsoft register bits" R/W. */
3285 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3286 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3287 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3288 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3289 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3290 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3291 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3292 ~ACER_CHANSTATUSREGS_RO);
3293 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3294 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3295 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3296 /* Don't use cr, re-read the real register content instead */
3297 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3298 PCI_CLASS_REG));
3299
3300 /* From linux: enable "Cable Detection" */
3301 if (rev >= 0xC2) {
3302 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3303 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3304 | ACER_0x4B_CDETECT);
3305 }
3306
3307 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3308 cp = &sc->pciide_channels[channel];
3309 if (pciide_chansetup(sc, channel, interface) == 0)
3310 continue;
3311 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3312 printf("%s: %s channel ignored (disabled)\n",
3313 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3314 continue;
3315 }
3316 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3317 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3318 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3319 if (cp->hw_ok == 0)
3320 continue;
3321 if (pciide_chan_candisable(cp)) {
3322 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3323 pci_conf_write(sc->sc_pc, sc->sc_tag,
3324 PCI_CLASS_REG, cr);
3325 }
3326 pciide_map_compat_intr(pa, cp, channel, interface);
3327 acer_setup_channel(&cp->wdc_channel);
3328 }
3329 }
3330
3331 void
3332 acer_setup_channel(chp)
3333 struct channel_softc *chp;
3334 {
3335 struct ata_drive_datas *drvp;
3336 int drive;
3337 u_int32_t acer_fifo_udma;
3338 u_int32_t idedma_ctl;
3339 struct pciide_channel *cp = (struct pciide_channel*)chp;
3340 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3341
3342 idedma_ctl = 0;
3343 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3344 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3345 acer_fifo_udma), DEBUG_PROBE);
3346 /* setup DMA if needed */
3347 pciide_channel_dma_setup(cp);
3348
3349 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3350 	    DRIVE_UDMA) { /* check for an 80-conductor cable */
3351 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3352 ACER_0x4A_80PIN(chp->channel)) {
3353 if (chp->ch_drive[0].UDMA_mode > 2)
3354 chp->ch_drive[0].UDMA_mode = 2;
3355 if (chp->ch_drive[1].UDMA_mode > 2)
3356 chp->ch_drive[1].UDMA_mode = 2;
3357 }
3358 }
3359
3360 for (drive = 0; drive < 2; drive++) {
3361 drvp = &chp->ch_drive[drive];
3362 /* If no drive, skip */
3363 if ((drvp->drive_flags & DRIVE) == 0)
3364 continue;
3365 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3366 "channel %d drive %d 0x%x\n", chp->channel, drive,
3367 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3368 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3369 /* clear FIFO/DMA mode */
3370 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3371 ACER_UDMA_EN(chp->channel, drive) |
3372 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3373
3374 /* add timing values, setup DMA if needed */
3375 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3376 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3377 acer_fifo_udma |=
3378 ACER_FTH_OPL(chp->channel, drive, 0x1);
3379 goto pio;
3380 }
3381
3382 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3383 if (drvp->drive_flags & DRIVE_UDMA) {
3384 /* use Ultra/DMA */
3385 drvp->drive_flags &= ~DRIVE_DMA;
3386 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3387 acer_fifo_udma |=
3388 ACER_UDMA_TIM(chp->channel, drive,
3389 acer_udma[drvp->UDMA_mode]);
3390 /* XXX disable if one drive < UDMA3 ? */
3391 if (drvp->UDMA_mode >= 3) {
3392 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3393 ACER_0x4B,
3394 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3395 ACER_0x4B) | ACER_0x4B_UDMA66);
3396 }
3397 } else {
3398 /*
3399 			 * Use Multiword DMA.
3400 			 * Timings will be used for both PIO and DMA,
3401 			 * so adjust the DMA mode if needed.
3402 */
3403 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3404 drvp->PIO_mode = drvp->DMA_mode + 2;
3405 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3406 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3407 drvp->PIO_mode - 2 : 0;
3408 if (drvp->DMA_mode == 0)
3409 drvp->PIO_mode = 0;
3410 }
3411 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3412 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3413 ACER_IDETIM(chp->channel, drive),
3414 acer_pio[drvp->PIO_mode]);
3415 }
3416 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3417 acer_fifo_udma), DEBUG_PROBE);
3418 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3419 if (idedma_ctl != 0) {
3420 /* Add software bits in status register */
3421 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3422 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3423 idedma_ctl);
3424 }
3425 pciide_print_modes(cp);
3426 }
3427
3428 int
3429 acer_pci_intr(arg)
3430 void *arg;
3431 {
3432 struct pciide_softc *sc = arg;
3433 struct pciide_channel *cp;
3434 struct channel_softc *wdc_cp;
3435 int i, rv, crv;
3436 u_int32_t chids;
3437
3438 rv = 0;
3439 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3440 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3441 cp = &sc->pciide_channels[i];
3442 wdc_cp = &cp->wdc_channel;
3443 /* If a compat channel skip. */
3444 if (cp->compat)
3445 continue;
3446 if (chids & ACER_CHIDS_INT(i)) {
3447 crv = wdcintr(wdc_cp);
3448 if (crv == 0)
3449 printf("%s:%d: bogus intr\n",
3450 sc->sc_wdcdev.sc_dev.dv_xname, i);
3451 else
3452 rv = 1;
3453 }
3454 }
3455 return rv;
3456 }
3457
3458 void
3459 hpt_chip_map(sc, pa)
3460 struct pciide_softc *sc;
3461 struct pci_attach_args *pa;
3462 {
3463 struct pciide_channel *cp;
3464 int i, compatchan, revision;
3465 pcireg_t interface;
3466 bus_size_t cmdsize, ctlsize;
3467
3468 if (pciide_chipen(sc, pa) == 0)
3469 return;
3470 revision = PCI_REVISION(pa->pa_class);
3471 printf(": Triones/Highpoint ");
3472 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3473 printf("HPT374 IDE Controller\n");
3474 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3475 printf("HPT372 IDE Controller\n");
3476 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3477 if (revision == HPT372_REV)
3478 printf("HPT372 IDE Controller\n");
3479 else if (revision == HPT370_REV)
3480 printf("HPT370 IDE Controller\n");
3481 else if (revision == HPT370A_REV)
3482 printf("HPT370A IDE Controller\n");
3483 else if (revision == HPT366_REV)
3484 printf("HPT366 IDE Controller\n");
3485 else
3486 printf("unknown HPT IDE controller rev %d\n", revision);
3487 } else
3488 printf("unknown HPT IDE controller 0x%x\n",
3489 sc->sc_pp->ide_product);
3490
3491 /*
3492 	 * When the chip is in native mode it identifies itself as a
3493 	 * 'misc mass storage' device. Fake the interface in this case.
3494 */
3495 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3496 interface = PCI_INTERFACE(pa->pa_class);
3497 } else {
3498 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3499 PCIIDE_INTERFACE_PCI(0);
3500 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3501 (revision == HPT370_REV || revision == HPT370A_REV ||
3502 revision == HPT372_REV)) ||
3503 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3504 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3505 interface |= PCIIDE_INTERFACE_PCI(1);
3506 }
3507
3508 printf("%s: bus-master DMA support present",
3509 sc->sc_wdcdev.sc_dev.dv_xname);
3510 pciide_mapreg_dma(sc, pa);
3511 printf("\n");
3512 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3513 WDC_CAPABILITY_MODE;
3514 if (sc->sc_dma_ok) {
3515 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3516 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3517 sc->sc_wdcdev.irqack = pciide_irqack;
3518 }
3519 sc->sc_wdcdev.PIO_cap = 4;
3520 sc->sc_wdcdev.DMA_cap = 2;
3521
3522 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3523 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3524 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3525 revision == HPT366_REV) {
3526 sc->sc_wdcdev.UDMA_cap = 4;
3527 		/*
3528 		 * The 366 has 2 PCI IDE functions, one for the primary and
3529 		 * one for the secondary channel, so we need to call
3530 		 * pciide_mapregs_compat() with the real channel.
3531 		 */
3532 if (pa->pa_function == 0) {
3533 compatchan = 0;
3534 } else if (pa->pa_function == 1) {
3535 compatchan = 1;
3536 } else {
3537 printf("%s: unexpected PCI function %d\n",
3538 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3539 return;
3540 }
3541 sc->sc_wdcdev.nchannels = 1;
3542 } else {
3543 sc->sc_wdcdev.nchannels = 2;
3544 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3545 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3546 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3547 revision == HPT372_REV))
3548 sc->sc_wdcdev.UDMA_cap = 6;
3549 else
3550 sc->sc_wdcdev.UDMA_cap = 5;
3551 }
3552 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3553 cp = &sc->pciide_channels[i];
3554 if (sc->sc_wdcdev.nchannels > 1) {
3555 compatchan = i;
3556 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3557 			    HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3558 printf("%s: %s channel ignored (disabled)\n",
3559 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3560 continue;
3561 }
3562 }
3563 if (pciide_chansetup(sc, i, interface) == 0)
3564 continue;
3565 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3566 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3567 &ctlsize, hpt_pci_intr);
3568 } else {
3569 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3570 &cmdsize, &ctlsize);
3571 }
3572 if (cp->hw_ok == 0)
3573 return;
3574 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3575 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3576 wdcattach(&cp->wdc_channel);
3577 hpt_setup_channel(&cp->wdc_channel);
3578 }
3579 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3580 (revision == HPT370_REV || revision == HPT370A_REV ||
3581 revision == HPT372_REV)) ||
3582 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3583 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3584 		/*
3585 		 * HPT370_REV and higher have a bit to disable interrupts;
3586 		 * make sure to clear it.
3587 		 */
3588 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3589 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3590 ~HPT_CSEL_IRQDIS);
3591 }
3592 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
3593 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3594 	    revision == HPT372_REV) ||
3595 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3596 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3597 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3598 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3599 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3600 return;
3601 }
3602
3603 void
3604 hpt_setup_channel(chp)
3605 struct channel_softc *chp;
3606 {
3607 struct ata_drive_datas *drvp;
3608 int drive;
3609 int cable;
3610 u_int32_t before, after;
3611 u_int32_t idedma_ctl;
3612 struct pciide_channel *cp = (struct pciide_channel*)chp;
3613 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3614 int revision =
3615 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3616
3617 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3618
3619 /* setup DMA if needed */
3620 pciide_channel_dma_setup(cp);
3621
3622 idedma_ctl = 0;
3623
3624 /* Per drive settings */
3625 for (drive = 0; drive < 2; drive++) {
3626 drvp = &chp->ch_drive[drive];
3627 /* If no drive, skip */
3628 if ((drvp->drive_flags & DRIVE) == 0)
3629 continue;
3630 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3631 HPT_IDETIM(chp->channel, drive));
3632
3633 /* add timing values, setup DMA if needed */
3634 if (drvp->drive_flags & DRIVE_UDMA) {
3635 /* use Ultra/DMA */
3636 drvp->drive_flags &= ~DRIVE_DMA;
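			/*
			 * The CBLID bit in HPT_CSEL apparently reads as 1
			 * when only a 40-wire cable is present, so clamp
			 * modes above UDMA2 in that case.
			 */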
3637 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3638 drvp->UDMA_mode > 2)
3639 drvp->UDMA_mode = 2;
3640 switch (sc->sc_pp->ide_product) {
3641 case PCI_PRODUCT_TRIONES_HPT374:
3642 after = hpt374_udma[drvp->UDMA_mode];
3643 break;
3644 case PCI_PRODUCT_TRIONES_HPT372:
3645 after = hpt372_udma[drvp->UDMA_mode];
3646 break;
3647 case PCI_PRODUCT_TRIONES_HPT366:
3648 default:
3649 				switch (revision) {
3650 case HPT372_REV:
3651 after = hpt372_udma[drvp->UDMA_mode];
3652 break;
3653 case HPT370_REV:
3654 case HPT370A_REV:
3655 after = hpt370_udma[drvp->UDMA_mode];
3656 break;
3657 case HPT366_REV:
3658 default:
3659 after = hpt366_udma[drvp->UDMA_mode];
3660 break;
3661 }
3662 }
3663 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3664 } else if (drvp->drive_flags & DRIVE_DMA) {
3665 /*
3666 * use Multiword DMA.
3667 * Timings will be used for both PIO and DMA, so adjust
3668 * DMA mode if needed
3669 */
3670 if (drvp->PIO_mode >= 3 &&
3671 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3672 drvp->DMA_mode = drvp->PIO_mode - 2;
3673 }
3674 switch (sc->sc_pp->ide_product) {
3675 case PCI_PRODUCT_TRIONES_HPT374:
3676 after = hpt374_dma[drvp->DMA_mode];
3677 break;
3678 case PCI_PRODUCT_TRIONES_HPT372:
3679 after = hpt372_dma[drvp->DMA_mode];
3680 break;
3681 case PCI_PRODUCT_TRIONES_HPT366:
3682 default:
3683 				switch (revision) {
3684 case HPT372_REV:
3685 after = hpt372_dma[drvp->DMA_mode];
3686 break;
3687 case HPT370_REV:
3688 case HPT370A_REV:
3689 after = hpt370_dma[drvp->DMA_mode];
3690 break;
3691 case HPT366_REV:
3692 default:
3693 after = hpt366_dma[drvp->DMA_mode];
3694 break;
3695 }
3696 }
3697 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3698 } else {
3699 /* PIO only */
3700 switch (sc->sc_pp->ide_product) {
3701 case PCI_PRODUCT_TRIONES_HPT374:
3702 after = hpt374_pio[drvp->PIO_mode];
3703 break;
3704 case PCI_PRODUCT_TRIONES_HPT372:
3705 after = hpt372_pio[drvp->PIO_mode];
3706 break;
3707 case PCI_PRODUCT_TRIONES_HPT366:
3708 default:
3709 				switch (revision) {
3710 case HPT372_REV:
3711 after = hpt372_pio[drvp->PIO_mode];
3712 break;
3713 case HPT370_REV:
3714 case HPT370A_REV:
3715 after = hpt370_pio[drvp->PIO_mode];
3716 break;
3717 case HPT366_REV:
3718 default:
3719 after = hpt366_pio[drvp->PIO_mode];
3720 break;
3721 }
3722 }
3723 }
3724 pci_conf_write(sc->sc_pc, sc->sc_tag,
3725 HPT_IDETIM(chp->channel, drive), after);
3726 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3727 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3728 after, before), DEBUG_PROBE);
3729 }
3730 if (idedma_ctl != 0) {
3731 /* Add software bits in status register */
3732 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3733 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3734 idedma_ctl);
3735 }
3736 pciide_print_modes(cp);
3737 }
3738
3739 int
3740 hpt_pci_intr(arg)
3741 void *arg;
3742 {
3743 struct pciide_softc *sc = arg;
3744 struct pciide_channel *cp;
3745 struct channel_softc *wdc_cp;
3746 int rv = 0;
3747 int dmastat, i, crv;
3748
3749 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3750 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3751 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3752 		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3753 IDEDMA_CTL_INTR)
3754 continue;
3755 cp = &sc->pciide_channels[i];
3756 wdc_cp = &cp->wdc_channel;
3757 crv = wdcintr(wdc_cp);
3758 if (crv == 0) {
3759 printf("%s:%d: bogus intr\n",
3760 sc->sc_wdcdev.sc_dev.dv_xname, i);
3761 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3762 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3763 } else
3764 rv = 1;
3765 }
3766 return rv;
3767 }
3768
3769
3770 /* Macros to test product */
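/*
 * Judging from the UDMA capabilities assigned below, _262 matches the
 * Ultra/66 generation and newer, _265 the Ultra/100 generation and newer,
 * _268 the TX2 family and newer (no timing registers to program), and
 * _276 the Ultra/133 generation.
 */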
3771 #define PDC_IS_262(sc) \
3772 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3773 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3774 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3775 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3776 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3777 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3778 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3779 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3780 #define PDC_IS_265(sc) \
3781 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3782 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3783 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3784 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3785 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3786 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3787 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3788 #define PDC_IS_268(sc) \
3789 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3790 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3791 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3792 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3793 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3794 #define PDC_IS_276(sc) \
3795 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3796 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3797 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3798
3799 void
3800 pdc202xx_chip_map(sc, pa)
3801 struct pciide_softc *sc;
3802 struct pci_attach_args *pa;
3803 {
3804 struct pciide_channel *cp;
3805 int channel;
3806 pcireg_t interface, st, mode;
3807 bus_size_t cmdsize, ctlsize;
3808
3809 if (!PDC_IS_268(sc)) {
3810 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3811 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3812 st), DEBUG_PROBE);
3813 }
3814 if (pciide_chipen(sc, pa) == 0)
3815 return;
3816
3817 /* turn off RAID mode */
3818 if (!PDC_IS_268(sc))
3819 st &= ~PDC2xx_STATE_IDERAID;
3820
3821 	/*
3822 	 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
3823 	 * mode; we have to fake the interface.
3824 	 */
3825 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3826 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3827 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3828
3829 printf("%s: bus-master DMA support present",
3830 sc->sc_wdcdev.sc_dev.dv_xname);
3831 pciide_mapreg_dma(sc, pa);
3832 printf("\n");
3833 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3834 WDC_CAPABILITY_MODE;
3835 if (sc->sc_dma_ok) {
3836 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3837 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3838 sc->sc_wdcdev.irqack = pciide_irqack;
3839 }
3840 sc->sc_wdcdev.PIO_cap = 4;
3841 sc->sc_wdcdev.DMA_cap = 2;
3842 if (PDC_IS_276(sc))
3843 sc->sc_wdcdev.UDMA_cap = 6;
3844 else if (PDC_IS_265(sc))
3845 sc->sc_wdcdev.UDMA_cap = 5;
3846 else if (PDC_IS_262(sc))
3847 sc->sc_wdcdev.UDMA_cap = 4;
3848 else
3849 sc->sc_wdcdev.UDMA_cap = 2;
3850 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3851 pdc20268_setup_channel : pdc202xx_setup_channel;
3852 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3853 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3854
3855 if (!PDC_IS_268(sc)) {
3856 /* setup failsafe defaults */
3857 mode = 0;
3858 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3859 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3860 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3861 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3862 for (channel = 0;
3863 channel < sc->sc_wdcdev.nchannels;
3864 channel++) {
3865 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3866 "drive 0 initial timings 0x%x, now 0x%x\n",
3867 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3868 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3869 DEBUG_PROBE);
3870 pci_conf_write(sc->sc_pc, sc->sc_tag,
3871 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3872 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3873 "drive 1 initial timings 0x%x, now 0x%x\n",
3874 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3875 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3876 pci_conf_write(sc->sc_pc, sc->sc_tag,
3877 PDC2xx_TIM(channel, 1), mode);
3878 }
3879
3880 mode = PDC2xx_SCR_DMA;
3881 if (PDC_IS_262(sc)) {
3882 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3883 } else {
3884 /* the BIOS set it up this way */
3885 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3886 }
3887 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3888 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3889 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3890 "now 0x%x\n",
3891 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3892 PDC2xx_SCR),
3893 mode), DEBUG_PROBE);
3894 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3895 PDC2xx_SCR, mode);
3896
3897 /* controller initial state register is OK even without BIOS */
3898 /* Set DMA mode to IDE DMA compatibility */
3899 mode =
3900 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3901 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3902 DEBUG_PROBE);
3903 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3904 mode | 0x1);
3905 mode =
3906 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3907 		WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode), DEBUG_PROBE);
3908 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3909 mode | 0x1);
3910 }
3911
3912 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3913 cp = &sc->pciide_channels[channel];
3914 if (pciide_chansetup(sc, channel, interface) == 0)
3915 continue;
3916 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3917 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3918 printf("%s: %s channel ignored (disabled)\n",
3919 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3920 continue;
3921 }
3922 if (PDC_IS_265(sc))
3923 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3924 pdc20265_pci_intr);
3925 else
3926 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3927 pdc202xx_pci_intr);
3928 if (cp->hw_ok == 0)
3929 continue;
3930 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3931 st &= ~(PDC_IS_262(sc) ?
3932 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3933 pciide_map_compat_intr(pa, cp, channel, interface);
3934 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3935 }
3936 if (!PDC_IS_268(sc)) {
3937 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3938 "0x%x\n", st), DEBUG_PROBE);
3939 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3940 }
3941 return;
3942 }
3943
3944 void
3945 pdc202xx_setup_channel(chp)
3946 struct channel_softc *chp;
3947 {
3948 struct ata_drive_datas *drvp;
3949 int drive;
3950 pcireg_t mode, st;
3951 u_int32_t idedma_ctl, scr, atapi;
3952 struct pciide_channel *cp = (struct pciide_channel*)chp;
3953 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3954 int channel = chp->channel;
3955
3956 /* setup DMA if needed */
3957 pciide_channel_dma_setup(cp);
3958
3959 idedma_ctl = 0;
3960 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3961 sc->sc_wdcdev.sc_dev.dv_xname,
3962 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3963 DEBUG_PROBE);
3964
3965 /* Per channel settings */
3966 if (PDC_IS_262(sc)) {
3967 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3968 PDC262_U66);
3969 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
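		/*
		 * When the 80P state bit is set for this channel, or when
		 * either drive can only do UDMA2 or less, both drives are
		 * levelled down to UDMA2; the per-channel PDC262_U66_EN bit
		 * is then only set if a drive still runs above UDMA2.
		 */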
3970 /* Trim UDMA mode */
3971 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3972 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3973 chp->ch_drive[0].UDMA_mode <= 2) ||
3974 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3975 chp->ch_drive[1].UDMA_mode <= 2)) {
3976 if (chp->ch_drive[0].UDMA_mode > 2)
3977 chp->ch_drive[0].UDMA_mode = 2;
3978 if (chp->ch_drive[1].UDMA_mode > 2)
3979 chp->ch_drive[1].UDMA_mode = 2;
3980 }
3981 /* Set U66 if needed */
3982 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3983 chp->ch_drive[0].UDMA_mode > 2) ||
3984 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3985 chp->ch_drive[1].UDMA_mode > 2))
3986 scr |= PDC262_U66_EN(channel);
3987 else
3988 scr &= ~PDC262_U66_EN(channel);
3989 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3990 PDC262_U66, scr);
3991 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3992 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3993 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3994 PDC262_ATAPI(channel))), DEBUG_PROBE);
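		/*
		 * With an ATAPI device on the channel, UDMA is only enabled
		 * in the ATAPI register when the two drives don't mix UDMA
		 * with plain multiword DMA.
		 */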
3995 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3996 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3997 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3998 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3999 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4000 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4001 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4002 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4003 atapi = 0;
4004 else
4005 atapi = PDC262_ATAPI_UDMA;
4006 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4007 PDC262_ATAPI(channel), atapi);
4008 }
4009 }
4010 for (drive = 0; drive < 2; drive++) {
4011 drvp = &chp->ch_drive[drive];
4012 /* If no drive, skip */
4013 if ((drvp->drive_flags & DRIVE) == 0)
4014 continue;
4015 mode = 0;
4016 if (drvp->drive_flags & DRIVE_UDMA) {
4017 /* use Ultra/DMA */
4018 drvp->drive_flags &= ~DRIVE_DMA;
4019 mode = PDC2xx_TIM_SET_MB(mode,
4020 pdc2xx_udma_mb[drvp->UDMA_mode]);
4021 mode = PDC2xx_TIM_SET_MC(mode,
4022 pdc2xx_udma_mc[drvp->UDMA_mode]);
4023 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4024 } else if (drvp->drive_flags & DRIVE_DMA) {
4025 mode = PDC2xx_TIM_SET_MB(mode,
4026 pdc2xx_dma_mb[drvp->DMA_mode]);
4027 mode = PDC2xx_TIM_SET_MC(mode,
4028 pdc2xx_dma_mc[drvp->DMA_mode]);
4029 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4030 } else {
4031 mode = PDC2xx_TIM_SET_MB(mode,
4032 pdc2xx_dma_mb[0]);
4033 mode = PDC2xx_TIM_SET_MC(mode,
4034 pdc2xx_dma_mc[0]);
4035 }
4036 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4037 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4038 if (drvp->drive_flags & DRIVE_ATA)
4039 mode |= PDC2xx_TIM_PRE;
4040 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4041 if (drvp->PIO_mode >= 3) {
4042 mode |= PDC2xx_TIM_IORDY;
4043 if (drive == 0)
4044 mode |= PDC2xx_TIM_IORDYp;
4045 }
4046 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4047 "timings 0x%x\n",
4048 sc->sc_wdcdev.sc_dev.dv_xname,
4049 chp->channel, drive, mode), DEBUG_PROBE);
4050 pci_conf_write(sc->sc_pc, sc->sc_tag,
4051 PDC2xx_TIM(chp->channel, drive), mode);
4052 }
4053 if (idedma_ctl != 0) {
4054 /* Add software bits in status register */
4055 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4056 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4057 idedma_ctl);
4058 }
4059 pciide_print_modes(cp);
4060 }
4061
4062 void
4063 pdc20268_setup_channel(chp)
4064 struct channel_softc *chp;
4065 {
4066 struct ata_drive_datas *drvp;
4067 int drive;
4068 u_int32_t idedma_ctl;
4069 struct pciide_channel *cp = (struct pciide_channel*)chp;
4070 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4071 int u100;
4072
4073 /* setup DMA if needed */
4074 pciide_channel_dma_setup(cp);
4075
4076 idedma_ctl = 0;
4077
4078 	/* I don't know what this is for; FreeBSD does it ... */
4079 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4080 IDEDMA_CMD + 0x1, 0x0b);
4081
4082 	/*
4083 	 * I don't know what this is for; FreeBSD checks this ... it is not
4084 	 * cable type detection.
4085 	 */
4086 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4087 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
4088
4089 for (drive = 0; drive < 2; drive++) {
4090 drvp = &chp->ch_drive[drive];
4091 /* If no drive, skip */
4092 if ((drvp->drive_flags & DRIVE) == 0)
4093 continue;
4094 if (drvp->drive_flags & DRIVE_UDMA) {
4095 /* use Ultra/DMA */
4096 drvp->drive_flags &= ~DRIVE_DMA;
4097 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4098 if (drvp->UDMA_mode > 2 && u100 == 0)
4099 drvp->UDMA_mode = 2;
4100 } else if (drvp->drive_flags & DRIVE_DMA) {
4101 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4102 }
4103 }
4104 	/* no mode setup needed; the controller snoops SET_FEATURES commands */
4105 if (idedma_ctl != 0) {
4106 /* Add software bits in status register */
4107 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4108 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4109 idedma_ctl);
4110 }
4111 pciide_print_modes(cp);
4112 }
4113
4114 int
4115 pdc202xx_pci_intr(arg)
4116 void *arg;
4117 {
4118 struct pciide_softc *sc = arg;
4119 struct pciide_channel *cp;
4120 struct channel_softc *wdc_cp;
4121 int i, rv, crv;
4122 u_int32_t scr;
4123
4124 rv = 0;
4125 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4126 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4127 cp = &sc->pciide_channels[i];
4128 wdc_cp = &cp->wdc_channel;
4129 		/* If a compat channel, skip. */
4130 if (cp->compat)
4131 continue;
4132 if (scr & PDC2xx_SCR_INT(i)) {
4133 crv = wdcintr(wdc_cp);
4134 if (crv == 0)
4135 printf("%s:%d: bogus intr (reg 0x%x)\n",
4136 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4137 else
4138 rv = 1;
4139 }
4140 }
4141 return rv;
4142 }
4143
4144 int
4145 pdc20265_pci_intr(arg)
4146 void *arg;
4147 {
4148 struct pciide_softc *sc = arg;
4149 struct pciide_channel *cp;
4150 struct channel_softc *wdc_cp;
4151 int i, rv, crv;
4152 u_int32_t dmastat;
4153
4154 rv = 0;
4155 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4156 cp = &sc->pciide_channels[i];
4157 wdc_cp = &cp->wdc_channel;
4158 		/* If a compat channel, skip. */
4159 if (cp->compat)
4160 continue;
4161 		/*
4162 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4163 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
4164 		 * so use that instead (requires 2 register reads instead of 1,
4165 		 * but we can't do it another way).
4166 		 */
4167 dmastat = bus_space_read_1(sc->sc_dma_iot,
4168 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4169 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
4170 continue;
4171 crv = wdcintr(wdc_cp);
4172 if (crv == 0)
4173 printf("%s:%d: bogus intr\n",
4174 sc->sc_wdcdev.sc_dev.dv_xname, i);
4175 else
4176 rv = 1;
4177 }
4178 return rv;
4179 }
4180
4181 void
4182 opti_chip_map(sc, pa)
4183 struct pciide_softc *sc;
4184 struct pci_attach_args *pa;
4185 {
4186 struct pciide_channel *cp;
4187 bus_size_t cmdsize, ctlsize;
4188 pcireg_t interface;
4189 u_int8_t init_ctrl;
4190 int channel;
4191
4192 if (pciide_chipen(sc, pa) == 0)
4193 return;
4194 printf("%s: bus-master DMA support present",
4195 sc->sc_wdcdev.sc_dev.dv_xname);
4196
4197 /*
4198 * XXXSCW:
4199 * There seem to be a couple of buggy revisions/implementations
4200 * of the OPTi pciide chipset. This kludge seems to fix one of
4201 * the reported problems (PR/11644) but still fails for the
4202 * other (PR/13151), although the latter may be due to other
4203 * issues too...
4204 */
4205 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4206 printf(" but disabled due to chip rev. <= 0x12");
4207 sc->sc_dma_ok = 0;
4208 } else
4209 pciide_mapreg_dma(sc, pa);
4210
4211 printf("\n");
4212
4213 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4214 WDC_CAPABILITY_MODE;
4215 sc->sc_wdcdev.PIO_cap = 4;
4216 if (sc->sc_dma_ok) {
4217 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4218 sc->sc_wdcdev.irqack = pciide_irqack;
4219 sc->sc_wdcdev.DMA_cap = 2;
4220 }
4221 sc->sc_wdcdev.set_modes = opti_setup_channel;
4222
4223 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4224 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4225
4226 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4227 OPTI_REG_INIT_CONTROL);
4228
4229 interface = PCI_INTERFACE(pa->pa_class);
4230
4231 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4232 cp = &sc->pciide_channels[channel];
4233 if (pciide_chansetup(sc, channel, interface) == 0)
4234 continue;
4235 if (channel == 1 &&
4236 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4237 printf("%s: %s channel ignored (disabled)\n",
4238 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4239 continue;
4240 }
4241 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4242 pciide_pci_intr);
4243 if (cp->hw_ok == 0)
4244 continue;
4245 pciide_map_compat_intr(pa, cp, channel, interface);
4246 if (cp->hw_ok == 0)
4247 continue;
4248 opti_setup_channel(&cp->wdc_channel);
4249 }
4250 }
4251
4252 void
4253 opti_setup_channel(chp)
4254 struct channel_softc *chp;
4255 {
4256 struct ata_drive_datas *drvp;
4257 struct pciide_channel *cp = (struct pciide_channel*)chp;
4258 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4259 int drive, spd;
4260 int mode[2];
4261 u_int8_t rv, mr;
4262
4263 /*
4264 * The `Delay' and `Address Setup Time' fields of the
4265 * Miscellaneous Register are always zero initially.
4266 */
4267 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4268 mr &= ~(OPTI_MISC_DELAY_MASK |
4269 OPTI_MISC_ADDR_SETUP_MASK |
4270 OPTI_MISC_INDEX_MASK);
4271
4272 /* Prime the control register before setting timing values */
4273 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4274
4275 	/* Determine the clock rate of the PCI bus the chip is attached to */
4276 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4277 spd &= OPTI_STRAP_PCI_SPEED_MASK;
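	/* spd selects the per-clock-rate row of the opti_tim_* tables below */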
4278
4279 /* setup DMA if needed */
4280 pciide_channel_dma_setup(cp);
4281
4282 for (drive = 0; drive < 2; drive++) {
4283 drvp = &chp->ch_drive[drive];
4284 /* If no drive, skip */
4285 if ((drvp->drive_flags & DRIVE) == 0) {
4286 mode[drive] = -1;
4287 continue;
4288 }
4289
4290 if ((drvp->drive_flags & DRIVE_DMA)) {
4291 /*
4292 * Timings will be used for both PIO and DMA,
4293 * so adjust DMA mode if needed
4294 */
4295 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4296 drvp->PIO_mode = drvp->DMA_mode + 2;
4297 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4298 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4299 drvp->PIO_mode - 2 : 0;
4300 if (drvp->DMA_mode == 0)
4301 drvp->PIO_mode = 0;
4302
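			/*
			 * DMA modes index the same timing tables as PIO
			 * modes; entries 5..7 presumably hold the multiword
			 * DMA 0..2 timings.
			 */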
4303 mode[drive] = drvp->DMA_mode + 5;
4304 } else
4305 mode[drive] = drvp->PIO_mode;
4306
4307 if (drive && mode[0] >= 0 &&
4308 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4309 /*
4310 * Can't have two drives using different values
4311 * for `Address Setup Time'.
4312 * Slow down the faster drive to compensate.
4313 */
4314 int d = (opti_tim_as[spd][mode[0]] >
4315 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4316
4317 mode[d] = mode[1-d];
4318 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4319 chp->ch_drive[d].DMA_mode = 0;
4320 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4321 }
4322 }
4323
4324 for (drive = 0; drive < 2; drive++) {
4325 int m;
4326 if ((m = mode[drive]) < 0)
4327 continue;
4328
4329 /* Set the Address Setup Time and select appropriate index */
4330 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4331 rv |= OPTI_MISC_INDEX(drive);
4332 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4333
4334 /* Set the pulse width and recovery timing parameters */
4335 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4336 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4337 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4338 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4339
4340 /* Set the Enhanced Mode register appropriately */
4341 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4342 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4343 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4344 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4345 }
4346
4347 /* Finally, enable the timings */
4348 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4349
4350 pciide_print_modes(cp);
4351 }
4352
4353 #define ACARD_IS_850(sc) \
4354 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4355
4356 void
4357 acard_chip_map(sc, pa)
4358 struct pciide_softc *sc;
4359 struct pci_attach_args *pa;
4360 {
4361 struct pciide_channel *cp;
4362 int i;
4363 pcireg_t interface;
4364 bus_size_t cmdsize, ctlsize;
4365
4366 if (pciide_chipen(sc, pa) == 0)
4367 return;
4368
4369 	/*
4370 	 * When the chip is in native mode it identifies itself as a
4371 	 * 'misc mass storage' device. Fake the interface in this case.
4372 	 */
4373 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4374 interface = PCI_INTERFACE(pa->pa_class);
4375 } else {
4376 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4377 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4378 }
4379
4380 printf("%s: bus-master DMA support present",
4381 sc->sc_wdcdev.sc_dev.dv_xname);
4382 pciide_mapreg_dma(sc, pa);
4383 printf("\n");
4384 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4385 WDC_CAPABILITY_MODE;
4386
4387 if (sc->sc_dma_ok) {
4388 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4389 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4390 sc->sc_wdcdev.irqack = pciide_irqack;
4391 }
4392 sc->sc_wdcdev.PIO_cap = 4;
4393 sc->sc_wdcdev.DMA_cap = 2;
4394 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4395
4396 sc->sc_wdcdev.set_modes = acard_setup_channel;
4397 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4398 sc->sc_wdcdev.nchannels = 2;
4399
4400 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4401 cp = &sc->pciide_channels[i];
4402 if (pciide_chansetup(sc, i, interface) == 0)
4403 continue;
4404 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4405 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4406 &ctlsize, pciide_pci_intr);
4407 } else {
4408 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4409 &cmdsize, &ctlsize);
4410 }
4411 if (cp->hw_ok == 0)
4412 return;
4413 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4414 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4415 wdcattach(&cp->wdc_channel);
4416 acard_setup_channel(&cp->wdc_channel);
4417 }
4418 if (!ACARD_IS_850(sc)) {
4419 u_int32_t reg;
4420 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4421 reg &= ~ATP860_CTRL_INT;
4422 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4423 }
4424 }
4425
4426 void
4427 acard_setup_channel(chp)
4428 struct channel_softc *chp;
4429 {
4430 struct ata_drive_datas *drvp;
4431 struct pciide_channel *cp = (struct pciide_channel*)chp;
4432 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4433 int channel = chp->channel;
4434 int drive;
4435 u_int32_t idetime, udma_mode;
4436 u_int32_t idedma_ctl;
4437
4438 /* setup DMA if needed */
4439 pciide_channel_dma_setup(cp);
4440
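	/*
	 * The ATP850 has one IDETIME register per channel and a shared UDMA
	 * register, while the ATP860 and later pack both channels into a
	 * single IDETIME and a single UDMA register, hence the different
	 * masks below.
	 */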
4441 if (ACARD_IS_850(sc)) {
4442 idetime = 0;
4443 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4444 udma_mode &= ~ATP850_UDMA_MASK(channel);
4445 } else {
4446 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4447 idetime &= ~ATP860_SETTIME_MASK(channel);
4448 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4449 udma_mode &= ~ATP860_UDMA_MASK(channel);
4450
4451 		/* check for an 80-pin cable */
4452 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4453 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4454 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4455 & ATP860_CTRL_80P(chp->channel)) {
4456 if (chp->ch_drive[0].UDMA_mode > 2)
4457 chp->ch_drive[0].UDMA_mode = 2;
4458 if (chp->ch_drive[1].UDMA_mode > 2)
4459 chp->ch_drive[1].UDMA_mode = 2;
4460 }
4461 }
4462 }
4463
4464 idedma_ctl = 0;
4465
4466 /* Per drive settings */
4467 for (drive = 0; drive < 2; drive++) {
4468 drvp = &chp->ch_drive[drive];
4469 /* If no drive, skip */
4470 if ((drvp->drive_flags & DRIVE) == 0)
4471 continue;
4472 /* add timing values, setup DMA if needed */
4473 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4474 (drvp->drive_flags & DRIVE_UDMA)) {
4475 /* use Ultra/DMA */
4476 if (ACARD_IS_850(sc)) {
4477 idetime |= ATP850_SETTIME(drive,
4478 acard_act_udma[drvp->UDMA_mode],
4479 acard_rec_udma[drvp->UDMA_mode]);
4480 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4481 acard_udma_conf[drvp->UDMA_mode]);
4482 } else {
4483 idetime |= ATP860_SETTIME(channel, drive,
4484 acard_act_udma[drvp->UDMA_mode],
4485 acard_rec_udma[drvp->UDMA_mode]);
4486 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4487 acard_udma_conf[drvp->UDMA_mode]);
4488 }
4489 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4490 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4491 (drvp->drive_flags & DRIVE_DMA)) {
4492 /* use Multiword DMA */
4493 drvp->drive_flags &= ~DRIVE_UDMA;
4494 if (ACARD_IS_850(sc)) {
4495 idetime |= ATP850_SETTIME(drive,
4496 acard_act_dma[drvp->DMA_mode],
4497 acard_rec_dma[drvp->DMA_mode]);
4498 } else {
4499 idetime |= ATP860_SETTIME(channel, drive,
4500 acard_act_dma[drvp->DMA_mode],
4501 acard_rec_dma[drvp->DMA_mode]);
4502 }
4503 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4504 } else {
4505 /* PIO only */
4506 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4507 if (ACARD_IS_850(sc)) {
4508 idetime |= ATP850_SETTIME(drive,
4509 acard_act_pio[drvp->PIO_mode],
4510 acard_rec_pio[drvp->PIO_mode]);
4511 } else {
4512 idetime |= ATP860_SETTIME(channel, drive,
4513 acard_act_pio[drvp->PIO_mode],
4514 acard_rec_pio[drvp->PIO_mode]);
4515 }
4516 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4517 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4518 | ATP8x0_CTRL_EN(channel));
4519 }
4520 }
4521
4522 if (idedma_ctl != 0) {
4523 /* Add software bits in status register */
4524 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4525 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4526 }
4527 pciide_print_modes(cp);
4528
4529 if (ACARD_IS_850(sc)) {
4530 pci_conf_write(sc->sc_pc, sc->sc_tag,
4531 ATP850_IDETIME(channel), idetime);
4532 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4533 } else {
4534 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4535 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4536 }
4537 }
4538
4539 int
4540 acard_pci_intr(arg)
4541 void *arg;
4542 {
4543 struct pciide_softc *sc = arg;
4544 struct pciide_channel *cp;
4545 struct channel_softc *wdc_cp;
4546 int rv = 0;
4547 int dmastat, i, crv;
4548
4549 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4550 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4551 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4552 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4553 continue;
4554 cp = &sc->pciide_channels[i];
4555 wdc_cp = &cp->wdc_channel;
4556 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4557 (void)wdcintr(wdc_cp);
4558 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4559 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4560 continue;
4561 }
4562 crv = wdcintr(wdc_cp);
4563 if (crv == 0)
4564 printf("%s:%d: bogus intr\n",
4565 sc->sc_wdcdev.sc_dev.dv_xname, i);
4566 else if (crv == 1)
4567 rv = 1;
4568 else if (rv == 0)
4569 rv = crv;
4570 }
4571 return rv;
4572 }
4573
4574 static int
4575 sl82c105_bugchk(struct pci_attach_args *pa)
4576 {
4577
4578 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4579 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4580 return (0);
4581
4582 if (PCI_REVISION(pa->pa_class) <= 0x05)
4583 return (1);
4584
4585 return (0);
4586 }
4587
4588 void
4589 sl82c105_chip_map(sc, pa)
4590 struct pciide_softc *sc;
4591 struct pci_attach_args *pa;
4592 {
4593 struct pciide_channel *cp;
4594 bus_size_t cmdsize, ctlsize;
4595 pcireg_t interface, idecr;
4596 int channel;
4597
4598 if (pciide_chipen(sc, pa) == 0)
4599 return;
4600
4601 printf("%s: bus-master DMA support present",
4602 sc->sc_wdcdev.sc_dev.dv_xname);
4603
4604 /*
4605 * Check to see if we're part of the Winbond 83c553 Southbridge.
4606 * If so, we need to disable DMA on rev. <= 5 of that chip.
4607 */
4608 if (pci_find_device(pa, sl82c105_bugchk)) {
4609 printf(" but disabled due to 83c553 rev. <= 0x05");
4610 sc->sc_dma_ok = 0;
4611 } else
4612 pciide_mapreg_dma(sc, pa);
4613 printf("\n");
4614
4615 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4616 WDC_CAPABILITY_MODE;
4617 sc->sc_wdcdev.PIO_cap = 4;
4618 if (sc->sc_dma_ok) {
4619 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4620 sc->sc_wdcdev.irqack = pciide_irqack;
4621 sc->sc_wdcdev.DMA_cap = 2;
4622 }
4623 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4624
4625 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4626 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4627
4628 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4629
4630 interface = PCI_INTERFACE(pa->pa_class);
4631
4632 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4633 cp = &sc->pciide_channels[channel];
4634 if (pciide_chansetup(sc, channel, interface) == 0)
4635 continue;
4636 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4637 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4638 printf("%s: %s channel ignored (disabled)\n",
4639 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4640 continue;
4641 }
4642 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4643 pciide_pci_intr);
4644 if (cp->hw_ok == 0)
4645 continue;
4646 pciide_map_compat_intr(pa, cp, channel, interface);
4647 if (cp->hw_ok == 0)
4648 continue;
4649 sl82c105_setup_channel(&cp->wdc_channel);
4650 }
4651 }
4652
4653 void
4654 sl82c105_setup_channel(chp)
4655 struct channel_softc *chp;
4656 {
4657 struct ata_drive_datas *drvp;
4658 struct pciide_channel *cp = (struct pciide_channel*)chp;
4659 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4660 int pxdx_reg, drive;
4661 pcireg_t pxdx;
4662
4663 /* Set up DMA if needed. */
4664 pciide_channel_dma_setup(cp);
4665
4666 for (drive = 0; drive < 2; drive++) {
4667 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4668 : SYMPH_P1D0CR) + (drive * 4);
4669
4670 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4671
4672 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4673 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4674
4675 drvp = &chp->ch_drive[drive];
4676 /* If no drive, skip. */
4677 if ((drvp->drive_flags & DRIVE) == 0) {
4678 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4679 continue;
4680 }
4681
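		/*
		 * Net effect of the checks below: multiword DMA is only kept
		 * when it can share timings with PIO mode 3 or 4, i.e. when
		 * the drive does MW DMA mode 1 or 2.
		 */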
4682 if (drvp->drive_flags & DRIVE_DMA) {
4683 /*
4684 * Timings will be used for both PIO and DMA,
4685 * so adjust DMA mode if needed.
4686 */
4687 if (drvp->PIO_mode >= 3) {
4688 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4689 drvp->DMA_mode = drvp->PIO_mode - 2;
4690 if (drvp->DMA_mode < 1) {
4691 /*
4692 * Can't mix both PIO and DMA.
4693 * Disable DMA.
4694 */
4695 drvp->drive_flags &= ~DRIVE_DMA;
4696 }
4697 } else {
4698 /*
4699 * Can't mix both PIO and DMA. Disable
4700 * DMA.
4701 */
4702 drvp->drive_flags &= ~DRIVE_DMA;
4703 }
4704 }
4705
4706 if (drvp->drive_flags & DRIVE_DMA) {
4707 /* Use multi-word DMA. */
4708 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4709 PxDx_CMD_ON_SHIFT;
4710 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4711 } else {
4712 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4713 PxDx_CMD_ON_SHIFT;
4714 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4715 }
4716
4717 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4718
4719 /* ...and set the mode for this drive. */
4720 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4721 }
4722
4723 pciide_print_modes(cp);
4724 }
4725
4726 void
4727 serverworks_chip_map(sc, pa)
4728 struct pciide_softc *sc;
4729 struct pci_attach_args *pa;
4730 {
4731 struct pciide_channel *cp;
4732 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4733 pcitag_t pcib_tag;
4734 int channel;
4735 bus_size_t cmdsize, ctlsize;
4736
4737 if (pciide_chipen(sc, pa) == 0)
4738 return;
4739
4740 printf("%s: bus-master DMA support present",
4741 sc->sc_wdcdev.sc_dev.dv_xname);
4742 pciide_mapreg_dma(sc, pa);
4743 printf("\n");
4744 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4745 WDC_CAPABILITY_MODE;
4746
4747 if (sc->sc_dma_ok) {
4748 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4749 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4750 sc->sc_wdcdev.irqack = pciide_irqack;
4751 }
4752 sc->sc_wdcdev.PIO_cap = 4;
4753 sc->sc_wdcdev.DMA_cap = 2;
4754 switch (sc->sc_pp->ide_product) {
4755 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4756 sc->sc_wdcdev.UDMA_cap = 2;
4757 break;
4758 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4759 if (PCI_REVISION(pa->pa_class) < 0x92)
4760 sc->sc_wdcdev.UDMA_cap = 4;
4761 else
4762 sc->sc_wdcdev.UDMA_cap = 5;
4763 break;
4764 }
4765
4766 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4767 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4768 sc->sc_wdcdev.nchannels = 2;
4769
4770 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4771 cp = &sc->pciide_channels[channel];
4772 if (pciide_chansetup(sc, channel, interface) == 0)
4773 continue;
4774 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4775 serverworks_pci_intr);
4776 if (cp->hw_ok == 0)
4777 return;
4778 pciide_map_compat_intr(pa, cp, channel, interface);
4779 if (cp->hw_ok == 0)
4780 return;
4781 serverworks_setup_channel(&cp->wdc_channel);
4782 }
4783
4784 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4785 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4786 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4787 }
4788
4789 void
4790 serverworks_setup_channel(chp)
4791 struct channel_softc *chp;
4792 {
4793 struct ata_drive_datas *drvp;
4794 struct pciide_channel *cp = (struct pciide_channel*)chp;
4795 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4796 int channel = chp->channel;
4797 int drive, unit;
4798 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4799 u_int32_t idedma_ctl;
4800 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4801 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4802
4803 /* setup DMA if needed */
4804 pciide_channel_dma_setup(cp);
4805
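	/*
	 * Register layout as implied by the masks below (a sketch, not from
	 * documentation): 0x40/0x44 hold one timing byte per drive (16 bits
	 * per channel), 0x48/0x54 hold one mode nibble per drive starting at
	 * bit 16, and the low bits of 0x54 are per-drive UDMA enables.
	 */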
4806 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4807 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4808 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4809 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4810
4811 pio_time &= ~(0xffff << (16 * channel));
4812 dma_time &= ~(0xffff << (16 * channel));
4813 pio_mode &= ~(0xff << (8 * channel + 16));
4814 udma_mode &= ~(0xff << (8 * channel + 16));
4815 udma_mode &= ~(3 << (2 * channel));
4816
4817 idedma_ctl = 0;
4818
4819 /* Per drive settings */
4820 for (drive = 0; drive < 2; drive++) {
4821 drvp = &chp->ch_drive[drive];
4822 /* If no drive, skip */
4823 if ((drvp->drive_flags & DRIVE) == 0)
4824 continue;
4825 unit = drive + 2 * channel;
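		/*
		 * The (unit ^ 1) in the shifts below apparently puts drive
		 * 0's timing byte in the upper byte of the channel's 16-bit
		 * field.
		 */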
4826 /* add timing values, setup DMA if needed */
4827 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4828 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4829 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4830 (drvp->drive_flags & DRIVE_UDMA)) {
4831 /* use Ultra/DMA, check for 80-pin cable */
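			/*
			 * 80-wire cable presence is apparently reported via
			 * the subsystem ID register, one bit per channel at
			 * bits 14 and 15.
			 */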
4832 if (drvp->UDMA_mode > 2 &&
4833 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4834 drvp->UDMA_mode = 2;
4835 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4836 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4837 udma_mode |= 1 << unit;
4838 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4839 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4840 (drvp->drive_flags & DRIVE_DMA)) {
4841 /* use Multiword DMA */
4842 drvp->drive_flags &= ~DRIVE_UDMA;
4843 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4844 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4845 } else {
4846 /* PIO only */
4847 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4848 }
4849 }
4850
4851 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4852 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4853 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4854 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4855 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4856
4857 if (idedma_ctl != 0) {
4858 /* Add software bits in status register */
4859 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4860 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4861 }
4862 pciide_print_modes(cp);
4863 }
4864
4865 int
4866 serverworks_pci_intr(arg)
4867 void *arg;
4868 {
4869 struct pciide_softc *sc = arg;
4870 struct pciide_channel *cp;
4871 struct channel_softc *wdc_cp;
4872 int rv = 0;
4873 int dmastat, i, crv;
4874
4875 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4876 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4877 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4878 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4879 IDEDMA_CTL_INTR)
4880 continue;
4881 cp = &sc->pciide_channels[i];
4882 wdc_cp = &cp->wdc_channel;
4883 crv = wdcintr(wdc_cp);
4884 if (crv == 0) {
4885 printf("%s:%d: bogus intr\n",
4886 sc->sc_wdcdev.sc_dev.dv_xname, i);
4887 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4888 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4889 } else
4890 rv = 1;
4891 }
4892 return rv;
4893 }
4894