1 /* $NetBSD: pciide.c,v 1.166 2002/08/23 16:24:54 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.166 2002/08/23 16:24:54 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
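/*
 * WDCDEBUG is forced on for this file; set wdcdebug_pciide_mask (below)
 * to a combination of the DEBUG_* bits to get debug output at run time.
 */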
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
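/*
 * PCI configuration space is only addressable one 32-bit dword at a time,
 * so a byte-wide register is read by fetching the containing dword and
 * shifting, and written with a read-modify-write of that dword, as done
 * by the two helpers below.
 */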
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd680_setup_channel __P((struct channel_softc*));
182 void cmd680_channel_map __P((struct pci_attach_args *,
183 struct pciide_softc *, int));
184
185 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void cy693_setup_channel __P((struct channel_softc*));
187
188 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void sis_setup_channel __P((struct channel_softc*));
190 static int sis_hostbr_match __P(( struct pci_attach_args *));
191
192 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void acer_setup_channel __P((struct channel_softc*));
194 int acer_pci_intr __P((void *));
195
196 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void pdc202xx_setup_channel __P((struct channel_softc*));
198 void pdc20268_setup_channel __P((struct channel_softc*));
199 int pdc202xx_pci_intr __P((void *));
200 int pdc20265_pci_intr __P((void *));
201
202 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void opti_setup_channel __P((struct channel_softc*));
204
205 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void hpt_setup_channel __P((struct channel_softc*));
207 int hpt_pci_intr __P((void *));
208
209 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void acard_setup_channel __P((struct channel_softc*));
211 int acard_pci_intr __P((void *));
212
213 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void serverworks_setup_channel __P((struct channel_softc*));
215 int serverworks_pci_intr __P((void *));
216
217 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void sl82c105_setup_channel __P((struct channel_softc*));
219
220 void pciide_channel_dma_setup __P((struct pciide_channel *));
221 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
222 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
223 void pciide_dma_start __P((void*, int, int));
224 int pciide_dma_finish __P((void*, int, int, int));
225 void pciide_irqack __P((struct channel_softc *));
226 void pciide_print_modes __P((struct pciide_channel *));
227
228 struct pciide_product_desc {
229 u_int32_t ide_product;
230 int ide_flags;
231 const char *ide_name;
232 /* map and setup chip, probe drives */
233 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
234 };
235
236 /* Flags for ide_flags */
237 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
238 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
239
240 /* Default product description for devices not known to this driver */
241 const struct pciide_product_desc default_product_desc = {
242 0,
243 0,
244 "Generic PCI IDE controller",
245 default_chip_map,
246 };
247
248 const struct pciide_product_desc pciide_intel_products[] = {
249 { PCI_PRODUCT_INTEL_82092AA,
250 0,
251 "Intel 82092AA IDE controller",
252 default_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82371FB_IDE,
255 0,
256 "Intel 82371FB IDE controller (PIIX)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82371SB_IDE,
260 0,
261 "Intel 82371SB IDE Interface (PIIX3)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82371AB_IDE,
265 0,
266 "Intel 82371AB IDE controller (PIIX4)",
267 piix_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82440MX_IDE,
270 0,
271 "Intel 82440MX IDE controller",
272 piix_chip_map
273 },
274 { PCI_PRODUCT_INTEL_82801AA_IDE,
275 0,
276 "Intel 82801AA IDE Controller (ICH)",
277 piix_chip_map,
278 },
279 { PCI_PRODUCT_INTEL_82801AB_IDE,
280 0,
281 "Intel 82801AB IDE Controller (ICH0)",
282 piix_chip_map,
283 },
284 { PCI_PRODUCT_INTEL_82801BA_IDE,
285 0,
286 "Intel 82801BA IDE Controller (ICH2)",
287 piix_chip_map,
288 },
289 { PCI_PRODUCT_INTEL_82801BAM_IDE,
290 0,
291 "Intel 82801BAM IDE Controller (ICH2)",
292 piix_chip_map,
293 },
294 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
295 0,
296 "Intel 82801CA IDE Controller",
297 piix_chip_map,
298 },
299 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
300 0,
301 "Intel 82801CA IDE Controller",
302 piix_chip_map,
303 },
304 { PCI_PRODUCT_INTEL_82801DB_IDE,
305 0,
306 "Intel 82801DB IDE Controller (ICH4)",
307 piix_chip_map,
308 },
309 { 0,
310 0,
311 NULL,
312 NULL
313 }
314 };
315
316 const struct pciide_product_desc pciide_amd_products[] = {
317 { PCI_PRODUCT_AMD_PBC756_IDE,
318 0,
319 "Advanced Micro Devices AMD756 IDE Controller",
320 amd7x6_chip_map
321 },
322 { PCI_PRODUCT_AMD_PBC766_IDE,
323 0,
324 "Advanced Micro Devices AMD766 IDE Controller",
325 amd7x6_chip_map
326 },
327 { PCI_PRODUCT_AMD_PBC768_IDE,
328 0,
329 "Advanced Micro Devices AMD768 IDE Controller",
330 amd7x6_chip_map
331 },
332 { PCI_PRODUCT_AMD_PBC8111_IDE,
333 0,
334 "Advanced Micro Devices AMD8111 IDE Controller",
335 amd7x6_chip_map
336 },
337 { 0,
338 0,
339 NULL,
340 NULL
341 }
342 };
343
344 const struct pciide_product_desc pciide_cmd_products[] = {
345 { PCI_PRODUCT_CMDTECH_640,
346 0,
347 "CMD Technology PCI0640",
348 cmd_chip_map
349 },
350 { PCI_PRODUCT_CMDTECH_643,
351 0,
352 "CMD Technology PCI0643",
353 cmd0643_9_chip_map,
354 },
355 { PCI_PRODUCT_CMDTECH_646,
356 0,
357 "CMD Technology PCI0646",
358 cmd0643_9_chip_map,
359 },
360 { PCI_PRODUCT_CMDTECH_648,
361 IDE_PCI_CLASS_OVERRIDE,
362 "CMD Technology PCI0648",
363 cmd0643_9_chip_map,
364 },
365 { PCI_PRODUCT_CMDTECH_649,
366 IDE_PCI_CLASS_OVERRIDE,
367 "CMD Technology PCI0649",
368 cmd0643_9_chip_map,
369 },
370 { PCI_PRODUCT_CMDTECH_680,
371 IDE_PCI_CLASS_OVERRIDE,
372 "Silicon Image 0680",
373 cmd680_chip_map,
374 },
375 { 0,
376 0,
377 NULL,
378 NULL
379 }
380 };
381
382 const struct pciide_product_desc pciide_via_products[] = {
383 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
384 0,
385 NULL,
386 apollo_chip_map,
387 },
388 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
389 0,
390 NULL,
391 apollo_chip_map,
392 },
393 { 0,
394 0,
395 NULL,
396 NULL
397 }
398 };
399
400 const struct pciide_product_desc pciide_cypress_products[] = {
401 { PCI_PRODUCT_CONTAQ_82C693,
402 IDE_16BIT_IOSPACE,
403 "Cypress 82C693 IDE Controller",
404 cy693_chip_map,
405 },
406 { 0,
407 0,
408 NULL,
409 NULL
410 }
411 };
412
413 const struct pciide_product_desc pciide_sis_products[] = {
414 { PCI_PRODUCT_SIS_5597_IDE,
415 0,
416 "Silicon Integrated System 5597/5598 IDE controller",
417 sis_chip_map,
418 },
419 { 0,
420 0,
421 NULL,
422 NULL
423 }
424 };
425
426 const struct pciide_product_desc pciide_acer_products[] = {
427 { PCI_PRODUCT_ALI_M5229,
428 0,
429 "Acer Labs M5229 UDMA IDE Controller",
430 acer_chip_map,
431 },
432 { 0,
433 0,
434 NULL,
435 NULL
436 }
437 };
438
439 const struct pciide_product_desc pciide_promise_products[] = {
440 { PCI_PRODUCT_PROMISE_ULTRA33,
441 IDE_PCI_CLASS_OVERRIDE,
442 "Promise Ultra33/ATA Bus Master IDE Accelerator",
443 pdc202xx_chip_map,
444 },
445 { PCI_PRODUCT_PROMISE_ULTRA66,
446 IDE_PCI_CLASS_OVERRIDE,
447 "Promise Ultra66/ATA Bus Master IDE Accelerator",
448 pdc202xx_chip_map,
449 },
450 { PCI_PRODUCT_PROMISE_ULTRA100,
451 IDE_PCI_CLASS_OVERRIDE,
452 "Promise Ultra100/ATA Bus Master IDE Accelerator",
453 pdc202xx_chip_map,
454 },
455 { PCI_PRODUCT_PROMISE_ULTRA100X,
456 IDE_PCI_CLASS_OVERRIDE,
457 "Promise Ultra100/ATA Bus Master IDE Accelerator",
458 pdc202xx_chip_map,
459 },
460 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
461 IDE_PCI_CLASS_OVERRIDE,
462 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
463 pdc202xx_chip_map,
464 },
465 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
466 IDE_PCI_CLASS_OVERRIDE,
467 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
468 pdc202xx_chip_map,
469 },
470 { PCI_PRODUCT_PROMISE_ULTRA133,
471 IDE_PCI_CLASS_OVERRIDE,
472 "Promise Ultra133/ATA Bus Master IDE Accelerator",
473 pdc202xx_chip_map,
474 },
475 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
476 IDE_PCI_CLASS_OVERRIDE,
477 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
478 pdc202xx_chip_map,
479 },
480 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
481 IDE_PCI_CLASS_OVERRIDE,
482 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
483 pdc202xx_chip_map,
484 },
485 { 0,
486 0,
487 NULL,
488 NULL
489 }
490 };
491
492 const struct pciide_product_desc pciide_opti_products[] = {
493 { PCI_PRODUCT_OPTI_82C621,
494 0,
495 "OPTi 82c621 PCI IDE controller",
496 opti_chip_map,
497 },
498 { PCI_PRODUCT_OPTI_82C568,
499 0,
500 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
501 opti_chip_map,
502 },
503 { PCI_PRODUCT_OPTI_82D568,
504 0,
505 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
506 opti_chip_map,
507 },
508 { 0,
509 0,
510 NULL,
511 NULL
512 }
513 };
514
515 const struct pciide_product_desc pciide_triones_products[] = {
516 { PCI_PRODUCT_TRIONES_HPT366,
517 IDE_PCI_CLASS_OVERRIDE,
518 NULL,
519 hpt_chip_map,
520 },
521 { PCI_PRODUCT_TRIONES_HPT372,
522 IDE_PCI_CLASS_OVERRIDE,
523 NULL,
524 hpt_chip_map
525 },
526 { PCI_PRODUCT_TRIONES_HPT374,
527 IDE_PCI_CLASS_OVERRIDE,
528 NULL,
529 hpt_chip_map
530 },
531 { 0,
532 0,
533 NULL,
534 NULL
535 }
536 };
537
538 const struct pciide_product_desc pciide_acard_products[] = {
539 { PCI_PRODUCT_ACARD_ATP850U,
540 IDE_PCI_CLASS_OVERRIDE,
541 "Acard ATP850U Ultra33 IDE Controller",
542 acard_chip_map,
543 },
544 { PCI_PRODUCT_ACARD_ATP860,
545 IDE_PCI_CLASS_OVERRIDE,
546 "Acard ATP860 Ultra66 IDE Controller",
547 acard_chip_map,
548 },
549 { PCI_PRODUCT_ACARD_ATP860A,
550 IDE_PCI_CLASS_OVERRIDE,
551 "Acard ATP860-A Ultra66 IDE Controller",
552 acard_chip_map,
553 },
554 { 0,
555 0,
556 NULL,
557 NULL
558 }
559 };
560
561 const struct pciide_product_desc pciide_serverworks_products[] = {
562 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
563 0,
564 "ServerWorks OSB4 IDE Controller",
565 serverworks_chip_map,
566 },
567 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
568 0,
569 "ServerWorks CSB5 IDE Controller",
570 serverworks_chip_map,
571 },
572 { 0,
573 0,
574 NULL,
575 }
576 };
577
578 const struct pciide_product_desc pciide_symphony_products[] = {
579 { PCI_PRODUCT_SYMPHONY_82C105,
580 0,
581 "Symphony Labs 82C105 IDE controller",
582 sl82c105_chip_map,
583 },
584 { 0,
585 0,
586 NULL,
587 }
588 };
589
590 const struct pciide_product_desc pciide_winbond_products[] = {
591 { PCI_PRODUCT_WINBOND_W83C553F_1,
592 0,
593 "Winbond W83C553F IDE controller",
594 sl82c105_chip_map,
595 },
596 { 0,
597 0,
598 NULL,
599 }
600 };
601
602 struct pciide_vendor_desc {
603 u_int32_t ide_vendor;
604 const struct pciide_product_desc *ide_products;
605 };
606
607 const struct pciide_vendor_desc pciide_vendors[] = {
608 { PCI_VENDOR_INTEL, pciide_intel_products },
609 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
610 { PCI_VENDOR_VIATECH, pciide_via_products },
611 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
612 { PCI_VENDOR_SIS, pciide_sis_products },
613 { PCI_VENDOR_ALI, pciide_acer_products },
614 { PCI_VENDOR_PROMISE, pciide_promise_products },
615 { PCI_VENDOR_AMD, pciide_amd_products },
616 { PCI_VENDOR_OPTI, pciide_opti_products },
617 { PCI_VENDOR_TRIONES, pciide_triones_products },
618 { PCI_VENDOR_ACARD, pciide_acard_products },
619 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
620 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
621 { PCI_VENDOR_WINBOND, pciide_winbond_products },
622 { 0, NULL }
623 };
624
625 /* options passed via the 'flags' config keyword */
626 #define PCIIDE_OPTIONS_DMA 0x01
627 #define PCIIDE_OPTIONS_NODMA 0x02
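/*
 * PCIIDE_OPTIONS_DMA lets default_chip_map() use bus-master DMA on an
 * otherwise unsupported controller; PCIIDE_OPTIONS_NODMA forces DMA off
 * in pciide_mapreg_dma() even when the registers could be mapped.
 */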
628
629 int pciide_match __P((struct device *, struct cfdata *, void *));
630 void pciide_attach __P((struct device *, struct device *, void *));
631
632 struct cfattach pciide_ca = {
633 sizeof(struct pciide_softc), pciide_match, pciide_attach
634 };
635 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
636 int pciide_mapregs_compat __P(( struct pci_attach_args *,
637 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
638 int pciide_mapregs_native __P((struct pci_attach_args *,
639 struct pciide_channel *, bus_size_t *, bus_size_t *,
640 int (*pci_intr) __P((void *))));
641 void pciide_mapreg_dma __P((struct pciide_softc *,
642 struct pci_attach_args *));
643 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
644 void pciide_mapchan __P((struct pci_attach_args *,
645 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
646 int (*pci_intr) __P((void *))));
647 int pciide_chan_candisable __P((struct pciide_channel *));
648 void pciide_map_compat_intr __P(( struct pci_attach_args *,
649 struct pciide_channel *, int, int));
650 int pciide_compat_intr __P((void *));
651 int pciide_pci_intr __P((void *));
652 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
653
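/*
 * Look up a PCI id in the vendor/product tables above; returns NULL if
 * the device is not known to this driver.
 */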
654 const struct pciide_product_desc *
655 pciide_lookup_product(id)
656 u_int32_t id;
657 {
658 const struct pciide_product_desc *pp;
659 const struct pciide_vendor_desc *vp;
660
661 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
662 if (PCI_VENDOR(id) == vp->ide_vendor)
663 break;
664
665 if ((pp = vp->ide_products) == NULL)
666 return NULL;
667
668 for (; pp->chip_map != NULL; pp++)
669 if (PCI_PRODUCT(id) == pp->ide_product)
670 break;
671
672 if (pp->chip_map == NULL)
673 return NULL;
674 return pp;
675 }
676
677 int
678 pciide_match(parent, match, aux)
679 struct device *parent;
680 struct cfdata *match;
681 void *aux;
682 {
683 struct pci_attach_args *pa = aux;
684 const struct pciide_product_desc *pp;
685
686 /*
687 * Check the ID register to see that it's a PCI IDE controller.
688 * If it is, we assume that we can deal with it; it _should_
689 * work in a standardized way...
690 */
691 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
692 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
693 return (1);
694 }
695
696 /*
697 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
698 * controllers. Let's see if we can deal with them anyway.
699 */
700 pp = pciide_lookup_product(pa->pa_id);
701 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
702 return (1);
703 }
704
705 return (0);
706 }
707
708 void
709 pciide_attach(parent, self, aux)
710 struct device *parent, *self;
711 void *aux;
712 {
713 struct pci_attach_args *pa = aux;
714 pci_chipset_tag_t pc = pa->pa_pc;
715 pcitag_t tag = pa->pa_tag;
716 struct pciide_softc *sc = (struct pciide_softc *)self;
717 pcireg_t csr;
718 char devinfo[256];
719 const char *displaydev;
720
721 sc->sc_pp = pciide_lookup_product(pa->pa_id);
722 if (sc->sc_pp == NULL) {
723 sc->sc_pp = &default_product_desc;
724 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
725 displaydev = devinfo;
726 } else
727 displaydev = sc->sc_pp->ide_name;
728
729 /* if displaydev == NULL, printf is done in chip-specific map */
730 if (displaydev)
731 printf(": %s (rev. 0x%02x)\n", displaydev,
732 PCI_REVISION(pa->pa_class));
733
734 sc->sc_pc = pa->pa_pc;
735 sc->sc_tag = pa->pa_tag;
736 #ifdef WDCDEBUG
737 if (wdcdebug_pciide_mask & DEBUG_PROBE)
738 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
739 #endif
740 sc->sc_pp->chip_map(sc, pa);
741
742 if (sc->sc_dma_ok) {
743 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
744 csr |= PCI_COMMAND_MASTER_ENABLE;
745 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
746 }
747 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
748 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
749 }
750
751 /* Tell whether the chip is enabled or not. */
752 int
753 pciide_chipen(sc, pa)
754 struct pciide_softc *sc;
755 struct pci_attach_args *pa;
756 {
757 pcireg_t csr;
758 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
759 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
760 PCI_COMMAND_STATUS_REG);
761 printf("%s: device disabled (at %s)\n",
762 sc->sc_wdcdev.sc_dev.dv_xname,
763 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
764 "device" : "bridge");
765 return 0;
766 }
767 return 1;
768 }
769
770 int
771 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
772 struct pci_attach_args *pa;
773 struct pciide_channel *cp;
774 int compatchan;
775 bus_size_t *cmdsizep, *ctlsizep;
776 {
777 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
778 struct channel_softc *wdc_cp = &cp->wdc_channel;
779
780 cp->compat = 1;
781 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
782 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
783
784 wdc_cp->cmd_iot = pa->pa_iot;
785 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
786 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
787 printf("%s: couldn't map %s channel cmd regs\n",
788 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
789 return (0);
790 }
791
792 wdc_cp->ctl_iot = pa->pa_iot;
793 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
794 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
795 printf("%s: couldn't map %s channel ctl regs\n",
796 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
797 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
798 PCIIDE_COMPAT_CMD_SIZE);
799 return (0);
800 }
801
802 return (1);
803 }
804
805 int
806 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
807 struct pci_attach_args * pa;
808 struct pciide_channel *cp;
809 bus_size_t *cmdsizep, *ctlsizep;
810 int (*pci_intr) __P((void *));
811 {
812 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
813 struct channel_softc *wdc_cp = &cp->wdc_channel;
814 const char *intrstr;
815 pci_intr_handle_t intrhandle;
816
817 cp->compat = 0;
818
819 if (sc->sc_pci_ih == NULL) {
820 if (pci_intr_map(pa, &intrhandle) != 0) {
821 printf("%s: couldn't map native-PCI interrupt\n",
822 sc->sc_wdcdev.sc_dev.dv_xname);
823 return 0;
824 }
825 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
826 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
827 intrhandle, IPL_BIO, pci_intr, sc);
828 if (sc->sc_pci_ih != NULL) {
829 printf("%s: using %s for native-PCI interrupt\n",
830 sc->sc_wdcdev.sc_dev.dv_xname,
831 intrstr ? intrstr : "unknown interrupt");
832 } else {
833 printf("%s: couldn't establish native-PCI interrupt",
834 sc->sc_wdcdev.sc_dev.dv_xname);
835 if (intrstr != NULL)
836 printf(" at %s", intrstr);
837 printf("\n");
838 return 0;
839 }
840 }
841 cp->ih = sc->sc_pci_ih;
842 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
843 PCI_MAPREG_TYPE_IO, 0,
844 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
845 printf("%s: couldn't map %s channel cmd regs\n",
846 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
847 return 0;
848 }
849
850 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
851 PCI_MAPREG_TYPE_IO, 0,
852 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
853 printf("%s: couldn't map %s channel ctl regs\n",
854 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
855 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
856 return 0;
857 }
858 /*
859 * In native mode, 4 bytes of I/O space are mapped for the control
860 * register, the control register is at offset 2. Pass the generic
861 * code a handle for only one byte at the right offset.
862 */
863 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
864 &wdc_cp->ctl_ioh) != 0) {
865 printf("%s: unable to subregion %s channel ctl regs\n",
866 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
867 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
868 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
869 return 0;
870 }
871 return (1);
872 }
873
874 void
875 pciide_mapreg_dma(sc, pa)
876 struct pciide_softc *sc;
877 struct pci_attach_args *pa;
878 {
879 pcireg_t maptype;
880 bus_addr_t addr;
881
882 /*
883 * Map DMA registers
884 *
885 * Note that sc_dma_ok is the right variable to test to see if
886 * DMA can be done. If the interface doesn't support DMA,
887 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
888 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
889 * non-zero if the interface supports DMA and the registers
890 * could be mapped.
891 *
892 * XXX Note that despite the fact that the Bus Master IDE specs
893 * XXX say that "The bus master IDE function uses 16 bytes of IO
894 * XXX space," some controllers (at least the United
895 * XXX Microelectronics UM8886BF) place it in memory space.
896 */
897 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
898 PCIIDE_REG_BUS_MASTER_DMA);
899
900 switch (maptype) {
901 case PCI_MAPREG_TYPE_IO:
902 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
903 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
904 &addr, NULL, NULL) == 0);
905 if (sc->sc_dma_ok == 0) {
906 printf(", but unused (couldn't query registers)");
907 break;
908 }
909 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
910 && addr >= 0x10000) {
911 sc->sc_dma_ok = 0;
912 printf(", but unused (registers at unsafe address "
913 "%#lx)", (unsigned long)addr);
914 break;
915 }
916 /* FALLTHROUGH */
917
918 case PCI_MAPREG_MEM_TYPE_32BIT:
919 sc->sc_dma_ok = (pci_mapreg_map(pa,
920 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
921 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
922 sc->sc_dmat = pa->pa_dmat;
923 if (sc->sc_dma_ok == 0) {
924 printf(", but unused (couldn't map registers)");
925 } else {
926 sc->sc_wdcdev.dma_arg = sc;
927 sc->sc_wdcdev.dma_init = pciide_dma_init;
928 sc->sc_wdcdev.dma_start = pciide_dma_start;
929 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
930 }
931
932 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
933 PCIIDE_OPTIONS_NODMA) {
934 printf(", but unused (forced off by config file)");
935 sc->sc_dma_ok = 0;
936 }
937 break;
938
939 default:
940 sc->sc_dma_ok = 0;
941 printf(", but unsupported register maptype (0x%x)", maptype);
942 }
943 }
944
945 int
946 pciide_compat_intr(arg)
947 void *arg;
948 {
949 struct pciide_channel *cp = arg;
950
951 #ifdef DIAGNOSTIC
952 /* should only be called for a compat channel */
953 if (cp->compat == 0)
954 panic("pciide compat intr called for non-compat chan %p\n", cp);
955 #endif
956 return (wdcintr(&cp->wdc_channel));
957 }
958
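/*
 * Native-PCI interrupt handler: the same handler serves every native
 * channel (and the interrupt line may be shared), so poll each
 * non-compat channel that is waiting for an interrupt and let
 * wdcintr() sort it out.
 */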
959 int
960 pciide_pci_intr(arg)
961 void *arg;
962 {
963 struct pciide_softc *sc = arg;
964 struct pciide_channel *cp;
965 struct channel_softc *wdc_cp;
966 int i, rv, crv;
967
968 rv = 0;
969 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
970 cp = &sc->pciide_channels[i];
971 wdc_cp = &cp->wdc_channel;
972
973 /* If this is a compat channel, skip it. */
974 if (cp->compat)
975 continue;
976 /* If this channel isn't waiting for an interrupt, skip it. */
977 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
978 continue;
979
980 crv = wdcintr(wdc_cp);
981 if (crv == 0)
982 ; /* leave rv alone */
983 else if (crv == 1)
984 rv = 1; /* claim the intr */
985 else if (rv == 0) /* crv should be -1 in this case */
986 rv = crv; /* if we've done no better, take it */
987 }
988 return (rv);
989 }
990
991 void
992 pciide_channel_dma_setup(cp)
993 struct pciide_channel *cp;
994 {
995 int drive;
996 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
997 struct ata_drive_datas *drvp;
998
999 for (drive = 0; drive < 2; drive++) {
1000 drvp = &cp->wdc_channel.ch_drive[drive];
1001 /* If no drive, skip */
1002 if ((drvp->drive_flags & DRIVE) == 0)
1003 continue;
1004 /* setup DMA if needed */
1005 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1006 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1007 sc->sc_dma_ok == 0) {
1008 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1009 continue;
1010 }
1011 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1012 != 0) {
1013 /* Abort DMA setup */
1014 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1015 continue;
1016 }
1017 }
1018 }
1019
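/*
 * Allocate the physical region descriptor (PRD) table for one drive:
 * a single IDEDMA_TBL_ALIGN-aligned chunk of NIDEDMA_TABLES entries,
 * loaded in its own DMA map, plus the DMA map later used for the data
 * transfers themselves.
 */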
1020 int
1021 pciide_dma_table_setup(sc, channel, drive)
1022 struct pciide_softc *sc;
1023 int channel, drive;
1024 {
1025 bus_dma_segment_t seg;
1026 int error, rseg;
1027 const bus_size_t dma_table_size =
1028 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1029 struct pciide_dma_maps *dma_maps =
1030 &sc->pciide_channels[channel].dma_maps[drive];
1031
1032 /* If table was already allocated, just return */
1033 if (dma_maps->dma_table)
1034 return 0;
1035
1036 /* Allocate memory for the DMA table and map it */
1037 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1038 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1039 BUS_DMA_NOWAIT)) != 0) {
1040 printf("%s:%d: unable to allocate table DMA for "
1041 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1042 channel, drive, error);
1043 return error;
1044 }
1045 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1046 dma_table_size,
1047 (caddr_t *)&dma_maps->dma_table,
1048 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1049 printf("%s:%d: unable to map table DMA for "
1050 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1051 channel, drive, error);
1052 return error;
1053 }
1054 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1055 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1056 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1057
1058 /* Create and load table DMA map for this disk */
1059 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1060 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1061 &dma_maps->dmamap_table)) != 0) {
1062 printf("%s:%d: unable to create table DMA map for "
1063 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1064 channel, drive, error);
1065 return error;
1066 }
1067 if ((error = bus_dmamap_load(sc->sc_dmat,
1068 dma_maps->dmamap_table,
1069 dma_maps->dma_table,
1070 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1071 printf("%s:%d: unable to load table DMA map for "
1072 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1073 channel, drive, error);
1074 return error;
1075 }
1076 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1077 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1078 DEBUG_PROBE);
1079 /* Create an xfer DMA map for this drive */
1080 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1081 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1082 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1083 &dma_maps->dmamap_xfer)) != 0) {
1084 printf("%s:%d: unable to create xfer DMA map for "
1085 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1086 channel, drive, error);
1087 return error;
1088 }
1089 return 0;
1090 }
1091
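/*
 * Prepare a DMA transfer: load the data buffer into the xfer map, fill
 * one descriptor per segment (flagging the last one as end-of-table),
 * then point the bus-master engine at the descriptor table and set the
 * transfer direction.
 */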
1092 int
1093 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1094 void *v;
1095 int channel, drive;
1096 void *databuf;
1097 size_t datalen;
1098 int flags;
1099 {
1100 struct pciide_softc *sc = v;
1101 int error, seg;
1102 struct pciide_dma_maps *dma_maps =
1103 &sc->pciide_channels[channel].dma_maps[drive];
1104
1105 error = bus_dmamap_load(sc->sc_dmat,
1106 dma_maps->dmamap_xfer,
1107 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1108 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1109 if (error) {
1110 printf("%s:%d: unable to load xfer DMA map for "
1111 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1112 channel, drive, error);
1113 return error;
1114 }
1115
1116 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1117 dma_maps->dmamap_xfer->dm_mapsize,
1118 (flags & WDC_DMA_READ) ?
1119 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1120
1121 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1122 #ifdef DIAGNOSTIC
1123 /* A segment must not cross a 64k boundary */
1124 {
1125 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1126 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1127 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1128 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1129 printf("pciide_dma: segment %d physical addr 0x%lx"
1130 " len 0x%lx not properly aligned\n",
1131 seg, phys, len);
1132 panic("pciide_dma: buf align");
1133 }
1134 }
1135 #endif
1136 dma_maps->dma_table[seg].base_addr =
1137 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1138 dma_maps->dma_table[seg].byte_count =
1139 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1140 IDEDMA_BYTE_COUNT_MASK);
1141 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1142 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1143 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1144
1145 }
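/* Mark the last descriptor as end-of-table so the engine stops after this segment. */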
1146 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1147 htole32(IDEDMA_BYTE_COUNT_EOT);
1148
1149 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1150 dma_maps->dmamap_table->dm_mapsize,
1151 BUS_DMASYNC_PREWRITE);
1152
1153 /* Maps are ready. Start DMA function */
1154 #ifdef DIAGNOSTIC
1155 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1156 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1157 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1158 panic("pciide_dma_init: table align");
1159 }
1160 #endif
1161
1162 /* Clear status bits */
1163 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1164 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1165 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1166 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1167 /* Write table addr */
1168 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1169 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1170 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1171 /* set read/write */
1172 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1173 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1174 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1175 /* remember flags */
1176 dma_maps->dma_flags = flags;
1177 return 0;
1178 }
1179
1180 void
1181 pciide_dma_start(v, channel, drive)
1182 void *v;
1183 int channel, drive;
1184 {
1185 struct pciide_softc *sc = v;
1186
1187 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1188 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1189 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1190 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1191 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1192 }
1193
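/*
 * Stop a DMA transfer and collect status. If the interrupt bit isn't
 * set and we aren't forced, report WDC_DMAST_NOIRQ and leave the engine
 * running; otherwise stop it, unload the data map, and translate the
 * error/underrun bits for the caller.
 */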
1194 int
1195 pciide_dma_finish(v, channel, drive, force)
1196 void *v;
1197 int channel, drive;
1198 int force;
1199 {
1200 struct pciide_softc *sc = v;
1201 u_int8_t status;
1202 int error = 0;
1203 struct pciide_dma_maps *dma_maps =
1204 &sc->pciide_channels[channel].dma_maps[drive];
1205
1206 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1207 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1208 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1209 DEBUG_XFERS);
1210
1211 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1212 return WDC_DMAST_NOIRQ;
1213
1214 /* stop DMA channel */
1215 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1216 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1217 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1218 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1219
1220 /* Unload the map of the data buffer */
1221 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1222 dma_maps->dmamap_xfer->dm_mapsize,
1223 (dma_maps->dma_flags & WDC_DMA_READ) ?
1224 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1225 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1226
1227 if ((status & IDEDMA_CTL_ERR) != 0) {
1228 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1229 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1230 error |= WDC_DMAST_ERR;
1231 }
1232
1233 if ((status & IDEDMA_CTL_INTR) == 0) {
1234 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1235 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1236 drive, status);
1237 error |= WDC_DMAST_NOIRQ;
1238 }
1239
1240 if ((status & IDEDMA_CTL_ACT) != 0) {
1241 /* data underrun, may be a valid condition for ATAPI */
1242 error |= WDC_DMAST_UNDER;
1243 }
1244 return error;
1245 }
1246
1247 void
1248 pciide_irqack(chp)
1249 struct channel_softc *chp;
1250 {
1251 struct pciide_channel *cp = (struct pciide_channel*)chp;
1252 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1253
1254 /* clear status bits in IDE DMA registers */
1255 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1256 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1257 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1258 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1259 }
1260
1261 /* Some common code used by several chip_map functions. */
1262 int
1263 pciide_chansetup(sc, channel, interface)
1264 struct pciide_softc *sc;
1265 int channel;
1266 pcireg_t interface;
1267 {
1268 struct pciide_channel *cp = &sc->pciide_channels[channel];
1269 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1270 cp->name = PCIIDE_CHANNEL_NAME(channel);
1271 cp->wdc_channel.channel = channel;
1272 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1273 cp->wdc_channel.ch_queue =
1274 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1275 if (cp->wdc_channel.ch_queue == NULL) {
1276 printf("%s %s channel: "
1277 "can't allocate memory for command queue\n",
1278 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1279 return 0;
1280 }
1281 printf("%s: %s channel %s to %s mode\n",
1282 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1283 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1284 "configured" : "wired",
1285 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1286 "native-PCI" : "compatibility");
1287 return 1;
1288 }
1289
1290 /* Some common code used by several chip_map functions to map a channel. */
1291 void
1292 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1293 struct pci_attach_args *pa;
1294 struct pciide_channel *cp;
1295 pcireg_t interface;
1296 bus_size_t *cmdsizep, *ctlsizep;
1297 int (*pci_intr) __P((void *));
1298 {
1299 struct channel_softc *wdc_cp = &cp->wdc_channel;
1300
1301 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1302 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1303 pci_intr);
1304 else
1305 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1306 wdc_cp->channel, cmdsizep, ctlsizep);
1307
1308 if (cp->hw_ok == 0)
1309 return;
1310 wdc_cp->data32iot = wdc_cp->cmd_iot;
1311 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1312 wdcattach(wdc_cp);
1313 }
1314
1315 /*
1316 * Generic code to decide whether a channel can be disabled. Returns 1
1317 * if the channel can be disabled, 0 if not.
1318 */
1319 int
1320 pciide_chan_candisable(cp)
1321 struct pciide_channel *cp;
1322 {
1323 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1324 struct channel_softc *wdc_cp = &cp->wdc_channel;
1325
1326 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1327 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1328 printf("%s: disabling %s channel (no drives)\n",
1329 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1330 cp->hw_ok = 0;
1331 return 1;
1332 }
1333 return 0;
1334 }
1335
1336 /*
1337 * Generic code to map the compat interrupt if hw_ok=1 and this is a
1338 * compat channel. Sets hw_ok=0 on failure.
1339 */
1340 void
1341 pciide_map_compat_intr(pa, cp, compatchan, interface)
1342 struct pci_attach_args *pa;
1343 struct pciide_channel *cp;
1344 int compatchan, interface;
1345 {
1346 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1347 struct channel_softc *wdc_cp = &cp->wdc_channel;
1348
1349 if (cp->hw_ok == 0)
1350 return;
1351 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1352 return;
1353
1354 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1355 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1356 pa, compatchan, pciide_compat_intr, cp);
1357 if (cp->ih == NULL) {
1358 #endif
1359 printf("%s: no compatibility interrupt for use by %s "
1360 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1361 cp->hw_ok = 0;
1362 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1363 }
1364 #endif
1365 }
1366
1367 void
1368 pciide_print_modes(cp)
1369 struct pciide_channel *cp;
1370 {
1371 wdc_print_modes(&cp->wdc_channel);
1372 }
1373
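/*
 * Generic chip_map, used when no chip-specific support is available:
 * map each channel, verify that the drives really belong to this
 * controller, attach, and set up DMA maps without any mode tuning.
 */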
1374 void
1375 default_chip_map(sc, pa)
1376 struct pciide_softc *sc;
1377 struct pci_attach_args *pa;
1378 {
1379 struct pciide_channel *cp;
1380 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1381 pcireg_t csr;
1382 int channel, drive;
1383 struct ata_drive_datas *drvp;
1384 u_int8_t idedma_ctl;
1385 bus_size_t cmdsize, ctlsize;
1386 char *failreason;
1387
1388 if (pciide_chipen(sc, pa) == 0)
1389 return;
1390
1391 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1392 printf("%s: bus-master DMA support present",
1393 sc->sc_wdcdev.sc_dev.dv_xname);
1394 if (sc->sc_pp == &default_product_desc &&
1395 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1396 PCIIDE_OPTIONS_DMA) == 0) {
1397 printf(", but unused (no driver support)");
1398 sc->sc_dma_ok = 0;
1399 } else {
1400 pciide_mapreg_dma(sc, pa);
1401 if (sc->sc_dma_ok != 0)
1402 printf(", used without full driver "
1403 "support");
1404 }
1405 } else {
1406 printf("%s: hardware does not support DMA",
1407 sc->sc_wdcdev.sc_dev.dv_xname);
1408 sc->sc_dma_ok = 0;
1409 }
1410 printf("\n");
1411 if (sc->sc_dma_ok) {
1412 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1413 sc->sc_wdcdev.irqack = pciide_irqack;
1414 }
1415 sc->sc_wdcdev.PIO_cap = 0;
1416 sc->sc_wdcdev.DMA_cap = 0;
1417
1418 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1419 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1420 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1421
1422 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1423 cp = &sc->pciide_channels[channel];
1424 if (pciide_chansetup(sc, channel, interface) == 0)
1425 continue;
1426 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1427 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1428 &ctlsize, pciide_pci_intr);
1429 } else {
1430 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1431 channel, &cmdsize, &ctlsize);
1432 }
1433 if (cp->hw_ok == 0)
1434 continue;
1435 /*
1436 * Check to see if something appears to be there.
1437 */
1438 failreason = NULL;
1439 if (!wdcprobe(&cp->wdc_channel)) {
1440 failreason = "not responding; disabled or no drives?";
1441 goto next;
1442 }
1443 /*
1444 * Now, make sure it's actually attributable to this PCI IDE
1445 * channel by trying to access the channel again while the
1446 * PCI IDE controller's I/O space is disabled. (If the
1447 * channel no longer appears to be there, it belongs to
1448 * this controller.) YUCK!
1449 */
1450 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1451 PCI_COMMAND_STATUS_REG);
1452 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1453 csr & ~PCI_COMMAND_IO_ENABLE);
1454 if (wdcprobe(&cp->wdc_channel))
1455 failreason = "other hardware responding at addresses";
1456 pci_conf_write(sc->sc_pc, sc->sc_tag,
1457 PCI_COMMAND_STATUS_REG, csr);
1458 next:
1459 if (failreason) {
1460 printf("%s: %s channel ignored (%s)\n",
1461 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1462 failreason);
1463 cp->hw_ok = 0;
1464 bus_space_unmap(cp->wdc_channel.cmd_iot,
1465 cp->wdc_channel.cmd_ioh, cmdsize);
1466 if (interface & PCIIDE_INTERFACE_PCI(channel))
1467 bus_space_unmap(cp->wdc_channel.ctl_iot,
1468 cp->ctl_baseioh, ctlsize);
1469 else
1470 bus_space_unmap(cp->wdc_channel.ctl_iot,
1471 cp->wdc_channel.ctl_ioh, ctlsize);
1472 } else {
1473 pciide_map_compat_intr(pa, cp, channel, interface);
1474 }
1475 if (cp->hw_ok) {
1476 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1477 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1478 wdcattach(&cp->wdc_channel);
1479 }
1480 }
1481
1482 if (sc->sc_dma_ok == 0)
1483 return;
1484
1485 /* Allocate DMA maps */
1486 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1487 idedma_ctl = 0;
1488 cp = &sc->pciide_channels[channel];
1489 for (drive = 0; drive < 2; drive++) {
1490 drvp = &cp->wdc_channel.ch_drive[drive];
1491 /* If no drive, skip */
1492 if ((drvp->drive_flags & DRIVE) == 0)
1493 continue;
1494 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1495 continue;
1496 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1497 /* Abort DMA setup */
1498 printf("%s:%d:%d: can't allocate DMA maps, "
1499 "using PIO transfers\n",
1500 sc->sc_wdcdev.sc_dev.dv_xname,
1501 channel, drive);
1502 drvp->drive_flags &= ~DRIVE_DMA;
1503 }
1504 printf("%s:%d:%d: using DMA data transfers\n",
1505 sc->sc_wdcdev.sc_dev.dv_xname,
1506 channel, drive);
1507 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1508 }
1509 if (idedma_ctl != 0) {
1510 /* Add software bits in status register */
1511 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1512 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1513 idedma_ctl);
1514 }
1515 }
1516 }
1517
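/*
 * Intel PIIX/ICH family. The chip_map routine maps the bus-master
 * registers, sets the PIO/DMA/UDMA capabilities according to the exact
 * product, and then lets piix_setup_channel() or piix3_4_setup_channel()
 * program the timing registers for each enabled channel.
 */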
1518 void
1519 piix_chip_map(sc, pa)
1520 struct pciide_softc *sc;
1521 struct pci_attach_args *pa;
1522 {
1523 struct pciide_channel *cp;
1524 int channel;
1525 u_int32_t idetim;
1526 bus_size_t cmdsize, ctlsize;
1527
1528 if (pciide_chipen(sc, pa) == 0)
1529 return;
1530
1531 printf("%s: bus-master DMA support present",
1532 sc->sc_wdcdev.sc_dev.dv_xname);
1533 pciide_mapreg_dma(sc, pa);
1534 printf("\n");
1535 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1536 WDC_CAPABILITY_MODE;
1537 if (sc->sc_dma_ok) {
1538 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1539 sc->sc_wdcdev.irqack = pciide_irqack;
1540 switch(sc->sc_pp->ide_product) {
1541 case PCI_PRODUCT_INTEL_82371AB_IDE:
1542 case PCI_PRODUCT_INTEL_82440MX_IDE:
1543 case PCI_PRODUCT_INTEL_82801AA_IDE:
1544 case PCI_PRODUCT_INTEL_82801AB_IDE:
1545 case PCI_PRODUCT_INTEL_82801BA_IDE:
1546 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1547 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1548 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1549 case PCI_PRODUCT_INTEL_82801DB_IDE:
1550 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1551 }
1552 }
1553 sc->sc_wdcdev.PIO_cap = 4;
1554 sc->sc_wdcdev.DMA_cap = 2;
1555 switch(sc->sc_pp->ide_product) {
1556 case PCI_PRODUCT_INTEL_82801AA_IDE:
1557 sc->sc_wdcdev.UDMA_cap = 4;
1558 break;
1559 case PCI_PRODUCT_INTEL_82801BA_IDE:
1560 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1561 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1562 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1563 case PCI_PRODUCT_INTEL_82801DB_IDE:
1564 sc->sc_wdcdev.UDMA_cap = 5;
1565 break;
1566 default:
1567 sc->sc_wdcdev.UDMA_cap = 2;
1568 }
1569 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1570 sc->sc_wdcdev.set_modes = piix_setup_channel;
1571 else
1572 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1573 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1574 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1575
1576 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1577 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1578 DEBUG_PROBE);
1579 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1580 WDCDEBUG_PRINT((", sidetim=0x%x",
1581 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1582 DEBUG_PROBE);
1583 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1584 WDCDEBUG_PRINT((", udmareg 0x%x",
1585 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1586 DEBUG_PROBE);
1587 }
1588 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1589 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1590 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1591 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1592 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1593 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1594 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1595 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1596 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1597 DEBUG_PROBE);
1598 }
1599
1600 }
1601 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1602
1603 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1604 cp = &sc->pciide_channels[channel];
1605 /* PIIX is compat-only */
1606 if (pciide_chansetup(sc, channel, 0) == 0)
1607 continue;
1608 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1609 if ((PIIX_IDETIM_READ(idetim, channel) &
1610 PIIX_IDETIM_IDE) == 0) {
1611 printf("%s: %s channel ignored (disabled)\n",
1612 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1613 continue;
1614 }
1615 /* PIIX controllers are compat-only pciide devices */
1616 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1617 if (cp->hw_ok == 0)
1618 continue;
1619 if (pciide_chan_candisable(cp)) {
1620 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1621 channel);
1622 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1623 idetim);
1624 }
1625 pciide_map_compat_intr(pa, cp, channel, 0);
1626 if (cp->hw_ok == 0)
1627 continue;
1628 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1629 }
1630
1631 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1632 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1633 DEBUG_PROBE);
1634 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1635 WDCDEBUG_PRINT((", sidetim=0x%x",
1636 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1637 DEBUG_PROBE);
1638 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1639 WDCDEBUG_PRINT((", udmareg 0x%x",
1640 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1641 DEBUG_PROBE);
1642 }
1643 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1644 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1645 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1646 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1647 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1648 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1649 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1650 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1651 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1652 DEBUG_PROBE);
1653 }
1654 }
1655 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1656 }
1657
1658 void
1659 piix_setup_channel(chp)
1660 struct channel_softc *chp;
1661 {
1662 u_int8_t mode[2], drive;
1663 u_int32_t oidetim, idetim, idedma_ctl;
1664 struct pciide_channel *cp = (struct pciide_channel*)chp;
1665 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1666 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1667
1668 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1669 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1670 idedma_ctl = 0;
1671
1672 /* set up new idetim: Enable IDE registers decode */
1673 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1674 chp->channel);
1675
1676 /* setup DMA */
1677 pciide_channel_dma_setup(cp);
1678
1679 /*
1680 * Here we have to juggle the drive modes: the PIIX can't use
1681 * different timings for the master and slave drives.
1682 * We need to find the best combination.
1683 */
1684
1685 /* If both drives support DMA, take the lower mode */
1686 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1687 (drvp[1].drive_flags & DRIVE_DMA)) {
1688 mode[0] = mode[1] =
1689 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1690 drvp[0].DMA_mode = mode[0];
1691 drvp[1].DMA_mode = mode[1];
1692 goto ok;
1693 }
1694 /*
1695 * If only one drive supports DMA, use its mode, and
1696 * put the other one in PIO mode 0 if its mode is not compatible.
1697 */
1698 if (drvp[0].drive_flags & DRIVE_DMA) {
1699 mode[0] = drvp[0].DMA_mode;
1700 mode[1] = drvp[1].PIO_mode;
1701 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1702 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1703 mode[1] = drvp[1].PIO_mode = 0;
1704 goto ok;
1705 }
1706 if (drvp[1].drive_flags & DRIVE_DMA) {
1707 mode[1] = drvp[1].DMA_mode;
1708 mode[0] = drvp[0].PIO_mode;
1709 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1710 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1711 mode[0] = drvp[0].PIO_mode = 0;
1712 goto ok;
1713 }
1714 /*
1715 * If neither drive is using DMA, take the lower mode, unless
1716 * one of them is below PIO mode 2.
1717 */
1718 if (drvp[0].PIO_mode < 2) {
1719 mode[0] = drvp[0].PIO_mode = 0;
1720 mode[1] = drvp[1].PIO_mode;
1721 } else if (drvp[1].PIO_mode < 2) {
1722 mode[1] = drvp[1].PIO_mode = 0;
1723 mode[0] = drvp[0].PIO_mode;
1724 } else {
1725 mode[0] = mode[1] =
1726 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1727 drvp[0].PIO_mode = mode[0];
1728 drvp[1].PIO_mode = mode[1];
1729 }
1730 ok: /* The modes are set up */
1731 for (drive = 0; drive < 2; drive++) {
1732 if (drvp[drive].drive_flags & DRIVE_DMA) {
1733 idetim |= piix_setup_idetim_timings(
1734 mode[drive], 1, chp->channel);
1735 goto end;
1736 }
1737 }
1738 /* If we get here, neither drive is using DMA */
1739 if (mode[0] >= 2)
1740 idetim |= piix_setup_idetim_timings(
1741 mode[0], 0, chp->channel);
1742 else
1743 idetim |= piix_setup_idetim_timings(
1744 mode[1], 0, chp->channel);
1745 end: /*
1746 * The timing mode is now set up in the controller. Enable
1747 * it per drive.
1748 */
1749 for (drive = 0; drive < 2; drive++) {
1750 /* If no drive, skip */
1751 if ((drvp[drive].drive_flags & DRIVE) == 0)
1752 continue;
1753 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1754 if (drvp[drive].drive_flags & DRIVE_DMA)
1755 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1756 }
1757 if (idedma_ctl != 0) {
1758 /* Add software bits in status register */
1759 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1760 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1761 idedma_ctl);
1762 }
1763 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1764 pciide_print_modes(cp);
1765 }
1766
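/*
 * PIIX3/PIIX4/ICH timing setup. Unlike the original PIIX handled by
 * piix_setup_channel() above, these chips have a separate slave timing
 * register (SIDETIM) and, on the UDMA-capable parts, per-drive
 * Ultra-DMA enable/timing bits plus IDE_CONFIG bits that gate modes
 * above UDMA2 (presumably 80-conductor cable detection).
 */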
1767 void
1768 piix3_4_setup_channel(chp)
1769 struct channel_softc *chp;
1770 {
1771 struct ata_drive_datas *drvp;
1772 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1773 struct pciide_channel *cp = (struct pciide_channel*)chp;
1774 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1775 int drive;
1776 int channel = chp->channel;
1777
1778 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1779 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1780 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1781 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1782 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1783 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1784 PIIX_SIDETIM_RTC_MASK(channel));
1785
1786 idedma_ctl = 0;
1787 /* If channel disabled, no need to go further */
1788 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1789 return;
1790 /* set up new idetim: Enable IDE registers decode */
1791 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1792
1793 /* setup DMA if needed */
1794 pciide_channel_dma_setup(cp);
1795
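	/*
	 * For each drive: clear its UDMA bits, then pick UDMA, multiword
	 * DMA or PIO and program the timing/config registers accordingly.
	 */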
1796 for (drive = 0; drive < 2; drive++) {
1797 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1798 PIIX_UDMATIM_SET(0x3, channel, drive));
1799 drvp = &chp->ch_drive[drive];
1800 /* If no drive, skip */
1801 if ((drvp->drive_flags & DRIVE) == 0)
1802 continue;
1803 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1804 (drvp->drive_flags & DRIVE_UDMA) == 0))
1805 goto pio;
1806
1807 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1808 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1809 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1810 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1811 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1812 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1813 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1814 ideconf |= PIIX_CONFIG_PINGPONG;
1815 }
1816 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1817 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1818 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1819 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1820 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1821 /* setup Ultra/100 */
1822 if (drvp->UDMA_mode > 2 &&
1823 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1824 drvp->UDMA_mode = 2;
1825 if (drvp->UDMA_mode > 4) {
1826 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1827 } else {
1828 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1829 if (drvp->UDMA_mode > 2) {
1830 ideconf |= PIIX_CONFIG_UDMA66(channel,
1831 drive);
1832 } else {
1833 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1834 drive);
1835 }
1836 }
1837 }
1838 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1839 /* setup Ultra/66 */
1840 if (drvp->UDMA_mode > 2 &&
1841 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1842 drvp->UDMA_mode = 2;
1843 if (drvp->UDMA_mode > 2)
1844 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1845 else
1846 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1847 }
1848 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1849 (drvp->drive_flags & DRIVE_UDMA)) {
1850 /* use Ultra/DMA */
1851 drvp->drive_flags &= ~DRIVE_DMA;
1852 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1853 udmareg |= PIIX_UDMATIM_SET(
1854 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1855 } else {
1856 /* use Multiword DMA */
1857 drvp->drive_flags &= ~DRIVE_UDMA;
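			/*
			 * Drive 0 timings go in IDETIM; drive 1 timings go
			 * in SIDETIM, selected by setting SITRE.
			 */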
1858 if (drive == 0) {
1859 idetim |= piix_setup_idetim_timings(
1860 drvp->DMA_mode, 1, channel);
1861 } else {
1862 sidetim |= piix_setup_sidetim_timings(
1863 drvp->DMA_mode, 1, channel);
1864 				idetim = PIIX_IDETIM_SET(idetim,
1865 PIIX_IDETIM_SITRE, channel);
1866 }
1867 }
1868 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1869
1870 pio: /* use PIO mode */
1871 idetim |= piix_setup_idetim_drvs(drvp);
1872 if (drive == 0) {
1873 idetim |= piix_setup_idetim_timings(
1874 drvp->PIO_mode, 0, channel);
1875 } else {
1876 sidetim |= piix_setup_sidetim_timings(
1877 drvp->PIO_mode, 0, channel);
1878 			idetim = PIIX_IDETIM_SET(idetim,
1879 PIIX_IDETIM_SITRE, channel);
1880 }
1881 }
1882 if (idedma_ctl != 0) {
1883 /* Add software bits in status register */
1884 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1885 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1886 idedma_ctl);
1887 }
1888 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1889 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1890 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1891 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1892 pciide_print_modes(cp);
1893 }
1894
1895
1896 /* setup ISP and RTC fields, based on mode */
1897 static u_int32_t
1898 piix_setup_idetim_timings(mode, dma, channel)
1899 u_int8_t mode;
1900 u_int8_t dma;
1901 u_int8_t channel;
1902 {
1903
1904 if (dma)
1905 return PIIX_IDETIM_SET(0,
1906 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1907 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1908 channel);
1909 else
1910 return PIIX_IDETIM_SET(0,
1911 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1912 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1913 channel);
1914 }
1915
1916 /* setup the DTE, PPE, IE and TIME fields based on the PIO mode */
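/*
 * (roughly: DTE = fast timings for DMA only, PPE = prefetch/posting,
 * IE = IORDY sampling, TIME = fast timing enable)
 */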
1917 static u_int32_t
1918 piix_setup_idetim_drvs(drvp)
1919 struct ata_drive_datas *drvp;
1920 {
1921 u_int32_t ret = 0;
1922 struct channel_softc *chp = drvp->chnl_softc;
1923 u_int8_t channel = chp->channel;
1924 u_int8_t drive = drvp->drive;
1925
1926 /*
1927 	 * If the drive is using UDMA, the timing setups are independent,
1928 	 * so just check DMA and PIO here.
1929 */
1930 if (drvp->drive_flags & DRIVE_DMA) {
1931 /* if mode = DMA mode 0, use compatible timings */
1932 if ((drvp->drive_flags & DRIVE_DMA) &&
1933 drvp->DMA_mode == 0) {
1934 drvp->PIO_mode = 0;
1935 return ret;
1936 }
1937 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1938 /*
1939 		 * If the PIO and DMA timings are the same, use fast timings
1940 		 * for PIO too; otherwise use compat timings for PIO.
1941 */
1942 if ((piix_isp_pio[drvp->PIO_mode] !=
1943 piix_isp_dma[drvp->DMA_mode]) ||
1944 (piix_rtc_pio[drvp->PIO_mode] !=
1945 piix_rtc_dma[drvp->DMA_mode]))
1946 drvp->PIO_mode = 0;
1947 /* if PIO mode <= 2, use compat timings for PIO */
1948 if (drvp->PIO_mode <= 2) {
1949 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1950 channel);
1951 return ret;
1952 }
1953 }
1954
1955 /*
1956 * Now setup PIO modes. If mode < 2, use compat timings.
1957 * Else enable fast timings. Enable IORDY and prefetch/post
1958 * if PIO mode >= 3.
1959 */
1960
1961 if (drvp->PIO_mode < 2)
1962 return ret;
1963
1964 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1965 if (drvp->PIO_mode >= 3) {
1966 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1967 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1968 }
1969 return ret;
1970 }
1971
1972 /* setup values in SIDETIM registers, based on mode */
1973 static u_int32_t
1974 piix_setup_sidetim_timings(mode, dma, channel)
1975 u_int8_t mode;
1976 u_int8_t dma;
1977 u_int8_t channel;
1978 {
1979 if (dma)
1980 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1981 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1982 else
1983 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1984 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1985 }
1986
1987 void
1988 amd7x6_chip_map(sc, pa)
1989 struct pciide_softc *sc;
1990 struct pci_attach_args *pa;
1991 {
1992 struct pciide_channel *cp;
1993 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1994 int channel;
1995 pcireg_t chanenable;
1996 bus_size_t cmdsize, ctlsize;
1997
1998 if (pciide_chipen(sc, pa) == 0)
1999 return;
2000 printf("%s: bus-master DMA support present",
2001 sc->sc_wdcdev.sc_dev.dv_xname);
2002 pciide_mapreg_dma(sc, pa);
2003 printf("\n");
2004 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2005 WDC_CAPABILITY_MODE;
2006 if (sc->sc_dma_ok) {
2007 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2008 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2009 sc->sc_wdcdev.irqack = pciide_irqack;
2010 }
2011 sc->sc_wdcdev.PIO_cap = 4;
2012 sc->sc_wdcdev.DMA_cap = 2;
2013
2014 switch (sc->sc_pp->ide_product) {
2015 case PCI_PRODUCT_AMD_PBC766_IDE:
2016 case PCI_PRODUCT_AMD_PBC768_IDE:
2017 case PCI_PRODUCT_AMD_PBC8111_IDE:
2018 sc->sc_wdcdev.UDMA_cap = 5;
2019 break;
2020 default:
2021 sc->sc_wdcdev.UDMA_cap = 4;
2022 }
2023 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2024 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2025 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2026 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
2027
2028 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2029 DEBUG_PROBE);
2030 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2031 cp = &sc->pciide_channels[channel];
2032 if (pciide_chansetup(sc, channel, interface) == 0)
2033 continue;
2034
2035 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2036 printf("%s: %s channel ignored (disabled)\n",
2037 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2038 continue;
2039 }
2040 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2041 pciide_pci_intr);
2042
2043 if (pciide_chan_candisable(cp))
2044 chanenable &= ~AMD7X6_CHAN_EN(channel);
2045 pciide_map_compat_intr(pa, cp, channel, interface);
2046 if (cp->hw_ok == 0)
2047 continue;
2048
2049 amd7x6_setup_channel(&cp->wdc_channel);
2050 }
2051 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2052 chanenable);
2053 return;
2054 }
2055
2056 void
2057 amd7x6_setup_channel(chp)
2058 struct channel_softc *chp;
2059 {
2060 u_int32_t udmatim_reg, datatim_reg;
2061 u_int8_t idedma_ctl;
2062 int mode, drive;
2063 struct ata_drive_datas *drvp;
2064 struct pciide_channel *cp = (struct pciide_channel*)chp;
2065 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2066 #ifndef PCIIDE_AMD756_ENABLEDMA
2067 int rev = PCI_REVISION(
2068 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2069 #endif
2070
2071 idedma_ctl = 0;
2072 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2073 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2074 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2075 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2076
2077 /* setup DMA if needed */
2078 pciide_channel_dma_setup(cp);
2079
2080 for (drive = 0; drive < 2; drive++) {
2081 drvp = &chp->ch_drive[drive];
2082 /* If no drive, skip */
2083 if ((drvp->drive_flags & DRIVE) == 0)
2084 continue;
2085 /* add timing values, setup DMA if needed */
2086 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2087 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2088 mode = drvp->PIO_mode;
2089 goto pio;
2090 }
2091 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2092 (drvp->drive_flags & DRIVE_UDMA)) {
2093 /* use Ultra/DMA */
2094 drvp->drive_flags &= ~DRIVE_DMA;
2095 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2096 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2097 AMD7X6_UDMA_TIME(chp->channel, drive,
2098 amd7x6_udma_tim[drvp->UDMA_mode]);
2099 /* can use PIO timings, MW DMA unused */
2100 mode = drvp->PIO_mode;
2101 } else {
2102 /* use Multiword DMA, but only if revision is OK */
2103 drvp->drive_flags &= ~DRIVE_UDMA;
2104 #ifndef PCIIDE_AMD756_ENABLEDMA
2105 /*
2106 			 * The workaround doesn't seem to be necessary
2107 			 * with all drives, so it can be disabled with
2108 			 * PCIIDE_AMD756_ENABLEDMA. The hardware bug causes a
2109 			 * hard hang if triggered.
2110 */
2111 if (sc->sc_pp->ide_product ==
2112 PCI_PRODUCT_AMD_PBC756_IDE &&
2113 AMD756_CHIPREV_DISABLEDMA(rev)) {
2114 printf("%s:%d:%d: multi-word DMA disabled due "
2115 "to chip revision\n",
2116 sc->sc_wdcdev.sc_dev.dv_xname,
2117 chp->channel, drive);
2118 mode = drvp->PIO_mode;
2119 drvp->drive_flags &= ~DRIVE_DMA;
2120 goto pio;
2121 }
2122 #endif
2123 /* mode = min(pio, dma+2) */
2124 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2125 mode = drvp->PIO_mode;
2126 else
2127 mode = drvp->DMA_mode + 2;
2128 }
2129 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2130
2131 pio: /* setup PIO mode */
2132 if (mode <= 2) {
2133 drvp->DMA_mode = 0;
2134 drvp->PIO_mode = 0;
2135 mode = 0;
2136 } else {
2137 drvp->PIO_mode = mode;
2138 drvp->DMA_mode = mode - 2;
2139 }
2140 datatim_reg |=
2141 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2142 amd7x6_pio_set[mode]) |
2143 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2144 amd7x6_pio_rec[mode]);
2145 }
2146 if (idedma_ctl != 0) {
2147 /* Add software bits in status register */
2148 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2149 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2150 idedma_ctl);
2151 }
2152 pciide_print_modes(cp);
2153 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2154 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2155 }
2156
2157 void
2158 apollo_chip_map(sc, pa)
2159 struct pciide_softc *sc;
2160 struct pci_attach_args *pa;
2161 {
2162 struct pciide_channel *cp;
2163 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2164 int channel;
2165 u_int32_t ideconf;
2166 bus_size_t cmdsize, ctlsize;
2167 pcitag_t pcib_tag;
2168 pcireg_t pcib_id, pcib_class;
2169
2170 if (pciide_chipen(sc, pa) == 0)
2171 return;
2172 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2173 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2174 /* and read ID and rev of the ISA bridge */
2175 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2176 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2177 printf(": VIA Technologies ");
2178 switch (PCI_PRODUCT(pcib_id)) {
2179 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2180 printf("VT82C586 (Apollo VP) ");
2181 		if (PCI_REVISION(pcib_class) >= 0x02) {
2182 printf("ATA33 controller\n");
2183 sc->sc_wdcdev.UDMA_cap = 2;
2184 } else {
2185 printf("controller\n");
2186 sc->sc_wdcdev.UDMA_cap = 0;
2187 }
2188 break;
2189 case PCI_PRODUCT_VIATECH_VT82C596A:
2190 printf("VT82C596A (Apollo Pro) ");
2191 if (PCI_REVISION(pcib_class) >= 0x12) {
2192 printf("ATA66 controller\n");
2193 sc->sc_wdcdev.UDMA_cap = 4;
2194 } else {
2195 printf("ATA33 controller\n");
2196 sc->sc_wdcdev.UDMA_cap = 2;
2197 }
2198 break;
2199 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2200 printf("VT82C686A (Apollo KX133) ");
2201 if (PCI_REVISION(pcib_class) >= 0x40) {
2202 printf("ATA100 controller\n");
2203 sc->sc_wdcdev.UDMA_cap = 5;
2204 } else {
2205 printf("ATA66 controller\n");
2206 sc->sc_wdcdev.UDMA_cap = 4;
2207 }
2208 break;
2209 case PCI_PRODUCT_VIATECH_VT8231:
2210 printf("VT8231 ATA100 controller\n");
2211 sc->sc_wdcdev.UDMA_cap = 5;
2212 break;
2213 case PCI_PRODUCT_VIATECH_VT8233:
2214 printf("VT8233 ATA100 controller\n");
2215 sc->sc_wdcdev.UDMA_cap = 5;
2216 break;
2217 case PCI_PRODUCT_VIATECH_VT8233A:
2218 printf("VT8233A ATA133 controller\n");
2219 		/* XXX use ATA100 until ATA133 is supported */
2220 sc->sc_wdcdev.UDMA_cap = 5;
2221 break;
2222 default:
2223 printf("unknown ATA controller\n");
2224 sc->sc_wdcdev.UDMA_cap = 0;
2225 }
2226
2227 printf("%s: bus-master DMA support present",
2228 sc->sc_wdcdev.sc_dev.dv_xname);
2229 pciide_mapreg_dma(sc, pa);
2230 printf("\n");
2231 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2232 WDC_CAPABILITY_MODE;
2233 if (sc->sc_dma_ok) {
2234 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2235 sc->sc_wdcdev.irqack = pciide_irqack;
2236 if (sc->sc_wdcdev.UDMA_cap > 0)
2237 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2238 }
2239 sc->sc_wdcdev.PIO_cap = 4;
2240 sc->sc_wdcdev.DMA_cap = 2;
2241 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2242 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2243 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2244
2245 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2246 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2247 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2248 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2249 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2250 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2251 DEBUG_PROBE);
2252
2253 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2254 cp = &sc->pciide_channels[channel];
2255 if (pciide_chansetup(sc, channel, interface) == 0)
2256 continue;
2257
2258 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2259 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2260 printf("%s: %s channel ignored (disabled)\n",
2261 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2262 continue;
2263 }
2264 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2265 pciide_pci_intr);
2266 if (cp->hw_ok == 0)
2267 continue;
2268 if (pciide_chan_candisable(cp)) {
2269 ideconf &= ~APO_IDECONF_EN(channel);
2270 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2271 ideconf);
2272 }
2273 pciide_map_compat_intr(pa, cp, channel, interface);
2274
2275 if (cp->hw_ok == 0)
2276 continue;
2277 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2278 }
2279 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2280 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2281 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2282 }
2283
2284 void
2285 apollo_setup_channel(chp)
2286 struct channel_softc *chp;
2287 {
2288 u_int32_t udmatim_reg, datatim_reg;
2289 u_int8_t idedma_ctl;
2290 int mode, drive;
2291 struct ata_drive_datas *drvp;
2292 struct pciide_channel *cp = (struct pciide_channel*)chp;
2293 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2294
2295 idedma_ctl = 0;
2296 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2297 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2298 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2299 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2300
2301 /* setup DMA if needed */
2302 pciide_channel_dma_setup(cp);
2303
2304 for (drive = 0; drive < 2; drive++) {
2305 drvp = &chp->ch_drive[drive];
2306 /* If no drive, skip */
2307 if ((drvp->drive_flags & DRIVE) == 0)
2308 continue;
2309 /* add timing values, setup DMA if needed */
2310 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2311 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2312 mode = drvp->PIO_mode;
2313 goto pio;
2314 }
2315 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2316 (drvp->drive_flags & DRIVE_UDMA)) {
2317 /* use Ultra/DMA */
2318 drvp->drive_flags &= ~DRIVE_DMA;
2319 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2320 APO_UDMA_EN_MTH(chp->channel, drive);
2321 if (sc->sc_wdcdev.UDMA_cap == 5) {
2322 /* 686b */
2323 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2324 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2325 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2326 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2327 /* 596b or 686a */
2328 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2329 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2330 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2331 } else {
2332 /* 596a or 586b */
2333 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2334 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2335 }
2336 /* can use PIO timings, MW DMA unused */
2337 mode = drvp->PIO_mode;
2338 } else {
2339 /* use Multiword DMA */
2340 drvp->drive_flags &= ~DRIVE_UDMA;
2341 /* mode = min(pio, dma+2) */
2342 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2343 mode = drvp->PIO_mode;
2344 else
2345 mode = drvp->DMA_mode + 2;
2346 }
2347 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2348
2349 pio: /* setup PIO mode */
2350 if (mode <= 2) {
2351 drvp->DMA_mode = 0;
2352 drvp->PIO_mode = 0;
2353 mode = 0;
2354 } else {
2355 drvp->PIO_mode = mode;
2356 drvp->DMA_mode = mode - 2;
2357 }
2358 datatim_reg |=
2359 APO_DATATIM_PULSE(chp->channel, drive,
2360 apollo_pio_set[mode]) |
2361 APO_DATATIM_RECOV(chp->channel, drive,
2362 apollo_pio_rec[mode]);
2363 }
2364 if (idedma_ctl != 0) {
2365 /* Add software bits in status register */
2366 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2367 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2368 idedma_ctl);
2369 }
2370 pciide_print_modes(cp);
2371 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2372 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2373 }
2374
2375 void
2376 cmd_channel_map(pa, sc, channel)
2377 struct pci_attach_args *pa;
2378 struct pciide_softc *sc;
2379 int channel;
2380 {
2381 struct pciide_channel *cp = &sc->pciide_channels[channel];
2382 bus_size_t cmdsize, ctlsize;
2383 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2384 int interface, one_channel;
2385
2386 /*
2387 * The 0648/0649 can be told to identify as a RAID controller.
2388 	 * In this case, we have to fake the interface.
2389 */
2390 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2391 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2392 PCIIDE_INTERFACE_SETTABLE(1);
2393 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2394 CMD_CONF_DSA1)
2395 interface |= PCIIDE_INTERFACE_PCI(0) |
2396 PCIIDE_INTERFACE_PCI(1);
2397 } else {
2398 interface = PCI_INTERFACE(pa->pa_class);
2399 }
2400
2401 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2402 cp->name = PCIIDE_CHANNEL_NAME(channel);
2403 cp->wdc_channel.channel = channel;
2404 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2405
2406 /*
2407 	 * Older CMD64X controllers don't have independent channels
2408 */
2409 switch (sc->sc_pp->ide_product) {
2410 case PCI_PRODUCT_CMDTECH_649:
2411 one_channel = 0;
2412 break;
2413 default:
2414 one_channel = 1;
2415 break;
2416 }
2417
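	/*
	 * When the channels aren't independent, the second channel shares
	 * the first channel's command queue.
	 */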
2418 if (channel > 0 && one_channel) {
2419 cp->wdc_channel.ch_queue =
2420 sc->pciide_channels[0].wdc_channel.ch_queue;
2421 } else {
2422 cp->wdc_channel.ch_queue =
2423 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2424 }
2425 if (cp->wdc_channel.ch_queue == NULL) {
2426 printf("%s %s channel: "
2427 "can't allocate memory for command queue",
2428 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2429 return;
2430 }
2431
2432 printf("%s: %s channel %s to %s mode\n",
2433 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2434 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2435 "configured" : "wired",
2436 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2437 "native-PCI" : "compatibility");
2438
2439 /*
2440 * with a CMD PCI64x, if we get here, the first channel is enabled:
2441 * there's no way to disable the first channel without disabling
2442 * the whole device
2443 */
2444 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2445 printf("%s: %s channel ignored (disabled)\n",
2446 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2447 return;
2448 }
2449
2450 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2451 if (cp->hw_ok == 0)
2452 return;
2453 if (channel == 1) {
2454 if (pciide_chan_candisable(cp)) {
2455 ctrl &= ~CMD_CTRL_2PORT;
2456 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2457 CMD_CTRL, ctrl);
2458 }
2459 }
2460 pciide_map_compat_intr(pa, cp, channel, interface);
2461 }
2462
2463 int
2464 cmd_pci_intr(arg)
2465 void *arg;
2466 {
2467 struct pciide_softc *sc = arg;
2468 struct pciide_channel *cp;
2469 struct channel_softc *wdc_cp;
2470 int i, rv, crv;
2471 u_int32_t priirq, secirq;
2472
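	/*
	 * The interrupt pending bits live in CMD_CONF (primary channel)
	 * and CMD_ARTTIM23 (secondary channel); check them before calling
	 * wdcintr().
	 */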
2473 rv = 0;
2474 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2475 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2476 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2477 cp = &sc->pciide_channels[i];
2478 wdc_cp = &cp->wdc_channel;
2479 /* If a compat channel skip. */
2480 if (cp->compat)
2481 continue;
2482 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2483 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2484 crv = wdcintr(wdc_cp);
2485 if (crv == 0)
2486 printf("%s:%d: bogus intr\n",
2487 sc->sc_wdcdev.sc_dev.dv_xname, i);
2488 else
2489 rv = 1;
2490 }
2491 }
2492 return rv;
2493 }
2494
2495 void
2496 cmd_chip_map(sc, pa)
2497 struct pciide_softc *sc;
2498 struct pci_attach_args *pa;
2499 {
2500 int channel;
2501
2502 /*
2503 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2504 	 * and the base address registers can be disabled at
2505 	 * the hardware level. In this case, the device is wired
2506 * in compat mode and its first channel is always enabled,
2507 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2508 * In fact, it seems that the first channel of the CMD PCI0640
2509 * can't be disabled.
2510 */
2511
2512 #ifdef PCIIDE_CMD064x_DISABLE
2513 if (pciide_chipen(sc, pa) == 0)
2514 return;
2515 #endif
2516
2517 printf("%s: hardware does not support DMA\n",
2518 sc->sc_wdcdev.sc_dev.dv_xname);
2519 sc->sc_dma_ok = 0;
2520
2521 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2522 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2523 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2524
2525 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2526 cmd_channel_map(pa, sc, channel);
2527 }
2528 }
2529
2530 void
2531 cmd0643_9_chip_map(sc, pa)
2532 struct pciide_softc *sc;
2533 struct pci_attach_args *pa;
2534 {
2535 struct pciide_channel *cp;
2536 int channel;
2537 pcireg_t rev = PCI_REVISION(pa->pa_class);
2538
2539 /*
2540 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2541 	 * and the base address registers can be disabled at
2542 	 * the hardware level. In this case, the device is wired
2543 * in compat mode and its first channel is always enabled,
2544 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2545 * In fact, it seems that the first channel of the CMD PCI0640
2546 * can't be disabled.
2547 */
2548
2549 #ifdef PCIIDE_CMD064x_DISABLE
2550 if (pciide_chipen(sc, pa) == 0)
2551 return;
2552 #endif
2553 printf("%s: bus-master DMA support present",
2554 sc->sc_wdcdev.sc_dev.dv_xname);
2555 pciide_mapreg_dma(sc, pa);
2556 printf("\n");
2557 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2558 WDC_CAPABILITY_MODE;
2559 if (sc->sc_dma_ok) {
2560 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2561 switch (sc->sc_pp->ide_product) {
2562 case PCI_PRODUCT_CMDTECH_649:
2563 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2564 sc->sc_wdcdev.UDMA_cap = 5;
2565 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2566 break;
2567 case PCI_PRODUCT_CMDTECH_648:
2568 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2569 sc->sc_wdcdev.UDMA_cap = 4;
2570 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2571 break;
2572 case PCI_PRODUCT_CMDTECH_646:
2573 if (rev >= CMD0646U2_REV) {
2574 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2575 sc->sc_wdcdev.UDMA_cap = 2;
2576 } else if (rev >= CMD0646U_REV) {
2577 /*
2578 * Linux's driver claims that the 646U is broken
2579 * with UDMA. Only enable it if we know what we're
2580 * doing
2581 */
2582 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2583 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2584 sc->sc_wdcdev.UDMA_cap = 2;
2585 #endif
2586 /* explicitly disable UDMA */
2587 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2588 CMD_UDMATIM(0), 0);
2589 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2590 CMD_UDMATIM(1), 0);
2591 }
2592 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2593 break;
2594 default:
2595 sc->sc_wdcdev.irqack = pciide_irqack;
2596 }
2597 }
2598
2599 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2600 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2601 sc->sc_wdcdev.PIO_cap = 4;
2602 sc->sc_wdcdev.DMA_cap = 2;
2603 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2604
2605 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2606 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2607 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2608 DEBUG_PROBE);
2609
2610 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2611 cp = &sc->pciide_channels[channel];
2612 cmd_channel_map(pa, sc, channel);
2613 if (cp->hw_ok == 0)
2614 continue;
2615 cmd0643_9_setup_channel(&cp->wdc_channel);
2616 }
2617 /*
2618 * note - this also makes sure we clear the irq disable and reset
2619 * bits
2620 */
2621 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2622 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2623 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2624 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2625 DEBUG_PROBE);
2626 }
2627
2628 void
2629 cmd0643_9_setup_channel(chp)
2630 struct channel_softc *chp;
2631 {
2632 struct ata_drive_datas *drvp;
2633 u_int8_t tim;
2634 u_int32_t idedma_ctl, udma_reg;
2635 int drive;
2636 struct pciide_channel *cp = (struct pciide_channel*)chp;
2637 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2638
2639 idedma_ctl = 0;
2640 /* setup DMA if needed */
2641 pciide_channel_dma_setup(cp);
2642
2643 for (drive = 0; drive < 2; drive++) {
2644 drvp = &chp->ch_drive[drive];
2645 /* If no drive, skip */
2646 if ((drvp->drive_flags & DRIVE) == 0)
2647 continue;
2648 /* add timing values, setup DMA if needed */
2649 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2650 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2651 if (drvp->drive_flags & DRIVE_UDMA) {
2652 /* UltraDMA on a 646U2, 0648 or 0649 */
2653 drvp->drive_flags &= ~DRIVE_DMA;
2654 udma_reg = pciide_pci_read(sc->sc_pc,
2655 sc->sc_tag, CMD_UDMATIM(chp->channel));
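				/*
				 * UDMA modes above 2 need an 80-wire cable;
				 * fall back to UDMA2 if CMD_BICSR (presumably
				 * the cable detect) doesn't report one for
				 * this channel.
				 */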
2656 if (drvp->UDMA_mode > 2 &&
2657 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2658 CMD_BICSR) &
2659 CMD_BICSR_80(chp->channel)) == 0)
2660 drvp->UDMA_mode = 2;
2661 if (drvp->UDMA_mode > 2)
2662 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2663 else if (sc->sc_wdcdev.UDMA_cap > 2)
2664 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2665 udma_reg |= CMD_UDMATIM_UDMA(drive);
2666 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2667 CMD_UDMATIM_TIM_OFF(drive));
2668 udma_reg |=
2669 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2670 CMD_UDMATIM_TIM_OFF(drive));
2671 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2672 CMD_UDMATIM(chp->channel), udma_reg);
2673 } else {
2674 /*
2675 * use Multiword DMA.
2676 * Timings will be used for both PIO and DMA,
2677 				 * so adjust the DMA mode if needed.
2678 				 * If we have a 0646U2/8/9, turn off UDMA.
2679 */
2680 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2681 udma_reg = pciide_pci_read(sc->sc_pc,
2682 sc->sc_tag,
2683 CMD_UDMATIM(chp->channel));
2684 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2685 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2686 CMD_UDMATIM(chp->channel),
2687 udma_reg);
2688 }
2689 if (drvp->PIO_mode >= 3 &&
2690 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2691 drvp->DMA_mode = drvp->PIO_mode - 2;
2692 }
2693 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2694 }
2695 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2696 }
2697 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2698 CMD_DATA_TIM(chp->channel, drive), tim);
2699 }
2700 if (idedma_ctl != 0) {
2701 /* Add software bits in status register */
2702 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2703 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2704 idedma_ctl);
2705 }
2706 pciide_print_modes(cp);
2707 }
2708
2709 void
2710 cmd646_9_irqack(chp)
2711 struct channel_softc *chp;
2712 {
2713 u_int32_t priirq, secirq;
2714 struct pciide_channel *cp = (struct pciide_channel*)chp;
2715 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2716
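	/*
	 * Reading the register that holds the channel's interrupt bit and
	 * writing the value back presumably clears the latched interrupt;
	 * then do the generic bus-master ack.
	 */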
2717 if (chp->channel == 0) {
2718 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2719 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2720 } else {
2721 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2722 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2723 }
2724 pciide_irqack(chp);
2725 }
2726
2727 void
2728 cmd680_chip_map(sc, pa)
2729 struct pciide_softc *sc;
2730 struct pci_attach_args *pa;
2731 {
2732 struct pciide_channel *cp;
2733 int channel;
2734
2735 if (pciide_chipen(sc, pa) == 0)
2736 return;
2737 printf("%s: bus-master DMA support present",
2738 sc->sc_wdcdev.sc_dev.dv_xname);
2739 pciide_mapreg_dma(sc, pa);
2740 printf("\n");
2741 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2742 WDC_CAPABILITY_MODE;
2743 if (sc->sc_dma_ok) {
2744 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2745 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2746 sc->sc_wdcdev.UDMA_cap = 6;
2747 sc->sc_wdcdev.irqack = pciide_irqack;
2748 }
2749
2750 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2751 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2752 sc->sc_wdcdev.PIO_cap = 4;
2753 sc->sc_wdcdev.DMA_cap = 2;
2754 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2755
2756 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2757 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2758 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2759 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2760 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2761 cp = &sc->pciide_channels[channel];
2762 cmd680_channel_map(pa, sc, channel);
2763 if (cp->hw_ok == 0)
2764 continue;
2765 cmd680_setup_channel(&cp->wdc_channel);
2766 }
2767 }
2768
2769 void
2770 cmd680_channel_map(pa, sc, channel)
2771 struct pci_attach_args *pa;
2772 struct pciide_softc *sc;
2773 int channel;
2774 {
2775 struct pciide_channel *cp = &sc->pciide_channels[channel];
2776 bus_size_t cmdsize, ctlsize;
2777 int interface, i, reg;
2778 static const u_int8_t init_val[] =
2779 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2780 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2781
2782 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2783 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2784 PCIIDE_INTERFACE_SETTABLE(1);
2785 interface |= PCIIDE_INTERFACE_PCI(0) |
2786 PCIIDE_INTERFACE_PCI(1);
2787 } else {
2788 interface = PCI_INTERFACE(pa->pa_class);
2789 }
2790
2791 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2792 cp->name = PCIIDE_CHANNEL_NAME(channel);
2793 cp->wdc_channel.channel = channel;
2794 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2795
2796 cp->wdc_channel.ch_queue =
2797 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2798 if (cp->wdc_channel.ch_queue == NULL) {
2799 printf("%s %s channel: "
2800 "can't allocate memory for command queue",
2801 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2802 return;
2803 }
2804
2805 /* XXX */
2806 reg = 0xa2 + channel * 16;
2807 for (i = 0; i < sizeof(init_val); i++)
2808 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2809
2810 printf("%s: %s channel %s to %s mode\n",
2811 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2812 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2813 "configured" : "wired",
2814 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2815 "native-PCI" : "compatibility");
2816
2817 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2818 if (cp->hw_ok == 0)
2819 return;
2820 pciide_map_compat_intr(pa, cp, channel, interface);
2821 }
2822
2823 void
2824 cmd680_setup_channel(chp)
2825 struct channel_softc *chp;
2826 {
2827 struct ata_drive_datas *drvp;
2828 u_int8_t mode, off, scsc;
2829 u_int16_t val;
2830 u_int32_t idedma_ctl;
2831 int drive;
2832 struct pciide_channel *cp = (struct pciide_channel*)chp;
2833 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2834 pci_chipset_tag_t pc = sc->sc_pc;
2835 pcitag_t pa = sc->sc_tag;
2836 static const u_int8_t udma2_tbl[] =
2837 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2838 static const u_int8_t udma_tbl[] =
2839 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2840 static const u_int16_t dma_tbl[] =
2841 { 0x2208, 0x10c2, 0x10c1 };
2842 static const u_int16_t pio_tbl[] =
2843 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2844
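	/*
	 * Timing registers are laid out as base + channel * 16 + drive * 2
	 * (0xa4 PIO, 0xa8 multiword DMA, 0xac UDMA); 0x80 + channel * 4
	 * holds a 2-bit per-drive mode select.
	 */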
2845 idedma_ctl = 0;
2846 pciide_channel_dma_setup(cp);
2847 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
2848
2849 for (drive = 0; drive < 2; drive++) {
2850 drvp = &chp->ch_drive[drive];
2851 /* If no drive, skip */
2852 if ((drvp->drive_flags & DRIVE) == 0)
2853 continue;
2854 mode &= ~(0x03 << (drive * 4));
2855 if (drvp->drive_flags & DRIVE_UDMA) {
2856 drvp->drive_flags &= ~DRIVE_DMA;
2857 off = 0xa0 + chp->channel * 16;
2858 if (drvp->UDMA_mode > 2 &&
2859 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
2860 drvp->UDMA_mode = 2;
2861 scsc = pciide_pci_read(pc, pa, 0x8a);
2862 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
2863 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
2864 scsc = pciide_pci_read(pc, pa, 0x8a);
2865 if ((scsc & 0x30) == 0)
2866 drvp->UDMA_mode = 5;
2867 }
2868 mode |= 0x03 << (drive * 4);
2869 off = 0xac + chp->channel * 16 + drive * 2;
2870 val = pciide_pci_read(pc, pa, off) & ~0x3f;
2871 if (scsc & 0x30)
2872 val |= udma2_tbl[drvp->UDMA_mode];
2873 else
2874 val |= udma_tbl[drvp->UDMA_mode];
2875 pciide_pci_write(pc, pa, off, val);
2876 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2877 } else if (drvp->drive_flags & DRIVE_DMA) {
2878 mode |= 0x02 << (drive * 4);
2879 off = 0xa8 + chp->channel * 16 + drive * 2;
2880 val = dma_tbl[drvp->DMA_mode];
2881 pciide_pci_write(pc, pa, off, val & 0xff);
2882 pciide_pci_write(pc, pa, off, val >> 8);
2883 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2884 } else {
2885 mode |= 0x01 << (drive * 4);
2886 off = 0xa4 + chp->channel * 16 + drive * 2;
2887 val = pio_tbl[drvp->PIO_mode];
2888 pciide_pci_write(pc, pa, off, val & 0xff);
2889 pciide_pci_write(pc, pa, off, val >> 8);
2890 }
2891 }
2892
2893 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
2894 if (idedma_ctl != 0) {
2895 /* Add software bits in status register */
2896 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2897 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2898 idedma_ctl);
2899 }
2900 pciide_print_modes(cp);
2901 }
2902
2903 void
2904 cy693_chip_map(sc, pa)
2905 struct pciide_softc *sc;
2906 struct pci_attach_args *pa;
2907 {
2908 struct pciide_channel *cp;
2909 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2910 bus_size_t cmdsize, ctlsize;
2911
2912 if (pciide_chipen(sc, pa) == 0)
2913 return;
2914 /*
2915 	 * This chip has two PCI IDE functions, one for the primary and
2916 	 * one for the secondary channel, so we need to call
2917 	 * pciide_mapregs_compat() with the real channel number.
2918 */
2919 if (pa->pa_function == 1) {
2920 sc->sc_cy_compatchan = 0;
2921 } else if (pa->pa_function == 2) {
2922 sc->sc_cy_compatchan = 1;
2923 } else {
2924 printf("%s: unexpected PCI function %d\n",
2925 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2926 return;
2927 }
2928 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2929 printf("%s: bus-master DMA support present",
2930 sc->sc_wdcdev.sc_dev.dv_xname);
2931 pciide_mapreg_dma(sc, pa);
2932 } else {
2933 printf("%s: hardware does not support DMA",
2934 sc->sc_wdcdev.sc_dev.dv_xname);
2935 sc->sc_dma_ok = 0;
2936 }
2937 printf("\n");
2938
2939 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2940 if (sc->sc_cy_handle == NULL) {
2941 printf("%s: unable to map hyperCache control registers\n",
2942 sc->sc_wdcdev.sc_dev.dv_xname);
2943 sc->sc_dma_ok = 0;
2944 }
2945
2946 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2947 WDC_CAPABILITY_MODE;
2948 if (sc->sc_dma_ok) {
2949 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2950 sc->sc_wdcdev.irqack = pciide_irqack;
2951 }
2952 sc->sc_wdcdev.PIO_cap = 4;
2953 sc->sc_wdcdev.DMA_cap = 2;
2954 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2955
2956 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2957 sc->sc_wdcdev.nchannels = 1;
2958
2959 /* Only one channel for this chip; if we are here it's enabled */
2960 cp = &sc->pciide_channels[0];
2961 sc->wdc_chanarray[0] = &cp->wdc_channel;
2962 cp->name = PCIIDE_CHANNEL_NAME(0);
2963 cp->wdc_channel.channel = 0;
2964 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2965 cp->wdc_channel.ch_queue =
2966 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2967 if (cp->wdc_channel.ch_queue == NULL) {
2968 printf("%s primary channel: "
2969 "can't allocate memory for command queue",
2970 sc->sc_wdcdev.sc_dev.dv_xname);
2971 return;
2972 }
2973 printf("%s: primary channel %s to ",
2974 sc->sc_wdcdev.sc_dev.dv_xname,
2975 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2976 "configured" : "wired");
2977 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2978 printf("native-PCI");
2979 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2980 pciide_pci_intr);
2981 } else {
2982 printf("compatibility");
2983 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2984 &cmdsize, &ctlsize);
2985 }
2986 printf(" mode\n");
2987 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2988 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2989 wdcattach(&cp->wdc_channel);
2990 if (pciide_chan_candisable(cp)) {
2991 pci_conf_write(sc->sc_pc, sc->sc_tag,
2992 PCI_COMMAND_STATUS_REG, 0);
2993 }
2994 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2995 if (cp->hw_ok == 0)
2996 return;
2997 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2998 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2999 cy693_setup_channel(&cp->wdc_channel);
3000 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3001 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3002 }
3003
3004 void
3005 cy693_setup_channel(chp)
3006 struct channel_softc *chp;
3007 {
3008 struct ata_drive_datas *drvp;
3009 int drive;
3010 u_int32_t cy_cmd_ctrl;
3011 u_int32_t idedma_ctl;
3012 struct pciide_channel *cp = (struct pciide_channel*)chp;
3013 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3014 int dma_mode = -1;
3015
3016 cy_cmd_ctrl = idedma_ctl = 0;
3017
3018 /* setup DMA if needed */
3019 pciide_channel_dma_setup(cp);
3020
3021 for (drive = 0; drive < 2; drive++) {
3022 drvp = &chp->ch_drive[drive];
3023 /* If no drive, skip */
3024 if ((drvp->drive_flags & DRIVE) == 0)
3025 continue;
3026 /* add timing values, setup DMA if needed */
3027 if (drvp->drive_flags & DRIVE_DMA) {
3028 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3029 /* use Multiword DMA */
3030 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3031 dma_mode = drvp->DMA_mode;
3032 }
3033 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3034 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3035 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3036 CY_CMD_CTRL_IOW_REC_OFF(drive));
3037 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3038 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3039 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3040 CY_CMD_CTRL_IOR_REC_OFF(drive));
3041 }
3042 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
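	/*
	 * The DMA timing is per-channel here, so both drives are set to
	 * the lowest DMA mode selected above.
	 */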
3043 chp->ch_drive[0].DMA_mode = dma_mode;
3044 chp->ch_drive[1].DMA_mode = dma_mode;
3045
3046 if (dma_mode == -1)
3047 dma_mode = 0;
3048
3049 if (sc->sc_cy_handle != NULL) {
3050 /* Note: `multiple' is implied. */
3051 cy82c693_write(sc->sc_cy_handle,
3052 (sc->sc_cy_compatchan == 0) ?
3053 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3054 }
3055
3056 pciide_print_modes(cp);
3057
3058 if (idedma_ctl != 0) {
3059 /* Add software bits in status register */
3060 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3061 IDEDMA_CTL, idedma_ctl);
3062 }
3063 }
3064
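/*
 * Match the SiS host bridges behind which UDMA/100 can be used
 * (see sis_chip_map() below).
 */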
3065 static int
3066 sis_hostbr_match(pa)
3067 struct pci_attach_args *pa;
3068 {
3069 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
3070 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
3071 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
3072 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
3073 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735) ||
3074 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_745)));
3075 }
3076
3077 void
3078 sis_chip_map(sc, pa)
3079 struct pciide_softc *sc;
3080 struct pci_attach_args *pa;
3081 {
3082 struct pciide_channel *cp;
3083 int channel;
3084 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3085 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3086 pcireg_t rev = PCI_REVISION(pa->pa_class);
3087 bus_size_t cmdsize, ctlsize;
3088 pcitag_t pchb_tag;
3089 pcireg_t pchb_id, pchb_class;
3090
3091 if (pciide_chipen(sc, pa) == 0)
3092 return;
3093 printf("%s: bus-master DMA support present",
3094 sc->sc_wdcdev.sc_dev.dv_xname);
3095 pciide_mapreg_dma(sc, pa);
3096 printf("\n");
3097
3098 /* get a PCI tag for the host bridge (function 0 of the same device) */
3099 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
3100 /* and read ID and rev of the ISA bridge */
3101 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
3102 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
3103
3104 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3105 WDC_CAPABILITY_MODE;
3106 if (sc->sc_dma_ok) {
3107 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3108 sc->sc_wdcdev.irqack = pciide_irqack;
3109 /*
3110 		 * Controllers associated with a rev 0x02 530 host-to-PCI
3111 		 * bridge have problems with UDMA (info provided by Christos)
3112 */
3113 if (rev >= 0xd0 &&
3114 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
3115 PCI_REVISION(pchb_class) >= 0x03))
3116 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3117 }
3118
3119 sc->sc_wdcdev.PIO_cap = 4;
3120 sc->sc_wdcdev.DMA_cap = 2;
3121 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
3122 /*
3123 		 * Use UDMA/100 if one of the SiS host bridges matched by
3124 		 * sis_hostbr_match() is present, and UDMA/33 on other chipsets.
3125 */
3126 sc->sc_wdcdev.UDMA_cap =
3127 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
3128 sc->sc_wdcdev.set_modes = sis_setup_channel;
3129
3130 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3131 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3132
3133 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3134 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3135 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
3136
3137 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3138 cp = &sc->pciide_channels[channel];
3139 if (pciide_chansetup(sc, channel, interface) == 0)
3140 continue;
3141 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3142 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3143 printf("%s: %s channel ignored (disabled)\n",
3144 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3145 continue;
3146 }
3147 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3148 pciide_pci_intr);
3149 if (cp->hw_ok == 0)
3150 continue;
3151 if (pciide_chan_candisable(cp)) {
3152 if (channel == 0)
3153 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3154 else
3155 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3156 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3157 sis_ctr0);
3158 }
3159 pciide_map_compat_intr(pa, cp, channel, interface);
3160 if (cp->hw_ok == 0)
3161 continue;
3162 sis_setup_channel(&cp->wdc_channel);
3163 }
3164 }
3165
3166 void
3167 sis_setup_channel(chp)
3168 struct channel_softc *chp;
3169 {
3170 struct ata_drive_datas *drvp;
3171 int drive;
3172 u_int32_t sis_tim;
3173 u_int32_t idedma_ctl;
3174 struct pciide_channel *cp = (struct pciide_channel*)chp;
3175 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3176
3177 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3178 "channel %d 0x%x\n", chp->channel,
3179 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3180 DEBUG_PROBE);
3181 sis_tim = 0;
3182 idedma_ctl = 0;
3183 /* setup DMA if needed */
3184 pciide_channel_dma_setup(cp);
3185
3186 for (drive = 0; drive < 2; drive++) {
3187 drvp = &chp->ch_drive[drive];
3188 /* If no drive, skip */
3189 if ((drvp->drive_flags & DRIVE) == 0)
3190 continue;
3191 /* add timing values, setup DMA if needed */
3192 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3193 (drvp->drive_flags & DRIVE_UDMA) == 0)
3194 goto pio;
3195
3196 if (drvp->drive_flags & DRIVE_UDMA) {
3197 /* use Ultra/DMA */
3198 drvp->drive_flags &= ~DRIVE_DMA;
3199 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3200 SIS_TIM_UDMA_TIME_OFF(drive);
3201 sis_tim |= SIS_TIM_UDMA_EN(drive);
3202 } else {
3203 /*
3204 * use Multiword DMA
3205 * Timings will be used for both PIO and DMA,
3206 * so adjust DMA mode if needed
3207 */
3208 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3209 drvp->PIO_mode = drvp->DMA_mode + 2;
3210 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3211 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3212 drvp->PIO_mode - 2 : 0;
3213 if (drvp->DMA_mode == 0)
3214 drvp->PIO_mode = 0;
3215 }
3216 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3217 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3218 SIS_TIM_ACT_OFF(drive);
3219 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3220 SIS_TIM_REC_OFF(drive);
3221 }
3222 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3223 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3224 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3225 if (idedma_ctl != 0) {
3226 /* Add software bits in status register */
3227 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3228 IDEDMA_CTL, idedma_ctl);
3229 }
3230 pciide_print_modes(cp);
3231 }
3232
3233 void
3234 acer_chip_map(sc, pa)
3235 struct pciide_softc *sc;
3236 struct pci_attach_args *pa;
3237 {
3238 struct pciide_channel *cp;
3239 int channel;
3240 pcireg_t cr, interface;
3241 bus_size_t cmdsize, ctlsize;
3242 pcireg_t rev = PCI_REVISION(pa->pa_class);
3243
3244 if (pciide_chipen(sc, pa) == 0)
3245 return;
3246 printf("%s: bus-master DMA support present",
3247 sc->sc_wdcdev.sc_dev.dv_xname);
3248 pciide_mapreg_dma(sc, pa);
3249 printf("\n");
3250 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3251 WDC_CAPABILITY_MODE;
3252 if (sc->sc_dma_ok) {
3253 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3254 if (rev >= 0x20) {
3255 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3256 if (rev >= 0xC4)
3257 sc->sc_wdcdev.UDMA_cap = 5;
3258 else if (rev >= 0xC2)
3259 sc->sc_wdcdev.UDMA_cap = 4;
3260 else
3261 sc->sc_wdcdev.UDMA_cap = 2;
3262 }
3263 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3264 sc->sc_wdcdev.irqack = pciide_irqack;
3265 }
3266
3267 sc->sc_wdcdev.PIO_cap = 4;
3268 sc->sc_wdcdev.DMA_cap = 2;
3269 sc->sc_wdcdev.set_modes = acer_setup_channel;
3270 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3271 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3272
3273 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3274 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3275 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3276
3277 /* Enable "microsoft register bits" R/W. */
3278 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3279 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3280 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3281 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3282 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3283 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3284 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3285 ~ACER_CHANSTATUSREGS_RO);
3286 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3287 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3288 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3289 /* Don't use cr, re-read the real register content instead */
3290 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3291 PCI_CLASS_REG));
3292
3293 /* From linux: enable "Cable Detection" */
3294 if (rev >= 0xC2) {
3295 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3296 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3297 | ACER_0x4B_CDETECT);
3298 }
3299
3300 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3301 cp = &sc->pciide_channels[channel];
3302 if (pciide_chansetup(sc, channel, interface) == 0)
3303 continue;
3304 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3305 printf("%s: %s channel ignored (disabled)\n",
3306 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3307 continue;
3308 }
3309 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3310 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3311 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3312 if (cp->hw_ok == 0)
3313 continue;
3314 if (pciide_chan_candisable(cp)) {
3315 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3316 pci_conf_write(sc->sc_pc, sc->sc_tag,
3317 PCI_CLASS_REG, cr);
3318 }
3319 pciide_map_compat_intr(pa, cp, channel, interface);
3320 acer_setup_channel(&cp->wdc_channel);
3321 }
3322 }
3323
3324 void
3325 acer_setup_channel(chp)
3326 struct channel_softc *chp;
3327 {
3328 struct ata_drive_datas *drvp;
3329 int drive;
3330 u_int32_t acer_fifo_udma;
3331 u_int32_t idedma_ctl;
3332 struct pciide_channel *cp = (struct pciide_channel*)chp;
3333 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3334
3335 idedma_ctl = 0;
3336 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3337 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3338 acer_fifo_udma), DEBUG_PROBE);
3339 /* setup DMA if needed */
3340 pciide_channel_dma_setup(cp);
3341
3342 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3343 	    DRIVE_UDMA) { /* check for an 80-pin cable */
3344 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3345 ACER_0x4A_80PIN(chp->channel)) {
3346 if (chp->ch_drive[0].UDMA_mode > 2)
3347 chp->ch_drive[0].UDMA_mode = 2;
3348 if (chp->ch_drive[1].UDMA_mode > 2)
3349 chp->ch_drive[1].UDMA_mode = 2;
3350 }
3351 }
3352
3353 for (drive = 0; drive < 2; drive++) {
3354 drvp = &chp->ch_drive[drive];
3355 /* If no drive, skip */
3356 if ((drvp->drive_flags & DRIVE) == 0)
3357 continue;
3358 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3359 "channel %d drive %d 0x%x\n", chp->channel, drive,
3360 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3361 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3362 /* clear FIFO/DMA mode */
3363 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3364 ACER_UDMA_EN(chp->channel, drive) |
3365 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3366
3367 /* add timing values, setup DMA if needed */
3368 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3369 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3370 acer_fifo_udma |=
3371 ACER_FTH_OPL(chp->channel, drive, 0x1);
3372 goto pio;
3373 }
3374
3375 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3376 if (drvp->drive_flags & DRIVE_UDMA) {
3377 /* use Ultra/DMA */
3378 drvp->drive_flags &= ~DRIVE_DMA;
3379 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3380 acer_fifo_udma |=
3381 ACER_UDMA_TIM(chp->channel, drive,
3382 acer_udma[drvp->UDMA_mode]);
3383 /* XXX disable if one drive < UDMA3 ? */
3384 if (drvp->UDMA_mode >= 3) {
3385 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3386 ACER_0x4B,
3387 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3388 ACER_0x4B) | ACER_0x4B_UDMA66);
3389 }
3390 } else {
3391 /*
3392 * use Multiword DMA
3393 * Timings will be used for both PIO and DMA,
3394 * so adjust DMA mode if needed
3395 */
3396 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3397 drvp->PIO_mode = drvp->DMA_mode + 2;
3398 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3399 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3400 drvp->PIO_mode - 2 : 0;
3401 if (drvp->DMA_mode == 0)
3402 drvp->PIO_mode = 0;
3403 }
3404 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3405 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3406 ACER_IDETIM(chp->channel, drive),
3407 acer_pio[drvp->PIO_mode]);
3408 }
3409 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3410 acer_fifo_udma), DEBUG_PROBE);
3411 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3412 if (idedma_ctl != 0) {
3413 /* Add software bits in status register */
3414 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3415 IDEDMA_CTL, idedma_ctl);
3416 }
3417 pciide_print_modes(cp);
3418 }
3419
3420 int
3421 acer_pci_intr(arg)
3422 void *arg;
3423 {
3424 struct pciide_softc *sc = arg;
3425 struct pciide_channel *cp;
3426 struct channel_softc *wdc_cp;
3427 int i, rv, crv;
3428 u_int32_t chids;
3429
3430 rv = 0;
3431 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3432 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3433 cp = &sc->pciide_channels[i];
3434 wdc_cp = &cp->wdc_channel;
3435 /* If a compat channel skip. */
3436 if (cp->compat)
3437 continue;
3438 if (chids & ACER_CHIDS_INT(i)) {
3439 crv = wdcintr(wdc_cp);
3440 if (crv == 0)
3441 printf("%s:%d: bogus intr\n",
3442 sc->sc_wdcdev.sc_dev.dv_xname, i);
3443 else
3444 rv = 1;
3445 }
3446 }
3447 return rv;
3448 }
3449
3450 void
3451 hpt_chip_map(sc, pa)
3452 struct pciide_softc *sc;
3453 struct pci_attach_args *pa;
3454 {
3455 struct pciide_channel *cp;
3456 int i, compatchan, revision;
3457 pcireg_t interface;
3458 bus_size_t cmdsize, ctlsize;
3459
3460 if (pciide_chipen(sc, pa) == 0)
3461 return;
3462 revision = PCI_REVISION(pa->pa_class);
3463 printf(": Triones/Highpoint ");
3464 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3465 printf("HPT374 IDE Controller\n");
3466 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3467 printf("HPT372 IDE Controller\n");
3468 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3469 if (revision == HPT372_REV)
3470 printf("HPT372 IDE Controller\n");
3471 else if (revision == HPT370_REV)
3472 printf("HPT370 IDE Controller\n");
3473 else if (revision == HPT370A_REV)
3474 printf("HPT370A IDE Controller\n");
3475 else if (revision == HPT366_REV)
3476 printf("HPT366 IDE Controller\n");
3477 else
3478 printf("unknown HPT IDE controller rev %d\n", revision);
3479 } else
3480 printf("unknown HPT IDE controller 0x%x\n",
3481 sc->sc_pp->ide_product);
3482
3483 /*
3484 	 * When the chip is in native mode it identifies itself as a
3485 	 * 'misc mass storage' device. Fake the interface in this case.
3486 */
3487 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3488 interface = PCI_INTERFACE(pa->pa_class);
3489 } else {
3490 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3491 PCIIDE_INTERFACE_PCI(0);
3492 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3493 (revision == HPT370_REV || revision == HPT370A_REV ||
3494 revision == HPT372_REV)) ||
3495 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3496 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3497 interface |= PCIIDE_INTERFACE_PCI(1);
3498 }
3499
3500 printf("%s: bus-master DMA support present",
3501 sc->sc_wdcdev.sc_dev.dv_xname);
3502 pciide_mapreg_dma(sc, pa);
3503 printf("\n");
3504 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3505 WDC_CAPABILITY_MODE;
3506 if (sc->sc_dma_ok) {
3507 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3508 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3509 sc->sc_wdcdev.irqack = pciide_irqack;
3510 }
3511 sc->sc_wdcdev.PIO_cap = 4;
3512 sc->sc_wdcdev.DMA_cap = 2;
3513
3514 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3515 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3516 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3517 revision == HPT366_REV) {
3518 sc->sc_wdcdev.UDMA_cap = 4;
3519 /*
3520 * The 366 has 2 PCI IDE functions, one for primary and one
3521 * for secondary. So we need to call pciide_mapregs_compat()
3522 * with the real channel
3523 */
3524 if (pa->pa_function == 0) {
3525 compatchan = 0;
3526 } else if (pa->pa_function == 1) {
3527 compatchan = 1;
3528 } else {
3529 printf("%s: unexpected PCI function %d\n",
3530 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3531 return;
3532 }
3533 sc->sc_wdcdev.nchannels = 1;
3534 } else {
3535 sc->sc_wdcdev.nchannels = 2;
3536 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3537 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3538 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3539 revision == HPT372_REV))
3540 sc->sc_wdcdev.UDMA_cap = 6;
3541 else
3542 sc->sc_wdcdev.UDMA_cap = 5;
3543 }
3544 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3545 cp = &sc->pciide_channels[i];
3546 if (sc->sc_wdcdev.nchannels > 1) {
3547 compatchan = i;
3548 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3549 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3550 printf("%s: %s channel ignored (disabled)\n",
3551 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3552 continue;
3553 }
3554 }
3555 if (pciide_chansetup(sc, i, interface) == 0)
3556 continue;
3557 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3558 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3559 &ctlsize, hpt_pci_intr);
3560 } else {
3561 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3562 &cmdsize, &ctlsize);
3563 }
3564 if (cp->hw_ok == 0)
3565 return;
3566 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3567 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3568 wdcattach(&cp->wdc_channel);
3569 hpt_setup_channel(&cp->wdc_channel);
3570 }
3571 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3572 (revision == HPT370_REV || revision == HPT370A_REV ||
3573 revision == HPT372_REV)) ||
3574 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3575 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3576 /*
3577 	 * HPT370_REV and higher have a bit to disable interrupts;
3578 	 * make sure to clear it
3579 */
3580 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3581 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3582 ~HPT_CSEL_IRQDIS);
3583 }
3584 	/* set clocks, etc. (mandatory on 372/374, optional otherwise) */
3585 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3586 revision == HPT372_REV ) ||
3587 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3588 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3589 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3590 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3591 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3592 return;
3593 }
3594
3595 void
3596 hpt_setup_channel(chp)
3597 struct channel_softc *chp;
3598 {
3599 struct ata_drive_datas *drvp;
3600 int drive;
3601 int cable;
3602 u_int32_t before, after;
3603 u_int32_t idedma_ctl;
3604 struct pciide_channel *cp = (struct pciide_channel*)chp;
3605 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3606 int revision =
3607 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3608
3609 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3610
3611 /* setup DMA if needed */
3612 pciide_channel_dma_setup(cp);
3613
3614 idedma_ctl = 0;
3615
3616 /* Per drive settings */
3617 for (drive = 0; drive < 2; drive++) {
3618 drvp = &chp->ch_drive[drive];
3619 /* If no drive, skip */
3620 if ((drvp->drive_flags & DRIVE) == 0)
3621 continue;
3622 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3623 HPT_IDETIM(chp->channel, drive));
3624
3625 /* add timing values, setup DMA if needed */
3626 if (drvp->drive_flags & DRIVE_UDMA) {
3627 /* use Ultra/DMA */
3628 drvp->drive_flags &= ~DRIVE_DMA;
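			/*
			 * If the cable-ID bit for this channel is set
			 * (presumably no 80-wire cable present), limit
			 * UDMA to mode 2 (Ultra/33).
			 */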
3629 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3630 drvp->UDMA_mode > 2)
3631 drvp->UDMA_mode = 2;
3632 switch (sc->sc_pp->ide_product) {
3633 case PCI_PRODUCT_TRIONES_HPT374:
3634 after = hpt374_udma[drvp->UDMA_mode];
3635 break;
3636 case PCI_PRODUCT_TRIONES_HPT372:
3637 after = hpt372_udma[drvp->UDMA_mode];
3638 break;
3639 case PCI_PRODUCT_TRIONES_HPT366:
3640 default:
3641 switch(revision) {
3642 case HPT372_REV:
3643 after = hpt372_udma[drvp->UDMA_mode];
3644 break;
3645 case HPT370_REV:
3646 case HPT370A_REV:
3647 after = hpt370_udma[drvp->UDMA_mode];
3648 break;
3649 case HPT366_REV:
3650 default:
3651 after = hpt366_udma[drvp->UDMA_mode];
3652 break;
3653 }
3654 }
3655 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3656 } else if (drvp->drive_flags & DRIVE_DMA) {
3657 /*
3658 * use Multiword DMA.
3659 * Timings will be used for both PIO and DMA, so adjust
3660 * DMA mode if needed
3661 */
3662 if (drvp->PIO_mode >= 3 &&
3663 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3664 drvp->DMA_mode = drvp->PIO_mode - 2;
3665 }
3666 switch (sc->sc_pp->ide_product) {
3667 case PCI_PRODUCT_TRIONES_HPT374:
3668 after = hpt374_dma[drvp->DMA_mode];
3669 break;
3670 case PCI_PRODUCT_TRIONES_HPT372:
3671 after = hpt372_dma[drvp->DMA_mode];
3672 break;
3673 case PCI_PRODUCT_TRIONES_HPT366:
3674 default:
3675 switch(revision) {
3676 case HPT372_REV:
3677 after = hpt372_dma[drvp->DMA_mode];
3678 break;
3679 case HPT370_REV:
3680 case HPT370A_REV:
3681 after = hpt370_dma[drvp->DMA_mode];
3682 break;
3683 case HPT366_REV:
3684 default:
3685 after = hpt366_dma[drvp->DMA_mode];
3686 break;
3687 }
3688 }
3689 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3690 } else {
3691 /* PIO only */
3692 switch (sc->sc_pp->ide_product) {
3693 case PCI_PRODUCT_TRIONES_HPT374:
3694 after = hpt374_pio[drvp->PIO_mode];
3695 break;
3696 case PCI_PRODUCT_TRIONES_HPT372:
3697 after = hpt372_pio[drvp->PIO_mode];
3698 break;
3699 case PCI_PRODUCT_TRIONES_HPT366:
3700 default:
3701 switch(revision) {
3702 case HPT372_REV:
3703 after = hpt372_pio[drvp->PIO_mode];
3704 break;
3705 case HPT370_REV:
3706 case HPT370A_REV:
3707 after = hpt370_pio[drvp->PIO_mode];
3708 break;
3709 case HPT366_REV:
3710 default:
3711 after = hpt366_pio[drvp->PIO_mode];
3712 break;
3713 }
3714 }
3715 }
3716 pci_conf_write(sc->sc_pc, sc->sc_tag,
3717 HPT_IDETIM(chp->channel, drive), after);
3718 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3719 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3720 after, before), DEBUG_PROBE);
3721 }
3722 if (idedma_ctl != 0) {
3723 /* Add software bits in status register */
3724 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3725 IDEDMA_CTL, idedma_ctl);
3726 }
3727 pciide_print_modes(cp);
3728 }
3729
3730 int
3731 hpt_pci_intr(arg)
3732 void *arg;
3733 {
3734 struct pciide_softc *sc = arg;
3735 struct pciide_channel *cp;
3736 struct channel_softc *wdc_cp;
3737 int rv = 0;
3738 int dmastat, i, crv;
3739
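	/*
	 * There is no shared interrupt-status register to consult here, so
	 * poll each channel's IDEDMA_CTL and only dispatch to wdcintr()
	 * when the interrupt bit is set and the channel is no longer active.
	 */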
3740 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3741 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3742 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3743 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3744 IDEDMA_CTL_INTR)
3745 continue;
3746 cp = &sc->pciide_channels[i];
3747 wdc_cp = &cp->wdc_channel;
3748 crv = wdcintr(wdc_cp);
3749 if (crv == 0) {
3750 printf("%s:%d: bogus intr\n",
3751 sc->sc_wdcdev.sc_dev.dv_xname, i);
3752 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3753 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3754 } else
3755 rv = 1;
3756 }
3757 return rv;
3758 }
3759
3760
3761 /* Macros to test product */
3762 #define PDC_IS_262(sc) \
3763 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3764 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3765 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3766 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3767 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3768 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3769 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3770 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3771 #define PDC_IS_265(sc) \
3772 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3773 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3774 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3775 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3776 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3777 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3778 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3779 #define PDC_IS_268(sc) \
3780 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3781 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3782 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3783 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3784 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
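/*
 * Rough meaning of the groups above: PDC_IS_262 matches the Ultra/66 and
 * newer chips, PDC_IS_265 the Ultra/100 and newer ones, and PDC_IS_268 the
 * TX2/133 parts that are handled by pdc20268_setup_channel() (no explicit
 * timing registers to program).
 */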
3785
3786 void
3787 pdc202xx_chip_map(sc, pa)
3788 struct pciide_softc *sc;
3789 struct pci_attach_args *pa;
3790 {
3791 struct pciide_channel *cp;
3792 int channel;
3793 pcireg_t interface, st, mode;
3794 bus_size_t cmdsize, ctlsize;
3795
3796 if (!PDC_IS_268(sc)) {
3797 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3798 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3799 st), DEBUG_PROBE);
3800 }
3801 if (pciide_chipen(sc, pa) == 0)
3802 return;
3803
3804 /* turn off RAID mode */
3805 if (!PDC_IS_268(sc))
3806 st &= ~PDC2xx_STATE_IDERAID;
3807
3808 /*
3809 	 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
3810 	 * mode. We have to fake the interface.
3811 */
3812 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3813 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3814 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3815
3816 printf("%s: bus-master DMA support present",
3817 sc->sc_wdcdev.sc_dev.dv_xname);
3818 pciide_mapreg_dma(sc, pa);
3819 printf("\n");
3820 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3821 WDC_CAPABILITY_MODE;
3822 if (sc->sc_dma_ok) {
3823 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3824 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3825 sc->sc_wdcdev.irqack = pciide_irqack;
3826 }
3827 sc->sc_wdcdev.PIO_cap = 4;
3828 sc->sc_wdcdev.DMA_cap = 2;
3829 if (PDC_IS_265(sc))
3830 sc->sc_wdcdev.UDMA_cap = 5;
3831 else if (PDC_IS_262(sc))
3832 sc->sc_wdcdev.UDMA_cap = 4;
3833 else
3834 sc->sc_wdcdev.UDMA_cap = 2;
3835 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3836 pdc20268_setup_channel : pdc202xx_setup_channel;
3837 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3838 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3839
3840 if (!PDC_IS_268(sc)) {
3841 /* setup failsafe defaults */
3842 mode = 0;
3843 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3844 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3845 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3846 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3847 for (channel = 0;
3848 channel < sc->sc_wdcdev.nchannels;
3849 channel++) {
3850 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3851 "drive 0 initial timings 0x%x, now 0x%x\n",
3852 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3853 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3854 DEBUG_PROBE);
3855 pci_conf_write(sc->sc_pc, sc->sc_tag,
3856 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3857 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3858 "drive 1 initial timings 0x%x, now 0x%x\n",
3859 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3860 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3861 pci_conf_write(sc->sc_pc, sc->sc_tag,
3862 PDC2xx_TIM(channel, 1), mode);
3863 }
3864
3865 mode = PDC2xx_SCR_DMA;
3866 if (PDC_IS_262(sc)) {
3867 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3868 } else {
3869 /* the BIOS set it up this way */
3870 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3871 }
3872 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3873 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3874 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3875 "now 0x%x\n",
3876 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3877 PDC2xx_SCR),
3878 mode), DEBUG_PROBE);
3879 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3880 PDC2xx_SCR, mode);
3881
3882 /* controller initial state register is OK even without BIOS */
3883 /* Set DMA mode to IDE DMA compatibility */
3884 mode =
3885 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3886 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3887 DEBUG_PROBE);
3888 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3889 mode | 0x1);
3890 mode =
3891 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3892 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3893 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3894 mode | 0x1);
3895 }
3896
3897 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3898 cp = &sc->pciide_channels[channel];
3899 if (pciide_chansetup(sc, channel, interface) == 0)
3900 continue;
3901 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3902 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3903 printf("%s: %s channel ignored (disabled)\n",
3904 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3905 continue;
3906 }
3907 if (PDC_IS_265(sc))
3908 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3909 pdc20265_pci_intr);
3910 else
3911 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3912 pdc202xx_pci_intr);
3913 if (cp->hw_ok == 0)
3914 continue;
3915 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3916 st &= ~(PDC_IS_262(sc) ?
3917 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3918 pciide_map_compat_intr(pa, cp, channel, interface);
3919 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3920 }
3921 if (!PDC_IS_268(sc)) {
3922 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3923 "0x%x\n", st), DEBUG_PROBE);
3924 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3925 }
3926 return;
3927 }
3928
3929 void
3930 pdc202xx_setup_channel(chp)
3931 struct channel_softc *chp;
3932 {
3933 struct ata_drive_datas *drvp;
3934 int drive;
3935 pcireg_t mode, st;
3936 u_int32_t idedma_ctl, scr, atapi;
3937 struct pciide_channel *cp = (struct pciide_channel*)chp;
3938 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3939 int channel = chp->channel;
3940
3941 /* setup DMA if needed */
3942 pciide_channel_dma_setup(cp);
3943
3944 idedma_ctl = 0;
3945 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3946 sc->sc_wdcdev.sc_dev.dv_xname,
3947 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3948 DEBUG_PROBE);
3949
3950 /* Per channel settings */
3951 if (PDC_IS_262(sc)) {
3952 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3953 PDC262_U66);
3954 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3955 /* Trim UDMA mode */
3956 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3957 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3958 chp->ch_drive[0].UDMA_mode <= 2) ||
3959 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3960 chp->ch_drive[1].UDMA_mode <= 2)) {
3961 if (chp->ch_drive[0].UDMA_mode > 2)
3962 chp->ch_drive[0].UDMA_mode = 2;
3963 if (chp->ch_drive[1].UDMA_mode > 2)
3964 chp->ch_drive[1].UDMA_mode = 2;
3965 }
3966 /* Set U66 if needed */
3967 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3968 chp->ch_drive[0].UDMA_mode > 2) ||
3969 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3970 chp->ch_drive[1].UDMA_mode > 2))
3971 scr |= PDC262_U66_EN(channel);
3972 else
3973 scr &= ~PDC262_U66_EN(channel);
3974 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3975 PDC262_U66, scr);
3976 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3977 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3978 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3979 PDC262_ATAPI(channel))), DEBUG_PROBE);
3980 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3981 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3982 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3983 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3984 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3985 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3986 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3987 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3988 atapi = 0;
3989 else
3990 atapi = PDC262_ATAPI_UDMA;
3991 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3992 PDC262_ATAPI(channel), atapi);
3993 }
3994 }
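	/* Per drive settings */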
3995 for (drive = 0; drive < 2; drive++) {
3996 drvp = &chp->ch_drive[drive];
3997 /* If no drive, skip */
3998 if ((drvp->drive_flags & DRIVE) == 0)
3999 continue;
4000 mode = 0;
4001 if (drvp->drive_flags & DRIVE_UDMA) {
4002 /* use Ultra/DMA */
4003 drvp->drive_flags &= ~DRIVE_DMA;
4004 mode = PDC2xx_TIM_SET_MB(mode,
4005 pdc2xx_udma_mb[drvp->UDMA_mode]);
4006 mode = PDC2xx_TIM_SET_MC(mode,
4007 pdc2xx_udma_mc[drvp->UDMA_mode]);
4008 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4009 } else if (drvp->drive_flags & DRIVE_DMA) {
4010 mode = PDC2xx_TIM_SET_MB(mode,
4011 pdc2xx_dma_mb[drvp->DMA_mode]);
4012 mode = PDC2xx_TIM_SET_MC(mode,
4013 pdc2xx_dma_mc[drvp->DMA_mode]);
4014 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4015 } else {
4016 mode = PDC2xx_TIM_SET_MB(mode,
4017 pdc2xx_dma_mb[0]);
4018 mode = PDC2xx_TIM_SET_MC(mode,
4019 pdc2xx_dma_mc[0]);
4020 }
4021 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4022 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4023 if (drvp->drive_flags & DRIVE_ATA)
4024 mode |= PDC2xx_TIM_PRE;
4025 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4026 if (drvp->PIO_mode >= 3) {
4027 mode |= PDC2xx_TIM_IORDY;
4028 if (drive == 0)
4029 mode |= PDC2xx_TIM_IORDYp;
4030 }
4031 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4032 "timings 0x%x\n",
4033 sc->sc_wdcdev.sc_dev.dv_xname,
4034 chp->channel, drive, mode), DEBUG_PROBE);
4035 pci_conf_write(sc->sc_pc, sc->sc_tag,
4036 PDC2xx_TIM(chp->channel, drive), mode);
4037 }
4038 if (idedma_ctl != 0) {
4039 /* Add software bits in status register */
4040 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4041 IDEDMA_CTL, idedma_ctl);
4042 }
4043 pciide_print_modes(cp);
4044 }
4045
4046 void
4047 pdc20268_setup_channel(chp)
4048 struct channel_softc *chp;
4049 {
4050 struct ata_drive_datas *drvp;
4051 int drive;
4052 u_int32_t idedma_ctl;
4053 struct pciide_channel *cp = (struct pciide_channel*)chp;
4054 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4055 int u100;
4056
4057 /* setup DMA if needed */
4058 pciide_channel_dma_setup(cp);
4059
4060 idedma_ctl = 0;
4061
4062 	/* I don't know what this is for; FreeBSD does it ... */
4063 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4064 IDEDMA_CMD + 0x1, 0x0b);
4065
4066 /*
4067 * I don't know what this is for; FreeBSD checks this ... this is not
4068 	 * cable-type detection.
4069 */
4070 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4071 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
4072
4073 for (drive = 0; drive < 2; drive++) {
4074 drvp = &chp->ch_drive[drive];
4075 /* If no drive, skip */
4076 if ((drvp->drive_flags & DRIVE) == 0)
4077 continue;
4078 if (drvp->drive_flags & DRIVE_UDMA) {
4079 /* use Ultra/DMA */
4080 drvp->drive_flags &= ~DRIVE_DMA;
4081 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4082 if (drvp->UDMA_mode > 2 && u100 == 0)
4083 drvp->UDMA_mode = 2;
4084 } else if (drvp->drive_flags & DRIVE_DMA) {
4085 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4086 }
4087 }
4088 	/* nothing to do to set up modes; the controller snoops the SET_FEATURES cmd */
4089 if (idedma_ctl != 0) {
4090 /* Add software bits in status register */
4091 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4092 IDEDMA_CTL, idedma_ctl);
4093 }
4094 pciide_print_modes(cp);
4095 }
4096
4097 int
4098 pdc202xx_pci_intr(arg)
4099 void *arg;
4100 {
4101 struct pciide_softc *sc = arg;
4102 struct pciide_channel *cp;
4103 struct channel_softc *wdc_cp;
4104 int i, rv, crv;
4105 u_int32_t scr;
4106
4107 rv = 0;
4108 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4109 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4110 cp = &sc->pciide_channels[i];
4111 wdc_cp = &cp->wdc_channel;
4112 		/* If a compat channel, skip. */
4113 if (cp->compat)
4114 continue;
4115 if (scr & PDC2xx_SCR_INT(i)) {
4116 crv = wdcintr(wdc_cp);
4117 if (crv == 0)
4118 printf("%s:%d: bogus intr (reg 0x%x)\n",
4119 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4120 else
4121 rv = 1;
4122 }
4123 }
4124 return rv;
4125 }
4126
4127 int
4128 pdc20265_pci_intr(arg)
4129 void *arg;
4130 {
4131 struct pciide_softc *sc = arg;
4132 struct pciide_channel *cp;
4133 struct channel_softc *wdc_cp;
4134 int i, rv, crv;
4135 u_int32_t dmastat;
4136
4137 rv = 0;
4138 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4139 cp = &sc->pciide_channels[i];
4140 wdc_cp = &cp->wdc_channel;
4141 		/* If a compat channel, skip. */
4142 if (cp->compat)
4143 continue;
4144 /*
4145 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4146 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops.
4147 		 * So use that instead (requires 2 register reads instead of 1,
4148 * but we can't do it another way).
4149 */
4150 dmastat = bus_space_read_1(sc->sc_dma_iot,
4151 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4152 if((dmastat & IDEDMA_CTL_INTR) == 0)
4153 continue;
4154 crv = wdcintr(wdc_cp);
4155 if (crv == 0)
4156 printf("%s:%d: bogus intr\n",
4157 sc->sc_wdcdev.sc_dev.dv_xname, i);
4158 else
4159 rv = 1;
4160 }
4161 return rv;
4162 }
4163
4164 void
4165 opti_chip_map(sc, pa)
4166 struct pciide_softc *sc;
4167 struct pci_attach_args *pa;
4168 {
4169 struct pciide_channel *cp;
4170 bus_size_t cmdsize, ctlsize;
4171 pcireg_t interface;
4172 u_int8_t init_ctrl;
4173 int channel;
4174
4175 if (pciide_chipen(sc, pa) == 0)
4176 return;
4177 printf("%s: bus-master DMA support present",
4178 sc->sc_wdcdev.sc_dev.dv_xname);
4179
4180 /*
4181 * XXXSCW:
4182 * There seem to be a couple of buggy revisions/implementations
4183 * of the OPTi pciide chipset. This kludge seems to fix one of
4184 * the reported problems (PR/11644) but still fails for the
4185 * other (PR/13151), although the latter may be due to other
4186 * issues too...
4187 */
4188 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4189 printf(" but disabled due to chip rev. <= 0x12");
4190 sc->sc_dma_ok = 0;
4191 } else
4192 pciide_mapreg_dma(sc, pa);
4193
4194 printf("\n");
4195
4196 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4197 WDC_CAPABILITY_MODE;
4198 sc->sc_wdcdev.PIO_cap = 4;
4199 if (sc->sc_dma_ok) {
4200 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4201 sc->sc_wdcdev.irqack = pciide_irqack;
4202 sc->sc_wdcdev.DMA_cap = 2;
4203 }
4204 sc->sc_wdcdev.set_modes = opti_setup_channel;
4205
4206 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4207 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4208
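	/*
	 * The INIT_CONTROL register is checked below to see whether the
	 * second channel has been left disabled.
	 */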
4209 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4210 OPTI_REG_INIT_CONTROL);
4211
4212 interface = PCI_INTERFACE(pa->pa_class);
4213
4214 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4215 cp = &sc->pciide_channels[channel];
4216 if (pciide_chansetup(sc, channel, interface) == 0)
4217 continue;
4218 if (channel == 1 &&
4219 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4220 printf("%s: %s channel ignored (disabled)\n",
4221 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4222 continue;
4223 }
4224 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4225 pciide_pci_intr);
4226 if (cp->hw_ok == 0)
4227 continue;
4228 pciide_map_compat_intr(pa, cp, channel, interface);
4229 if (cp->hw_ok == 0)
4230 continue;
4231 opti_setup_channel(&cp->wdc_channel);
4232 }
4233 }
4234
4235 void
4236 opti_setup_channel(chp)
4237 struct channel_softc *chp;
4238 {
4239 struct ata_drive_datas *drvp;
4240 struct pciide_channel *cp = (struct pciide_channel*)chp;
4241 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4242 int drive, spd;
4243 int mode[2];
4244 u_int8_t rv, mr;
4245
4246 /*
4247 * The `Delay' and `Address Setup Time' fields of the
4248 * Miscellaneous Register are always zero initially.
4249 */
4250 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4251 mr &= ~(OPTI_MISC_DELAY_MASK |
4252 OPTI_MISC_ADDR_SETUP_MASK |
4253 OPTI_MISC_INDEX_MASK);
4254
4255 /* Prime the control register before setting timing values */
4256 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4257
4258 	/* Determine the clock rate of the PCI bus the chip is attached to */
4259 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4260 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4261
4262 /* setup DMA if needed */
4263 pciide_channel_dma_setup(cp);
4264
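	/*
	 * First pass: pick an effective timing index per drive (multi-word
	 * DMA modes map to indices 5..7, PIO modes to 0..4) and make sure
	 * both drives end up with a compatible `Address Setup Time'.
	 */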
4265 for (drive = 0; drive < 2; drive++) {
4266 drvp = &chp->ch_drive[drive];
4267 /* If no drive, skip */
4268 if ((drvp->drive_flags & DRIVE) == 0) {
4269 mode[drive] = -1;
4270 continue;
4271 }
4272
4273 if ((drvp->drive_flags & DRIVE_DMA)) {
4274 /*
4275 * Timings will be used for both PIO and DMA,
4276 * so adjust DMA mode if needed
4277 */
4278 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4279 drvp->PIO_mode = drvp->DMA_mode + 2;
4280 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4281 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4282 drvp->PIO_mode - 2 : 0;
4283 if (drvp->DMA_mode == 0)
4284 drvp->PIO_mode = 0;
4285
4286 mode[drive] = drvp->DMA_mode + 5;
4287 } else
4288 mode[drive] = drvp->PIO_mode;
4289
4290 if (drive && mode[0] >= 0 &&
4291 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4292 /*
4293 * Can't have two drives using different values
4294 * for `Address Setup Time'.
4295 * Slow down the faster drive to compensate.
4296 */
4297 int d = (opti_tim_as[spd][mode[0]] >
4298 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4299
4300 mode[d] = mode[1-d];
4301 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4302 chp->ch_drive[d].DMA_mode = 0;
4303 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4304 }
4305 }
4306
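	/*
	 * Second pass: program the timing and enhanced-mode registers with
	 * the chosen modes.
	 */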
4307 for (drive = 0; drive < 2; drive++) {
4308 int m;
4309 if ((m = mode[drive]) < 0)
4310 continue;
4311
4312 /* Set the Address Setup Time and select appropriate index */
4313 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4314 rv |= OPTI_MISC_INDEX(drive);
4315 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4316
4317 /* Set the pulse width and recovery timing parameters */
4318 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4319 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4320 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4321 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4322
4323 /* Set the Enhanced Mode register appropriately */
4324 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4325 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4326 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4327 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4328 }
4329
4330 /* Finally, enable the timings */
4331 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4332
4333 pciide_print_modes(cp);
4334 }
4335
4336 #define ACARD_IS_850(sc) \
4337 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4338
4339 void
4340 acard_chip_map(sc, pa)
4341 struct pciide_softc *sc;
4342 struct pci_attach_args *pa;
4343 {
4344 struct pciide_channel *cp;
4345 int i;
4346 pcireg_t interface;
4347 bus_size_t cmdsize, ctlsize;
4348
4349 if (pciide_chipen(sc, pa) == 0)
4350 return;
4351
4352 /*
4353 	 * When the chip is in native mode it identifies itself as a
4354 	 * 'misc mass storage' device. Fake the interface in this case.
4355 */
4356 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4357 interface = PCI_INTERFACE(pa->pa_class);
4358 } else {
4359 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4360 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4361 }
4362
4363 printf("%s: bus-master DMA support present",
4364 sc->sc_wdcdev.sc_dev.dv_xname);
4365 pciide_mapreg_dma(sc, pa);
4366 printf("\n");
4367 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4368 WDC_CAPABILITY_MODE;
4369
4370 if (sc->sc_dma_ok) {
4371 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4372 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4373 sc->sc_wdcdev.irqack = pciide_irqack;
4374 }
4375 sc->sc_wdcdev.PIO_cap = 4;
4376 sc->sc_wdcdev.DMA_cap = 2;
4377 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4378
4379 sc->sc_wdcdev.set_modes = acard_setup_channel;
4380 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4381 sc->sc_wdcdev.nchannels = 2;
4382
4383 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4384 cp = &sc->pciide_channels[i];
4385 if (pciide_chansetup(sc, i, interface) == 0)
4386 continue;
4387 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4388 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4389 &ctlsize, pciide_pci_intr);
4390 } else {
4391 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4392 &cmdsize, &ctlsize);
4393 }
4394 if (cp->hw_ok == 0)
4395 return;
4396 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4397 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4398 wdcattach(&cp->wdc_channel);
4399 acard_setup_channel(&cp->wdc_channel);
4400 }
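	/*
	 * On the ATP86x parts (not the ATP850), clear ATP860_CTRL_INT in
	 * the control register.
	 */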
4401 if (!ACARD_IS_850(sc)) {
4402 u_int32_t reg;
4403 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4404 reg &= ~ATP860_CTRL_INT;
4405 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4406 }
4407 }
4408
4409 void
4410 acard_setup_channel(chp)
4411 struct channel_softc *chp;
4412 {
4413 struct ata_drive_datas *drvp;
4414 struct pciide_channel *cp = (struct pciide_channel*)chp;
4415 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4416 int channel = chp->channel;
4417 int drive;
4418 u_int32_t idetime, udma_mode;
4419 u_int32_t idedma_ctl;
4420
4421 /* setup DMA if needed */
4422 pciide_channel_dma_setup(cp);
4423
4424 if (ACARD_IS_850(sc)) {
4425 idetime = 0;
4426 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4427 udma_mode &= ~ATP850_UDMA_MASK(channel);
4428 } else {
4429 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4430 idetime &= ~ATP860_SETTIME_MASK(channel);
4431 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4432 udma_mode &= ~ATP860_UDMA_MASK(channel);
4433
4434 		/* check for an 80-pin cable */
4435 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4436 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4437 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4438 & ATP860_CTRL_80P(chp->channel)) {
4439 if (chp->ch_drive[0].UDMA_mode > 2)
4440 chp->ch_drive[0].UDMA_mode = 2;
4441 if (chp->ch_drive[1].UDMA_mode > 2)
4442 chp->ch_drive[1].UDMA_mode = 2;
4443 }
4444 }
4445 }
4446
4447 idedma_ctl = 0;
4448
4449 /* Per drive settings */
4450 for (drive = 0; drive < 2; drive++) {
4451 drvp = &chp->ch_drive[drive];
4452 /* If no drive, skip */
4453 if ((drvp->drive_flags & DRIVE) == 0)
4454 continue;
4455 /* add timing values, setup DMA if needed */
4456 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4457 (drvp->drive_flags & DRIVE_UDMA)) {
4458 /* use Ultra/DMA */
4459 if (ACARD_IS_850(sc)) {
4460 idetime |= ATP850_SETTIME(drive,
4461 acard_act_udma[drvp->UDMA_mode],
4462 acard_rec_udma[drvp->UDMA_mode]);
4463 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4464 acard_udma_conf[drvp->UDMA_mode]);
4465 } else {
4466 idetime |= ATP860_SETTIME(channel, drive,
4467 acard_act_udma[drvp->UDMA_mode],
4468 acard_rec_udma[drvp->UDMA_mode]);
4469 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4470 acard_udma_conf[drvp->UDMA_mode]);
4471 }
4472 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4473 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4474 (drvp->drive_flags & DRIVE_DMA)) {
4475 /* use Multiword DMA */
4476 drvp->drive_flags &= ~DRIVE_UDMA;
4477 if (ACARD_IS_850(sc)) {
4478 idetime |= ATP850_SETTIME(drive,
4479 acard_act_dma[drvp->DMA_mode],
4480 acard_rec_dma[drvp->DMA_mode]);
4481 } else {
4482 idetime |= ATP860_SETTIME(channel, drive,
4483 acard_act_dma[drvp->DMA_mode],
4484 acard_rec_dma[drvp->DMA_mode]);
4485 }
4486 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4487 } else {
4488 /* PIO only */
4489 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4490 if (ACARD_IS_850(sc)) {
4491 idetime |= ATP850_SETTIME(drive,
4492 acard_act_pio[drvp->PIO_mode],
4493 acard_rec_pio[drvp->PIO_mode]);
4494 } else {
4495 idetime |= ATP860_SETTIME(channel, drive,
4496 acard_act_pio[drvp->PIO_mode],
4497 acard_rec_pio[drvp->PIO_mode]);
4498 }
4499 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4500 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4501 | ATP8x0_CTRL_EN(channel));
4502 }
4503 }
4504
4505 if (idedma_ctl != 0) {
4506 /* Add software bits in status register */
4507 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4508 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4509 }
4510 pciide_print_modes(cp);
4511
4512 if (ACARD_IS_850(sc)) {
4513 pci_conf_write(sc->sc_pc, sc->sc_tag,
4514 ATP850_IDETIME(channel), idetime);
4515 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4516 } else {
4517 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4518 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4519 }
4520 }
4521
4522 int
4523 acard_pci_intr(arg)
4524 void *arg;
4525 {
4526 struct pciide_softc *sc = arg;
4527 struct pciide_channel *cp;
4528 struct channel_softc *wdc_cp;
4529 int rv = 0;
4530 int dmastat, i, crv;
4531
4532 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4533 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4534 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4535 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4536 continue;
4537 cp = &sc->pciide_channels[i];
4538 wdc_cp = &cp->wdc_channel;
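		/*
		 * If the channel was not waiting for an interrupt, run
		 * wdcintr() anyway and ack the DMA status, but do not
		 * claim the interrupt.
		 */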
4539 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4540 (void)wdcintr(wdc_cp);
4541 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4542 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4543 continue;
4544 }
4545 crv = wdcintr(wdc_cp);
4546 if (crv == 0)
4547 printf("%s:%d: bogus intr\n",
4548 sc->sc_wdcdev.sc_dev.dv_xname, i);
4549 else if (crv == 1)
4550 rv = 1;
4551 else if (rv == 0)
4552 rv = crv;
4553 }
4554 return rv;
4555 }
4556
4557 static int
4558 sl82c105_bugchk(struct pci_attach_args *pa)
4559 {
4560
4561 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4562 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4563 return (0);
4564
4565 if (PCI_REVISION(pa->pa_class) <= 0x05)
4566 return (1);
4567
4568 return (0);
4569 }
4570
4571 void
4572 sl82c105_chip_map(sc, pa)
4573 struct pciide_softc *sc;
4574 struct pci_attach_args *pa;
4575 {
4576 struct pciide_channel *cp;
4577 bus_size_t cmdsize, ctlsize;
4578 pcireg_t interface, idecr;
4579 int channel;
4580
4581 if (pciide_chipen(sc, pa) == 0)
4582 return;
4583
4584 printf("%s: bus-master DMA support present",
4585 sc->sc_wdcdev.sc_dev.dv_xname);
4586
4587 /*
4588 * Check to see if we're part of the Winbond 83c553 Southbridge.
4589 * If so, we need to disable DMA on rev. <= 5 of that chip.
4590 */
4591 if (pci_find_device(pa, sl82c105_bugchk)) {
4592 printf(" but disabled due to 83c553 rev. <= 0x05");
4593 sc->sc_dma_ok = 0;
4594 } else
4595 pciide_mapreg_dma(sc, pa);
4596 printf("\n");
4597
4598 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4599 WDC_CAPABILITY_MODE;
4600 sc->sc_wdcdev.PIO_cap = 4;
4601 if (sc->sc_dma_ok) {
4602 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4603 sc->sc_wdcdev.irqack = pciide_irqack;
4604 sc->sc_wdcdev.DMA_cap = 2;
4605 }
4606 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4607
4608 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4609 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4610
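	/* IDECSR tells us which of the two channels are enabled */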
4611 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4612
4613 interface = PCI_INTERFACE(pa->pa_class);
4614
4615 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4616 cp = &sc->pciide_channels[channel];
4617 if (pciide_chansetup(sc, channel, interface) == 0)
4618 continue;
4619 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4620 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4621 printf("%s: %s channel ignored (disabled)\n",
4622 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4623 continue;
4624 }
4625 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4626 pciide_pci_intr);
4627 if (cp->hw_ok == 0)
4628 continue;
4629 pciide_map_compat_intr(pa, cp, channel, interface);
4630 if (cp->hw_ok == 0)
4631 continue;
4632 sl82c105_setup_channel(&cp->wdc_channel);
4633 }
4634 }
4635
4636 void
4637 sl82c105_setup_channel(chp)
4638 struct channel_softc *chp;
4639 {
4640 struct ata_drive_datas *drvp;
4641 struct pciide_channel *cp = (struct pciide_channel*)chp;
4642 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4643 int pxdx_reg, drive;
4644 pcireg_t pxdx;
4645
4646 /* Set up DMA if needed. */
4647 pciide_channel_dma_setup(cp);
4648
4649 for (drive = 0; drive < 2; drive++) {
4650 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4651 : SYMPH_P1D0CR) + (drive * 4);
4652
4653 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4654
4655 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4656 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4657
4658 drvp = &chp->ch_drive[drive];
4659 /* If no drive, skip. */
4660 if ((drvp->drive_flags & DRIVE) == 0) {
4661 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4662 continue;
4663 }
4664
4665 if (drvp->drive_flags & DRIVE_DMA) {
4666 /*
4667 * Timings will be used for both PIO and DMA,
4668 * so adjust DMA mode if needed.
4669 */
4670 if (drvp->PIO_mode >= 3) {
4671 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4672 drvp->DMA_mode = drvp->PIO_mode - 2;
4673 if (drvp->DMA_mode < 1) {
4674 /*
4675 * Can't mix both PIO and DMA.
4676 * Disable DMA.
4677 */
4678 drvp->drive_flags &= ~DRIVE_DMA;
4679 }
4680 } else {
4681 /*
4682 * Can't mix both PIO and DMA. Disable
4683 * DMA.
4684 */
4685 drvp->drive_flags &= ~DRIVE_DMA;
4686 }
4687 }
4688
4689 if (drvp->drive_flags & DRIVE_DMA) {
4690 /* Use multi-word DMA. */
4691 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4692 PxDx_CMD_ON_SHIFT;
4693 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4694 } else {
4695 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4696 PxDx_CMD_ON_SHIFT;
4697 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4698 }
4699
4700 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4701
4702 /* ...and set the mode for this drive. */
4703 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4704 }
4705
4706 pciide_print_modes(cp);
4707 }
4708
4709 void
4710 serverworks_chip_map(sc, pa)
4711 struct pciide_softc *sc;
4712 struct pci_attach_args *pa;
4713 {
4714 struct pciide_channel *cp;
4715 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4716 pcitag_t pcib_tag;
4717 int channel;
4718 bus_size_t cmdsize, ctlsize;
4719
4720 if (pciide_chipen(sc, pa) == 0)
4721 return;
4722
4723 printf("%s: bus-master DMA support present",
4724 sc->sc_wdcdev.sc_dev.dv_xname);
4725 pciide_mapreg_dma(sc, pa);
4726 printf("\n");
4727 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4728 WDC_CAPABILITY_MODE;
4729
4730 if (sc->sc_dma_ok) {
4731 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4732 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4733 sc->sc_wdcdev.irqack = pciide_irqack;
4734 }
4735 sc->sc_wdcdev.PIO_cap = 4;
4736 sc->sc_wdcdev.DMA_cap = 2;
4737 switch (sc->sc_pp->ide_product) {
4738 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4739 sc->sc_wdcdev.UDMA_cap = 2;
4740 break;
4741 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4742 if (PCI_REVISION(pa->pa_class) < 0x92)
4743 sc->sc_wdcdev.UDMA_cap = 4;
4744 else
4745 sc->sc_wdcdev.UDMA_cap = 5;
4746 break;
4747 }
4748
4749 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4750 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4751 sc->sc_wdcdev.nchannels = 2;
4752
4753 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4754 cp = &sc->pciide_channels[channel];
4755 if (pciide_chansetup(sc, channel, interface) == 0)
4756 continue;
4757 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4758 serverworks_pci_intr);
4759 if (cp->hw_ok == 0)
4760 return;
4761 pciide_map_compat_intr(pa, cp, channel, interface);
4762 if (cp->hw_ok == 0)
4763 return;
4764 serverworks_setup_channel(&cp->wdc_channel);
4765 }
4766
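	/*
	 * Final tweak to a register at offset 0x64 in PCI function 0 of the
	 * same device (clear bit 13, set bit 14); the meaning of these bits
	 * is not documented here.
	 */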
4767 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4768 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4769 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4770 }
4771
4772 void
4773 serverworks_setup_channel(chp)
4774 struct channel_softc *chp;
4775 {
4776 struct ata_drive_datas *drvp;
4777 struct pciide_channel *cp = (struct pciide_channel*)chp;
4778 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4779 int channel = chp->channel;
4780 int drive, unit;
4781 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4782 u_int32_t idedma_ctl;
4783 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4784 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4785
4786 /* setup DMA if needed */
4787 pciide_channel_dma_setup(cp);
4788
4789 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4790 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4791 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4792 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4793
4794 pio_time &= ~(0xffff << (16 * channel));
4795 dma_time &= ~(0xffff << (16 * channel));
4796 pio_mode &= ~(0xff << (8 * channel + 16));
4797 udma_mode &= ~(0xff << (8 * channel + 16));
4798 udma_mode &= ~(3 << (2 * channel));
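	/*
	 * The masks above clear this channel's PIO/DMA timing bytes and its
	 * UDMA mode/enable bits; the per-drive loop below fills them back in.
	 */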
4799
4800 idedma_ctl = 0;
4801
4802 /* Per drive settings */
4803 for (drive = 0; drive < 2; drive++) {
4804 drvp = &chp->ch_drive[drive];
4805 /* If no drive, skip */
4806 if ((drvp->drive_flags & DRIVE) == 0)
4807 continue;
4808 unit = drive + 2 * channel;
4809 /* add timing values, setup DMA if needed */
4810 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4811 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4812 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4813 (drvp->drive_flags & DRIVE_UDMA)) {
4814 /* use Ultra/DMA, check for 80-pin cable */
4815 if (drvp->UDMA_mode > 2 &&
4816 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4817 drvp->UDMA_mode = 2;
4818 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4819 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4820 udma_mode |= 1 << unit;
4821 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4822 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4823 (drvp->drive_flags & DRIVE_DMA)) {
4824 /* use Multiword DMA */
4825 drvp->drive_flags &= ~DRIVE_UDMA;
4826 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4827 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4828 } else {
4829 /* PIO only */
4830 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4831 }
4832 }
4833
4834 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4835 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4836 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4837 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4838 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4839
4840 if (idedma_ctl != 0) {
4841 /* Add software bits in status register */
4842 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4843 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4844 }
4845 pciide_print_modes(cp);
4846 }
4847
4848 int
4849 serverworks_pci_intr(arg)
4850 void *arg;
4851 {
4852 struct pciide_softc *sc = arg;
4853 struct pciide_channel *cp;
4854 struct channel_softc *wdc_cp;
4855 int rv = 0;
4856 int dmastat, i, crv;
4857
4858 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4859 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4860 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4861 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4862 IDEDMA_CTL_INTR)
4863 continue;
4864 cp = &sc->pciide_channels[i];
4865 wdc_cp = &cp->wdc_channel;
4866 crv = wdcintr(wdc_cp);
4867 if (crv == 0) {
4868 printf("%s:%d: bogus intr\n",
4869 sc->sc_wdcdev.sc_dev.dv_xname, i);
4870 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4871 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4872 } else
4873 rv = 1;
4874 }
4875 return rv;
4876 }
4877