/* $NetBSD: pciide.c,v 1.176 2002/12/26 20:54:03 matt Exp $ */


/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Manuel Bouyer.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Christopher G. Demetriou
 *    for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.176 2002/12/26 20:54:03 matt Exp $");

#ifndef WDCDEBUG
#define WDCDEBUG
#endif

#define DEBUG_DMA    0x01
#define DEBUG_XFERS  0x02
#define DEBUG_FUNCS  0x08
#define DEBUG_PROBE  0x10
#ifdef WDCDEBUG
int wdcdebug_pciide_mask = 0;
#define WDCDEBUG_PRINT(args, level) \
    if (wdcdebug_pciide_mask & (level)) printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/pciide_acard_reg.h>
#include <dev/pci/pciide_sl82c105_reg.h>
#include <dev/pci/cy82c693var.h>

#include "opt_pciide.h"

/* inlines for reading/writing 8-bit PCI registers */
static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
    int));
static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
    int, u_int8_t));

static __inline u_int8_t
pciide_pci_read(pc, pa, reg)
    pci_chipset_tag_t pc;
    pcitag_t pa;
    int reg;
{

    return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
        ((reg & 0x03) * 8) & 0xff);
}

static __inline void
pciide_pci_write(pc, pa, reg, val)
    pci_chipset_tag_t pc;
    pcitag_t pa;
    int reg;
    u_int8_t val;
{
    pcireg_t pcival;

    pcival = pci_conf_read(pc, pa, (reg & ~0x03));
    pcival &= ~(0xff << ((reg & 0x03) * 8));
    pcival |= (val << ((reg & 0x03) * 8));
    pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}

void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));

void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void piix_setup_channel __P((struct channel_softc*));
void piix3_4_setup_channel __P((struct channel_softc*));
static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));

void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void amd7x6_setup_channel __P((struct channel_softc*));

void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void apollo_setup_channel __P((struct channel_softc*));

void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd0643_9_setup_channel __P((struct channel_softc*));
void cmd_channel_map __P((struct pci_attach_args *,
    struct pciide_softc *, int));
int cmd_pci_intr __P((void *));
void cmd646_9_irqack __P((struct channel_softc *));
void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cmd680_setup_channel __P((struct channel_softc*));
void cmd680_channel_map __P((struct pci_attach_args *,
    struct pciide_softc *, int));

void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void cy693_setup_channel __P((struct channel_softc*));

void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void sis_setup_channel __P((struct channel_softc*));
static int sis_hostbr_match __P((struct pci_attach_args *));

void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void acer_setup_channel __P((struct channel_softc*));
int acer_pci_intr __P((void *));

void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void pdc202xx_setup_channel __P((struct channel_softc*));
void pdc20268_setup_channel __P((struct channel_softc*));
int pdc202xx_pci_intr __P((void *));
int pdc20265_pci_intr __P((void *));

void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void opti_setup_channel __P((struct channel_softc*));

void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void hpt_setup_channel __P((struct channel_softc*));
int hpt_pci_intr __P((void *));

void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void acard_setup_channel __P((struct channel_softc*));
int acard_pci_intr __P((void *));

void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void serverworks_setup_channel __P((struct channel_softc*));
int serverworks_pci_intr __P((void *));

void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
void sl82c105_setup_channel __P((struct channel_softc*));

void pciide_channel_dma_setup __P((struct pciide_channel *));
int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
int pciide_dma_init __P((void*, int, int, void *, size_t, int));
void pciide_dma_start __P((void*, int, int));
int pciide_dma_finish __P((void*, int, int, int));
void pciide_irqack __P((struct channel_softc *));
void pciide_print_modes __P((struct pciide_channel *));

struct pciide_product_desc {
    u_int32_t ide_product;
    int ide_flags;
    const char *ide_name;
    /* map and setup chip, probe drives */
    void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
};

/* Flags for ide_flags */
#define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
#define IDE_16BIT_IOSPACE      0x0002 /* I/O space BARS ignore upper word */

/* Default product description for devices not known from this controller */
const struct pciide_product_desc default_product_desc = {
    0,
    0,
    "Generic PCI IDE controller",
    default_chip_map,
};

const struct pciide_product_desc pciide_intel_products[] = {
    { PCI_PRODUCT_INTEL_82092AA,
      0,
      "Intel 82092AA IDE controller",
      default_chip_map,
    },
    { PCI_PRODUCT_INTEL_82371FB_IDE,
      0,
      "Intel 82371FB IDE controller (PIIX)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82371SB_IDE,
      0,
      "Intel 82371SB IDE Interface (PIIX3)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82371AB_IDE,
      0,
      "Intel 82371AB IDE controller (PIIX4)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82440MX_IDE,
      0,
      "Intel 82440MX IDE controller",
      piix_chip_map
    },
    { PCI_PRODUCT_INTEL_82801AA_IDE,
      0,
      "Intel 82801AA IDE Controller (ICH)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82801AB_IDE,
      0,
      "Intel 82801AB IDE Controller (ICH0)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82801BA_IDE,
      0,
      "Intel 82801BA IDE Controller (ICH2)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82801BAM_IDE,
      0,
      "Intel 82801BAM IDE Controller (ICH2)",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82801CA_IDE_1,
      0,
      "Intel 82801CA IDE Controller",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82801CA_IDE_2,
      0,
      "Intel 82801CA IDE Controller",
      piix_chip_map,
    },
    { PCI_PRODUCT_INTEL_82801DB_IDE,
      0,
      "Intel 82801DB IDE Controller (ICH4)",
      piix_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_amd_products[] = {
    { PCI_PRODUCT_AMD_PBC756_IDE,
      0,
      "Advanced Micro Devices AMD756 IDE Controller",
      amd7x6_chip_map
    },
    { PCI_PRODUCT_AMD_PBC766_IDE,
      0,
      "Advanced Micro Devices AMD766 IDE Controller",
      amd7x6_chip_map
    },
    { PCI_PRODUCT_AMD_PBC768_IDE,
      0,
      "Advanced Micro Devices AMD768 IDE Controller",
      amd7x6_chip_map
    },
    { PCI_PRODUCT_AMD_PBC8111_IDE,
      0,
      "Advanced Micro Devices AMD8111 IDE Controller",
      amd7x6_chip_map
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_cmd_products[] = {
    { PCI_PRODUCT_CMDTECH_640,
      0,
      "CMD Technology PCI0640",
      cmd_chip_map
    },
    { PCI_PRODUCT_CMDTECH_643,
      0,
      "CMD Technology PCI0643",
      cmd0643_9_chip_map,
    },
    { PCI_PRODUCT_CMDTECH_646,
      0,
      "CMD Technology PCI0646",
      cmd0643_9_chip_map,
    },
    { PCI_PRODUCT_CMDTECH_648,
      IDE_PCI_CLASS_OVERRIDE,
      "CMD Technology PCI0648",
      cmd0643_9_chip_map,
    },
    { PCI_PRODUCT_CMDTECH_649,
      IDE_PCI_CLASS_OVERRIDE,
      "CMD Technology PCI0649",
      cmd0643_9_chip_map,
    },
    { PCI_PRODUCT_CMDTECH_680,
      IDE_PCI_CLASS_OVERRIDE,
      "Silicon Image 0680",
      cmd680_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_via_products[] = {
    { PCI_PRODUCT_VIATECH_VT82C586_IDE,
      0,
      NULL,
      apollo_chip_map,
    },
    { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
      0,
      NULL,
      apollo_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_cypress_products[] = {
    { PCI_PRODUCT_CONTAQ_82C693,
      IDE_16BIT_IOSPACE,
      "Cypress 82C693 IDE Controller",
      cy693_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_sis_products[] = {
    { PCI_PRODUCT_SIS_5597_IDE,
      0,
      "Silicon Integrated System 5597/5598 IDE controller",
      sis_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_acer_products[] = {
    { PCI_PRODUCT_ALI_M5229,
      0,
      "Acer Labs M5229 UDMA IDE Controller",
      acer_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_promise_products[] = {
    { PCI_PRODUCT_PROMISE_ULTRA33,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra33/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA66,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra66/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA100,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra100/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA100X,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra100/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA100TX2,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA133,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra133/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA133TX2,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
      IDE_PCI_CLASS_OVERRIDE,
      "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
      pdc202xx_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_opti_products[] = {
    { PCI_PRODUCT_OPTI_82C621,
      0,
      "OPTi 82c621 PCI IDE controller",
      opti_chip_map,
    },
    { PCI_PRODUCT_OPTI_82C568,
      0,
      "OPTi 82c568 (82c621 compatible) PCI IDE controller",
      opti_chip_map,
    },
    { PCI_PRODUCT_OPTI_82D568,
      0,
      "OPTi 82d568 (82c621 compatible) PCI IDE controller",
      opti_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_triones_products[] = {
    { PCI_PRODUCT_TRIONES_HPT366,
      IDE_PCI_CLASS_OVERRIDE,
      NULL,
      hpt_chip_map,
    },
    { PCI_PRODUCT_TRIONES_HPT372,
      IDE_PCI_CLASS_OVERRIDE,
      NULL,
      hpt_chip_map
    },
    { PCI_PRODUCT_TRIONES_HPT374,
      IDE_PCI_CLASS_OVERRIDE,
      NULL,
      hpt_chip_map
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_acard_products[] = {
    { PCI_PRODUCT_ACARD_ATP850U,
      IDE_PCI_CLASS_OVERRIDE,
      "Acard ATP850U Ultra33 IDE Controller",
      acard_chip_map,
    },
    { PCI_PRODUCT_ACARD_ATP860,
      IDE_PCI_CLASS_OVERRIDE,
      "Acard ATP860 Ultra66 IDE Controller",
      acard_chip_map,
    },
    { PCI_PRODUCT_ACARD_ATP860A,
      IDE_PCI_CLASS_OVERRIDE,
      "Acard ATP860-A Ultra66 IDE Controller",
      acard_chip_map,
    },
    { 0,
      0,
      NULL,
      NULL
    }
};

const struct pciide_product_desc pciide_serverworks_products[] = {
    { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
      0,
      "ServerWorks OSB4 IDE Controller",
      serverworks_chip_map,
    },
    { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
      0,
      "ServerWorks CSB5 IDE Controller",
      serverworks_chip_map,
    },
    { 0,
      0,
      NULL,
    }
};

const struct pciide_product_desc pciide_symphony_products[] = {
    { PCI_PRODUCT_SYMPHONY_82C105,
      0,
      "Symphony Labs 82C105 IDE controller",
      sl82c105_chip_map,
    },
    { 0,
      0,
      NULL,
    }
};

const struct pciide_product_desc pciide_winbond_products[] = {
    { PCI_PRODUCT_WINBOND_W83C553F_1,
      0,
      "Winbond W83C553F IDE controller",
      sl82c105_chip_map,
    },
    { 0,
      0,
      NULL,
    }
};

struct pciide_vendor_desc {
    u_int32_t ide_vendor;
    const struct pciide_product_desc *ide_products;
};

const struct pciide_vendor_desc pciide_vendors[] = {
    { PCI_VENDOR_INTEL, pciide_intel_products },
    { PCI_VENDOR_CMDTECH, pciide_cmd_products },
    { PCI_VENDOR_VIATECH, pciide_via_products },
    { PCI_VENDOR_CONTAQ, pciide_cypress_products },
    { PCI_VENDOR_SIS, pciide_sis_products },
    { PCI_VENDOR_ALI, pciide_acer_products },
    { PCI_VENDOR_PROMISE, pciide_promise_products },
    { PCI_VENDOR_AMD, pciide_amd_products },
    { PCI_VENDOR_OPTI, pciide_opti_products },
    { PCI_VENDOR_TRIONES, pciide_triones_products },
    { PCI_VENDOR_ACARD, pciide_acard_products },
    { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
    { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
    { PCI_VENDOR_WINBOND, pciide_winbond_products },
    { 0, NULL }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA   0x01
#define PCIIDE_OPTIONS_NODMA 0x02

int pciide_match __P((struct device *, struct cfdata *, void *));
void pciide_attach __P((struct device *, struct device *, void *));

CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
    pciide_match, pciide_attach, NULL, NULL);

int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
int pciide_mapregs_compat __P((struct pci_attach_args *,
    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
int pciide_mapregs_native __P((struct pci_attach_args *,
    struct pciide_channel *, bus_size_t *, bus_size_t *,
    int (*pci_intr) __P((void *))));
void pciide_mapreg_dma __P((struct pciide_softc *,
    struct pci_attach_args *));
int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
void pciide_mapchan __P((struct pci_attach_args *,
    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
    int (*pci_intr) __P((void *))));
int pciide_chan_candisable __P((struct pciide_channel *));
void pciide_map_compat_intr __P((struct pci_attach_args *,
    struct pciide_channel *, int, int));
int pciide_compat_intr __P((void *));
int pciide_pci_intr __P((void *));
const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));

const struct pciide_product_desc *
pciide_lookup_product(id)
    u_int32_t id;
{
    const struct pciide_product_desc *pp;
    const struct pciide_vendor_desc *vp;

    for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
        if (PCI_VENDOR(id) == vp->ide_vendor)
            break;

    if ((pp = vp->ide_products) == NULL)
        return NULL;

    for (; pp->chip_map != NULL; pp++)
        if (PCI_PRODUCT(id) == pp->ide_product)
            break;

    if (pp->chip_map == NULL)
        return NULL;
    return pp;
}

int
pciide_match(parent, match, aux)
    struct device *parent;
    struct cfdata *match;
    void *aux;
{
    struct pci_attach_args *pa = aux;
    const struct pciide_product_desc *pp;

    /*
     * Check the ID register to see that it's a PCI IDE controller.
     * If it is, we assume that we can deal with it; it _should_
     * work in a standardized way...
     */
    if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
        PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
        return (1);
    }

    /*
     * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
     * controllers. Let's see if we can deal with them anyway.
     */
    pp = pciide_lookup_product(pa->pa_id);
    if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
        return (1);
    }

    return (0);
}

void
pciide_attach(parent, self, aux)
    struct device *parent, *self;
    void *aux;
{
    struct pci_attach_args *pa = aux;
    pci_chipset_tag_t pc = pa->pa_pc;
    pcitag_t tag = pa->pa_tag;
    struct pciide_softc *sc = (struct pciide_softc *)self;
    pcireg_t csr;
    char devinfo[256];
    const char *displaydev;

    sc->sc_pp = pciide_lookup_product(pa->pa_id);
    if (sc->sc_pp == NULL) {
        sc->sc_pp = &default_product_desc;
        pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
        displaydev = devinfo;
    } else
        displaydev = sc->sc_pp->ide_name;

    /* if displaydev == NULL, printf is done in chip-specific map */
    if (displaydev)
        printf(": %s (rev. 0x%02x)\n", displaydev,
            PCI_REVISION(pa->pa_class));

    sc->sc_pc = pa->pa_pc;
    sc->sc_tag = pa->pa_tag;
#ifdef WDCDEBUG
    if (wdcdebug_pciide_mask & DEBUG_PROBE)
        pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
#endif
    sc->sc_pp->chip_map(sc, pa);

    if (sc->sc_dma_ok) {
        csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
        csr |= PCI_COMMAND_MASTER_ENABLE;
        pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
    }
    WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
        pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
}

/* tell whether the chip is enabled or not */
int
pciide_chipen(sc, pa)
    struct pciide_softc *sc;
    struct pci_attach_args *pa;
{
    pcireg_t csr;
    if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
        csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
            PCI_COMMAND_STATUS_REG);
        printf("%s: device disabled (at %s)\n",
            sc->sc_wdcdev.sc_dev.dv_xname,
            (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
            "device" : "bridge");
        return 0;
    }
    return 1;
}

int
pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
    struct pci_attach_args *pa;
    struct pciide_channel *cp;
    int compatchan;
    bus_size_t *cmdsizep, *ctlsizep;
{
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct channel_softc *wdc_cp = &cp->wdc_channel;

    cp->compat = 1;
    *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
    *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;

    wdc_cp->cmd_iot = pa->pa_iot;
    if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
        PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
        printf("%s: couldn't map %s channel cmd regs\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        return (0);
    }

    wdc_cp->ctl_iot = pa->pa_iot;
    if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
        PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
        printf("%s: couldn't map %s channel ctl regs\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
            PCIIDE_COMPAT_CMD_SIZE);
        return (0);
    }

    return (1);
}

int
pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
    struct pci_attach_args *pa;
    struct pciide_channel *cp;
    bus_size_t *cmdsizep, *ctlsizep;
    int (*pci_intr) __P((void *));
{
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct channel_softc *wdc_cp = &cp->wdc_channel;
    const char *intrstr;
    pci_intr_handle_t intrhandle;

    cp->compat = 0;

    if (sc->sc_pci_ih == NULL) {
        if (pci_intr_map(pa, &intrhandle) != 0) {
            printf("%s: couldn't map native-PCI interrupt\n",
                sc->sc_wdcdev.sc_dev.dv_xname);
            return 0;
        }
        intrstr = pci_intr_string(pa->pa_pc, intrhandle);
        sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
            intrhandle, IPL_BIO, pci_intr, sc);
        if (sc->sc_pci_ih != NULL) {
            printf("%s: using %s for native-PCI interrupt\n",
                sc->sc_wdcdev.sc_dev.dv_xname,
                intrstr ? intrstr : "unknown interrupt");
        } else {
            printf("%s: couldn't establish native-PCI interrupt",
                sc->sc_wdcdev.sc_dev.dv_xname);
            if (intrstr != NULL)
                printf(" at %s", intrstr);
            printf("\n");
            return 0;
        }
    }
    cp->ih = sc->sc_pci_ih;
    if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
        PCI_MAPREG_TYPE_IO, 0,
        &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
        printf("%s: couldn't map %s channel cmd regs\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        return 0;
    }

    if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
        PCI_MAPREG_TYPE_IO, 0,
        &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
        printf("%s: couldn't map %s channel ctl regs\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
        return 0;
    }
    /*
     * In native mode, 4 bytes of I/O space are mapped for the control
     * register; the control register is at offset 2.  Pass the generic
     * code a handle for only one byte at the right offset.
     */
    if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
        &wdc_cp->ctl_ioh) != 0) {
        printf("%s: unable to subregion %s channel ctl regs\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
        bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
        return 0;
    }
    return (1);
}

void
pciide_mapreg_dma(sc, pa)
    struct pciide_softc *sc;
    struct pci_attach_args *pa;
{
    pcireg_t maptype;
    bus_addr_t addr;

    /*
     * Map DMA registers
     *
     * Note that sc_dma_ok is the right variable to test to see if
     * DMA can be done.  If the interface doesn't support DMA,
     * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
     * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
     * non-zero if the interface supports DMA and the registers
     * could be mapped.
     *
     * XXX Note that despite the fact that the Bus Master IDE specs
     * XXX say that "The bus master IDE function uses 16 bytes of IO
     * XXX space," some controllers (at least the United
     * XXX Microelectronics UM8886BF) place it in memory space.
     */
    maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
        PCIIDE_REG_BUS_MASTER_DMA);

    switch (maptype) {
    case PCI_MAPREG_TYPE_IO:
        sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
            PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
            &addr, NULL, NULL) == 0);
        if (sc->sc_dma_ok == 0) {
            printf(", but unused (couldn't query registers)");
            break;
        }
        if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
            && addr >= 0x10000) {
            sc->sc_dma_ok = 0;
            printf(", but unused (registers at unsafe address "
                "%#lx)", (unsigned long)addr);
            break;
        }
        /* FALLTHROUGH */

    case PCI_MAPREG_MEM_TYPE_32BIT:
        sc->sc_dma_ok = (pci_mapreg_map(pa,
            PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
            &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
        sc->sc_dmat = pa->pa_dmat;
        if (sc->sc_dma_ok == 0) {
            printf(", but unused (couldn't map registers)");
        } else {
            sc->sc_wdcdev.dma_arg = sc;
            sc->sc_wdcdev.dma_init = pciide_dma_init;
            sc->sc_wdcdev.dma_start = pciide_dma_start;
            sc->sc_wdcdev.dma_finish = pciide_dma_finish;
        }

        if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
            PCIIDE_OPTIONS_NODMA) {
            printf(", but unused (forced off by config file)");
            sc->sc_dma_ok = 0;
        }
        break;

    default:
        sc->sc_dma_ok = 0;
        printf(", but unsupported register maptype (0x%x)", maptype);
    }
}

int
pciide_compat_intr(arg)
    void *arg;
{
    struct pciide_channel *cp = arg;

#ifdef DIAGNOSTIC
    /* should only be called for a compat channel */
    if (cp->compat == 0)
        panic("pciide compat intr called for non-compat chan %p", cp);
#endif
    return (wdcintr(&cp->wdc_channel));
}

int
pciide_pci_intr(arg)
    void *arg;
{
    struct pciide_softc *sc = arg;
    struct pciide_channel *cp;
    struct channel_softc *wdc_cp;
    int i, rv, crv;

    rv = 0;
    for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
        cp = &sc->pciide_channels[i];
        wdc_cp = &cp->wdc_channel;

        /* If this is a compat channel, skip it. */
        if (cp->compat)
            continue;
        /* if this channel not waiting for intr, skip */
        if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
            continue;

        crv = wdcintr(wdc_cp);
        if (crv == 0)
            ;               /* leave rv alone */
        else if (crv == 1)
            rv = 1;         /* claim the intr */
        else if (rv == 0)   /* crv should be -1 in this case */
            rv = crv;       /* if we've done no better, take it */
    }
    return (rv);
}

void
pciide_channel_dma_setup(cp)
    struct pciide_channel *cp;
{
    int drive;
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct ata_drive_datas *drvp;

    for (drive = 0; drive < 2; drive++) {
        drvp = &cp->wdc_channel.ch_drive[drive];
        /* If no drive, skip */
        if ((drvp->drive_flags & DRIVE) == 0)
            continue;
        /* setup DMA if needed */
        if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
            (drvp->drive_flags & DRIVE_UDMA) == 0) ||
            sc->sc_dma_ok == 0) {
            drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
            continue;
        }
        if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
            != 0) {
            /* Abort DMA setup */
            drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
            continue;
        }
    }
}

int
pciide_dma_table_setup(sc, channel, drive)
    struct pciide_softc *sc;
    int channel, drive;
{
    bus_dma_segment_t seg;
    int error, rseg;
    const bus_size_t dma_table_size =
        sizeof(struct idedma_table) * NIDEDMA_TABLES;
    struct pciide_dma_maps *dma_maps =
        &sc->pciide_channels[channel].dma_maps[drive];

    /* If table was already allocated, just return */
    if (dma_maps->dma_table)
        return 0;

    /* Allocate memory for the DMA tables and map it */
    if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
        IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
        BUS_DMA_NOWAIT)) != 0) {
        printf("%s:%d: unable to allocate table DMA for "
            "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
            channel, drive, error);
        return error;
    }
    if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
        dma_table_size,
        (caddr_t *)&dma_maps->dma_table,
        BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
        printf("%s:%d: unable to map table DMA for "
1055 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1056 channel, drive, error);
1057 return error;
1058 }
1059 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1060 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1061 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1062
1063 /* Create and load table DMA map for this disk */
1064 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1065 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1066 &dma_maps->dmamap_table)) != 0) {
1067 printf("%s:%d: unable to create table DMA map for "
1068 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1069 channel, drive, error);
1070 return error;
1071 }
1072 if ((error = bus_dmamap_load(sc->sc_dmat,
1073 dma_maps->dmamap_table,
1074 dma_maps->dma_table,
1075 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1076 printf("%s:%d: unable to load table DMA map for "
1077 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1078 channel, drive, error);
1079 return error;
1080 }
1081 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1082 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1083 DEBUG_PROBE);
1084 /* Create a xfer DMA map for this drive */
1085 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1086 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1087 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1088 &dma_maps->dmamap_xfer)) != 0) {
1089 printf("%s:%d: unable to create xfer DMA map for "
1090 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1091 channel, drive, error);
1092 return error;
1093 }
1094 return 0;
1095 }
1096
1097 int
1098 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1099 void *v;
1100 int channel, drive;
1101 void *databuf;
1102 size_t datalen;
1103 int flags;
1104 {
1105 struct pciide_softc *sc = v;
1106 int error, seg;
1107 struct pciide_dma_maps *dma_maps =
1108 &sc->pciide_channels[channel].dma_maps[drive];
1109
1110 error = bus_dmamap_load(sc->sc_dmat,
1111 dma_maps->dmamap_xfer,
1112 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1113 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1114 if (error) {
        printf("%s:%d: unable to load xfer DMA map for "
1116 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1117 channel, drive, error);
1118 return error;
1119 }
1120
1121 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1122 dma_maps->dmamap_xfer->dm_mapsize,
1123 (flags & WDC_DMA_READ) ?
1124 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1125
1126 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1127 #ifdef DIAGNOSTIC
1128 /* A segment must not cross a 64k boundary */
1129 {
1130 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1131 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1132 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1133 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1134 printf("pciide_dma: segment %d physical addr 0x%lx"
1135 " len 0x%lx not properly aligned\n",
1136 seg, phys, len);
1137 panic("pciide_dma: buf align");
1138 }
1139 }
1140 #endif
1141 dma_maps->dma_table[seg].base_addr =
1142 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1143 dma_maps->dma_table[seg].byte_count =
1144 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1145 IDEDMA_BYTE_COUNT_MASK);
1146 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1147 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1148 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1149
1150 }
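    /*
     * Mark the last entry as end-of-table (EOT) so the bus-master
     * engine stops fetching descriptors after it.
     */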
    dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
        htole32(IDEDMA_BYTE_COUNT_EOT);

    bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
        dma_maps->dmamap_table->dm_mapsize,
        BUS_DMASYNC_PREWRITE);

    /* Maps are ready. Start DMA function */
#ifdef DIAGNOSTIC
    if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
        printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
            (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
        panic("pciide_dma_init: table align");
    }
#endif

    /* Clear status bits */
    bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
        bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
            IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
    /* Write table addr */
    bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
        dma_maps->dmamap_table->dm_segs[0].ds_addr);
    /* set read/write */
    bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
        (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0);
    /* remember flags */
    dma_maps->dma_flags = flags;
    return 0;
}

void
pciide_dma_start(v, channel, drive)
    void *v;
    int channel, drive;
{
    struct pciide_softc *sc = v;

    WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS);
    bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
        bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
            IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
}

int
pciide_dma_finish(v, channel, drive, force)
    void *v;
    int channel, drive;
    int force;
{
    struct pciide_softc *sc = v;
    u_int8_t status;
    int error = 0;
    struct pciide_dma_maps *dma_maps =
        &sc->pciide_channels[channel].dma_maps[drive];

    status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
    WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
        DEBUG_XFERS);

    if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
        return WDC_DMAST_NOIRQ;

    /* stop DMA channel */
    bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
        bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
            IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);

    /* Unload the map of the data buffer */
    bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
        dma_maps->dmamap_xfer->dm_mapsize,
        (dma_maps->dma_flags & WDC_DMA_READ) ?
        BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

    if ((status & IDEDMA_CTL_ERR) != 0) {
        printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
            sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
        error |= WDC_DMAST_ERR;
    }

    if ((status & IDEDMA_CTL_INTR) == 0) {
        printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
            "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
            drive, status);
        error |= WDC_DMAST_NOIRQ;
    }

    if ((status & IDEDMA_CTL_ACT) != 0) {
        /* data underrun, may be a valid condition for ATAPI */
        error |= WDC_DMAST_UNDER;
    }
    return error;
}

void
pciide_irqack(chp)
    struct channel_softc *chp;
{
    struct pciide_channel *cp = (struct pciide_channel*)chp;
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

    /* clear status bits in IDE DMA registers */
    bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
        IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
        bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
            IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
}

/* some common code used by several chip_map */
int
pciide_chansetup(sc, channel, interface)
    struct pciide_softc *sc;
    int channel;
    pcireg_t interface;
{
    struct pciide_channel *cp = &sc->pciide_channels[channel];
    sc->wdc_chanarray[channel] = &cp->wdc_channel;
    cp->name = PCIIDE_CHANNEL_NAME(channel);
    cp->wdc_channel.channel = channel;
    cp->wdc_channel.wdc = &sc->sc_wdcdev;
    cp->wdc_channel.ch_queue =
        malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
    if (cp->wdc_channel.ch_queue == NULL) {
        printf("%s %s channel: "
            "can't allocate memory for command queue",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        return 0;
    }
    printf("%s: %s channel %s to %s mode\n",
        sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
        (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
        "configured" : "wired",
        (interface & PCIIDE_INTERFACE_PCI(channel)) ?
        "native-PCI" : "compatibility");
    return 1;
}

/* some common code used by several chip channel_map */
void
pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
    struct pci_attach_args *pa;
    struct pciide_channel *cp;
    pcireg_t interface;
    bus_size_t *cmdsizep, *ctlsizep;
    int (*pci_intr) __P((void *));
{
    struct channel_softc *wdc_cp = &cp->wdc_channel;

    if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
        cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
            pci_intr);
    else
        cp->hw_ok = pciide_mapregs_compat(pa, cp,
            wdc_cp->channel, cmdsizep, ctlsizep);

    if (cp->hw_ok == 0)
        return;
    wdc_cp->data32iot = wdc_cp->cmd_iot;
    wdc_cp->data32ioh = wdc_cp->cmd_ioh;
    wdcattach(wdc_cp);
}

/*
 * Generic code to call to know if a channel can be disabled. Return 1
 * if channel can be disabled, 0 if not
 */
int
pciide_chan_candisable(cp)
    struct pciide_channel *cp;
{
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct channel_softc *wdc_cp = &cp->wdc_channel;

    if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
        (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
        printf("%s: disabling %s channel (no drives)\n",
            sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        cp->hw_ok = 0;
        return 1;
    }
    return 0;
}

/*
 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
 * Set hw_ok=0 on failure
 */
void
pciide_map_compat_intr(pa, cp, compatchan, interface)
    struct pci_attach_args *pa;
    struct pciide_channel *cp;
    int compatchan, interface;
{
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct channel_softc *wdc_cp = &cp->wdc_channel;

    if (cp->hw_ok == 0)
        return;
    if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
        return;

#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
    cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
        pa, compatchan, pciide_compat_intr, cp);
    if (cp->ih == NULL) {
#endif
        printf("%s: no compatibility interrupt for use by %s "
            "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
        cp->hw_ok = 0;
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
    }
#endif
}

void
pciide_print_modes(cp)
    struct pciide_channel *cp;
{
    wdc_print_modes(&cp->wdc_channel);
}

void
default_chip_map(sc, pa)
    struct pciide_softc *sc;
    struct pci_attach_args *pa;
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);
    pcireg_t csr;
    int channel, drive;
    struct ata_drive_datas *drvp;
    u_int8_t idedma_ctl;
    bus_size_t cmdsize, ctlsize;
    char *failreason;

    if (pciide_chipen(sc, pa) == 0)
        return;

    if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
        printf("%s: bus-master DMA support present",
            sc->sc_wdcdev.sc_dev.dv_xname);
        if (sc->sc_pp == &default_product_desc &&
            (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
            PCIIDE_OPTIONS_DMA) == 0) {
            printf(", but unused (no driver support)");
            sc->sc_dma_ok = 0;
        } else {
            pciide_mapreg_dma(sc, pa);
            if (sc->sc_dma_ok != 0)
                printf(", used without full driver "
                    "support");
        }
    } else {
        printf("%s: hardware does not support DMA",
            sc->sc_wdcdev.sc_dev.dv_xname);
        sc->sc_dma_ok = 0;
    }
    printf("\n");
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.PIO_cap = 0;
    sc->sc_wdcdev.DMA_cap = 0;

    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;

    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        if (interface & PCIIDE_INTERFACE_PCI(channel)) {
            cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
                &ctlsize, pciide_pci_intr);
        } else {
            cp->hw_ok = pciide_mapregs_compat(pa, cp,
                channel, &cmdsize, &ctlsize);
        }
        if (cp->hw_ok == 0)
            continue;
        /*
         * Check to see if something appears to be there.
         */
        failreason = NULL;
        if (!wdcprobe(&cp->wdc_channel)) {
            failreason = "not responding; disabled or no drives?";
            goto next;
        }
        /*
         * Now, make sure it's actually attributable to this PCI IDE
         * channel by trying to access the channel again while the
         * PCI IDE controller's I/O space is disabled.  (If the
         * channel no longer appears to be there, it belongs to
         * this controller.)  YUCK!
         */
        csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
            PCI_COMMAND_STATUS_REG);
        pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
            csr & ~PCI_COMMAND_IO_ENABLE);
        if (wdcprobe(&cp->wdc_channel))
            failreason = "other hardware responding at addresses";
        pci_conf_write(sc->sc_pc, sc->sc_tag,
            PCI_COMMAND_STATUS_REG, csr);
next:
        if (failreason) {
            printf("%s: %s channel ignored (%s)\n",
                sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
                failreason);
            cp->hw_ok = 0;
            bus_space_unmap(cp->wdc_channel.cmd_iot,
                cp->wdc_channel.cmd_ioh, cmdsize);
            if (interface & PCIIDE_INTERFACE_PCI(channel))
                bus_space_unmap(cp->wdc_channel.ctl_iot,
                    cp->ctl_baseioh, ctlsize);
            else
                bus_space_unmap(cp->wdc_channel.ctl_iot,
                    cp->wdc_channel.ctl_ioh, ctlsize);
        } else {
            pciide_map_compat_intr(pa, cp, channel, interface);
        }
        if (cp->hw_ok) {
            cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
            cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
            wdcattach(&cp->wdc_channel);
        }
    }

    if (sc->sc_dma_ok == 0)
        return;

    /* Allocate DMA maps */
    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        idedma_ctl = 0;
        cp = &sc->pciide_channels[channel];
        for (drive = 0; drive < 2; drive++) {
            drvp = &cp->wdc_channel.ch_drive[drive];
            /* If no drive, skip */
            if ((drvp->drive_flags & DRIVE) == 0)
                continue;
            if ((drvp->drive_flags & DRIVE_DMA) == 0)
                continue;
            if (pciide_dma_table_setup(sc, channel, drive) != 0) {
                /* Abort DMA setup */
                printf("%s:%d:%d: can't allocate DMA maps, "
                    "using PIO transfers\n",
                    sc->sc_wdcdev.sc_dev.dv_xname,
                    channel, drive);
                drvp->drive_flags &= ~DRIVE_DMA;
            }
            printf("%s:%d:%d: using DMA data transfers\n",
                sc->sc_wdcdev.sc_dev.dv_xname,
                channel, drive);
            idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
        }
        if (idedma_ctl != 0) {
            /* Add software bits in status register */
            bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
                IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
                idedma_ctl);
        }
    }
}

void
piix_chip_map(sc, pa)
    struct pciide_softc *sc;
    struct pci_attach_args *pa;
{
    struct pciide_channel *cp;
    int channel;
    u_int32_t idetim;
    bus_size_t cmdsize, ctlsize;

    if (pciide_chipen(sc, pa) == 0)
        return;

    printf("%s: bus-master DMA support present",
        sc->sc_wdcdev.sc_dev.dv_xname);
    pciide_mapreg_dma(sc, pa);
    printf("\n");
    sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
        WDC_CAPABILITY_MODE;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.irqack = pciide_irqack;
        switch (sc->sc_pp->ide_product) {
        case PCI_PRODUCT_INTEL_82371AB_IDE:
        case PCI_PRODUCT_INTEL_82440MX_IDE:
        case PCI_PRODUCT_INTEL_82801AA_IDE:
        case PCI_PRODUCT_INTEL_82801AB_IDE:
        case PCI_PRODUCT_INTEL_82801BA_IDE:
        case PCI_PRODUCT_INTEL_82801BAM_IDE:
        case PCI_PRODUCT_INTEL_82801CA_IDE_1:
        case PCI_PRODUCT_INTEL_82801CA_IDE_2:
        case PCI_PRODUCT_INTEL_82801DB_IDE:
            sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
        }
    }
    sc->sc_wdcdev.PIO_cap = 4;
    sc->sc_wdcdev.DMA_cap = 2;
    switch (sc->sc_pp->ide_product) {
    case PCI_PRODUCT_INTEL_82801AA_IDE:
        sc->sc_wdcdev.UDMA_cap = 4;
        break;
    case PCI_PRODUCT_INTEL_82801BA_IDE:
    case PCI_PRODUCT_INTEL_82801BAM_IDE:
    case PCI_PRODUCT_INTEL_82801CA_IDE_1:
    case PCI_PRODUCT_INTEL_82801CA_IDE_2:
    case PCI_PRODUCT_INTEL_82801DB_IDE:
        sc->sc_wdcdev.UDMA_cap = 5;
        break;
    default:
        sc->sc_wdcdev.UDMA_cap = 2;
    }
    if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
        sc->sc_wdcdev.set_modes = piix_setup_channel;
    else
        sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

    WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
        pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
        DEBUG_PROBE);
    if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
        WDCDEBUG_PRINT((", sidetim=0x%x",
            pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
            DEBUG_PROBE);
        if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
            WDCDEBUG_PRINT((", udmareg 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
                DEBUG_PROBE);
        }
        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
            WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
                DEBUG_PROBE);
        }

    }
    WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        cp = &sc->pciide_channels[channel];
        /* PIIX is compat-only */
        if (pciide_chansetup(sc, channel, 0) == 0)
            continue;
        idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
        if ((PIIX_IDETIM_READ(idetim, channel) &
            PIIX_IDETIM_IDE) == 0) {
            printf("%s: %s channel ignored (disabled)\n",
                sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
            continue;
        }
        /* PIIX are compat-only pciide devices */
        pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
        if (cp->hw_ok == 0)
            continue;
        if (pciide_chan_candisable(cp)) {
            idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
                channel);
            pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
                idetim);
        }
        pciide_map_compat_intr(pa, cp, channel, 0);
        if (cp->hw_ok == 0)
            continue;
        sc->sc_wdcdev.set_modes(&cp->wdc_channel);
    }

    WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
        pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
        DEBUG_PROBE);
    if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
        WDCDEBUG_PRINT((", sidetim=0x%x",
            pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
            DEBUG_PROBE);
        if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
            WDCDEBUG_PRINT((", udmareg 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
                DEBUG_PROBE);
        }
        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
            WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
                DEBUG_PROBE);
        }
    }
    WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}

void
piix_setup_channel(chp)
    struct channel_softc *chp;
{
    u_int8_t mode[2], drive;
    u_int32_t oidetim, idetim, idedma_ctl;
    struct pciide_channel *cp = (struct pciide_channel*)chp;
    struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
    struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

    oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
    idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
    idedma_ctl = 0;

    /* set up new idetim: Enable IDE registers decode */
    idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
        chp->channel);

    /* setup DMA */
    pciide_channel_dma_setup(cp);

    /*
     * Here we have to mess with the drives' modes: the PIIX can't have
     * different timings for master and slave drives.
     * We need to find the best combination.
     */

    /* If both drives support DMA, take the lower mode */
    if ((drvp[0].drive_flags & DRIVE_DMA) &&
        (drvp[1].drive_flags & DRIVE_DMA)) {
        mode[0] = mode[1] =
            min(drvp[0].DMA_mode, drvp[1].DMA_mode);
        drvp[0].DMA_mode = mode[0];
        drvp[1].DMA_mode = mode[1];
        goto ok;
    }
    /*
     * If only one drive supports DMA, use its mode, and
     * put the other one in PIO mode 0 if its mode is not compatible
     */
    if (drvp[0].drive_flags & DRIVE_DMA) {
        mode[0] = drvp[0].DMA_mode;
        mode[1] = drvp[1].PIO_mode;
        if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
            piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
            mode[1] = drvp[1].PIO_mode = 0;
        goto ok;
    }
    if (drvp[1].drive_flags & DRIVE_DMA) {
        mode[1] = drvp[1].DMA_mode;
        mode[0] = drvp[0].PIO_mode;
        if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
            piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
            mode[0] = drvp[0].PIO_mode = 0;
        goto ok;
    }
    /*
     * If neither drive uses DMA, take the lower mode, unless
     * one of them is in PIO mode < 2
     */
    if (drvp[0].PIO_mode < 2) {
        mode[0] = drvp[0].PIO_mode = 0;
        mode[1] = drvp[1].PIO_mode;
    } else if (drvp[1].PIO_mode < 2) {
        mode[1] = drvp[1].PIO_mode = 0;
        mode[0] = drvp[0].PIO_mode;
    } else {
        mode[0] = mode[1] =
            min(drvp[1].PIO_mode, drvp[0].PIO_mode);
        drvp[0].PIO_mode = mode[0];
        drvp[1].PIO_mode = mode[1];
    }
ok: /* The modes are setup */
    for (drive = 0; drive < 2; drive++) {
        if (drvp[drive].drive_flags & DRIVE_DMA) {
            idetim |= piix_setup_idetim_timings(
                mode[drive], 1, chp->channel);
            goto end;
        }
    }
    /* If we get here, neither drive uses DMA */
1744 if (mode[0] >= 2)
1745 idetim |= piix_setup_idetim_timings(
1746 mode[0], 0, chp->channel);
1747 else
1748 idetim |= piix_setup_idetim_timings(
1749 mode[1], 0, chp->channel);
1750 end: /*
1751 * timing mode is now set up in the controller. Enable
1752 * it per-drive
1753 */
1754 for (drive = 0; drive < 2; drive++) {
1755 /* If no drive, skip */
1756 if ((drvp[drive].drive_flags & DRIVE) == 0)
1757 continue;
1758 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1759 if (drvp[drive].drive_flags & DRIVE_DMA)
1760 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1761 }
1762 if (idedma_ctl != 0) {
1763 /* Add software bits in status register */
1764 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1765 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1766 idedma_ctl);
1767 }
1768 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1769 pciide_print_modes(cp);
1770 }
1771
1772 void
1773 piix3_4_setup_channel(chp)
1774 struct channel_softc *chp;
1775 {
1776 struct ata_drive_datas *drvp;
1777 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1778 struct pciide_channel *cp = (struct pciide_channel*)chp;
1779 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1780 int drive;
1781 int channel = chp->channel;
1782
1783 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1784 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1785 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1786 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1787 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1788 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1789 PIIX_SIDETIM_RTC_MASK(channel));
1790
1791 idedma_ctl = 0;
1792 /* If channel disabled, no need to go further */
1793 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1794 return;
1795 /* set up new idetim: Enable IDE registers decode */
1796 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1797
1798 /* setup DMA if needed */
1799 pciide_channel_dma_setup(cp);
1800
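/*
 * Unlike the old PIIX handled above, the PIIX3/4 can time the slave
 * drive separately: when the SITRE bit (slave IDE timing register
 * enable) is set in IDETIM, the slave's ISP/RTC values are taken from
 * the SIDETIM register, which is why drive 1's timings are written to
 * sidetim below.
 */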
1801 for (drive = 0; drive < 2; drive++) {
1802 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1803 PIIX_UDMATIM_SET(0x3, channel, drive));
1804 drvp = &chp->ch_drive[drive];
1805 /* If no drive, skip */
1806 if ((drvp->drive_flags & DRIVE) == 0)
1807 continue;
1808 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1809 (drvp->drive_flags & DRIVE_UDMA) == 0))
1810 goto pio;
1811
1812 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1813 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1814 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1815 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1816 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1817 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1818 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1819 ideconf |= PIIX_CONFIG_PINGPONG;
1820 }
1821 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1822 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1823 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1824 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1825 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1826 /* setup Ultra/100 */
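/*
 * Modes above UDMA2 (ATA/33) need an 80-conductor cable; the
 * PIIX_CONFIG_CR bit reports whether one was detected, so without
 * it the drive is capped at UDMA2.
 */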
1827 if (drvp->UDMA_mode > 2 &&
1828 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1829 drvp->UDMA_mode = 2;
1830 if (drvp->UDMA_mode > 4) {
1831 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1832 } else {
1833 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1834 if (drvp->UDMA_mode > 2) {
1835 ideconf |= PIIX_CONFIG_UDMA66(channel,
1836 drive);
1837 } else {
1838 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1839 drive);
1840 }
1841 }
1842 }
1843 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1844 /* setup Ultra/66 */
1845 if (drvp->UDMA_mode > 2 &&
1846 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1847 drvp->UDMA_mode = 2;
1848 if (drvp->UDMA_mode > 2)
1849 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1850 else
1851 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1852 }
1853 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1854 (drvp->drive_flags & DRIVE_UDMA)) {
1855 /* use Ultra/DMA */
1856 drvp->drive_flags &= ~DRIVE_DMA;
1857 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1858 udmareg |= PIIX_UDMATIM_SET(
1859 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1860 } else {
1861 /* use Multiword DMA */
1862 drvp->drive_flags &= ~DRIVE_UDMA;
1863 if (drive == 0) {
1864 idetim |= piix_setup_idetim_timings(
1865 drvp->DMA_mode, 1, channel);
1866 } else {
1867 sidetim |= piix_setup_sidetim_timings(
1868 drvp->DMA_mode, 1, channel);
1869 idetim = PIIX_IDETIM_SET(idetim,
1870 PIIX_IDETIM_SITRE, channel);
1871 }
1872 }
1873 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1874
1875 pio: /* use PIO mode */
1876 idetim |= piix_setup_idetim_drvs(drvp);
1877 if (drive == 0) {
1878 idetim |= piix_setup_idetim_timings(
1879 drvp->PIO_mode, 0, channel);
1880 } else {
1881 sidetim |= piix_setup_sidetim_timings(
1882 drvp->PIO_mode, 0, channel);
1883 idetim = PIIX_IDETIM_SET(idetim,
1884 PIIX_IDETIM_SITRE, channel);
1885 }
1886 }
1887 if (idedma_ctl != 0) {
1888 /* Add software bits in status register */
1889 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1890 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1891 idedma_ctl);
1892 }
1893 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1894 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1895 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1896 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1897 pciide_print_modes(cp);
1898 }
1899
1900
1901 /* setup ISP and RTC fields, based on mode */
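/*
 * As the macro names suggest, ISP is the IORDY sample point and RTC
 * the recovery time field of the PIIX IDETIM/SIDETIM registers;
 * together they select the cycle timing used for a given PIO or
 * multi-word DMA mode.
 */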
1902 static u_int32_t
1903 piix_setup_idetim_timings(mode, dma, channel)
1904 u_int8_t mode;
1905 u_int8_t dma;
1906 u_int8_t channel;
1907 {
1908
1909 if (dma)
1910 return PIIX_IDETIM_SET(0,
1911 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1912 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1913 channel);
1914 else
1915 return PIIX_IDETIM_SET(0,
1916 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1917 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1918 channel);
1919 }
1920
1921 /* setup DTE, PPE, IE and TIME field based on PIO mode */
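/*
 * Roughly, in the IDETIM register: DTE applies the fast timings to
 * DMA only (PIO keeps compatible timings), PPE enables prefetch and
 * posting, IE enables IORDY sampling, and TIME selects the fast
 * timing bank instead of the compatible (ISA-speed) timings.
 */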
1922 static u_int32_t
1923 piix_setup_idetim_drvs(drvp)
1924 struct ata_drive_datas *drvp;
1925 {
1926 u_int32_t ret = 0;
1927 struct channel_softc *chp = drvp->chnl_softc;
1928 u_int8_t channel = chp->channel;
1929 u_int8_t drive = drvp->drive;
1930
1931 /*
1932 * If the drive is using UDMA, the timing setup is independent,
1933 * so just check DMA and PIO here.
1934 */
1935 if (drvp->drive_flags & DRIVE_DMA) {
1936 /* if mode = DMA mode 0, use compatible timings */
1937 if ((drvp->drive_flags & DRIVE_DMA) &&
1938 drvp->DMA_mode == 0) {
1939 drvp->PIO_mode = 0;
1940 return ret;
1941 }
1942 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1943 /*
1944 * If the PIO and DMA timings are the same, use fast timings for
1945 * PIO too; otherwise fall back to compat timings for PIO.
1946 */
1947 if ((piix_isp_pio[drvp->PIO_mode] !=
1948 piix_isp_dma[drvp->DMA_mode]) ||
1949 (piix_rtc_pio[drvp->PIO_mode] !=
1950 piix_rtc_dma[drvp->DMA_mode]))
1951 drvp->PIO_mode = 0;
1952 /* if PIO mode <= 2, use compat timings for PIO */
1953 if (drvp->PIO_mode <= 2) {
1954 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1955 channel);
1956 return ret;
1957 }
1958 }
1959
1960 /*
1961 * Now set up the PIO modes. If the mode is < 2, use compat
1962 * timings. Else enable fast timings, and enable IORDY and
1963 * prefetch/posting if the PIO mode is >= 3.
1964 */
1965
1966 if (drvp->PIO_mode < 2)
1967 return ret;
1968
1969 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1970 if (drvp->PIO_mode >= 3) {
1971 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1972 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1973 }
1974 return ret;
1975 }
1976
1977 /* setup values in SIDETIM registers, based on mode */
1978 static u_int32_t
1979 piix_setup_sidetim_timings(mode, dma, channel)
1980 u_int8_t mode;
1981 u_int8_t dma;
1982 u_int8_t channel;
1983 {
1984 if (dma)
1985 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1986 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1987 else
1988 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1989 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1990 }
1991
1992 void
1993 amd7x6_chip_map(sc, pa)
1994 struct pciide_softc *sc;
1995 struct pci_attach_args *pa;
1996 {
1997 struct pciide_channel *cp;
1998 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1999 int channel;
2000 pcireg_t chanenable;
2001 bus_size_t cmdsize, ctlsize;
2002
2003 if (pciide_chipen(sc, pa) == 0)
2004 return;
2005 printf("%s: bus-master DMA support present",
2006 sc->sc_wdcdev.sc_dev.dv_xname);
2007 pciide_mapreg_dma(sc, pa);
2008 printf("\n");
2009 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2010 WDC_CAPABILITY_MODE;
2011 if (sc->sc_dma_ok) {
2012 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2013 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2014 sc->sc_wdcdev.irqack = pciide_irqack;
2015 }
2016 sc->sc_wdcdev.PIO_cap = 4;
2017 sc->sc_wdcdev.DMA_cap = 2;
2018
2019 switch (sc->sc_pp->ide_product) {
2020 case PCI_PRODUCT_AMD_PBC766_IDE:
2021 case PCI_PRODUCT_AMD_PBC768_IDE:
2022 case PCI_PRODUCT_AMD_PBC8111_IDE:
2023 sc->sc_wdcdev.UDMA_cap = 5;
2024 break;
2025 default:
2026 sc->sc_wdcdev.UDMA_cap = 4;
2027 }
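/* UDMA mode 5 corresponds to ATA/100, mode 4 to ATA/66. */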
2028 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2029 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2030 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2031 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
2032
2033 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2034 DEBUG_PROBE);
2035 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2036 cp = &sc->pciide_channels[channel];
2037 if (pciide_chansetup(sc, channel, interface) == 0)
2038 continue;
2039
2040 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2041 printf("%s: %s channel ignored (disabled)\n",
2042 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2043 continue;
2044 }
2045 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2046 pciide_pci_intr);
2047
2048 if (pciide_chan_candisable(cp))
2049 chanenable &= ~AMD7X6_CHAN_EN(channel);
2050 pciide_map_compat_intr(pa, cp, channel, interface);
2051 if (cp->hw_ok == 0)
2052 continue;
2053
2054 amd7x6_setup_channel(&cp->wdc_channel);
2055 }
2056 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2057 chanenable);
2058 return;
2059 }
2060
2061 void
2062 amd7x6_setup_channel(chp)
2063 struct channel_softc *chp;
2064 {
2065 u_int32_t udmatim_reg, datatim_reg;
2066 u_int8_t idedma_ctl;
2067 int mode, drive;
2068 struct ata_drive_datas *drvp;
2069 struct pciide_channel *cp = (struct pciide_channel*)chp;
2070 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2071 #ifndef PCIIDE_AMD756_ENABLEDMA
2072 int rev = PCI_REVISION(
2073 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2074 #endif
2075
2076 idedma_ctl = 0;
2077 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2078 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2079 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2080 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2081
2082 /* setup DMA if needed */
2083 pciide_channel_dma_setup(cp);
2084
2085 for (drive = 0; drive < 2; drive++) {
2086 drvp = &chp->ch_drive[drive];
2087 /* If no drive, skip */
2088 if ((drvp->drive_flags & DRIVE) == 0)
2089 continue;
2090 /* add timing values, setup DMA if needed */
2091 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2092 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2093 mode = drvp->PIO_mode;
2094 goto pio;
2095 }
2096 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2097 (drvp->drive_flags & DRIVE_UDMA)) {
2098 /* use Ultra/DMA */
2099 drvp->drive_flags &= ~DRIVE_DMA;
2100 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2101 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2102 AMD7X6_UDMA_TIME(chp->channel, drive,
2103 amd7x6_udma_tim[drvp->UDMA_mode]);
2104 /* can use PIO timings, MW DMA unused */
2105 mode = drvp->PIO_mode;
2106 } else {
2107 /* use Multiword DMA, but only if revision is OK */
2108 drvp->drive_flags &= ~DRIVE_UDMA;
2109 #ifndef PCIIDE_AMD756_ENABLEDMA
2110 /*
2111 * The workaround doesn't seem to be necessary
2112 * with all drives, so it can be disabled by
2113 * defining PCIIDE_AMD756_ENABLEDMA. The bug causes
2114 * a hard hang if triggered.
2115 */
2116 if (sc->sc_pp->ide_product ==
2117 PCI_PRODUCT_AMD_PBC756_IDE &&
2118 AMD756_CHIPREV_DISABLEDMA(rev)) {
2119 printf("%s:%d:%d: multi-word DMA disabled due "
2120 "to chip revision\n",
2121 sc->sc_wdcdev.sc_dev.dv_xname,
2122 chp->channel, drive);
2123 mode = drvp->PIO_mode;
2124 drvp->drive_flags &= ~DRIVE_DMA;
2125 goto pio;
2126 }
2127 #endif
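/*
 * A single pulse/recovery timing per drive is used for both PIO and
 * multi-word DMA here. MWDMA mode N has roughly the cycle time of
 * PIO mode N + 2 (exactly so for MWDMA2/PIO4), hence the
 * min(pio, dma + 2) choice below.
 */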
2128 /* mode = min(pio, dma+2) */
2129 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2130 mode = drvp->PIO_mode;
2131 else
2132 mode = drvp->DMA_mode + 2;
2133 }
2134 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2135
2136 pio: /* setup PIO mode */
2137 if (mode <= 2) {
2138 drvp->DMA_mode = 0;
2139 drvp->PIO_mode = 0;
2140 mode = 0;
2141 } else {
2142 drvp->PIO_mode = mode;
2143 drvp->DMA_mode = mode - 2;
2144 }
2145 datatim_reg |=
2146 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2147 amd7x6_pio_set[mode]) |
2148 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2149 amd7x6_pio_rec[mode]);
2150 }
2151 if (idedma_ctl != 0) {
2152 /* Add software bits in status register */
2153 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2154 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2155 idedma_ctl);
2156 }
2157 pciide_print_modes(cp);
2158 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2159 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2160 }
2161
2162 void
2163 apollo_chip_map(sc, pa)
2164 struct pciide_softc *sc;
2165 struct pci_attach_args *pa;
2166 {
2167 struct pciide_channel *cp;
2168 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2169 int channel;
2170 u_int32_t ideconf;
2171 bus_size_t cmdsize, ctlsize;
2172 pcitag_t pcib_tag;
2173 pcireg_t pcib_id, pcib_class;
2174
2175 if (pciide_chipen(sc, pa) == 0)
2176 return;
2177 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2178 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2179 /* and read ID and rev of the ISA bridge */
2180 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2181 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2182 printf(": VIA Technologies ");
2183 switch (PCI_PRODUCT(pcib_id)) {
2184 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2185 printf("VT82C586 (Apollo VP) ");
2186 if (PCI_REVISION(pcib_class) >= 0x02) {
2187 printf("ATA33 controller\n");
2188 sc->sc_wdcdev.UDMA_cap = 2;
2189 } else {
2190 printf("controller\n");
2191 sc->sc_wdcdev.UDMA_cap = 0;
2192 }
2193 break;
2194 case PCI_PRODUCT_VIATECH_VT82C596A:
2195 printf("VT82C596A (Apollo Pro) ");
2196 if (PCI_REVISION(pcib_class) >= 0x12) {
2197 printf("ATA66 controller\n");
2198 sc->sc_wdcdev.UDMA_cap = 4;
2199 } else {
2200 printf("ATA33 controller\n");
2201 sc->sc_wdcdev.UDMA_cap = 2;
2202 }
2203 break;
2204 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2205 printf("VT82C686A (Apollo KX133) ");
2206 if (PCI_REVISION(pcib_class) >= 0x40) {
2207 printf("ATA100 controller\n");
2208 sc->sc_wdcdev.UDMA_cap = 5;
2209 } else {
2210 printf("ATA66 controller\n");
2211 sc->sc_wdcdev.UDMA_cap = 4;
2212 }
2213 break;
2214 case PCI_PRODUCT_VIATECH_VT8231:
2215 printf("VT8231 ATA100 controller\n");
2216 sc->sc_wdcdev.UDMA_cap = 5;
2217 break;
2218 case PCI_PRODUCT_VIATECH_VT8233:
2219 printf("VT8233 ATA100 controller\n");
2220 sc->sc_wdcdev.UDMA_cap = 5;
2221 break;
2222 case PCI_PRODUCT_VIATECH_VT8233A:
2223 printf("VT8233A ATA133 controller\n");
2224 sc->sc_wdcdev.UDMA_cap = 6;
2225 break;
2226 case PCI_PRODUCT_VIATECH_VT8235:
2227 printf("VT8235 ATA133 controller\n");
2228 sc->sc_wdcdev.UDMA_cap = 6;
2229 break;
2230 default:
2231 printf("unknown ATA controller\n");
2232 sc->sc_wdcdev.UDMA_cap = 0;
2233 }
2234
2235 printf("%s: bus-master DMA support present",
2236 sc->sc_wdcdev.sc_dev.dv_xname);
2237 pciide_mapreg_dma(sc, pa);
2238 printf("\n");
2239 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2240 WDC_CAPABILITY_MODE;
2241 if (sc->sc_dma_ok) {
2242 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2243 sc->sc_wdcdev.irqack = pciide_irqack;
2244 if (sc->sc_wdcdev.UDMA_cap > 0)
2245 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2246 }
2247 sc->sc_wdcdev.PIO_cap = 4;
2248 sc->sc_wdcdev.DMA_cap = 2;
2249 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2250 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2251 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2252
2253 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2254 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2255 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2256 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2257 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2258 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2259 DEBUG_PROBE);
2260
2261 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2262 cp = &sc->pciide_channels[channel];
2263 if (pciide_chansetup(sc, channel, interface) == 0)
2264 continue;
2265
2266 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2267 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2268 printf("%s: %s channel ignored (disabled)\n",
2269 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2270 continue;
2271 }
2272 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2273 pciide_pci_intr);
2274 if (cp->hw_ok == 0)
2275 continue;
2276 if (pciide_chan_candisable(cp)) {
2277 ideconf &= ~APO_IDECONF_EN(channel);
2278 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2279 ideconf);
2280 }
2281 pciide_map_compat_intr(pa, cp, channel, interface);
2282
2283 if (cp->hw_ok == 0)
2284 continue;
2285 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2286 }
2287 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2288 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2289 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2290 }
2291
2292 void
2293 apollo_setup_channel(chp)
2294 struct channel_softc *chp;
2295 {
2296 u_int32_t udmatim_reg, datatim_reg;
2297 u_int8_t idedma_ctl;
2298 int mode, drive;
2299 struct ata_drive_datas *drvp;
2300 struct pciide_channel *cp = (struct pciide_channel*)chp;
2301 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2302
2303 idedma_ctl = 0;
2304 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2305 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2306 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2307 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2308
2309 /* setup DMA if needed */
2310 pciide_channel_dma_setup(cp);
2311
2312 for (drive = 0; drive < 2; drive++) {
2313 drvp = &chp->ch_drive[drive];
2314 /* If no drive, skip */
2315 if ((drvp->drive_flags & DRIVE) == 0)
2316 continue;
2317 /* add timing values, setup DMA if needed */
2318 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2319 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2320 mode = drvp->PIO_mode;
2321 goto pio;
2322 }
2323 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2324 (drvp->drive_flags & DRIVE_UDMA)) {
2325 /* use Ultra/DMA */
2326 drvp->drive_flags &= ~DRIVE_DMA;
2327 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2328 APO_UDMA_EN_MTH(chp->channel, drive);
2329 if (sc->sc_wdcdev.UDMA_cap == 6) {
2330 /* 8233a */
2331 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2332 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2333 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2334 /* 686b */
2335 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2336 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2337 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2338 /* 596b or 686a */
2339 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2340 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2341 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2342 } else {
2343 /* 596a or 586b */
2344 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2345 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2346 }
2347 /* can use PIO timings, MW DMA unused */
2348 mode = drvp->PIO_mode;
2349 } else {
2350 /* use Multiword DMA */
2351 drvp->drive_flags &= ~DRIVE_UDMA;
2352 /* mode = min(pio, dma+2) */
2353 if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2354 mode = drvp->PIO_mode;
2355 else
2356 mode = drvp->DMA_mode + 2;
2357 }
2358 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2359
2360 pio: /* setup PIO mode */
2361 if (mode <= 2) {
2362 drvp->DMA_mode = 0;
2363 drvp->PIO_mode = 0;
2364 mode = 0;
2365 } else {
2366 drvp->PIO_mode = mode;
2367 drvp->DMA_mode = mode - 2;
2368 }
2369 datatim_reg |=
2370 APO_DATATIM_PULSE(chp->channel, drive,
2371 apollo_pio_set[mode]) |
2372 APO_DATATIM_RECOV(chp->channel, drive,
2373 apollo_pio_rec[mode]);
2374 }
2375 if (idedma_ctl != 0) {
2376 /* Add software bits in status register */
2377 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2378 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2379 idedma_ctl);
2380 }
2381 pciide_print_modes(cp);
2382 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2383 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2384 }
2385
2386 void
2387 cmd_channel_map(pa, sc, channel)
2388 struct pci_attach_args *pa;
2389 struct pciide_softc *sc;
2390 int channel;
2391 {
2392 struct pciide_channel *cp = &sc->pciide_channels[channel];
2393 bus_size_t cmdsize, ctlsize;
2394 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2395 int interface, one_channel;
2396
2397 /*
2398 * The 0648/0649 can be told to identify as a RAID controller.
2399 * In this case, we have to fake the interface value.
2400 */
2401 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2402 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2403 PCIIDE_INTERFACE_SETTABLE(1);
2404 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2405 CMD_CONF_DSA1)
2406 interface |= PCIIDE_INTERFACE_PCI(0) |
2407 PCIIDE_INTERFACE_PCI(1);
2408 } else {
2409 interface = PCI_INTERFACE(pa->pa_class);
2410 }
2411
2412 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2413 cp->name = PCIIDE_CHANNEL_NAME(channel);
2414 cp->wdc_channel.channel = channel;
2415 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2416
2417 /*
2418 * Older CMD64X chips don't have independent channels
2419 */
2420 switch (sc->sc_pp->ide_product) {
2421 case PCI_PRODUCT_CMDTECH_649:
2422 one_channel = 0;
2423 break;
2424 default:
2425 one_channel = 1;
2426 break;
2427 }
2428
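/*
 * Sharing channel 0's ch_queue makes the wdc layer serialize
 * commands across both channels, since on these chips the two
 * channels cannot be used independently.
 */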
2429 if (channel > 0 && one_channel) {
2430 cp->wdc_channel.ch_queue =
2431 sc->pciide_channels[0].wdc_channel.ch_queue;
2432 } else {
2433 cp->wdc_channel.ch_queue =
2434 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2435 }
2436 if (cp->wdc_channel.ch_queue == NULL) {
2437 printf("%s %s channel: "
2438 "can't allocate memory for command queue\n",
2439 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2440 return;
2441 }
2442
2443 printf("%s: %s channel %s to %s mode\n",
2444 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2445 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2446 "configured" : "wired",
2447 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2448 "native-PCI" : "compatibility");
2449
2450 /*
2451 * with a CMD PCI64x, if we get here, the first channel is enabled:
2452 * there's no way to disable the first channel without disabling
2453 * the whole device
2454 */
2455 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2456 printf("%s: %s channel ignored (disabled)\n",
2457 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2458 return;
2459 }
2460
2461 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2462 if (cp->hw_ok == 0)
2463 return;
2464 if (channel == 1) {
2465 if (pciide_chan_candisable(cp)) {
2466 ctrl &= ~CMD_CTRL_2PORT;
2467 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2468 CMD_CTRL, ctrl);
2469 }
2470 }
2471 pciide_map_compat_intr(pa, cp, channel, interface);
2472 }
2473
2474 int
2475 cmd_pci_intr(arg)
2476 void *arg;
2477 {
2478 struct pciide_softc *sc = arg;
2479 struct pciide_channel *cp;
2480 struct channel_softc *wdc_cp;
2481 int i, rv, crv;
2482 u_int32_t priirq, secirq;
2483
2484 rv = 0;
2485 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2486 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2487 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2488 cp = &sc->pciide_channels[i];
2489 wdc_cp = &cp->wdc_channel;
2490 /* If a compat channel, skip. */
2491 if (cp->compat)
2492 continue;
2493 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2494 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2495 crv = wdcintr(wdc_cp);
2496 if (crv == 0)
2497 printf("%s:%d: bogus intr\n",
2498 sc->sc_wdcdev.sc_dev.dv_xname, i);
2499 else
2500 rv = 1;
2501 }
2502 }
2503 return rv;
2504 }
2505
2506 void
2507 cmd_chip_map(sc, pa)
2508 struct pciide_softc *sc;
2509 struct pci_attach_args *pa;
2510 {
2511 int channel;
2512
2513 /*
2514 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2515 * and the base address registers can be disabled at the
2516 * hardware level. In this case, the device is wired
2517 * in compat mode and its first channel is always enabled,
2518 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2519 * In fact, it seems that the first channel of the CMD PCI0640
2520 * can't be disabled.
2521 */
2522
2523 #ifdef PCIIDE_CMD064x_DISABLE
2524 if (pciide_chipen(sc, pa) == 0)
2525 return;
2526 #endif
2527
2528 printf("%s: hardware does not support DMA\n",
2529 sc->sc_wdcdev.sc_dev.dv_xname);
2530 sc->sc_dma_ok = 0;
2531
2532 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2533 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2534 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2535
2536 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2537 cmd_channel_map(pa, sc, channel);
2538 }
2539 }
2540
2541 void
2542 cmd0643_9_chip_map(sc, pa)
2543 struct pciide_softc *sc;
2544 struct pci_attach_args *pa;
2545 {
2546 struct pciide_channel *cp;
2547 int channel;
2548 pcireg_t rev = PCI_REVISION(pa->pa_class);
2549
2550 /*
2551 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2552 * and the base address registers can be disabled at the
2553 * hardware level. In this case, the device is wired
2554 * in compat mode and its first channel is always enabled,
2555 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2556 * In fact, it seems that the first channel of the CMD PCI0640
2557 * can't be disabled.
2558 */
2559
2560 #ifdef PCIIDE_CMD064x_DISABLE
2561 if (pciide_chipen(sc, pa) == 0)
2562 return;
2563 #endif
2564 printf("%s: bus-master DMA support present",
2565 sc->sc_wdcdev.sc_dev.dv_xname);
2566 pciide_mapreg_dma(sc, pa);
2567 printf("\n");
2568 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2569 WDC_CAPABILITY_MODE;
2570 if (sc->sc_dma_ok) {
2571 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2572 switch (sc->sc_pp->ide_product) {
2573 case PCI_PRODUCT_CMDTECH_649:
2574 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2575 sc->sc_wdcdev.UDMA_cap = 5;
2576 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2577 break;
2578 case PCI_PRODUCT_CMDTECH_648:
2579 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2580 sc->sc_wdcdev.UDMA_cap = 4;
2581 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2582 break;
2583 case PCI_PRODUCT_CMDTECH_646:
2584 if (rev >= CMD0646U2_REV) {
2585 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2586 sc->sc_wdcdev.UDMA_cap = 2;
2587 } else if (rev >= CMD0646U_REV) {
2588 /*
2589 * Linux's driver claims that the 646U is broken
2590 * with UDMA. Only enable it if we know what we're
2591 * doing
2592 */
2593 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2594 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2595 sc->sc_wdcdev.UDMA_cap = 2;
2596 #endif
2597 /* explicitly disable UDMA */
2598 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2599 CMD_UDMATIM(0), 0);
2600 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2601 CMD_UDMATIM(1), 0);
2602 }
2603 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2604 break;
2605 default:
2606 sc->sc_wdcdev.irqack = pciide_irqack;
2607 }
2608 }
2609
2610 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2611 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2612 sc->sc_wdcdev.PIO_cap = 4;
2613 sc->sc_wdcdev.DMA_cap = 2;
2614 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2615
2616 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2617 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2618 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2619 DEBUG_PROBE);
2620
2621 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2622 cp = &sc->pciide_channels[channel];
2623 cmd_channel_map(pa, sc, channel);
2624 if (cp->hw_ok == 0)
2625 continue;
2626 cmd0643_9_setup_channel(&cp->wdc_channel);
2627 }
2628 /*
2629 * note - this also makes sure we clear the irq disable and reset
2630 * bits
2631 */
2632 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2633 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2634 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2635 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2636 DEBUG_PROBE);
2637 }
2638
2639 void
2640 cmd0643_9_setup_channel(chp)
2641 struct channel_softc *chp;
2642 {
2643 struct ata_drive_datas *drvp;
2644 u_int8_t tim;
2645 u_int32_t idedma_ctl, udma_reg;
2646 int drive;
2647 struct pciide_channel *cp = (struct pciide_channel*)chp;
2648 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2649
2650 idedma_ctl = 0;
2651 /* setup DMA if needed */
2652 pciide_channel_dma_setup(cp);
2653
2654 for (drive = 0; drive < 2; drive++) {
2655 drvp = &chp->ch_drive[drive];
2656 /* If no drive, skip */
2657 if ((drvp->drive_flags & DRIVE) == 0)
2658 continue;
2659 /* add timing values, setup DMA if needed */
2660 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2661 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2662 if (drvp->drive_flags & DRIVE_UDMA) {
2663 /* UltraDMA on a 646U2, 0648 or 0649 */
2664 drvp->drive_flags &= ~DRIVE_DMA;
2665 udma_reg = pciide_pci_read(sc->sc_pc,
2666 sc->sc_tag, CMD_UDMATIM(chp->channel));
2667 if (drvp->UDMA_mode > 2 &&
2668 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2669 CMD_BICSR) &
2670 CMD_BICSR_80(chp->channel)) == 0)
2671 drvp->UDMA_mode = 2;
2672 if (drvp->UDMA_mode > 2)
2673 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2674 else if (sc->sc_wdcdev.UDMA_cap > 2)
2675 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2676 udma_reg |= CMD_UDMATIM_UDMA(drive);
2677 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2678 CMD_UDMATIM_TIM_OFF(drive));
2679 udma_reg |=
2680 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2681 CMD_UDMATIM_TIM_OFF(drive));
2682 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2683 CMD_UDMATIM(chp->channel), udma_reg);
2684 } else {
2685 /*
2686 * use Multiword DMA.
2687 * Timings will be used for both PIO and DMA,
2688 * so adjust the DMA mode if needed.
2689 * If we have a 0646U2/8/9, turn off UDMA.
2690 */
2691 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2692 udma_reg = pciide_pci_read(sc->sc_pc,
2693 sc->sc_tag,
2694 CMD_UDMATIM(chp->channel));
2695 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2696 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2697 CMD_UDMATIM(chp->channel),
2698 udma_reg);
2699 }
2700 if (drvp->PIO_mode >= 3 &&
2701 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2702 drvp->DMA_mode = drvp->PIO_mode - 2;
2703 }
2704 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2705 }
2706 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2707 }
2708 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2709 CMD_DATA_TIM(chp->channel, drive), tim);
2710 }
2711 if (idedma_ctl != 0) {
2712 /* Add software bits in status register */
2713 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2714 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2715 idedma_ctl);
2716 }
2717 pciide_print_modes(cp);
2718 }
2719
2720 void
2721 cmd646_9_irqack(chp)
2722 struct channel_softc *chp;
2723 {
2724 u_int32_t priirq, secirq;
2725 struct pciide_channel *cp = (struct pciide_channel*)chp;
2726 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2727
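/*
 * Presumably the per-channel interrupt status bits in CMD_CONF and
 * CMD_ARTTIM23 are cleared by writing them back as read, which is
 * what acknowledges the interrupt at the chip before the generic
 * pciide_irqack() below.
 */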
2728 if (chp->channel == 0) {
2729 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2730 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2731 } else {
2732 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2733 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2734 }
2735 pciide_irqack(chp);
2736 }
2737
2738 void
2739 cmd680_chip_map(sc, pa)
2740 struct pciide_softc *sc;
2741 struct pci_attach_args *pa;
2742 {
2743 struct pciide_channel *cp;
2744 int channel;
2745
2746 if (pciide_chipen(sc, pa) == 0)
2747 return;
2748 printf("%s: bus-master DMA support present",
2749 sc->sc_wdcdev.sc_dev.dv_xname);
2750 pciide_mapreg_dma(sc, pa);
2751 printf("\n");
2752 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2753 WDC_CAPABILITY_MODE;
2754 if (sc->sc_dma_ok) {
2755 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2756 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2757 sc->sc_wdcdev.UDMA_cap = 6;
2758 sc->sc_wdcdev.irqack = pciide_irqack;
2759 }
2760
2761 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2762 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2763 sc->sc_wdcdev.PIO_cap = 4;
2764 sc->sc_wdcdev.DMA_cap = 2;
2765 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2766
2767 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2768 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2769 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2770 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2771 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2772 cp = &sc->pciide_channels[channel];
2773 cmd680_channel_map(pa, sc, channel);
2774 if (cp->hw_ok == 0)
2775 continue;
2776 cmd680_setup_channel(&cp->wdc_channel);
2777 }
2778 }
2779
2780 void
2781 cmd680_channel_map(pa, sc, channel)
2782 struct pci_attach_args *pa;
2783 struct pciide_softc *sc;
2784 int channel;
2785 {
2786 struct pciide_channel *cp = &sc->pciide_channels[channel];
2787 bus_size_t cmdsize, ctlsize;
2788 int interface, i, reg;
2789 static const u_int8_t init_val[] =
2790 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2791 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2792
2793 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2794 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2795 PCIIDE_INTERFACE_SETTABLE(1);
2796 interface |= PCIIDE_INTERFACE_PCI(0) |
2797 PCIIDE_INTERFACE_PCI(1);
2798 } else {
2799 interface = PCI_INTERFACE(pa->pa_class);
2800 }
2801
2802 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2803 cp->name = PCIIDE_CHANNEL_NAME(channel);
2804 cp->wdc_channel.channel = channel;
2805 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2806
2807 cp->wdc_channel.ch_queue =
2808 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2809 if (cp->wdc_channel.ch_queue == NULL) {
2810 printf("%s %s channel: "
2811 "can't allocate memory for command queue\n",
2812 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2813 return;
2814 }
2815
2816 /* XXX */
2817 reg = 0xa2 + channel * 16;
2818 for (i = 0; i < sizeof(init_val); i++)
2819 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2820
2821 printf("%s: %s channel %s to %s mode\n",
2822 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2823 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2824 "configured" : "wired",
2825 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2826 "native-PCI" : "compatibility");
2827
2828 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2829 if (cp->hw_ok == 0)
2830 return;
2831 pciide_map_compat_intr(pa, cp, channel, interface);
2832 }
2833
2834 void
2835 cmd680_setup_channel(chp)
2836 struct channel_softc *chp;
2837 {
2838 struct ata_drive_datas *drvp;
2839 u_int8_t mode, off, scsc;
2840 u_int16_t val;
2841 u_int32_t idedma_ctl;
2842 int drive;
2843 struct pciide_channel *cp = (struct pciide_channel*)chp;
2844 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2845 pci_chipset_tag_t pc = sc->sc_pc;
2846 pcitag_t pa = sc->sc_tag;
2847 static const u_int8_t udma2_tbl[] =
2848 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2849 static const u_int8_t udma_tbl[] =
2850 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2851 static const u_int16_t dma_tbl[] =
2852 { 0x2208, 0x10c2, 0x10c1 };
2853 static const u_int16_t pio_tbl[] =
2854 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2855
2856 idedma_ctl = 0;
2857 pciide_channel_dma_setup(cp);
2858 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
2859
2860 for (drive = 0; drive < 2; drive++) {
2861 drvp = &chp->ch_drive[drive];
2862 /* If no drive, skip */
2863 if ((drvp->drive_flags & DRIVE) == 0)
2864 continue;
2865 mode &= ~(0x03 << (drive * 4));
2866 if (drvp->drive_flags & DRIVE_UDMA) {
2867 drvp->drive_flags &= ~DRIVE_DMA;
2868 off = 0xa0 + chp->channel * 16;
2869 if (drvp->UDMA_mode > 2 &&
2870 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
2871 drvp->UDMA_mode = 2;
2872 scsc = pciide_pci_read(pc, pa, 0x8a);
2873 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
2874 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
2875 scsc = pciide_pci_read(pc, pa, 0x8a);
2876 if ((scsc & 0x30) == 0)
2877 drvp->UDMA_mode = 5;
2878 }
2879 mode |= 0x03 << (drive * 4);
2880 off = 0xac + chp->channel * 16 + drive * 2;
2881 val = pciide_pci_read(pc, pa, off) & ~0x3f;
2882 if (scsc & 0x30)
2883 val |= udma2_tbl[drvp->UDMA_mode];
2884 else
2885 val |= udma_tbl[drvp->UDMA_mode];
2886 pciide_pci_write(pc, pa, off, val);
2887 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2888 } else if (drvp->drive_flags & DRIVE_DMA) {
2889 mode |= 0x02 << (drive * 4);
2890 off = 0xa8 + chp->channel * 16 + drive * 2;
2891 val = dma_tbl[drvp->DMA_mode];
2892 pciide_pci_write(pc, pa, off, val & 0xff);
2893 pciide_pci_write(pc, pa, off, val >> 8);
2894 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2895 } else {
2896 mode |= 0x01 << (drive * 4);
2897 off = 0xa4 + chp->channel * 16 + drive * 2;
2898 val = pio_tbl[drvp->PIO_mode];
2899 pciide_pci_write(pc, pa, off, val & 0xff);
2900 pciide_pci_write(pc, pa, off, val >> 8);
2901 }
2902 }
2903
2904 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
2905 if (idedma_ctl != 0) {
2906 /* Add software bits in status register */
2907 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2908 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2909 idedma_ctl);
2910 }
2911 pciide_print_modes(cp);
2912 }
2913
2914 void
2915 cy693_chip_map(sc, pa)
2916 struct pciide_softc *sc;
2917 struct pci_attach_args *pa;
2918 {
2919 struct pciide_channel *cp;
2920 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2921 bus_size_t cmdsize, ctlsize;
2922
2923 if (pciide_chipen(sc, pa) == 0)
2924 return;
2925 /*
2926 * This chip has 2 PCI IDE functions, one for primary and one for
2927 * secondary, so we need to call pciide_mapregs_compat() with
2928 * the real channel.
2929 */
2930 if (pa->pa_function == 1) {
2931 sc->sc_cy_compatchan = 0;
2932 } else if (pa->pa_function == 2) {
2933 sc->sc_cy_compatchan = 1;
2934 } else {
2935 printf("%s: unexpected PCI function %d\n",
2936 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2937 return;
2938 }
2939 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2940 printf("%s: bus-master DMA support present",
2941 sc->sc_wdcdev.sc_dev.dv_xname);
2942 pciide_mapreg_dma(sc, pa);
2943 } else {
2944 printf("%s: hardware does not support DMA",
2945 sc->sc_wdcdev.sc_dev.dv_xname);
2946 sc->sc_dma_ok = 0;
2947 }
2948 printf("\n");
2949
2950 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2951 if (sc->sc_cy_handle == NULL) {
2952 printf("%s: unable to map hyperCache control registers\n",
2953 sc->sc_wdcdev.sc_dev.dv_xname);
2954 sc->sc_dma_ok = 0;
2955 }
2956
2957 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2958 WDC_CAPABILITY_MODE;
2959 if (sc->sc_dma_ok) {
2960 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2961 sc->sc_wdcdev.irqack = pciide_irqack;
2962 }
2963 sc->sc_wdcdev.PIO_cap = 4;
2964 sc->sc_wdcdev.DMA_cap = 2;
2965 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2966
2967 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2968 sc->sc_wdcdev.nchannels = 1;
2969
2970 /* Only one channel for this chip; if we are here it's enabled */
2971 cp = &sc->pciide_channels[0];
2972 sc->wdc_chanarray[0] = &cp->wdc_channel;
2973 cp->name = PCIIDE_CHANNEL_NAME(0);
2974 cp->wdc_channel.channel = 0;
2975 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2976 cp->wdc_channel.ch_queue =
2977 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2978 if (cp->wdc_channel.ch_queue == NULL) {
2979 printf("%s primary channel: "
2980 "can't allocate memory for command queue\n",
2981 sc->sc_wdcdev.sc_dev.dv_xname);
2982 return;
2983 }
2984 printf("%s: primary channel %s to ",
2985 sc->sc_wdcdev.sc_dev.dv_xname,
2986 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2987 "configured" : "wired");
2988 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2989 printf("native-PCI");
2990 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2991 pciide_pci_intr);
2992 } else {
2993 printf("compatibility");
2994 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2995 &cmdsize, &ctlsize);
2996 }
2997 printf(" mode\n");
2998 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2999 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3000 wdcattach(&cp->wdc_channel);
3001 if (pciide_chan_candisable(cp)) {
3002 pci_conf_write(sc->sc_pc, sc->sc_tag,
3003 PCI_COMMAND_STATUS_REG, 0);
3004 }
3005 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3006 if (cp->hw_ok == 0)
3007 return;
3008 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3009 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3010 cy693_setup_channel(&cp->wdc_channel);
3011 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3012 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3013 }
3014
3015 void
3016 cy693_setup_channel(chp)
3017 struct channel_softc *chp;
3018 {
3019 struct ata_drive_datas *drvp;
3020 int drive;
3021 u_int32_t cy_cmd_ctrl;
3022 u_int32_t idedma_ctl;
3023 struct pciide_channel *cp = (struct pciide_channel*)chp;
3024 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3025 int dma_mode = -1;
3026
3027 cy_cmd_ctrl = idedma_ctl = 0;
3028
3029 /* setup DMA if needed */
3030 pciide_channel_dma_setup(cp);
3031
3032 for (drive = 0; drive < 2; drive++) {
3033 drvp = &chp->ch_drive[drive];
3034 /* If no drive, skip */
3035 if ((drvp->drive_flags & DRIVE) == 0)
3036 continue;
3037 /* add timing values, setup DMA if needed */
3038 if (drvp->drive_flags & DRIVE_DMA) {
3039 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3040 /* use Multiword DMA */
3041 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3042 dma_mode = drvp->DMA_mode;
3043 }
3044 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3045 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3046 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3047 CY_CMD_CTRL_IOW_REC_OFF(drive));
3048 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3049 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3050 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3051 CY_CMD_CTRL_IOR_REC_OFF(drive));
3052 }
3053 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
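/*
 * The chip has only one DMA timing setting per channel, so both
 * drives are forced to the lowest DMA mode negotiated above.
 */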
3054 chp->ch_drive[0].DMA_mode = dma_mode;
3055 chp->ch_drive[1].DMA_mode = dma_mode;
3056
3057 if (dma_mode == -1)
3058 dma_mode = 0;
3059
3060 if (sc->sc_cy_handle != NULL) {
3061 /* Note: `multiple' is implied. */
3062 cy82c693_write(sc->sc_cy_handle,
3063 (sc->sc_cy_compatchan == 0) ?
3064 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3065 }
3066
3067 pciide_print_modes(cp);
3068
3069 if (idedma_ctl != 0) {
3070 /* Add software bits in status register */
3071 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3072 IDEDMA_CTL, idedma_ctl);
3073 }
3074 }
3075
3076 static int
3077 sis_hostbr_match(pa)
3078 struct pci_attach_args *pa;
3079 {
3080 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
3081 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
3082 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
3083 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
3084 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735) ||
3085 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_745)));
3086 }
3087
3088 void
3089 sis_chip_map(sc, pa)
3090 struct pciide_softc *sc;
3091 struct pci_attach_args *pa;
3092 {
3093 struct pciide_channel *cp;
3094 int channel;
3095 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3096 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3097 pcireg_t rev = PCI_REVISION(pa->pa_class);
3098 bus_size_t cmdsize, ctlsize;
3099 pcitag_t pchb_tag;
3100 pcireg_t pchb_id, pchb_class;
3101
3102 if (pciide_chipen(sc, pa) == 0)
3103 return;
3104 printf("%s: bus-master DMA support present",
3105 sc->sc_wdcdev.sc_dev.dv_xname);
3106 pciide_mapreg_dma(sc, pa);
3107 printf("\n");
3108
3109 /* get a PCI tag for the host bridge (function 0 of the same device) */
3110 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
3111 /* and read ID and rev of the host bridge */
3112 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
3113 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
3114
3115 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3116 WDC_CAPABILITY_MODE;
3117 if (sc->sc_dma_ok) {
3118 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3119 sc->sc_wdcdev.irqack = pciide_irqack;
3120 /*
3121 * Controllers associated with a rev 0x2 530 Host-to-PCI Bridge
3122 * have problems with UDMA (info provided by Christos).
3123 */
3124 if (rev >= 0xd0 &&
3125 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
3126 PCI_REVISION(pchb_class) >= 0x03))
3127 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3128 }
3129
3130 sc->sc_wdcdev.PIO_cap = 4;
3131 sc->sc_wdcdev.DMA_cap = 2;
3132 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
3133 /*
3134 * Use UDMA/100 on the SiS 645/650/730/735/745 host bridges
3135 * and UDMA/33 on other chipsets.
3136 */
3137 sc->sc_wdcdev.UDMA_cap =
3138 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
3139 sc->sc_wdcdev.set_modes = sis_setup_channel;
3140
3141 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3142 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3143
3144 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3145 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3146 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
3147
3148 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3149 cp = &sc->pciide_channels[channel];
3150 if (pciide_chansetup(sc, channel, interface) == 0)
3151 continue;
3152 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3153 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3154 printf("%s: %s channel ignored (disabled)\n",
3155 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3156 continue;
3157 }
3158 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3159 pciide_pci_intr);
3160 if (cp->hw_ok == 0)
3161 continue;
3162 if (pciide_chan_candisable(cp)) {
3163 if (channel == 0)
3164 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3165 else
3166 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3167 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3168 sis_ctr0);
3169 }
3170 pciide_map_compat_intr(pa, cp, channel, interface);
3171 if (cp->hw_ok == 0)
3172 continue;
3173 sis_setup_channel(&cp->wdc_channel);
3174 }
3175 }
3176
3177 void
3178 sis_setup_channel(chp)
3179 struct channel_softc *chp;
3180 {
3181 struct ata_drive_datas *drvp;
3182 int drive;
3183 u_int32_t sis_tim;
3184 u_int32_t idedma_ctl;
3185 struct pciide_channel *cp = (struct pciide_channel*)chp;
3186 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3187
3188 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3189 "channel %d 0x%x\n", chp->channel,
3190 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3191 DEBUG_PROBE);
3192 sis_tim = 0;
3193 idedma_ctl = 0;
3194 /* setup DMA if needed */
3195 pciide_channel_dma_setup(cp);
3196
3197 for (drive = 0; drive < 2; drive++) {
3198 drvp = &chp->ch_drive[drive];
3199 /* If no drive, skip */
3200 if ((drvp->drive_flags & DRIVE) == 0)
3201 continue;
3202 /* add timing values, setup DMA if needed */
3203 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3204 (drvp->drive_flags & DRIVE_UDMA) == 0)
3205 goto pio;
3206
3207 if (drvp->drive_flags & DRIVE_UDMA) {
3208 /* use Ultra/DMA */
3209 drvp->drive_flags &= ~DRIVE_DMA;
3210 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3211 SIS_TIM_UDMA_TIME_OFF(drive);
3212 sis_tim |= SIS_TIM_UDMA_EN(drive);
3213 } else {
3214 /*
3215 * use Multiword DMA.
3216 * Timings will be used for both PIO and DMA,
3217 * so adjust the DMA mode if needed.
3218 */
3219 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3220 drvp->PIO_mode = drvp->DMA_mode + 2;
3221 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3222 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3223 drvp->PIO_mode - 2 : 0;
3224 if (drvp->DMA_mode == 0)
3225 drvp->PIO_mode = 0;
3226 }
3227 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3228 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3229 SIS_TIM_ACT_OFF(drive);
3230 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3231 SIS_TIM_REC_OFF(drive);
3232 }
3233 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3234 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3235 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3236 if (idedma_ctl != 0) {
3237 /* Add software bits in status register */
3238 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3239 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3240 idedma_ctl);
3241 }
3242 pciide_print_modes(cp);
3243 }
3244
3245 void
3246 acer_chip_map(sc, pa)
3247 struct pciide_softc *sc;
3248 struct pci_attach_args *pa;
3249 {
3250 struct pciide_channel *cp;
3251 int channel;
3252 pcireg_t cr, interface;
3253 bus_size_t cmdsize, ctlsize;
3254 pcireg_t rev = PCI_REVISION(pa->pa_class);
3255
3256 if (pciide_chipen(sc, pa) == 0)
3257 return;
3258 printf("%s: bus-master DMA support present",
3259 sc->sc_wdcdev.sc_dev.dv_xname);
3260 pciide_mapreg_dma(sc, pa);
3261 printf("\n");
3262 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3263 WDC_CAPABILITY_MODE;
3264 if (sc->sc_dma_ok) {
3265 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3266 if (rev >= 0x20) {
3267 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3268 if (rev >= 0xC4)
3269 sc->sc_wdcdev.UDMA_cap = 5;
3270 else if (rev >= 0xC2)
3271 sc->sc_wdcdev.UDMA_cap = 4;
3272 else
3273 sc->sc_wdcdev.UDMA_cap = 2;
3274 }
3275 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3276 sc->sc_wdcdev.irqack = pciide_irqack;
3277 }
3278
3279 sc->sc_wdcdev.PIO_cap = 4;
3280 sc->sc_wdcdev.DMA_cap = 2;
3281 sc->sc_wdcdev.set_modes = acer_setup_channel;
3282 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3283 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3284
3285 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3286 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3287 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3288
3289 /* Enable "microsoft register bits" R/W. */
3290 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3291 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3292 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3293 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3294 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3295 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3296 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3297 ~ACER_CHANSTATUSREGS_RO);
3298 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3299 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3300 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3301 /* Don't use cr, re-read the real register content instead */
3302 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3303 PCI_CLASS_REG));
3304
3305 /* From linux: enable "Cable Detection" */
3306 if (rev >= 0xC2) {
3307 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3308 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3309 | ACER_0x4B_CDETECT);
3310 }
3311
3312 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3313 cp = &sc->pciide_channels[channel];
3314 if (pciide_chansetup(sc, channel, interface) == 0)
3315 continue;
3316 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3317 printf("%s: %s channel ignored (disabled)\n",
3318 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3319 continue;
3320 }
3321 /* newer controllers seem to lack the ACER_CHIDS. Sigh */
3322 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3323 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3324 if (cp->hw_ok == 0)
3325 continue;
3326 if (pciide_chan_candisable(cp)) {
3327 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3328 pci_conf_write(sc->sc_pc, sc->sc_tag,
3329 PCI_CLASS_REG, cr);
3330 }
3331 pciide_map_compat_intr(pa, cp, channel, interface);
3332 acer_setup_channel(&cp->wdc_channel);
3333 }
3334 }
3335
3336 void
3337 acer_setup_channel(chp)
3338 struct channel_softc *chp;
3339 {
3340 struct ata_drive_datas *drvp;
3341 int drive;
3342 u_int32_t acer_fifo_udma;
3343 u_int32_t idedma_ctl;
3344 struct pciide_channel *cp = (struct pciide_channel*)chp;
3345 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3346
3347 idedma_ctl = 0;
3348 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3349 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3350 acer_fifo_udma), DEBUG_PROBE);
3351 /* setup DMA if needed */
3352 pciide_channel_dma_setup(cp);
3353
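/*
 * UDMA modes above 2 (ATA/33) need an 80-conductor cable, so both
 * drives are clamped to UDMA2 when the chip reports that the faster
 * modes can't be used.
 */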
3354 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3355 DRIVE_UDMA) { /* check for an 80-conductor cable */
3356 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3357 ACER_0x4A_80PIN(chp->channel)) {
3358 if (chp->ch_drive[0].UDMA_mode > 2)
3359 chp->ch_drive[0].UDMA_mode = 2;
3360 if (chp->ch_drive[1].UDMA_mode > 2)
3361 chp->ch_drive[1].UDMA_mode = 2;
3362 }
3363 }
3364
3365 for (drive = 0; drive < 2; drive++) {
3366 drvp = &chp->ch_drive[drive];
3367 /* If no drive, skip */
3368 if ((drvp->drive_flags & DRIVE) == 0)
3369 continue;
3370 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3371 "channel %d drive %d 0x%x\n", chp->channel, drive,
3372 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3373 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3374 /* clear FIFO/DMA mode */
3375 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3376 ACER_UDMA_EN(chp->channel, drive) |
3377 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3378
3379 /* add timing values, setup DMA if needed */
3380 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3381 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3382 acer_fifo_udma |=
3383 ACER_FTH_OPL(chp->channel, drive, 0x1);
3384 goto pio;
3385 }
3386
3387 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3388 if (drvp->drive_flags & DRIVE_UDMA) {
3389 /* use Ultra/DMA */
3390 drvp->drive_flags &= ~DRIVE_DMA;
3391 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3392 acer_fifo_udma |=
3393 ACER_UDMA_TIM(chp->channel, drive,
3394 acer_udma[drvp->UDMA_mode]);
3395 /* XXX disable if one drive < UDMA3 ? */
3396 if (drvp->UDMA_mode >= 3) {
3397 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3398 ACER_0x4B,
3399 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3400 ACER_0x4B) | ACER_0x4B_UDMA66);
3401 }
3402 } else {
3403 /*
3404 * use Multiword DMA.
3405 * Timings will be used for both PIO and DMA,
3406 * so adjust the DMA mode if needed.
3407 */
3408 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3409 drvp->PIO_mode = drvp->DMA_mode + 2;
3410 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3411 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3412 drvp->PIO_mode - 2 : 0;
3413 if (drvp->DMA_mode == 0)
3414 drvp->PIO_mode = 0;
3415 }
3416 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3417 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3418 ACER_IDETIM(chp->channel, drive),
3419 acer_pio[drvp->PIO_mode]);
3420 }
3421 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3422 acer_fifo_udma), DEBUG_PROBE);
3423 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3424 if (idedma_ctl != 0) {
3425 /* Add software bits in status register */
3426 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3427 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3428 idedma_ctl);
3429 }
3430 pciide_print_modes(cp);
3431 }
3432
3433 int
3434 acer_pci_intr(arg)
3435 void *arg;
3436 {
3437 struct pciide_softc *sc = arg;
3438 struct pciide_channel *cp;
3439 struct channel_softc *wdc_cp;
3440 int i, rv, crv;
3441 u_int32_t chids;
3442
3443 rv = 0;
3444 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3445 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3446 cp = &sc->pciide_channels[i];
3447 wdc_cp = &cp->wdc_channel;
3448 /* If a compat channel, skip. */
3449 if (cp->compat)
3450 continue;
3451 if (chids & ACER_CHIDS_INT(i)) {
3452 crv = wdcintr(wdc_cp);
3453 if (crv == 0)
3454 printf("%s:%d: bogus intr\n",
3455 sc->sc_wdcdev.sc_dev.dv_xname, i);
3456 else
3457 rv = 1;
3458 }
3459 }
3460 return rv;
3461 }
3462
3463 void
3464 hpt_chip_map(sc, pa)
3465 struct pciide_softc *sc;
3466 struct pci_attach_args *pa;
3467 {
3468 struct pciide_channel *cp;
3469 int i, compatchan, revision;
3470 pcireg_t interface;
3471 bus_size_t cmdsize, ctlsize;
3472
3473 if (pciide_chipen(sc, pa) == 0)
3474 return;
3475 revision = PCI_REVISION(pa->pa_class);
3476 printf(": Triones/Highpoint ");
3477 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3478 printf("HPT374 IDE Controller\n");
3479 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3480 printf("HPT372 IDE Controller\n");
3481 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3482 if (revision == HPT372_REV)
3483 printf("HPT372 IDE Controller\n");
3484 else if (revision == HPT370_REV)
3485 printf("HPT370 IDE Controller\n");
3486 else if (revision == HPT370A_REV)
3487 printf("HPT370A IDE Controller\n");
3488 else if (revision == HPT366_REV)
3489 printf("HPT366 IDE Controller\n");
3490 else
3491 printf("unknown HPT IDE controller rev %d\n", revision);
3492 } else
3493 printf("unknown HPT IDE controller 0x%x\n",
3494 sc->sc_pp->ide_product);
3495
3496 /*
3497 * When the chip is in native mode it identifies itself as a
3498 * 'misc mass storage' device. Fake the interface in this case.
3499 */
3500 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3501 interface = PCI_INTERFACE(pa->pa_class);
3502 } else {
3503 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3504 PCIIDE_INTERFACE_PCI(0);
3505 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3506 (revision == HPT370_REV || revision == HPT370A_REV ||
3507 revision == HPT372_REV)) ||
3508 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3509 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3510 interface |= PCIIDE_INTERFACE_PCI(1);
3511 }
3512
3513 printf("%s: bus-master DMA support present",
3514 sc->sc_wdcdev.sc_dev.dv_xname);
3515 pciide_mapreg_dma(sc, pa);
3516 printf("\n");
3517 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3518 WDC_CAPABILITY_MODE;
3519 if (sc->sc_dma_ok) {
3520 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3521 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3522 sc->sc_wdcdev.irqack = pciide_irqack;
3523 }
3524 sc->sc_wdcdev.PIO_cap = 4;
3525 sc->sc_wdcdev.DMA_cap = 2;
3526
3527 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3528 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3529 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3530 revision == HPT366_REV) {
3531 sc->sc_wdcdev.UDMA_cap = 4;
3532 /*
3533 * The 366 has 2 PCI IDE functions, one for primary and one
3534 * for secondary. So we need to call pciide_mapregs_compat()
3535 * with the real channel
3536 */
3537 if (pa->pa_function == 0) {
3538 compatchan = 0;
3539 } else if (pa->pa_function == 1) {
3540 compatchan = 1;
3541 } else {
3542 printf("%s: unexpected PCI function %d\n",
3543 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3544 return;
3545 }
3546 sc->sc_wdcdev.nchannels = 1;
3547 } else {
3548 sc->sc_wdcdev.nchannels = 2;
3549 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3550 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3551 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3552 revision == HPT372_REV))
3553 sc->sc_wdcdev.UDMA_cap = 6;
3554 else
3555 sc->sc_wdcdev.UDMA_cap = 5;
3556 }
3557 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3558 cp = &sc->pciide_channels[i];
3559 if (sc->sc_wdcdev.nchannels > 1) {
3560 compatchan = i;
3561 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3562 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3563 printf("%s: %s channel ignored (disabled)\n",
3564 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3565 continue;
3566 }
3567 }
3568 if (pciide_chansetup(sc, i, interface) == 0)
3569 continue;
3570 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3571 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3572 &ctlsize, hpt_pci_intr);
3573 } else {
3574 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3575 &cmdsize, &ctlsize);
3576 }
3577 if (cp->hw_ok == 0)
3578 return;
3579 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3580 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3581 wdcattach(&cp->wdc_channel);
3582 hpt_setup_channel(&cp->wdc_channel);
3583 }
3584 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3585 (revision == HPT370_REV || revision == HPT370A_REV ||
3586 revision == HPT372_REV)) ||
3587 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3588 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3589 /*
3590 * HPT370 and higher have a bit to disable interrupts;
3591 * make sure to clear it.
3592 */
3593 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3594 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3595 ~HPT_CSEL_IRQDIS);
3596 }
3597 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
3598 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3599 revision == HPT372_REV ) ||
3600 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3601 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3602 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3603 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3604 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3605 return;
3606 }
3607
3608 void
3609 hpt_setup_channel(chp)
3610 struct channel_softc *chp;
3611 {
3612 struct ata_drive_datas *drvp;
3613 int drive;
3614 int cable;
3615 u_int32_t before, after;
3616 u_int32_t idedma_ctl;
3617 struct pciide_channel *cp = (struct pciide_channel*)chp;
3618 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3619 int revision =
3620 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3621
3622 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
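     /*
      * HPT_CSEL reports the cable type; the per-drive loop below caps
      * UDMA at mode 2 when the CBLID bit for this channel is set
      * (presumably indicating a 40-wire cable).
      */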
3623
3624 /* setup DMA if needed */
3625 pciide_channel_dma_setup(cp);
3626
3627 idedma_ctl = 0;
3628
3629 /* Per drive settings */
3630 for (drive = 0; drive < 2; drive++) {
3631 drvp = &chp->ch_drive[drive];
3632 /* If no drive, skip */
3633 if ((drvp->drive_flags & DRIVE) == 0)
3634 continue;
3635 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3636 HPT_IDETIM(chp->channel, drive));
3637
3638 /* add timing values, setup DMA if needed */
3639 if (drvp->drive_flags & DRIVE_UDMA) {
3640 /* use Ultra/DMA */
3641 drvp->drive_flags &= ~DRIVE_DMA;
3642 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3643 drvp->UDMA_mode > 2)
3644 drvp->UDMA_mode = 2;
3645 switch (sc->sc_pp->ide_product) {
3646 case PCI_PRODUCT_TRIONES_HPT374:
3647 after = hpt374_udma[drvp->UDMA_mode];
3648 break;
3649 case PCI_PRODUCT_TRIONES_HPT372:
3650 after = hpt372_udma[drvp->UDMA_mode];
3651 break;
3652 case PCI_PRODUCT_TRIONES_HPT366:
3653 default:
3654 switch(revision) {
3655 case HPT372_REV:
3656 after = hpt372_udma[drvp->UDMA_mode];
3657 break;
3658 case HPT370_REV:
3659 case HPT370A_REV:
3660 after = hpt370_udma[drvp->UDMA_mode];
3661 break;
3662 case HPT366_REV:
3663 default:
3664 after = hpt366_udma[drvp->UDMA_mode];
3665 break;
3666 }
3667 }
3668 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3669 } else if (drvp->drive_flags & DRIVE_DMA) {
3670 /*
3671 * use Multiword DMA.
3672 * Timings will be used for both PIO and DMA, so adjust
3673 * DMA mode if needed
3674 */
3675 if (drvp->PIO_mode >= 3 &&
3676 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3677 drvp->DMA_mode = drvp->PIO_mode - 2;
3678 }
3679 switch (sc->sc_pp->ide_product) {
3680 case PCI_PRODUCT_TRIONES_HPT374:
3681 after = hpt374_dma[drvp->DMA_mode];
3682 break;
3683 case PCI_PRODUCT_TRIONES_HPT372:
3684 after = hpt372_dma[drvp->DMA_mode];
3685 break;
3686 case PCI_PRODUCT_TRIONES_HPT366:
3687 default:
3688 switch(revision) {
3689 case HPT372_REV:
3690 after = hpt372_dma[drvp->DMA_mode];
3691 break;
3692 case HPT370_REV:
3693 case HPT370A_REV:
3694 after = hpt370_dma[drvp->DMA_mode];
3695 break;
3696 case HPT366_REV:
3697 default:
3698 after = hpt366_dma[drvp->DMA_mode];
3699 break;
3700 }
3701 }
3702 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3703 } else {
3704 /* PIO only */
3705 switch (sc->sc_pp->ide_product) {
3706 case PCI_PRODUCT_TRIONES_HPT374:
3707 after = hpt374_pio[drvp->PIO_mode];
3708 break;
3709 case PCI_PRODUCT_TRIONES_HPT372:
3710 after = hpt372_pio[drvp->PIO_mode];
3711 break;
3712 case PCI_PRODUCT_TRIONES_HPT366:
3713 default:
3714 switch(revision) {
3715 case HPT372_REV:
3716 after = hpt372_pio[drvp->PIO_mode];
3717 break;
3718 case HPT370_REV:
3719 case HPT370A_REV:
3720 after = hpt370_pio[drvp->PIO_mode];
3721 break;
3722 case HPT366_REV:
3723 default:
3724 after = hpt366_pio[drvp->PIO_mode];
3725 break;
3726 }
3727 }
3728 }
3729 pci_conf_write(sc->sc_pc, sc->sc_tag,
3730 HPT_IDETIM(chp->channel, drive), after);
3731 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3732 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3733 after, before), DEBUG_PROBE);
3734 }
3735 if (idedma_ctl != 0) {
3736 /* Add software bits in status register */
3737 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3738 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3739 idedma_ctl);
3740 }
3741 pciide_print_modes(cp);
3742 }
3743
3744 int
3745 hpt_pci_intr(arg)
3746 void *arg;
3747 {
3748 struct pciide_softc *sc = arg;
3749 struct pciide_channel *cp;
3750 struct channel_softc *wdc_cp;
3751 int rv = 0;
3752 int dmastat, i, crv;
3753
3754 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
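     /*
      * Claim the interrupt for a channel only when its DMA engine
      * flags an interrupt (INTR set) and is no longer active.
      */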
3755 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3756 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3757 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3758 IDEDMA_CTL_INTR)
3759 continue;
3760 cp = &sc->pciide_channels[i];
3761 wdc_cp = &cp->wdc_channel;
3762 crv = wdcintr(wdc_cp);
3763 if (crv == 0) {
3764 printf("%s:%d: bogus intr\n",
3765 sc->sc_wdcdev.sc_dev.dv_xname, i);
3766 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3767 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3768 } else
3769 rv = 1;
3770 }
3771 return rv;
3772 }
3773
3774
3775 /* Macros to test product */
3776 #define PDC_IS_262(sc) \
3777 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3778 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3779 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3780 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3781 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3782 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3783 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3784 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3785 #define PDC_IS_265(sc) \
3786 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3787 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3788 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3789 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3790 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3791 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3792 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3793 #define PDC_IS_268(sc) \
3794 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3795 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3796 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3797 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3798 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3799 #define PDC_IS_276(sc) \
3800 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3801 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3802 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3803
3804 void
3805 pdc202xx_chip_map(sc, pa)
3806 struct pciide_softc *sc;
3807 struct pci_attach_args *pa;
3808 {
3809 struct pciide_channel *cp;
3810 int channel;
3811 pcireg_t interface, st, mode;
3812 bus_size_t cmdsize, ctlsize;
3813
3814 if (!PDC_IS_268(sc)) {
3815 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3816 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3817 st), DEBUG_PROBE);
3818 }
3819 if (pciide_chipen(sc, pa) == 0)
3820 return;
3821
3822 /* turn off RAID mode */
3823 if (!PDC_IS_268(sc))
3824 st &= ~PDC2xx_STATE_IDERAID;
3825
3826 /*
3827 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
3828 * mode. We have to fake the interface.
3829 */
3830 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3831 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3832 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3833
3834 printf("%s: bus-master DMA support present",
3835 sc->sc_wdcdev.sc_dev.dv_xname);
3836 pciide_mapreg_dma(sc, pa);
3837 printf("\n");
3838 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3839 WDC_CAPABILITY_MODE;
3840 if (sc->sc_dma_ok) {
3841 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3842 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3843 sc->sc_wdcdev.irqack = pciide_irqack;
3844 }
3845 sc->sc_wdcdev.PIO_cap = 4;
3846 sc->sc_wdcdev.DMA_cap = 2;
3847 if (PDC_IS_276(sc))
3848 sc->sc_wdcdev.UDMA_cap = 6;
3849 else if (PDC_IS_265(sc))
3850 sc->sc_wdcdev.UDMA_cap = 5;
3851 else if (PDC_IS_262(sc))
3852 sc->sc_wdcdev.UDMA_cap = 4;
3853 else
3854 sc->sc_wdcdev.UDMA_cap = 2;
3855 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3856 pdc20268_setup_channel : pdc202xx_setup_channel;
3857 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3858 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3859
3860 if (!PDC_IS_268(sc)) {
3861 /* setup failsafe defaults */
3862 mode = 0;
3863 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3864 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3865 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3866 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3867 for (channel = 0;
3868 channel < sc->sc_wdcdev.nchannels;
3869 channel++) {
3870 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3871 "drive 0 initial timings 0x%x, now 0x%x\n",
3872 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3873 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3874 DEBUG_PROBE);
3875 pci_conf_write(sc->sc_pc, sc->sc_tag,
3876 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3877 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3878 "drive 1 initial timings 0x%x, now 0x%x\n",
3879 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3880 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3881 pci_conf_write(sc->sc_pc, sc->sc_tag,
3882 PDC2xx_TIM(channel, 1), mode);
3883 }
3884
3885 mode = PDC2xx_SCR_DMA;
3886 if (PDC_IS_262(sc)) {
3887 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3888 } else {
3889 /* the BIOS set it up this way */
3890 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3891 }
3892 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3893 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3894 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3895 "now 0x%x\n",
3896 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3897 PDC2xx_SCR),
3898 mode), DEBUG_PROBE);
3899 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3900 PDC2xx_SCR, mode);
3901
3902 /* controller initial state register is OK even without BIOS */
3903 /* Set DMA mode to IDE DMA compatibility */
3904 mode =
3905 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3906 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3907 DEBUG_PROBE);
3908 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3909 mode | 0x1);
3910 mode =
3911 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3912 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3913 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3914 mode | 0x1);
3915 }
3916
3917 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3918 cp = &sc->pciide_channels[channel];
3919 if (pciide_chansetup(sc, channel, interface) == 0)
3920 continue;
3921 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3922 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3923 printf("%s: %s channel ignored (disabled)\n",
3924 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3925 continue;
3926 }
3927 if (PDC_IS_265(sc))
3928 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3929 pdc20265_pci_intr);
3930 else
3931 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3932 pdc202xx_pci_intr);
3933 if (cp->hw_ok == 0)
3934 continue;
3935 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3936 st &= ~(PDC_IS_262(sc) ?
3937 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3938 pciide_map_compat_intr(pa, cp, channel, interface);
3939 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3940 }
3941 if (!PDC_IS_268(sc)) {
3942 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3943 "0x%x\n", st), DEBUG_PROBE);
3944 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3945 }
3946 return;
3947 }
3948
3949 void
3950 pdc202xx_setup_channel(chp)
3951 struct channel_softc *chp;
3952 {
3953 struct ata_drive_datas *drvp;
3954 int drive;
3955 pcireg_t mode, st;
3956 u_int32_t idedma_ctl, scr, atapi;
3957 struct pciide_channel *cp = (struct pciide_channel*)chp;
3958 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3959 int channel = chp->channel;
3960
3961 /* setup DMA if needed */
3962 pciide_channel_dma_setup(cp);
3963
3964 idedma_ctl = 0;
3965 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3966 sc->sc_wdcdev.sc_dev.dv_xname,
3967 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3968 DEBUG_PROBE);
3969
3970 /* Per channel settings */
3971 if (PDC_IS_262(sc)) {
3972 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3973 PDC262_U66);
3974 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3975 /* Trim UDMA mode */
3976 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3977 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3978 chp->ch_drive[0].UDMA_mode <= 2) ||
3979 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3980 chp->ch_drive[1].UDMA_mode <= 2)) {
3981 if (chp->ch_drive[0].UDMA_mode > 2)
3982 chp->ch_drive[0].UDMA_mode = 2;
3983 if (chp->ch_drive[1].UDMA_mode > 2)
3984 chp->ch_drive[1].UDMA_mode = 2;
3985 }
3986 /* Set U66 if needed */
3987 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3988 chp->ch_drive[0].UDMA_mode > 2) ||
3989 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3990 chp->ch_drive[1].UDMA_mode > 2))
3991 scr |= PDC262_U66_EN(channel);
3992 else
3993 scr &= ~PDC262_U66_EN(channel);
3994 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3995 PDC262_U66, scr);
3996 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3997 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3998 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3999 PDC262_ATAPI(channel))), DEBUG_PROBE);
4000 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4001 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4002 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4003 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4004 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4005 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4006 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4007 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4008 atapi = 0;
4009 else
4010 atapi = PDC262_ATAPI_UDMA;
4011 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4012 PDC262_ATAPI(channel), atapi);
4013 }
4014 }
4015 for (drive = 0; drive < 2; drive++) {
4016 drvp = &chp->ch_drive[drive];
4017 /* If no drive, skip */
4018 if ((drvp->drive_flags & DRIVE) == 0)
4019 continue;
4020 mode = 0;
4021 if (drvp->drive_flags & DRIVE_UDMA) {
4022 /* use Ultra/DMA */
4023 drvp->drive_flags &= ~DRIVE_DMA;
4024 mode = PDC2xx_TIM_SET_MB(mode,
4025 pdc2xx_udma_mb[drvp->UDMA_mode]);
4026 mode = PDC2xx_TIM_SET_MC(mode,
4027 pdc2xx_udma_mc[drvp->UDMA_mode]);
4028 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4029 } else if (drvp->drive_flags & DRIVE_DMA) {
4030 mode = PDC2xx_TIM_SET_MB(mode,
4031 pdc2xx_dma_mb[drvp->DMA_mode]);
4032 mode = PDC2xx_TIM_SET_MC(mode,
4033 pdc2xx_dma_mc[drvp->DMA_mode]);
4034 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4035 } else {
4036 mode = PDC2xx_TIM_SET_MB(mode,
4037 pdc2xx_dma_mb[0]);
4038 mode = PDC2xx_TIM_SET_MC(mode,
4039 pdc2xx_dma_mc[0]);
4040 }
4041 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4042 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4043 if (drvp->drive_flags & DRIVE_ATA)
4044 mode |= PDC2xx_TIM_PRE;
4045 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4046 if (drvp->PIO_mode >= 3) {
4047 mode |= PDC2xx_TIM_IORDY;
4048 if (drive == 0)
4049 mode |= PDC2xx_TIM_IORDYp;
4050 }
4051 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4052 "timings 0x%x\n",
4053 sc->sc_wdcdev.sc_dev.dv_xname,
4054 chp->channel, drive, mode), DEBUG_PROBE);
4055 pci_conf_write(sc->sc_pc, sc->sc_tag,
4056 PDC2xx_TIM(chp->channel, drive), mode);
4057 }
4058 if (idedma_ctl != 0) {
4059 /* Add software bits in status register */
4060 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4061 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4062 idedma_ctl);
4063 }
4064 pciide_print_modes(cp);
4065 }
4066
4067 void
4068 pdc20268_setup_channel(chp)
4069 struct channel_softc *chp;
4070 {
4071 struct ata_drive_datas *drvp;
4072 int drive;
4073 u_int32_t idedma_ctl;
4074 struct pciide_channel *cp = (struct pciide_channel*)chp;
4075 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4076 int u100;
4077
4078 /* setup DMA if needed */
4079 pciide_channel_dma_setup(cp);
4080
4081 idedma_ctl = 0;
4082
4083 /* I don't know what this is for; FreeBSD does it ... */
4084 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4085 IDEDMA_CMD + 0x1, 0x0b);
4086
4087 /*
4088 * I don't know what this is for; FreeBSD checks it ... it is not
4089 * cable type detection.
4090 */
4091 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4092 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
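     /*
      * Whatever it is, u100 is treated below as an Ultra/100-capable
      * flag: when it is 0, UDMA modes above 2 are clamped to mode 2.
      */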
4093
4094 for (drive = 0; drive < 2; drive++) {
4095 drvp = &chp->ch_drive[drive];
4096 /* If no drive, skip */
4097 if ((drvp->drive_flags & DRIVE) == 0)
4098 continue;
4099 if (drvp->drive_flags & DRIVE_UDMA) {
4100 /* use Ultra/DMA */
4101 drvp->drive_flags &= ~DRIVE_DMA;
4102 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4103 if (drvp->UDMA_mode > 2 && u100 == 0)
4104 drvp->UDMA_mode = 2;
4105 } else if (drvp->drive_flags & DRIVE_DMA) {
4106 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4107 }
4108 }
4109 /* no mode setup needed; the controller snoops the SET_FEATURES command */
4110 if (idedma_ctl != 0) {
4111 /* Add software bits in status register */
4112 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4113 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4114 idedma_ctl);
4115 }
4116 pciide_print_modes(cp);
4117 }
4118
4119 int
4120 pdc202xx_pci_intr(arg)
4121 void *arg;
4122 {
4123 struct pciide_softc *sc = arg;
4124 struct pciide_channel *cp;
4125 struct channel_softc *wdc_cp;
4126 int i, rv, crv;
4127 u_int32_t scr;
4128
4129 rv = 0;
4130 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4131 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4132 cp = &sc->pciide_channels[i];
4133 wdc_cp = &cp->wdc_channel;
4134 /* If a compat channel, skip. */
4135 if (cp->compat)
4136 continue;
4137 if (scr & PDC2xx_SCR_INT(i)) {
4138 crv = wdcintr(wdc_cp);
4139 if (crv == 0)
4140 printf("%s:%d: bogus intr (reg 0x%x)\n",
4141 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4142 else
4143 rv = 1;
4144 }
4145 }
4146 return rv;
4147 }
4148
4149 int
4150 pdc20265_pci_intr(arg)
4151 void *arg;
4152 {
4153 struct pciide_softc *sc = arg;
4154 struct pciide_channel *cp;
4155 struct channel_softc *wdc_cp;
4156 int i, rv, crv;
4157 u_int32_t dmastat;
4158
4159 rv = 0;
4160 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4161 cp = &sc->pciide_channels[i];
4162 wdc_cp = &cp->wdc_channel;
4163 /* If a compat channel, skip. */
4164 if (cp->compat)
4165 continue;
4166 /*
4167 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4168 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops.
4169 * So use that instead (requires 2 register reads instead of 1,
4170 * but we can't do it any other way).
4171 */
4172 dmastat = bus_space_read_1(sc->sc_dma_iot,
4173 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4174 if((dmastat & IDEDMA_CTL_INTR) == 0)
4175 continue;
4176 crv = wdcintr(wdc_cp);
4177 if (crv == 0)
4178 printf("%s:%d: bogus intr\n",
4179 sc->sc_wdcdev.sc_dev.dv_xname, i);
4180 else
4181 rv = 1;
4182 }
4183 return rv;
4184 }
4185
4186 void
4187 opti_chip_map(sc, pa)
4188 struct pciide_softc *sc;
4189 struct pci_attach_args *pa;
4190 {
4191 struct pciide_channel *cp;
4192 bus_size_t cmdsize, ctlsize;
4193 pcireg_t interface;
4194 u_int8_t init_ctrl;
4195 int channel;
4196
4197 if (pciide_chipen(sc, pa) == 0)
4198 return;
4199 printf("%s: bus-master DMA support present",
4200 sc->sc_wdcdev.sc_dev.dv_xname);
4201
4202 /*
4203 * XXXSCW:
4204 * There seem to be a couple of buggy revisions/implementations
4205 * of the OPTi pciide chipset. This kludge seems to fix one of
4206 * the reported problems (PR/11644) but still fails for the
4207 * other (PR/13151), although the latter may be due to other
4208 * issues too...
4209 */
4210 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4211 printf(" but disabled due to chip rev. <= 0x12");
4212 sc->sc_dma_ok = 0;
4213 } else
4214 pciide_mapreg_dma(sc, pa);
4215
4216 printf("\n");
4217
4218 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4219 WDC_CAPABILITY_MODE;
4220 sc->sc_wdcdev.PIO_cap = 4;
4221 if (sc->sc_dma_ok) {
4222 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4223 sc->sc_wdcdev.irqack = pciide_irqack;
4224 sc->sc_wdcdev.DMA_cap = 2;
4225 }
4226 sc->sc_wdcdev.set_modes = opti_setup_channel;
4227
4228 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4229 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4230
4231 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4232 OPTI_REG_INIT_CONTROL);
4233
4234 interface = PCI_INTERFACE(pa->pa_class);
4235
4236 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4237 cp = &sc->pciide_channels[channel];
4238 if (pciide_chansetup(sc, channel, interface) == 0)
4239 continue;
4240 if (channel == 1 &&
4241 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4242 printf("%s: %s channel ignored (disabled)\n",
4243 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4244 continue;
4245 }
4246 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4247 pciide_pci_intr);
4248 if (cp->hw_ok == 0)
4249 continue;
4250 pciide_map_compat_intr(pa, cp, channel, interface);
4251 if (cp->hw_ok == 0)
4252 continue;
4253 opti_setup_channel(&cp->wdc_channel);
4254 }
4255 }
4256
4257 void
4258 opti_setup_channel(chp)
4259 struct channel_softc *chp;
4260 {
4261 struct ata_drive_datas *drvp;
4262 struct pciide_channel *cp = (struct pciide_channel*)chp;
4263 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4264 int drive, spd;
4265 int mode[2];
4266 u_int8_t rv, mr;
4267
4268 /*
4269 * The `Delay' and `Address Setup Time' fields of the
4270 * Miscellaneous Register are always zero initially.
4271 */
4272 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4273 mr &= ~(OPTI_MISC_DELAY_MASK |
4274 OPTI_MISC_ADDR_SETUP_MASK |
4275 OPTI_MISC_INDEX_MASK);
4276
4277 /* Prime the control register before setting timing values */
4278 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4279
4280 /* Determine the clockrate of the PCIbus the chip is attached to */
4281 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4282 spd &= OPTI_STRAP_PCI_SPEED_MASK;
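     /* spd selects the row of the opti_tim_* tables for this PCI clock. */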
4283
4284 /* setup DMA if needed */
4285 pciide_channel_dma_setup(cp);
4286
4287 for (drive = 0; drive < 2; drive++) {
4288 drvp = &chp->ch_drive[drive];
4289 /* If no drive, skip */
4290 if ((drvp->drive_flags & DRIVE) == 0) {
4291 mode[drive] = -1;
4292 continue;
4293 }
4294
4295 if ((drvp->drive_flags & DRIVE_DMA)) {
4296 /*
4297 * Timings will be used for both PIO and DMA,
4298 * so adjust DMA mode if needed
4299 */
4300 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4301 drvp->PIO_mode = drvp->DMA_mode + 2;
4302 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4303 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4304 drvp->PIO_mode - 2 : 0;
4305 if (drvp->DMA_mode == 0)
4306 drvp->PIO_mode = 0;
4307
4308 mode[drive] = drvp->DMA_mode + 5;
4309 } else
4310 mode[drive] = drvp->PIO_mode;
4311
4312 if (drive && mode[0] >= 0 &&
4313 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4314 /*
4315 * Can't have two drives using different values
4316 * for `Address Setup Time'.
4317 * Slow down the faster drive to compensate.
4318 */
4319 int d = (opti_tim_as[spd][mode[0]] >
4320 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4321
4322 mode[d] = mode[1-d];
4323 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4324 chp->ch_drive[d].DMA_mode = 0;
4325 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4326 }
4327 }
4328
4329 for (drive = 0; drive < 2; drive++) {
4330 int m;
4331 if ((m = mode[drive]) < 0)
4332 continue;
4333
4334 /* Set the Address Setup Time and select appropriate index */
4335 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4336 rv |= OPTI_MISC_INDEX(drive);
4337 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4338
4339 /* Set the pulse width and recovery timing parameters */
4340 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4341 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4342 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4343 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4344
4345 /* Set the Enhanced Mode register appropriately */
4346 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4347 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4348 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4349 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4350 }
4351
4352 /* Finally, enable the timings */
4353 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4354
4355 pciide_print_modes(cp);
4356 }
4357
4358 #define ACARD_IS_850(sc) \
4359 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4360
4361 void
4362 acard_chip_map(sc, pa)
4363 struct pciide_softc *sc;
4364 struct pci_attach_args *pa;
4365 {
4366 struct pciide_channel *cp;
4367 int i;
4368 pcireg_t interface;
4369 bus_size_t cmdsize, ctlsize;
4370
4371 if (pciide_chipen(sc, pa) == 0)
4372 return;
4373
4374 /*
4375 * When the chip is in native mode it identifies itself as a
4376 * 'misc mass storage' device. Fake the interface in this case.
4377 */
4378 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4379 interface = PCI_INTERFACE(pa->pa_class);
4380 } else {
4381 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4382 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4383 }
4384
4385 printf("%s: bus-master DMA support present",
4386 sc->sc_wdcdev.sc_dev.dv_xname);
4387 pciide_mapreg_dma(sc, pa);
4388 printf("\n");
4389 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4390 WDC_CAPABILITY_MODE;
4391
4392 if (sc->sc_dma_ok) {
4393 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4394 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4395 sc->sc_wdcdev.irqack = pciide_irqack;
4396 }
4397 sc->sc_wdcdev.PIO_cap = 4;
4398 sc->sc_wdcdev.DMA_cap = 2;
4399 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4400
4401 sc->sc_wdcdev.set_modes = acard_setup_channel;
4402 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4403 sc->sc_wdcdev.nchannels = 2;
4404
4405 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4406 cp = &sc->pciide_channels[i];
4407 if (pciide_chansetup(sc, i, interface) == 0)
4408 continue;
4409 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4410 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4411 &ctlsize, pciide_pci_intr);
4412 } else {
4413 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4414 &cmdsize, &ctlsize);
4415 }
4416 if (cp->hw_ok == 0)
4417 return;
4418 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4419 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4420 wdcattach(&cp->wdc_channel);
4421 acard_setup_channel(&cp->wdc_channel);
4422 }
4423 if (!ACARD_IS_850(sc)) {
4424 u_int32_t reg;
4425 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4426 reg &= ~ATP860_CTRL_INT;
4427 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4428 }
4429 }
4430
4431 void
4432 acard_setup_channel(chp)
4433 struct channel_softc *chp;
4434 {
4435 struct ata_drive_datas *drvp;
4436 struct pciide_channel *cp = (struct pciide_channel*)chp;
4437 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4438 int channel = chp->channel;
4439 int drive;
4440 u_int32_t idetime, udma_mode;
4441 u_int32_t idedma_ctl;
4442
4443 /* setup DMA if needed */
4444 pciide_channel_dma_setup(cp);
4445
4446 if (ACARD_IS_850(sc)) {
4447 idetime = 0;
4448 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4449 udma_mode &= ~ATP850_UDMA_MASK(channel);
4450 } else {
4451 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4452 idetime &= ~ATP860_SETTIME_MASK(channel);
4453 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4454 udma_mode &= ~ATP860_UDMA_MASK(channel);
4455
4456 /* check for an 80-pin cable */
4457 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4458 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4459 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4460 & ATP860_CTRL_80P(chp->channel)) {
4461 if (chp->ch_drive[0].UDMA_mode > 2)
4462 chp->ch_drive[0].UDMA_mode = 2;
4463 if (chp->ch_drive[1].UDMA_mode > 2)
4464 chp->ch_drive[1].UDMA_mode = 2;
4465 }
4466 }
4467 }
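     /*
      * idetime/udma_mode are rebuilt per drive below, using the ATP850_*
      * macros on the ATP850 and the ATP860_* macros on later chips, and
      * written back to the chip once both drives have been handled.
      */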
4468
4469 idedma_ctl = 0;
4470
4471 /* Per drive settings */
4472 for (drive = 0; drive < 2; drive++) {
4473 drvp = &chp->ch_drive[drive];
4474 /* If no drive, skip */
4475 if ((drvp->drive_flags & DRIVE) == 0)
4476 continue;
4477 /* add timing values, setup DMA if needed */
4478 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4479 (drvp->drive_flags & DRIVE_UDMA)) {
4480 /* use Ultra/DMA */
4481 if (ACARD_IS_850(sc)) {
4482 idetime |= ATP850_SETTIME(drive,
4483 acard_act_udma[drvp->UDMA_mode],
4484 acard_rec_udma[drvp->UDMA_mode]);
4485 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4486 acard_udma_conf[drvp->UDMA_mode]);
4487 } else {
4488 idetime |= ATP860_SETTIME(channel, drive,
4489 acard_act_udma[drvp->UDMA_mode],
4490 acard_rec_udma[drvp->UDMA_mode]);
4491 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4492 acard_udma_conf[drvp->UDMA_mode]);
4493 }
4494 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4495 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4496 (drvp->drive_flags & DRIVE_DMA)) {
4497 /* use Multiword DMA */
4498 drvp->drive_flags &= ~DRIVE_UDMA;
4499 if (ACARD_IS_850(sc)) {
4500 idetime |= ATP850_SETTIME(drive,
4501 acard_act_dma[drvp->DMA_mode],
4502 acard_rec_dma[drvp->DMA_mode]);
4503 } else {
4504 idetime |= ATP860_SETTIME(channel, drive,
4505 acard_act_dma[drvp->DMA_mode],
4506 acard_rec_dma[drvp->DMA_mode]);
4507 }
4508 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4509 } else {
4510 /* PIO only */
4511 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4512 if (ACARD_IS_850(sc)) {
4513 idetime |= ATP850_SETTIME(drive,
4514 acard_act_pio[drvp->PIO_mode],
4515 acard_rec_pio[drvp->PIO_mode]);
4516 } else {
4517 idetime |= ATP860_SETTIME(channel, drive,
4518 acard_act_pio[drvp->PIO_mode],
4519 acard_rec_pio[drvp->PIO_mode]);
4520 }
4521 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4522 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4523 | ATP8x0_CTRL_EN(channel));
4524 }
4525 }
4526
4527 if (idedma_ctl != 0) {
4528 /* Add software bits in status register */
4529 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4530 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4531 }
4532 pciide_print_modes(cp);
4533
4534 if (ACARD_IS_850(sc)) {
4535 pci_conf_write(sc->sc_pc, sc->sc_tag,
4536 ATP850_IDETIME(channel), idetime);
4537 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4538 } else {
4539 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4540 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4541 }
4542 }
4543
4544 int
4545 acard_pci_intr(arg)
4546 void *arg;
4547 {
4548 struct pciide_softc *sc = arg;
4549 struct pciide_channel *cp;
4550 struct channel_softc *wdc_cp;
4551 int rv = 0;
4552 int dmastat, i, crv;
4553
4554 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4555 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4556 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4557 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4558 continue;
4559 cp = &sc->pciide_channels[i];
4560 wdc_cp = &cp->wdc_channel;
4561 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
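     /*
      * No command is waiting for an interrupt on this channel; run
      * wdcintr() anyway and ack the DMA status to clear what is
      * presumably a spurious or shared interrupt.
      */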
4562 (void)wdcintr(wdc_cp);
4563 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4564 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4565 continue;
4566 }
4567 crv = wdcintr(wdc_cp);
4568 if (crv == 0)
4569 printf("%s:%d: bogus intr\n",
4570 sc->sc_wdcdev.sc_dev.dv_xname, i);
4571 else if (crv == 1)
4572 rv = 1;
4573 else if (rv == 0)
4574 rv = crv;
4575 }
4576 return rv;
4577 }
4578
4579 static int
4580 sl82c105_bugchk(struct pci_attach_args *pa)
4581 {
4582
4583 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4584 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4585 return (0);
4586
4587 if (PCI_REVISION(pa->pa_class) <= 0x05)
4588 return (1);
4589
4590 return (0);
4591 }
4592
4593 void
4594 sl82c105_chip_map(sc, pa)
4595 struct pciide_softc *sc;
4596 struct pci_attach_args *pa;
4597 {
4598 struct pciide_channel *cp;
4599 bus_size_t cmdsize, ctlsize;
4600 pcireg_t interface, idecr;
4601 int channel;
4602
4603 if (pciide_chipen(sc, pa) == 0)
4604 return;
4605
4606 printf("%s: bus-master DMA support present",
4607 sc->sc_wdcdev.sc_dev.dv_xname);
4608
4609 /*
4610 * Check to see if we're part of the Winbond 83c553 Southbridge.
4611 * If so, we need to disable DMA on rev. <= 5 of that chip.
4612 */
4613 if (pci_find_device(pa, sl82c105_bugchk)) {
4614 printf(" but disabled due to 83c553 rev. <= 0x05");
4615 sc->sc_dma_ok = 0;
4616 } else
4617 pciide_mapreg_dma(sc, pa);
4618 printf("\n");
4619
4620 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4621 WDC_CAPABILITY_MODE;
4622 sc->sc_wdcdev.PIO_cap = 4;
4623 if (sc->sc_dma_ok) {
4624 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4625 sc->sc_wdcdev.irqack = pciide_irqack;
4626 sc->sc_wdcdev.DMA_cap = 2;
4627 }
4628 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4629
4630 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4631 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4632
4633 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4634
4635 interface = PCI_INTERFACE(pa->pa_class);
4636
4637 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4638 cp = &sc->pciide_channels[channel];
4639 if (pciide_chansetup(sc, channel, interface) == 0)
4640 continue;
4641 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4642 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4643 printf("%s: %s channel ignored (disabled)\n",
4644 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4645 continue;
4646 }
4647 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4648 pciide_pci_intr);
4649 if (cp->hw_ok == 0)
4650 continue;
4651 pciide_map_compat_intr(pa, cp, channel, interface);
4652 if (cp->hw_ok == 0)
4653 continue;
4654 sl82c105_setup_channel(&cp->wdc_channel);
4655 }
4656 }
4657
4658 void
4659 sl82c105_setup_channel(chp)
4660 struct channel_softc *chp;
4661 {
4662 struct ata_drive_datas *drvp;
4663 struct pciide_channel *cp = (struct pciide_channel*)chp;
4664 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4665 int pxdx_reg, drive;
4666 pcireg_t pxdx;
4667
4668 /* Set up DMA if needed. */
4669 pciide_channel_dma_setup(cp);
4670
4671 for (drive = 0; drive < 2; drive++) {
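     /*
      * Each drive has its own 32-bit timing-control register, 4 bytes
      * apart, starting at P0D0CR (channel 0) or P1D0CR (channel 1).
      */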
4672 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4673 : SYMPH_P1D0CR) + (drive * 4);
4674
4675 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4676
4677 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4678 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4679
4680 drvp = &chp->ch_drive[drive];
4681 /* If no drive, skip. */
4682 if ((drvp->drive_flags & DRIVE) == 0) {
4683 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4684 continue;
4685 }
4686
4687 if (drvp->drive_flags & DRIVE_DMA) {
4688 /*
4689 * Timings will be used for both PIO and DMA,
4690 * so adjust DMA mode if needed.
4691 */
4692 if (drvp->PIO_mode >= 3) {
4693 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4694 drvp->DMA_mode = drvp->PIO_mode - 2;
4695 if (drvp->DMA_mode < 1) {
4696 /*
4697 * Can't mix both PIO and DMA.
4698 * Disable DMA.
4699 */
4700 drvp->drive_flags &= ~DRIVE_DMA;
4701 }
4702 } else {
4703 /*
4704 * Can't mix both PIO and DMA. Disable
4705 * DMA.
4706 */
4707 drvp->drive_flags &= ~DRIVE_DMA;
4708 }
4709 }
4710
4711 if (drvp->drive_flags & DRIVE_DMA) {
4712 /* Use multi-word DMA. */
4713 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4714 PxDx_CMD_ON_SHIFT;
4715 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4716 } else {
4717 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4718 PxDx_CMD_ON_SHIFT;
4719 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4720 }
4721
4722 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4723
4724 /* ...and set the mode for this drive. */
4725 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4726 }
4727
4728 pciide_print_modes(cp);
4729 }
4730
4731 void
4732 serverworks_chip_map(sc, pa)
4733 struct pciide_softc *sc;
4734 struct pci_attach_args *pa;
4735 {
4736 struct pciide_channel *cp;
4737 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4738 pcitag_t pcib_tag;
4739 int channel;
4740 bus_size_t cmdsize, ctlsize;
4741
4742 if (pciide_chipen(sc, pa) == 0)
4743 return;
4744
4745 printf("%s: bus-master DMA support present",
4746 sc->sc_wdcdev.sc_dev.dv_xname);
4747 pciide_mapreg_dma(sc, pa);
4748 printf("\n");
4749 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4750 WDC_CAPABILITY_MODE;
4751
4752 if (sc->sc_dma_ok) {
4753 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4754 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4755 sc->sc_wdcdev.irqack = pciide_irqack;
4756 }
4757 sc->sc_wdcdev.PIO_cap = 4;
4758 sc->sc_wdcdev.DMA_cap = 2;
4759 switch (sc->sc_pp->ide_product) {
4760 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4761 sc->sc_wdcdev.UDMA_cap = 2;
4762 break;
4763 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4764 if (PCI_REVISION(pa->pa_class) < 0x92)
4765 sc->sc_wdcdev.UDMA_cap = 4;
4766 else
4767 sc->sc_wdcdev.UDMA_cap = 5;
4768 break;
4769 }
4770
4771 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4772 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4773 sc->sc_wdcdev.nchannels = 2;
4774
4775 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4776 cp = &sc->pciide_channels[channel];
4777 if (pciide_chansetup(sc, channel, interface) == 0)
4778 continue;
4779 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4780 serverworks_pci_intr);
4781 if (cp->hw_ok == 0)
4782 return;
4783 pciide_map_compat_intr(pa, cp, channel, interface);
4784 if (cp->hw_ok == 0)
4785 return;
4786 serverworks_setup_channel(&cp->wdc_channel);
4787 }
4788
4789 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4790 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4791 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4792 }
4793
4794 void
4795 serverworks_setup_channel(chp)
4796 struct channel_softc *chp;
4797 {
4798 struct ata_drive_datas *drvp;
4799 struct pciide_channel *cp = (struct pciide_channel*)chp;
4800 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4801 int channel = chp->channel;
4802 int drive, unit;
4803 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4804 u_int32_t idedma_ctl;
4805 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4806 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4807
4808 /* setup DMA if needed */
4809 pciide_channel_dma_setup(cp);
4810
4811 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4812 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4813 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4814 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4815
4816 pio_time &= ~(0xffff << (16 * channel));
4817 dma_time &= ~(0xffff << (16 * channel));
4818 pio_mode &= ~(0xff << (8 * channel + 16));
4819 udma_mode &= ~(0xff << (8 * channel + 16));
4820 udma_mode &= ~(3 << (2 * channel));
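     /*
      * The timing registers at 0x40/0x44 pack one byte per drive; within
      * each channel's 16-bit half, drive 0 occupies the upper byte, hence
      * the (unit^1) shifts below.
      */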
4821
4822 idedma_ctl = 0;
4823
4824 /* Per drive settings */
4825 for (drive = 0; drive < 2; drive++) {
4826 drvp = &chp->ch_drive[drive];
4827 /* If no drive, skip */
4828 if ((drvp->drive_flags & DRIVE) == 0)
4829 continue;
4830 unit = drive + 2 * channel;
4831 /* add timing values, setup DMA if needed */
4832 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4833 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4834 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4835 (drvp->drive_flags & DRIVE_UDMA)) {
4836 /* use Ultra/DMA, check for 80-pin cable */
4837 if (drvp->UDMA_mode > 2 &&
4838 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4839 drvp->UDMA_mode = 2;
4840 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4841 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4842 udma_mode |= 1 << unit;
4843 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4844 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4845 (drvp->drive_flags & DRIVE_DMA)) {
4846 /* use Multiword DMA */
4847 drvp->drive_flags &= ~DRIVE_UDMA;
4848 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4849 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4850 } else {
4851 /* PIO only */
4852 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4853 }
4854 }
4855
4856 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4857 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4858 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4859 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4860 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4861
4862 if (idedma_ctl != 0) {
4863 /* Add software bits in status register */
4864 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4865 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4866 }
4867 pciide_print_modes(cp);
4868 }
4869
4870 int
4871 serverworks_pci_intr(arg)
4872 void *arg;
4873 {
4874 struct pciide_softc *sc = arg;
4875 struct pciide_channel *cp;
4876 struct channel_softc *wdc_cp;
4877 int rv = 0;
4878 int dmastat, i, crv;
4879
4880 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4881 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4882 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4883 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4884 IDEDMA_CTL_INTR)
4885 continue;
4886 cp = &sc->pciide_channels[i];
4887 wdc_cp = &cp->wdc_channel;
4888 crv = wdcintr(wdc_cp);
4889 if (crv == 0) {
4890 printf("%s:%d: bogus intr\n",
4891 sc->sc_wdcdev.sc_dev.dv_xname, i);
4892 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4893 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4894 } else
4895 rv = 1;
4896 }
4897 return rv;
4898 }
4899