1 /* $NetBSD: pciide.c,v 1.153.2.14 2003/08/16 15:58:24 tron Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.153.2.14 2003/08/16 15:58:24 tron Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
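/*
 * PCI configuration space can only be accessed as aligned 32-bit words,
 * so these helpers emulate byte-wide accesses: a shift and mask on read,
 * and a read-modify-write of the containing word on write.
 */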
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 void sis96x_setup_channel __P((struct channel_softc*));
187 static int sis_hostbr_match __P(( struct pci_attach_args *));
188 static int sis_south_match __P(( struct pci_attach_args *));
189
190 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
191 void acer_setup_channel __P((struct channel_softc*));
192 int acer_pci_intr __P((void *));
193
194 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
195 void pdc202xx_setup_channel __P((struct channel_softc*));
196 void pdc20268_setup_channel __P((struct channel_softc*));
197 int pdc202xx_pci_intr __P((void *));
198 int pdc20265_pci_intr __P((void *));
199 static void pdc20262_dma_start __P((void*, int, int));
200 static int pdc20262_dma_finish __P((void*, int, int, int));
201
202 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void opti_setup_channel __P((struct channel_softc*));
204
205 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void hpt_setup_channel __P((struct channel_softc*));
207 int hpt_pci_intr __P((void *));
208
209 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void acard_setup_channel __P((struct channel_softc*));
211 int acard_pci_intr __P((void *));
212
213 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void serverworks_setup_channel __P((struct channel_softc*));
215 int serverworks_pci_intr __P((void *));
216
217 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void sl82c105_setup_channel __P((struct channel_softc*));
219
220 void pciide_channel_dma_setup __P((struct pciide_channel *));
221 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
222 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
223 void pciide_dma_start __P((void*, int, int));
224 int pciide_dma_finish __P((void*, int, int, int));
225 void pciide_irqack __P((struct channel_softc *));
226 void pciide_print_modes __P((struct pciide_channel *));
227
228 struct pciide_product_desc {
229 u_int32_t ide_product;
230 int ide_flags;
231 const char *ide_name;
232 /* map and setup chip, probe drives */
233 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
234 };
235
236 /* Flags for ide_flags */
237 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
238 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
239
240 /* Default product description for devices not specifically known to this driver */
241 const struct pciide_product_desc default_product_desc = {
242 0,
243 0,
244 "Generic PCI IDE controller",
245 default_chip_map,
246 };
247
248 const struct pciide_product_desc pciide_intel_products[] = {
249 { PCI_PRODUCT_INTEL_82092AA,
250 0,
251 "Intel 82092AA IDE controller",
252 default_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82371FB_IDE,
255 0,
256 "Intel 82371FB IDE controller (PIIX)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82371SB_IDE,
260 0,
261 "Intel 82371SB IDE Interface (PIIX3)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82371AB_IDE,
265 0,
266 "Intel 82371AB IDE controller (PIIX4)",
267 piix_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82440MX_IDE,
270 0,
271 "Intel 82440MX IDE controller",
272 piix_chip_map
273 },
274 { PCI_PRODUCT_INTEL_82801AA_IDE,
275 0,
276 "Intel 82801AA IDE Controller (ICH)",
277 piix_chip_map,
278 },
279 { PCI_PRODUCT_INTEL_82801AB_IDE,
280 0,
281 "Intel 82801AB IDE Controller (ICH0)",
282 piix_chip_map,
283 },
284 { PCI_PRODUCT_INTEL_82801BA_IDE,
285 0,
286 "Intel 82801BA IDE Controller (ICH2)",
287 piix_chip_map,
288 },
289 { PCI_PRODUCT_INTEL_82801BAM_IDE,
290 0,
291 "Intel 82801BAM IDE Controller (ICH2)",
292 piix_chip_map,
293 },
294 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
295 0,
296 "Intel 82801CA IDE Controller",
297 piix_chip_map,
298 },
299 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
300 0,
301 "Intel 82801CA IDE Controller",
302 piix_chip_map,
303 },
304 { PCI_PRODUCT_INTEL_82801DB_IDE,
305 0,
306 "Intel 82801DB IDE Controller (ICH4)",
307 piix_chip_map,
308 },
309 { PCI_PRODUCT_INTEL_82801EB_IDE,
310 0,
311 "Intel 82801EB IDE Controller (ICH5)",
312 piix_chip_map,
313 },
314 { 0,
315 0,
316 NULL,
317 NULL
318 }
319 };
320
321 const struct pciide_product_desc pciide_amd_products[] = {
322 { PCI_PRODUCT_AMD_PBC756_IDE,
323 0,
324 "Advanced Micro Devices AMD756 IDE Controller",
325 amd7x6_chip_map
326 },
327 { PCI_PRODUCT_AMD_PBC766_IDE,
328 0,
329 "Advanced Micro Devices AMD766 IDE Controller",
330 amd7x6_chip_map
331 },
332 { PCI_PRODUCT_AMD_PBC768_IDE,
333 0,
334 "Advanced Micro Devices AMD768 IDE Controller",
335 amd7x6_chip_map
336 },
337 { 0,
338 0,
339 NULL,
340 NULL
341 }
342 };
343
344 const struct pciide_product_desc pciide_cmd_products[] = {
345 { PCI_PRODUCT_CMDTECH_640,
346 0,
347 "CMD Technology PCI0640",
348 cmd_chip_map
349 },
350 { PCI_PRODUCT_CMDTECH_643,
351 0,
352 "CMD Technology PCI0643",
353 cmd0643_9_chip_map,
354 },
355 { PCI_PRODUCT_CMDTECH_646,
356 0,
357 "CMD Technology PCI0646",
358 cmd0643_9_chip_map,
359 },
360 { PCI_PRODUCT_CMDTECH_648,
361 IDE_PCI_CLASS_OVERRIDE,
362 "CMD Technology PCI0648",
363 cmd0643_9_chip_map,
364 },
365 { PCI_PRODUCT_CMDTECH_649,
366 IDE_PCI_CLASS_OVERRIDE,
367 "CMD Technology PCI0649",
368 cmd0643_9_chip_map,
369 },
370 { 0,
371 0,
372 NULL,
373 NULL
374 }
375 };
376
377 const struct pciide_product_desc pciide_via_products[] = {
378 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
379 0,
380 NULL,
381 apollo_chip_map,
382 },
383 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
384 0,
385 NULL,
386 apollo_chip_map,
387 },
388 { 0,
389 0,
390 NULL,
391 NULL
392 }
393 };
394
395 const struct pciide_product_desc pciide_cypress_products[] = {
396 { PCI_PRODUCT_CONTAQ_82C693,
397 IDE_16BIT_IOSPACE,
398 "Cypress 82C693 IDE Controller",
399 cy693_chip_map,
400 },
401 { 0,
402 0,
403 NULL,
404 NULL
405 }
406 };
407
408 const struct pciide_product_desc pciide_sis_products[] = {
409 { PCI_PRODUCT_SIS_5597_IDE,
410 0,
411 NULL,
412 sis_chip_map,
413 },
414 { 0,
415 0,
416 NULL,
417 NULL
418 }
419 };
420
421 const struct pciide_product_desc pciide_acer_products[] = {
422 { PCI_PRODUCT_ALI_M5229,
423 0,
424 "Acer Labs M5229 UDMA IDE Controller",
425 acer_chip_map,
426 },
427 { 0,
428 0,
429 NULL,
430 NULL
431 }
432 };
433
434 const struct pciide_product_desc pciide_promise_products[] = {
435 { PCI_PRODUCT_PROMISE_ULTRA33,
436 IDE_PCI_CLASS_OVERRIDE,
437 "Promise Ultra33/ATA Bus Master IDE Accelerator",
438 pdc202xx_chip_map,
439 },
440 { PCI_PRODUCT_PROMISE_ULTRA66,
441 IDE_PCI_CLASS_OVERRIDE,
442 "Promise Ultra66/ATA Bus Master IDE Accelerator",
443 pdc202xx_chip_map,
444 },
445 { PCI_PRODUCT_PROMISE_ULTRA100,
446 IDE_PCI_CLASS_OVERRIDE,
447 "Promise Ultra100/ATA Bus Master IDE Accelerator",
448 pdc202xx_chip_map,
449 },
450 { PCI_PRODUCT_PROMISE_ULTRA100X,
451 IDE_PCI_CLASS_OVERRIDE,
452 "Promise Ultra100/ATA Bus Master IDE Accelerator",
453 pdc202xx_chip_map,
454 },
455 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
456 IDE_PCI_CLASS_OVERRIDE,
457 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
458 pdc202xx_chip_map,
459 },
460 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
461 IDE_PCI_CLASS_OVERRIDE,
462 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
463 pdc202xx_chip_map,
464 },
465 { PCI_PRODUCT_PROMISE_ULTRA133,
466 IDE_PCI_CLASS_OVERRIDE,
467 "Promise Ultra133/ATA Bus Master IDE Accelerator",
468 pdc202xx_chip_map,
469 },
470 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
471 IDE_PCI_CLASS_OVERRIDE,
472 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
473 pdc202xx_chip_map,
474 },
475 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
476 IDE_PCI_CLASS_OVERRIDE,
477 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
478 pdc202xx_chip_map,
479 },
480 { 0,
481 0,
482 NULL,
483 NULL
484 }
485 };
486
487 const struct pciide_product_desc pciide_opti_products[] = {
488 { PCI_PRODUCT_OPTI_82C621,
489 0,
490 "OPTi 82c621 PCI IDE controller",
491 opti_chip_map,
492 },
493 { PCI_PRODUCT_OPTI_82C568,
494 0,
495 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
496 opti_chip_map,
497 },
498 { PCI_PRODUCT_OPTI_82D568,
499 0,
500 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
501 opti_chip_map,
502 },
503 { 0,
504 0,
505 NULL,
506 NULL
507 }
508 };
509
510 const struct pciide_product_desc pciide_triones_products[] = {
511 { PCI_PRODUCT_TRIONES_HPT366,
512 IDE_PCI_CLASS_OVERRIDE,
513 NULL,
514 hpt_chip_map,
515 },
516 { PCI_PRODUCT_TRIONES_HPT372,
517 IDE_PCI_CLASS_OVERRIDE,
518 NULL,
519 hpt_chip_map
520 },
521 { PCI_PRODUCT_TRIONES_HPT374,
522 IDE_PCI_CLASS_OVERRIDE,
523 NULL,
524 hpt_chip_map
525 },
526 { 0,
527 0,
528 NULL,
529 NULL
530 }
531 };
532
533 const struct pciide_product_desc pciide_acard_products[] = {
534 { PCI_PRODUCT_ACARD_ATP850U,
535 IDE_PCI_CLASS_OVERRIDE,
536 "Acard ATP850U Ultra33 IDE Controller",
537 acard_chip_map,
538 },
539 { PCI_PRODUCT_ACARD_ATP860,
540 IDE_PCI_CLASS_OVERRIDE,
541 "Acard ATP860 Ultra66 IDE Controller",
542 acard_chip_map,
543 },
544 { PCI_PRODUCT_ACARD_ATP860A,
545 IDE_PCI_CLASS_OVERRIDE,
546 "Acard ATP860-A Ultra66 IDE Controller",
547 acard_chip_map,
548 },
549 { 0,
550 0,
551 NULL,
552 NULL
553 }
554 };
555
556 const struct pciide_product_desc pciide_serverworks_products[] = {
557 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
558 0,
559 "ServerWorks OSB4 IDE Controller",
560 serverworks_chip_map,
561 },
562 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
563 0,
564 "ServerWorks CSB5 IDE Controller",
565 serverworks_chip_map,
566 },
567 { 0,
568 0,
569 NULL,
570 }
571 };
572
573 const struct pciide_product_desc pciide_symphony_products[] = {
574 { PCI_PRODUCT_SYMPHONY_82C105,
575 0,
576 "Symphony Labs 82C105 IDE controller",
577 sl82c105_chip_map,
578 },
579 { 0,
580 0,
581 NULL,
582 }
583 };
584
585 const struct pciide_product_desc pciide_winbond_products[] = {
586 { PCI_PRODUCT_WINBOND_W83C553F_1,
587 0,
588 "Winbond W83C553F IDE controller",
589 sl82c105_chip_map,
590 },
591 { 0,
592 0,
593 NULL,
594 }
595 };
596
597 struct pciide_vendor_desc {
598 u_int32_t ide_vendor;
599 const struct pciide_product_desc *ide_products;
600 };
601
602 const struct pciide_vendor_desc pciide_vendors[] = {
603 { PCI_VENDOR_INTEL, pciide_intel_products },
604 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
605 { PCI_VENDOR_VIATECH, pciide_via_products },
606 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
607 { PCI_VENDOR_SIS, pciide_sis_products },
608 { PCI_VENDOR_ALI, pciide_acer_products },
609 { PCI_VENDOR_PROMISE, pciide_promise_products },
610 { PCI_VENDOR_AMD, pciide_amd_products },
611 { PCI_VENDOR_OPTI, pciide_opti_products },
612 { PCI_VENDOR_TRIONES, pciide_triones_products },
613 { PCI_VENDOR_ACARD, pciide_acard_products },
614 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
615 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
616 { PCI_VENDOR_WINBOND, pciide_winbond_products },
617 { 0, NULL }
618 };
619
620 /* options passed via the 'flags' config keyword */
621 #define PCIIDE_OPTIONS_DMA 0x01
622 #define PCIIDE_OPTIONS_NODMA 0x02
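/*
 * For example, a kernel config entry along the lines of (hypothetical):
 *	pciide* at pci? dev ? function ? flags 0x0001
 * sets PCIIDE_OPTIONS_DMA and lets default_chip_map() use bus-master DMA
 * on an otherwise unknown controller; flags 0x0002 forces DMA off.
 */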
623
624 int pciide_match __P((struct device *, struct cfdata *, void *));
625 void pciide_attach __P((struct device *, struct device *, void *));
626
627 struct cfattach pciide_ca = {
628 sizeof(struct pciide_softc), pciide_match, pciide_attach
629 };
630 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
631 int pciide_mapregs_compat __P(( struct pci_attach_args *,
632 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
633 int pciide_mapregs_native __P((struct pci_attach_args *,
634 struct pciide_channel *, bus_size_t *, bus_size_t *,
635 int (*pci_intr) __P((void *))));
636 void pciide_mapreg_dma __P((struct pciide_softc *,
637 struct pci_attach_args *));
638 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
639 void pciide_mapchan __P((struct pci_attach_args *,
640 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
641 int (*pci_intr) __P((void *))));
642 int pciide_chan_candisable __P((struct pciide_channel *));
643 void pciide_map_compat_intr __P(( struct pci_attach_args *,
644 struct pciide_channel *, int, int));
645 int pciide_compat_intr __P((void *));
646 int pciide_pci_intr __P((void *));
647 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
648
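/*
 * Look the PCI ID up in the vendor/product tables above.  Returns NULL
 * if either the vendor or the product is unknown; pciide_attach() then
 * falls back to default_product_desc, and pciide_match() relies on the
 * PCI class code alone.
 */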
649 const struct pciide_product_desc *
650 pciide_lookup_product(id)
651 u_int32_t id;
652 {
653 const struct pciide_product_desc *pp;
654 const struct pciide_vendor_desc *vp;
655
656 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
657 if (PCI_VENDOR(id) == vp->ide_vendor)
658 break;
659
660 if ((pp = vp->ide_products) == NULL)
661 return NULL;
662
663 for (; pp->chip_map != NULL; pp++)
664 if (PCI_PRODUCT(id) == pp->ide_product)
665 break;
666
667 if (pp->chip_map == NULL)
668 return NULL;
669 return pp;
670 }
671
672 int
673 pciide_match(parent, match, aux)
674 struct device *parent;
675 struct cfdata *match;
676 void *aux;
677 {
678 struct pci_attach_args *pa = aux;
679 const struct pciide_product_desc *pp;
680
681 /*
682 * Check the class register to see that it's a PCI IDE controller.
683 * If it is, we assume that we can deal with it; it _should_
684 * work in a standardized way...
685 */
686 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
687 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
688 return (1);
689 }
690
691 /*
692 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
693 * controllers. Let's see if we can deal with them anyway.
694 */
695 pp = pciide_lookup_product(pa->pa_id);
696 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
697 return (1);
698 }
699
700 return (0);
701 }
702
703 void
704 pciide_attach(parent, self, aux)
705 struct device *parent, *self;
706 void *aux;
707 {
708 struct pci_attach_args *pa = aux;
709 pci_chipset_tag_t pc = pa->pa_pc;
710 pcitag_t tag = pa->pa_tag;
711 struct pciide_softc *sc = (struct pciide_softc *)self;
712 pcireg_t csr;
713 char devinfo[256];
714 const char *displaydev;
715
716 sc->sc_pp = pciide_lookup_product(pa->pa_id);
717 if (sc->sc_pp == NULL) {
718 sc->sc_pp = &default_product_desc;
719 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
720 displaydev = devinfo;
721 } else
722 displaydev = sc->sc_pp->ide_name;
723
724 /* if displaydev == NULL, printf is done in chip-specific map */
725 if (displaydev)
726 printf(": %s (rev. 0x%02x)\n", displaydev,
727 PCI_REVISION(pa->pa_class));
728
729 sc->sc_pc = pa->pa_pc;
730 sc->sc_tag = pa->pa_tag;
731 #ifdef WDCDEBUG
732 if (wdcdebug_pciide_mask & DEBUG_PROBE)
733 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
734 #endif
735 sc->sc_pp->chip_map(sc, pa);
736
737 if (sc->sc_dma_ok) {
738 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
739 csr |= PCI_COMMAND_MASTER_ENABLE;
740 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
741 }
742 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
743 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
744 }
745
746 /* tell whether the chip is enabled or not */
747 int
748 pciide_chipen(sc, pa)
749 struct pciide_softc *sc;
750 struct pci_attach_args *pa;
751 {
752 pcireg_t csr;
753 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
754 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
755 PCI_COMMAND_STATUS_REG);
756 printf("%s: device disabled (at %s)\n",
757 sc->sc_wdcdev.sc_dev.dv_xname,
758 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
759 "device" : "bridge");
760 return 0;
761 }
762 return 1;
763 }
764
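/*
 * Map a channel's registers at the fixed legacy (compatibility) I/O
 * addresses given by PCIIDE_COMPAT_CMD_BASE()/PCIIDE_COMPAT_CTL_BASE().
 * Returns 1 on success, 0 on failure.
 */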
765 int
766 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
767 struct pci_attach_args *pa;
768 struct pciide_channel *cp;
769 int compatchan;
770 bus_size_t *cmdsizep, *ctlsizep;
771 {
772 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
773 struct channel_softc *wdc_cp = &cp->wdc_channel;
774
775 cp->compat = 1;
776 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
777 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
778
779 wdc_cp->cmd_iot = pa->pa_iot;
780 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
781 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
782 printf("%s: couldn't map %s channel cmd regs\n",
783 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
784 return (0);
785 }
786
787 wdc_cp->ctl_iot = pa->pa_iot;
788 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
789 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
790 printf("%s: couldn't map %s channel ctl regs\n",
791 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
792 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
793 PCIIDE_COMPAT_CMD_SIZE);
794 return (0);
795 }
796
797 return (1);
798 }
799
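/*
 * Map a channel's command and control blocks from the PCI BARs (native
 * mode) and, the first time through, establish the native-PCI interrupt
 * that both channels share.  Returns 1 on success, 0 on failure.
 */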
800 int
801 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
802 struct pci_attach_args * pa;
803 struct pciide_channel *cp;
804 bus_size_t *cmdsizep, *ctlsizep;
805 int (*pci_intr) __P((void *));
806 {
807 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
808 struct channel_softc *wdc_cp = &cp->wdc_channel;
809 const char *intrstr;
810 pci_intr_handle_t intrhandle;
811
812 cp->compat = 0;
813
814 if (sc->sc_pci_ih == NULL) {
815 if (pci_intr_map(pa, &intrhandle) != 0) {
816 printf("%s: couldn't map native-PCI interrupt\n",
817 sc->sc_wdcdev.sc_dev.dv_xname);
818 return 0;
819 }
820 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
821 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
822 intrhandle, IPL_BIO, pci_intr, sc);
823 if (sc->sc_pci_ih != NULL) {
824 printf("%s: using %s for native-PCI interrupt\n",
825 sc->sc_wdcdev.sc_dev.dv_xname,
826 intrstr ? intrstr : "unknown interrupt");
827 } else {
828 printf("%s: couldn't establish native-PCI interrupt",
829 sc->sc_wdcdev.sc_dev.dv_xname);
830 if (intrstr != NULL)
831 printf(" at %s", intrstr);
832 printf("\n");
833 return 0;
834 }
835 }
836 cp->ih = sc->sc_pci_ih;
837 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
838 PCI_MAPREG_TYPE_IO, 0,
839 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
840 printf("%s: couldn't map %s channel cmd regs\n",
841 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
842 return 0;
843 }
844
845 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
846 PCI_MAPREG_TYPE_IO, 0,
847 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
848 printf("%s: couldn't map %s channel ctl regs\n",
849 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
850 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
851 return 0;
852 }
853 /*
854 * In native mode, 4 bytes of I/O space are mapped for the control
855 * register; the control register itself is at offset 2. Pass the generic
856 * code a handle for only one byte at the right offset.
857 */
858 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
859 &wdc_cp->ctl_ioh) != 0) {
860 printf("%s: unable to subregion %s channel ctl regs\n",
861 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
862 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
863 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
864 return 0;
865 }
866 return (1);
867 }
868
869 void
870 pciide_mapreg_dma(sc, pa)
871 struct pciide_softc *sc;
872 struct pci_attach_args *pa;
873 {
874 pcireg_t maptype;
875 bus_addr_t addr;
876
877 /*
878 * Map DMA registers
879 *
880 * Note that sc_dma_ok is the right variable to test to see if
881 * DMA can be done. If the interface doesn't support DMA,
882 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
883 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
884 * non-zero if the interface supports DMA and the registers
885 * could be mapped.
886 *
887 * XXX Note that despite the fact that the Bus Master IDE specs
888 * XXX say that "The bus master IDE function uses 16 bytes of IO
889 * XXX space," some controllers (at least the United
890 * XXX Microelectronics UM8886BF) place it in memory space.
891 */
892 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
893 PCIIDE_REG_BUS_MASTER_DMA);
894
895 switch (maptype) {
896 case PCI_MAPREG_TYPE_IO:
897 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
898 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
899 &addr, NULL, NULL) == 0);
900 if (sc->sc_dma_ok == 0) {
901 printf(", but unused (couldn't query registers)");
902 break;
903 }
904 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
905 && addr >= 0x10000) {
906 sc->sc_dma_ok = 0;
907 printf(", but unused (registers at unsafe address "
908 "%#lx)", (unsigned long)addr);
909 break;
910 }
911 /* FALLTHROUGH */
912
913 case PCI_MAPREG_MEM_TYPE_32BIT:
914 sc->sc_dma_ok = (pci_mapreg_map(pa,
915 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
916 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
917 sc->sc_dmat = pa->pa_dmat;
918 if (sc->sc_dma_ok == 0) {
919 printf(", but unused (couldn't map registers)");
920 } else {
921 sc->sc_wdcdev.dma_arg = sc;
922 sc->sc_wdcdev.dma_init = pciide_dma_init;
923 sc->sc_wdcdev.dma_start = pciide_dma_start;
924 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
925 }
926
927 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
928 PCIIDE_OPTIONS_NODMA) {
929 printf(", but unused (forced off by config file)");
930 sc->sc_dma_ok = 0;
931 }
932 break;
933
934 default:
935 sc->sc_dma_ok = 0;
936 printf(", but unsupported register maptype (0x%x)", maptype);
937 }
938 }
939
940 int
941 pciide_compat_intr(arg)
942 void *arg;
943 {
944 struct pciide_channel *cp = arg;
945
946 #ifdef DIAGNOSTIC
947 /* should only be called for a compat channel */
948 if (cp->compat == 0)
949 panic("pciide compat intr called for non-compat chan %p\n", cp);
950 #endif
951 return (wdcintr(&cp->wdc_channel));
952 }
953
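/*
 * Interrupt handler for native-PCI channels: both channels share one
 * interrupt line, so poll every non-compat channel that is waiting for
 * an interrupt and let wdcintr() decide whether to claim it.
 */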
954 int
955 pciide_pci_intr(arg)
956 void *arg;
957 {
958 struct pciide_softc *sc = arg;
959 struct pciide_channel *cp;
960 struct channel_softc *wdc_cp;
961 int i, rv, crv;
962
963 rv = 0;
964 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
965 cp = &sc->pciide_channels[i];
966 wdc_cp = &cp->wdc_channel;
967
968 /* If it's a compat channel, skip it. */
969 if (cp->compat)
970 continue;
971 /* if this channel not waiting for intr, skip */
972 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
973 continue;
974
975 crv = wdcintr(wdc_cp);
976 if (crv == 0)
977 ; /* leave rv alone */
978 else if (crv == 1)
979 rv = 1; /* claim the intr */
980 else if (rv == 0) /* crv should be -1 in this case */
981 rv = crv; /* if we've done no better, take it */
982 }
983 return (rv);
984 }
985
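/*
 * Common helper called from the chip-specific set_modes routines:
 * clear the DMA/UDMA flags of drives that can't do DMA (or when DMA
 * isn't usable at all) and allocate the DMA tables for those that can.
 */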
986 void
987 pciide_channel_dma_setup(cp)
988 struct pciide_channel *cp;
989 {
990 int drive;
991 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
992 struct ata_drive_datas *drvp;
993
994 for (drive = 0; drive < 2; drive++) {
995 drvp = &cp->wdc_channel.ch_drive[drive];
996 /* If no drive, skip */
997 if ((drvp->drive_flags & DRIVE) == 0)
998 continue;
999 /* setup DMA if needed */
1000 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1001 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1002 sc->sc_dma_ok == 0) {
1003 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1004 continue;
1005 }
1006 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1007 != 0) {
1008 /* Abort DMA setup */
1009 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1010 continue;
1011 }
1012 }
1013 }
1014
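/*
 * Allocate and map the physical region descriptor table used by the
 * bus-master engine for one drive, create the DMA map covering the
 * table itself, and create the DMA map used for data transfers.
 * Safe to call more than once; later calls return immediately.
 */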
1015 int
1016 pciide_dma_table_setup(sc, channel, drive)
1017 struct pciide_softc *sc;
1018 int channel, drive;
1019 {
1020 bus_dma_segment_t seg;
1021 int error, rseg;
1022 const bus_size_t dma_table_size =
1023 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1024 struct pciide_dma_maps *dma_maps =
1025 &sc->pciide_channels[channel].dma_maps[drive];
1026
1027 /* If table was already allocated, just return */
1028 if (dma_maps->dma_table)
1029 return 0;
1030
1031 /* Allocate memory for the DMA tables and map it */
1032 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1033 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1034 BUS_DMA_NOWAIT)) != 0) {
1035 printf("%s:%d: unable to allocate table DMA for "
1036 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1037 channel, drive, error);
1038 return error;
1039 }
1040 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1041 dma_table_size,
1042 (caddr_t *)&dma_maps->dma_table,
1043 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1044 printf("%s:%d: unable to map table DMA for"
1045 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1046 channel, drive, error);
1047 return error;
1048 }
1049 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1050 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1051 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1052
1053 /* Create and load table DMA map for this disk */
1054 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1055 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1056 &dma_maps->dmamap_table)) != 0) {
1057 printf("%s:%d: unable to create table DMA map for "
1058 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1059 channel, drive, error);
1060 return error;
1061 }
1062 if ((error = bus_dmamap_load(sc->sc_dmat,
1063 dma_maps->dmamap_table,
1064 dma_maps->dma_table,
1065 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1066 printf("%s:%d: unable to load table DMA map for "
1067 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1068 channel, drive, error);
1069 return error;
1070 }
1071 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1072 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1073 DEBUG_PROBE);
1074 /* Create an xfer DMA map for this drive */
1075 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1076 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1077 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1078 &dma_maps->dmamap_xfer)) != 0) {
1079 printf("%s:%d: unable to create xfer DMA map for "
1080 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1081 channel, drive, error);
1082 return error;
1083 }
1084 return 0;
1085 }
1086
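/*
 * Prepare a transfer: load the data buffer into the xfer DMA map,
 * translate each segment into a descriptor (no segment may cross a 64k
 * boundary; the last one gets the end-of-table bit), then program the
 * bus-master registers with the table address and transfer direction.
 */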
1087 int
1088 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1089 void *v;
1090 int channel, drive;
1091 void *databuf;
1092 size_t datalen;
1093 int flags;
1094 {
1095 struct pciide_softc *sc = v;
1096 int error, seg;
1097 struct pciide_dma_maps *dma_maps =
1098 &sc->pciide_channels[channel].dma_maps[drive];
1099
1100 error = bus_dmamap_load(sc->sc_dmat,
1101 dma_maps->dmamap_xfer,
1102 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1103 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1104 if (error) {
1105 printf("%s:%d: unable to load xfer DMA map for"
1106 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1107 channel, drive, error);
1108 return error;
1109 }
1110
1111 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1112 dma_maps->dmamap_xfer->dm_mapsize,
1113 (flags & WDC_DMA_READ) ?
1114 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1115
1116 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1117 #ifdef DIAGNOSTIC
1118 /* A segment must not cross a 64k boundary */
1119 {
1120 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1121 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1122 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1123 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1124 printf("pciide_dma: segment %d physical addr 0x%lx"
1125 " len 0x%lx not properly aligned\n",
1126 seg, phys, len);
1127 panic("pciide_dma: buf align");
1128 }
1129 }
1130 #endif
1131 dma_maps->dma_table[seg].base_addr =
1132 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1133 dma_maps->dma_table[seg].byte_count =
1134 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1135 IDEDMA_BYTE_COUNT_MASK);
1136 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1137 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1138 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1139
1140 }
1141 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1142 htole32(IDEDMA_BYTE_COUNT_EOT);
1143
1144 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1145 dma_maps->dmamap_table->dm_mapsize,
1146 BUS_DMASYNC_PREWRITE);
1147
1148 /* Maps are ready. Start DMA function */
1149 #ifdef DIAGNOSTIC
1150 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1151 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1152 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1153 panic("pciide_dma_init: table align");
1154 }
1155 #endif
1156
1157 /* Clear status bits */
1158 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1159 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1160 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1161 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1162 /* Write table addr */
1163 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1164 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1165 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1166 /* set read/write */
1167 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1168 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1169 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1170 /* remember flags */
1171 dma_maps->dma_flags = flags;
1172 return 0;
1173 }
1174
1175 void
1176 pciide_dma_start(v, channel, drive)
1177 void *v;
1178 int channel, drive;
1179 {
1180 struct pciide_softc *sc = v;
1181
1182 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1183 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1184 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1185 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1186 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1187 }
1188
1189 int
1190 pciide_dma_finish(v, channel, drive, force)
1191 void *v;
1192 int channel, drive;
1193 int force;
1194 {
1195 struct pciide_softc *sc = v;
1196 u_int8_t status;
1197 int error = 0;
1198 struct pciide_dma_maps *dma_maps =
1199 &sc->pciide_channels[channel].dma_maps[drive];
1200
1201 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1202 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1203 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1204 DEBUG_XFERS);
1205
1206 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1207 return WDC_DMAST_NOIRQ;
1208
1209 /* stop DMA channel */
1210 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1211 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1212 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1213 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1214
1215 /* Unload the map of the data buffer */
1216 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1217 dma_maps->dmamap_xfer->dm_mapsize,
1218 (dma_maps->dma_flags & WDC_DMA_READ) ?
1219 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1220 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1221
1222 if ((status & IDEDMA_CTL_ERR) != 0) {
1223 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1224 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1225 error |= WDC_DMAST_ERR;
1226 }
1227
1228 if ((status & IDEDMA_CTL_INTR) == 0) {
1229 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1230 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1231 drive, status);
1232 error |= WDC_DMAST_NOIRQ;
1233 }
1234
1235 if ((status & IDEDMA_CTL_ACT) != 0) {
1236 /* data underrun, may be a valid condition for ATAPI */
1237 error |= WDC_DMAST_UNDER;
1238 }
1239 return error;
1240 }
1241
1242 void
1243 pciide_irqack(chp)
1244 struct channel_softc *chp;
1245 {
1246 struct pciide_channel *cp = (struct pciide_channel*)chp;
1247 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1248
1249 /* clear status bits in IDE DMA registers */
1250 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1251 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1252 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1253 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1254 }
1255
1256 /* some common code used by several chip_map */
1257 int
1258 pciide_chansetup(sc, channel, interface)
1259 struct pciide_softc *sc;
1260 int channel;
1261 pcireg_t interface;
1262 {
1263 struct pciide_channel *cp = &sc->pciide_channels[channel];
1264 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1265 cp->name = PCIIDE_CHANNEL_NAME(channel);
1266 cp->wdc_channel.channel = channel;
1267 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1268 cp->wdc_channel.ch_queue =
1269 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1270 if (cp->wdc_channel.ch_queue == NULL) {
1271 printf("%s %s channel: "
1272 "can't allocate memory for command queue",
1273 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1274 return 0;
1275 }
1276 printf("%s: %s channel %s to %s mode\n",
1277 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1278 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1279 "configured" : "wired",
1280 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1281 "native-PCI" : "compatibility");
1282 return 1;
1283 }
1284
1285 /* some common code used by several chip channel_map */
1286 void
1287 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1288 struct pci_attach_args *pa;
1289 struct pciide_channel *cp;
1290 pcireg_t interface;
1291 bus_size_t *cmdsizep, *ctlsizep;
1292 int (*pci_intr) __P((void *));
1293 {
1294 struct channel_softc *wdc_cp = &cp->wdc_channel;
1295
1296 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1297 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1298 pci_intr);
1299 else
1300 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1301 wdc_cp->channel, cmdsizep, ctlsizep);
1302
1303 if (cp->hw_ok == 0)
1304 return;
1305 wdc_cp->data32iot = wdc_cp->cmd_iot;
1306 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1307 wdcattach(wdc_cp);
1308 }
1309
1310 /*
1311 * Generic code to determine whether a channel can be disabled. Returns 1
1312 * if the channel can be disabled, 0 if not.
1313 */
1314 int
1315 pciide_chan_candisable(cp)
1316 struct pciide_channel *cp;
1317 {
1318 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1319 struct channel_softc *wdc_cp = &cp->wdc_channel;
1320
1321 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1322 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1323 printf("%s: disabling %s channel (no drives)\n",
1324 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1325 cp->hw_ok = 0;
1326 return 1;
1327 }
1328 return 0;
1329 }
1330
1331 /*
1332 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1333 * Set hw_ok=0 on failure
1334 */
1335 void
1336 pciide_map_compat_intr(pa, cp, compatchan, interface)
1337 struct pci_attach_args *pa;
1338 struct pciide_channel *cp;
1339 int compatchan, interface;
1340 {
1341 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1342 struct channel_softc *wdc_cp = &cp->wdc_channel;
1343
1344 if (cp->hw_ok == 0)
1345 return;
1346 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1347 return;
1348
1349 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1350 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1351 pa, compatchan, pciide_compat_intr, cp);
1352 if (cp->ih == NULL) {
1353 #endif
1354 printf("%s: no compatibility interrupt for use by %s "
1355 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1356 cp->hw_ok = 0;
1357 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1358 }
1359 #endif
1360 }
1361
1362 void
1363 pciide_print_modes(cp)
1364 struct pciide_channel *cp;
1365 {
1366 wdc_print_modes(&cp->wdc_channel);
1367 }
1368
1369 void
1370 default_chip_map(sc, pa)
1371 struct pciide_softc *sc;
1372 struct pci_attach_args *pa;
1373 {
1374 struct pciide_channel *cp;
1375 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1376 pcireg_t csr;
1377 int channel, drive;
1378 struct ata_drive_datas *drvp;
1379 u_int8_t idedma_ctl;
1380 bus_size_t cmdsize, ctlsize;
1381 char *failreason;
1382
1383 if (pciide_chipen(sc, pa) == 0)
1384 return;
1385
1386 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1387 printf("%s: bus-master DMA support present",
1388 sc->sc_wdcdev.sc_dev.dv_xname);
1389 if (sc->sc_pp == &default_product_desc &&
1390 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1391 PCIIDE_OPTIONS_DMA) == 0) {
1392 printf(", but unused (no driver support)");
1393 sc->sc_dma_ok = 0;
1394 } else {
1395 pciide_mapreg_dma(sc, pa);
1396 if (sc->sc_dma_ok != 0)
1397 printf(", used without full driver "
1398 "support");
1399 }
1400 } else {
1401 printf("%s: hardware does not support DMA",
1402 sc->sc_wdcdev.sc_dev.dv_xname);
1403 sc->sc_dma_ok = 0;
1404 }
1405 printf("\n");
1406 if (sc->sc_dma_ok) {
1407 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1408 sc->sc_wdcdev.irqack = pciide_irqack;
1409 }
1410 sc->sc_wdcdev.PIO_cap = 0;
1411 sc->sc_wdcdev.DMA_cap = 0;
1412
1413 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1414 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1415 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1416
1417 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1418 cp = &sc->pciide_channels[channel];
1419 if (pciide_chansetup(sc, channel, interface) == 0)
1420 continue;
1421 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1422 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1423 &ctlsize, pciide_pci_intr);
1424 } else {
1425 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1426 channel, &cmdsize, &ctlsize);
1427 }
1428 if (cp->hw_ok == 0)
1429 continue;
1430 /*
1431 * Check to see if something appears to be there.
1432 */
1433 failreason = NULL;
1434 if (!wdcprobe(&cp->wdc_channel)) {
1435 failreason = "not responding; disabled or no drives?";
1436 goto next;
1437 }
1438 /*
1439 * Now, make sure it's actually attributable to this PCI IDE
1440 * channel by trying to access the channel again while the
1441 * PCI IDE controller's I/O space is disabled. (If the
1442 * channel no longer appears to be there, it belongs to
1443 * this controller.) YUCK!
1444 */
1445 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1446 PCI_COMMAND_STATUS_REG);
1447 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1448 csr & ~PCI_COMMAND_IO_ENABLE);
1449 if (wdcprobe(&cp->wdc_channel))
1450 failreason = "other hardware responding at addresses";
1451 pci_conf_write(sc->sc_pc, sc->sc_tag,
1452 PCI_COMMAND_STATUS_REG, csr);
1453 next:
1454 if (failreason) {
1455 printf("%s: %s channel ignored (%s)\n",
1456 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1457 failreason);
1458 cp->hw_ok = 0;
1459 bus_space_unmap(cp->wdc_channel.cmd_iot,
1460 cp->wdc_channel.cmd_ioh, cmdsize);
1461 if (interface & PCIIDE_INTERFACE_PCI(channel))
1462 bus_space_unmap(cp->wdc_channel.ctl_iot,
1463 cp->ctl_baseioh, ctlsize);
1464 else
1465 bus_space_unmap(cp->wdc_channel.ctl_iot,
1466 cp->wdc_channel.ctl_ioh, ctlsize);
1467 } else {
1468 pciide_map_compat_intr(pa, cp, channel, interface);
1469 }
1470 if (cp->hw_ok) {
1471 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1472 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1473 wdcattach(&cp->wdc_channel);
1474 }
1475 }
1476
1477 if (sc->sc_dma_ok == 0)
1478 return;
1479
1480 /* Allocate DMA maps */
1481 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1482 idedma_ctl = 0;
1483 cp = &sc->pciide_channels[channel];
1484 for (drive = 0; drive < 2; drive++) {
1485 drvp = &cp->wdc_channel.ch_drive[drive];
1486 /* If no drive, skip */
1487 if ((drvp->drive_flags & DRIVE) == 0)
1488 continue;
1489 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1490 continue;
1491 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1492 /* Abort DMA setup */
1493 printf("%s:%d:%d: can't allocate DMA maps, "
1494 "using PIO transfers\n",
1495 sc->sc_wdcdev.sc_dev.dv_xname,
1496 channel, drive);
1497 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1498 }
1499 printf("%s:%d:%d: using DMA data transfers\n",
1500 sc->sc_wdcdev.sc_dev.dv_xname,
1501 channel, drive);
1502 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1503 }
1504 if (idedma_ctl != 0) {
1505 /* Add software bits in status register */
1506 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1507 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1508 idedma_ctl);
1509 }
1510 }
1511 }
1512
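/*
 * Intel PIIX/ICH family.  The original PIIX (82371FB) has only one set
 * of timings per channel, while later parts add a separate slave timing
 * register and, from the PIIX4 on, Ultra-DMA control; hence the two
 * set_modes variants selected below.
 */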
1513 void
1514 piix_chip_map(sc, pa)
1515 struct pciide_softc *sc;
1516 struct pci_attach_args *pa;
1517 {
1518 struct pciide_channel *cp;
1519 int channel;
1520 u_int32_t idetim;
1521 bus_size_t cmdsize, ctlsize;
1522
1523 if (pciide_chipen(sc, pa) == 0)
1524 return;
1525
1526 printf("%s: bus-master DMA support present",
1527 sc->sc_wdcdev.sc_dev.dv_xname);
1528 pciide_mapreg_dma(sc, pa);
1529 printf("\n");
1530 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1531 WDC_CAPABILITY_MODE;
1532 if (sc->sc_dma_ok) {
1533 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1534 sc->sc_wdcdev.irqack = pciide_irqack;
1535 switch(sc->sc_pp->ide_product) {
1536 case PCI_PRODUCT_INTEL_82371AB_IDE:
1537 case PCI_PRODUCT_INTEL_82440MX_IDE:
1538 case PCI_PRODUCT_INTEL_82801AA_IDE:
1539 case PCI_PRODUCT_INTEL_82801AB_IDE:
1540 case PCI_PRODUCT_INTEL_82801BA_IDE:
1541 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1542 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1543 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1544 case PCI_PRODUCT_INTEL_82801DB_IDE:
1545 case PCI_PRODUCT_INTEL_82801EB_IDE:
1546 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1547 }
1548 }
1549 sc->sc_wdcdev.PIO_cap = 4;
1550 sc->sc_wdcdev.DMA_cap = 2;
1551 switch(sc->sc_pp->ide_product) {
1552 case PCI_PRODUCT_INTEL_82801AA_IDE:
1553 sc->sc_wdcdev.UDMA_cap = 4;
1554 break;
1555 case PCI_PRODUCT_INTEL_82801BA_IDE:
1556 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1557 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1558 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1559 case PCI_PRODUCT_INTEL_82801DB_IDE:
1560 case PCI_PRODUCT_INTEL_82801EB_IDE:
1561 sc->sc_wdcdev.UDMA_cap = 5;
1562 break;
1563 default:
1564 sc->sc_wdcdev.UDMA_cap = 2;
1565 }
1566 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1567 sc->sc_wdcdev.set_modes = piix_setup_channel;
1568 else
1569 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1570 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1571 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1572
1573 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1574 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1575 DEBUG_PROBE);
1576 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1577 WDCDEBUG_PRINT((", sidetim=0x%x",
1578 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1579 DEBUG_PROBE);
1580 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1581 WDCDEBUG_PRINT((", udamreg 0x%x",
1582 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1583 DEBUG_PROBE);
1584 }
1585 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1586 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1587 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1588 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1589 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1590 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1591 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1592 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1593 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1594 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1595 DEBUG_PROBE);
1596 }
1597
1598 }
1599 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1600
1601 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1602 cp = &sc->pciide_channels[channel];
1603 /* PIIX is compat-only */
1604 if (pciide_chansetup(sc, channel, 0) == 0)
1605 continue;
1606 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1607 if ((PIIX_IDETIM_READ(idetim, channel) &
1608 PIIX_IDETIM_IDE) == 0) {
1609 printf("%s: %s channel ignored (disabled)\n",
1610 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1611 continue;
1612 }
1613 /* PIIX are compat-only pciide devices */
1614 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1615 if (cp->hw_ok == 0)
1616 continue;
1617 if (pciide_chan_candisable(cp)) {
1618 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1619 channel);
1620 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1621 idetim);
1622 }
1623 pciide_map_compat_intr(pa, cp, channel, 0);
1624 if (cp->hw_ok == 0)
1625 continue;
1626 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1627 }
1628
1629 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1630 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1631 DEBUG_PROBE);
1632 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1633 WDCDEBUG_PRINT((", sidetim=0x%x",
1634 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1635 DEBUG_PROBE);
1636 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1637 WDCDEBUG_PRINT((", udamreg 0x%x",
1638 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1639 DEBUG_PROBE);
1640 }
1641 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1642 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1643 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1644 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1645 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1646 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1647 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1648 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ) {
1649 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1650 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1651 DEBUG_PROBE);
1652 }
1653 }
1654 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1655 }
1656
1657 void
1658 piix_setup_channel(chp)
1659 struct channel_softc *chp;
1660 {
1661 u_int8_t mode[2], drive;
1662 u_int32_t oidetim, idetim, idedma_ctl;
1663 struct pciide_channel *cp = (struct pciide_channel*)chp;
1664 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1665 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1666
1667 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1668 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1669 idedma_ctl = 0;
1670
1671 /* set up new idetim: Enable IDE registers decode */
1672 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1673 chp->channel);
1674
1675 /* setup DMA */
1676 pciide_channel_dma_setup(cp);
1677
1678 /*
1679 * Here we have to juggle the drives' modes: the PIIX can't use
1680 * different timings for the master and slave drives on a channel,
1681 * so we need to find the best combination.
1682 */
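/*
 * For example, if the master supports multiword DMA mode 2 but the
 * slave only supports mode 1, both drives end up programmed for mode 1.
 */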
1683
1684 /* If both drives support DMA, take the lower mode */
1685 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1686 (drvp[1].drive_flags & DRIVE_DMA)) {
1687 mode[0] = mode[1] =
1688 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1689 drvp[0].DMA_mode = mode[0];
1690 drvp[1].DMA_mode = mode[1];
1691 goto ok;
1692 }
1693 /*
1694 * If only one drive supports DMA, use its mode, and
1695 * put the other one in PIO mode 0 if its timings are not compatible
1696 */
1697 if (drvp[0].drive_flags & DRIVE_DMA) {
1698 mode[0] = drvp[0].DMA_mode;
1699 mode[1] = drvp[1].PIO_mode;
1700 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1701 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1702 mode[1] = drvp[1].PIO_mode = 0;
1703 goto ok;
1704 }
1705 if (drvp[1].drive_flags & DRIVE_DMA) {
1706 mode[1] = drvp[1].DMA_mode;
1707 mode[0] = drvp[0].PIO_mode;
1708 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1709 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1710 mode[0] = drvp[0].PIO_mode = 0;
1711 goto ok;
1712 }
1713 /*
1714 * If neither drive is using DMA, take the lower PIO mode, unless
1715 * one of them is below PIO mode 2
1716 */
1717 if (drvp[0].PIO_mode < 2) {
1718 mode[0] = drvp[0].PIO_mode = 0;
1719 mode[1] = drvp[1].PIO_mode;
1720 } else if (drvp[1].PIO_mode < 2) {
1721 mode[1] = drvp[1].PIO_mode = 0;
1722 mode[0] = drvp[0].PIO_mode;
1723 } else {
1724 mode[0] = mode[1] =
1725 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1726 drvp[0].PIO_mode = mode[0];
1727 drvp[1].PIO_mode = mode[1];
1728 }
1729 ok: /* The modes are setup */
1730 for (drive = 0; drive < 2; drive++) {
1731 if (drvp[drive].drive_flags & DRIVE_DMA) {
1732 idetim |= piix_setup_idetim_timings(
1733 mode[drive], 1, chp->channel);
1734 goto end;
1735 }
1736 }
1737 /* If we get here, neither drive is using DMA */
1738 if (mode[0] >= 2)
1739 idetim |= piix_setup_idetim_timings(
1740 mode[0], 0, chp->channel);
1741 else
1742 idetim |= piix_setup_idetim_timings(
1743 mode[1], 0, chp->channel);
1744 end: /*
1745 * timing mode is now set up in the controller. Enable
1746 * it per-drive
1747 */
1748 for (drive = 0; drive < 2; drive++) {
1749 /* If no drive, skip */
1750 if ((drvp[drive].drive_flags & DRIVE) == 0)
1751 continue;
1752 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1753 if (drvp[drive].drive_flags & DRIVE_DMA)
1754 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1755 }
1756 if (idedma_ctl != 0) {
1757 /* Add software bits in status register */
1758 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1759 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1760 idedma_ctl);
1761 }
1762 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1763 pciide_print_modes(cp);
1764 }
1765
1766 void
1767 piix3_4_setup_channel(chp)
1768 struct channel_softc *chp;
1769 {
1770 struct ata_drive_datas *drvp;
1771 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1772 struct pciide_channel *cp = (struct pciide_channel*)chp;
1773 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1774 int drive;
1775 int channel = chp->channel;
1776
1777 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1778 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1779 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1780 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1781 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1782 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1783 PIIX_SIDETIM_RTC_MASK(channel));
1784
1785 idedma_ctl = 0;
1786 /* If channel disabled, no need to go further */
1787 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1788 return;
1789 /* set up new idetim: Enable IDE registers decode */
1790 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1791
1792 /* setup DMA if needed */
1793 pciide_channel_dma_setup(cp);
1794
1795 for (drive = 0; drive < 2; drive++) {
1796 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1797 PIIX_UDMATIM_SET(0x3, channel, drive));
1798 drvp = &chp->ch_drive[drive];
1799 /* If no drive, skip */
1800 if ((drvp->drive_flags & DRIVE) == 0)
1801 continue;
1802 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1803 (drvp->drive_flags & DRIVE_UDMA) == 0))
1804 goto pio;
1805
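		/*
		 * The following ICH (82801) variants have an I/O ping-pong
		 * buffer; enable it for drives that will use (Ultra-)DMA.
		 */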
1806 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1807 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1808 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1809 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1810 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1811 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1812 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1813 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1814 ideconf |= PIIX_CONFIG_PINGPONG;
1815 }
1816 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1817 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1818 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1819 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1820 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1821 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) {
1822 /* setup Ultra/100 */
1823 if (drvp->UDMA_mode > 2 &&
1824 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1825 drvp->UDMA_mode = 2;
1826 if (drvp->UDMA_mode > 4) {
1827 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1828 } else {
1829 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1830 if (drvp->UDMA_mode > 2) {
1831 ideconf |= PIIX_CONFIG_UDMA66(channel,
1832 drive);
1833 } else {
1834 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1835 drive);
1836 }
1837 }
1838 }
1839 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1840 /* setup Ultra/66 */
1841 if (drvp->UDMA_mode > 2 &&
1842 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1843 drvp->UDMA_mode = 2;
1844 if (drvp->UDMA_mode > 2)
1845 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1846 else
1847 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1848 }
1849 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1850 (drvp->drive_flags & DRIVE_UDMA)) {
1851 /* use Ultra/DMA */
1852 drvp->drive_flags &= ~DRIVE_DMA;
1853 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1854 udmareg |= PIIX_UDMATIM_SET(
1855 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1856 } else {
1857 /* use Multiword DMA */
1858 drvp->drive_flags &= ~DRIVE_UDMA;
1859 if (drive == 0) {
1860 idetim |= piix_setup_idetim_timings(
1861 drvp->DMA_mode, 1, channel);
1862 } else {
1863 sidetim |= piix_setup_sidetim_timings(
1864 drvp->DMA_mode, 1, channel);
1865 				idetim = PIIX_IDETIM_SET(idetim,
1866 PIIX_IDETIM_SITRE, channel);
1867 }
1868 }
1869 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1870
1871 pio: /* use PIO mode */
1872 idetim |= piix_setup_idetim_drvs(drvp);
1873 if (drive == 0) {
1874 idetim |= piix_setup_idetim_timings(
1875 drvp->PIO_mode, 0, channel);
1876 } else {
1877 sidetim |= piix_setup_sidetim_timings(
1878 drvp->PIO_mode, 0, channel);
1879 			idetim = PIIX_IDETIM_SET(idetim,
1880 PIIX_IDETIM_SITRE, channel);
1881 }
1882 }
1883 if (idedma_ctl != 0) {
1884 /* Add software bits in status register */
1885 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1886 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1887 idedma_ctl);
1888 }
1889 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1890 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1891 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1892 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1893 pciide_print_modes(cp);
1894 }
1895
1896
1897 /* setup ISP and RTC fields, based on mode */
1898 static u_int32_t
1899 piix_setup_idetim_timings(mode, dma, channel)
1900 u_int8_t mode;
1901 u_int8_t dma;
1902 u_int8_t channel;
1903 {
1904
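	/*
	 * Pick the ISP/RTC values from the DMA or PIO timing table and
	 * place them in the IDETIM field for the given channel.
	 */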
1905 if (dma)
1906 return PIIX_IDETIM_SET(0,
1907 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1908 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1909 channel);
1910 else
1911 return PIIX_IDETIM_SET(0,
1912 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1913 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1914 channel);
1915 }
1916
1917 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1918 static u_int32_t
1919 piix_setup_idetim_drvs(drvp)
1920 struct ata_drive_datas *drvp;
1921 {
1922 u_int32_t ret = 0;
1923 struct channel_softc *chp = drvp->chnl_softc;
1924 u_int8_t channel = chp->channel;
1925 u_int8_t drive = drvp->drive;
1926
1927 /*
1928 	 * If the drive is using UDMA, the timing setups are independent,
1929 	 * so just check DMA and PIO here.
1930 */
1931 if (drvp->drive_flags & DRIVE_DMA) {
1932 /* if mode = DMA mode 0, use compatible timings */
1933 if ((drvp->drive_flags & DRIVE_DMA) &&
1934 drvp->DMA_mode == 0) {
1935 drvp->PIO_mode = 0;
1936 return ret;
1937 }
1938 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1939 /*
1940 		 * If the PIO and DMA timings are the same, use fast timings
1941 		 * for PIO too; otherwise use compat timings.
1942 */
1943 if ((piix_isp_pio[drvp->PIO_mode] !=
1944 piix_isp_dma[drvp->DMA_mode]) ||
1945 (piix_rtc_pio[drvp->PIO_mode] !=
1946 piix_rtc_dma[drvp->DMA_mode]))
1947 drvp->PIO_mode = 0;
1948 /* if PIO mode <= 2, use compat timings for PIO */
1949 if (drvp->PIO_mode <= 2) {
1950 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1951 channel);
1952 return ret;
1953 }
1954 }
1955
1956 /*
1957 * Now setup PIO modes. If mode < 2, use compat timings.
1958 * Else enable fast timings. Enable IORDY and prefetch/post
1959 * if PIO mode >= 3.
1960 */
1961
1962 if (drvp->PIO_mode < 2)
1963 return ret;
1964
1965 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1966 if (drvp->PIO_mode >= 3) {
1967 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1968 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1969 }
1970 return ret;
1971 }
1972
1973 /* setup values in SIDETIM registers, based on mode */
1974 static u_int32_t
1975 piix_setup_sidetim_timings(mode, dma, channel)
1976 u_int8_t mode;
1977 u_int8_t dma;
1978 u_int8_t channel;
1979 {
1980 if (dma)
1981 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1982 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1983 else
1984 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1985 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1986 }
1987
1988 void
1989 amd7x6_chip_map(sc, pa)
1990 struct pciide_softc *sc;
1991 struct pci_attach_args *pa;
1992 {
1993 struct pciide_channel *cp;
1994 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1995 int channel;
1996 pcireg_t chanenable;
1997 bus_size_t cmdsize, ctlsize;
1998
1999 if (pciide_chipen(sc, pa) == 0)
2000 return;
2001 printf("%s: bus-master DMA support present",
2002 sc->sc_wdcdev.sc_dev.dv_xname);
2003 pciide_mapreg_dma(sc, pa);
2004 printf("\n");
2005 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2006 WDC_CAPABILITY_MODE;
2007 if (sc->sc_dma_ok) {
2008 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2009 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2010 sc->sc_wdcdev.irqack = pciide_irqack;
2011 }
2012 sc->sc_wdcdev.PIO_cap = 4;
2013 sc->sc_wdcdev.DMA_cap = 2;
2014
2015 switch (sc->sc_pp->ide_product) {
2016 case PCI_PRODUCT_AMD_PBC766_IDE:
2017 case PCI_PRODUCT_AMD_PBC768_IDE:
2018 sc->sc_wdcdev.UDMA_cap = 5;
2019 break;
2020 default:
2021 sc->sc_wdcdev.UDMA_cap = 4;
2022 }
2023 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2024 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2025 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2026 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
2027
2028 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2029 DEBUG_PROBE);
2030 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2031 cp = &sc->pciide_channels[channel];
2032 if (pciide_chansetup(sc, channel, interface) == 0)
2033 continue;
2034
2035 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2036 printf("%s: %s channel ignored (disabled)\n",
2037 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2038 continue;
2039 }
2040 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2041 pciide_pci_intr);
2042
2043 if (pciide_chan_candisable(cp))
2044 chanenable &= ~AMD7X6_CHAN_EN(channel);
2045 pciide_map_compat_intr(pa, cp, channel, interface);
2046 if (cp->hw_ok == 0)
2047 continue;
2048
2049 amd7x6_setup_channel(&cp->wdc_channel);
2050 }
2051 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2052 chanenable);
2053 return;
2054 }
2055
2056 void
2057 amd7x6_setup_channel(chp)
2058 struct channel_softc *chp;
2059 {
2060 u_int32_t udmatim_reg, datatim_reg;
2061 u_int8_t idedma_ctl;
2062 int mode, drive;
2063 struct ata_drive_datas *drvp;
2064 struct pciide_channel *cp = (struct pciide_channel*)chp;
2065 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2066 #ifndef PCIIDE_AMD756_ENABLEDMA
2067 int rev = PCI_REVISION(
2068 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2069 #endif
2070
2071 idedma_ctl = 0;
2072 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2073 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
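	/* Clear this channel's timing fields before reprogramming them. */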
2074 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2075 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2076
2077 /* setup DMA if needed */
2078 pciide_channel_dma_setup(cp);
2079
2080 for (drive = 0; drive < 2; drive++) {
2081 drvp = &chp->ch_drive[drive];
2082 /* If no drive, skip */
2083 if ((drvp->drive_flags & DRIVE) == 0)
2084 continue;
2085 /* add timing values, setup DMA if needed */
2086 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2087 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2088 mode = drvp->PIO_mode;
2089 goto pio;
2090 }
2091 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2092 (drvp->drive_flags & DRIVE_UDMA)) {
2093 /* use Ultra/DMA */
2094 drvp->drive_flags &= ~DRIVE_DMA;
2095 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2096 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2097 AMD7X6_UDMA_TIME(chp->channel, drive,
2098 amd7x6_udma_tim[drvp->UDMA_mode]);
2099 /* can use PIO timings, MW DMA unused */
2100 mode = drvp->PIO_mode;
2101 } else {
2102 /* use Multiword DMA, but only if revision is OK */
2103 drvp->drive_flags &= ~DRIVE_UDMA;
2104 #ifndef PCIIDE_AMD756_ENABLEDMA
2105 /*
2106 			 * The workaround doesn't seem to be necessary
2107 			 * with all drives, so it can be disabled with
2108 			 * PCIIDE_AMD756_ENABLEDMA. The chip bug causes a
2109 			 * hard hang if triggered.
2110 */
2111 if (sc->sc_pp->ide_product ==
2112 PCI_PRODUCT_AMD_PBC756_IDE &&
2113 AMD756_CHIPREV_DISABLEDMA(rev)) {
2114 printf("%s:%d:%d: multi-word DMA disabled due "
2115 "to chip revision\n",
2116 sc->sc_wdcdev.sc_dev.dv_xname,
2117 chp->channel, drive);
2118 mode = drvp->PIO_mode;
2119 drvp->drive_flags &= ~DRIVE_DMA;
2120 goto pio;
2121 }
2122 #endif
2123 /* mode = min(pio, dma+2) */
2124 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2125 mode = drvp->PIO_mode;
2126 else
2127 mode = drvp->DMA_mode + 2;
2128 }
2129 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2130
2131 pio: /* setup PIO mode */
2132 if (mode <= 2) {
2133 drvp->DMA_mode = 0;
2134 drvp->PIO_mode = 0;
2135 mode = 0;
2136 } else {
2137 drvp->PIO_mode = mode;
2138 drvp->DMA_mode = mode - 2;
2139 }
2140 datatim_reg |=
2141 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2142 amd7x6_pio_set[mode]) |
2143 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2144 amd7x6_pio_rec[mode]);
2145 }
2146 if (idedma_ctl != 0) {
2147 /* Add software bits in status register */
2148 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2149 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2150 idedma_ctl);
2151 }
2152 pciide_print_modes(cp);
2153 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2154 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2155 }
2156
2157 void
2158 apollo_chip_map(sc, pa)
2159 struct pciide_softc *sc;
2160 struct pci_attach_args *pa;
2161 {
2162 struct pciide_channel *cp;
2163 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2164 int channel;
2165 u_int32_t ideconf;
2166 bus_size_t cmdsize, ctlsize;
2167 pcitag_t pcib_tag;
2168 pcireg_t pcib_id, pcib_class;
2169
2170 if (pciide_chipen(sc, pa) == 0)
2171 return;
2172 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2173 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2174 /* and read ID and rev of the ISA bridge */
2175 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2176 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2177 printf(": VIA Technologies ");
2178 switch (PCI_PRODUCT(pcib_id)) {
2179 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2180 printf("VT82C586 (Apollo VP) ");
2181 		if (PCI_REVISION(pcib_class) >= 0x02) {
2182 printf("ATA33 controller\n");
2183 sc->sc_wdcdev.UDMA_cap = 2;
2184 } else {
2185 printf("controller\n");
2186 sc->sc_wdcdev.UDMA_cap = 0;
2187 }
2188 break;
2189 case PCI_PRODUCT_VIATECH_VT82C596A:
2190 printf("VT82C596A (Apollo Pro) ");
2191 if (PCI_REVISION(pcib_class) >= 0x12) {
2192 printf("ATA66 controller\n");
2193 sc->sc_wdcdev.UDMA_cap = 4;
2194 } else {
2195 printf("ATA33 controller\n");
2196 sc->sc_wdcdev.UDMA_cap = 2;
2197 }
2198 break;
2199 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2200 printf("VT82C686A (Apollo KX133) ");
2201 if (PCI_REVISION(pcib_class) >= 0x40) {
2202 printf("ATA100 controller\n");
2203 sc->sc_wdcdev.UDMA_cap = 5;
2204 } else {
2205 printf("ATA66 controller\n");
2206 sc->sc_wdcdev.UDMA_cap = 4;
2207 }
2208 break;
2209 case PCI_PRODUCT_VIATECH_VT8231:
2210 printf("VT8231 ATA100 controller\n");
2211 sc->sc_wdcdev.UDMA_cap = 5;
2212 break;
2213 case PCI_PRODUCT_VIATECH_VT8233:
2214 printf("VT8233 ATA100 controller\n");
2215 sc->sc_wdcdev.UDMA_cap = 5;
2216 break;
2217 case PCI_PRODUCT_VIATECH_VT8233A:
2218 printf("VT8233A ATA133 controller\n");
2219 sc->sc_wdcdev.UDMA_cap = 6;
2220 break;
2221 case PCI_PRODUCT_VIATECH_VT8235:
2222 printf("VT8235 ATA133 controller\n");
2223 sc->sc_wdcdev.UDMA_cap = 6;
2224 break;
2225 default:
2226 printf("unknown ATA controller\n");
2227 sc->sc_wdcdev.UDMA_cap = 0;
2228 }
2229
2230 printf("%s: bus-master DMA support present",
2231 sc->sc_wdcdev.sc_dev.dv_xname);
2232 pciide_mapreg_dma(sc, pa);
2233 printf("\n");
2234 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2235 WDC_CAPABILITY_MODE;
2236 if (sc->sc_dma_ok) {
2237 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2238 sc->sc_wdcdev.irqack = pciide_irqack;
2239 if (sc->sc_wdcdev.UDMA_cap > 0)
2240 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2241 }
2242 sc->sc_wdcdev.PIO_cap = 4;
2243 sc->sc_wdcdev.DMA_cap = 2;
2244 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2245 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2246 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2247
2248 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2249 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2250 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2251 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2252 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2253 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2254 DEBUG_PROBE);
2255
2256 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2257 cp = &sc->pciide_channels[channel];
2258 if (pciide_chansetup(sc, channel, interface) == 0)
2259 continue;
2260
2261 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2262 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2263 printf("%s: %s channel ignored (disabled)\n",
2264 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2265 continue;
2266 }
2267 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2268 pciide_pci_intr);
2269 if (cp->hw_ok == 0)
2270 continue;
2271 if (pciide_chan_candisable(cp)) {
2272 ideconf &= ~APO_IDECONF_EN(channel);
2273 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2274 ideconf);
2275 }
2276 pciide_map_compat_intr(pa, cp, channel, interface);
2277
2278 if (cp->hw_ok == 0)
2279 continue;
2280 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2281 }
2282 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2283 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2284 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2285 }
2286
2287 void
2288 apollo_setup_channel(chp)
2289 struct channel_softc *chp;
2290 {
2291 u_int32_t udmatim_reg, datatim_reg;
2292 u_int8_t idedma_ctl;
2293 int mode, drive;
2294 struct ata_drive_datas *drvp;
2295 struct pciide_channel *cp = (struct pciide_channel*)chp;
2296 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2297
2298 idedma_ctl = 0;
2299 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2300 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2301 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2302 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2303
2304 /* setup DMA if needed */
2305 pciide_channel_dma_setup(cp);
2306
2307 for (drive = 0; drive < 2; drive++) {
2308 drvp = &chp->ch_drive[drive];
2309 /* If no drive, skip */
2310 if ((drvp->drive_flags & DRIVE) == 0)
2311 continue;
2312 /* add timing values, setup DMA if needed */
2313 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2314 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2315 mode = drvp->PIO_mode;
2316 goto pio;
2317 }
2318 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2319 (drvp->drive_flags & DRIVE_UDMA)) {
2320 /* use Ultra/DMA */
2321 drvp->drive_flags &= ~DRIVE_DMA;
2322 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2323 APO_UDMA_EN_MTH(chp->channel, drive);
2324 if (sc->sc_wdcdev.UDMA_cap == 6) {
2325 /* 8233a */
2326 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2327 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2328 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2329 /* 686b */
2330 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2331 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2332 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2333 /* 596b or 686a */
2334 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2335 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2336 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2337 } else {
2338 /* 596a or 586b */
2339 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2340 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2341 }
2342 /* can use PIO timings, MW DMA unused */
2343 mode = drvp->PIO_mode;
2344 } else {
2345 /* use Multiword DMA */
2346 drvp->drive_flags &= ~DRIVE_UDMA;
2347 /* mode = min(pio, dma+2) */
2348 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2349 mode = drvp->PIO_mode;
2350 else
2351 mode = drvp->DMA_mode + 2;
2352 }
2353 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2354
2355 pio: /* setup PIO mode */
2356 if (mode <= 2) {
2357 drvp->DMA_mode = 0;
2358 drvp->PIO_mode = 0;
2359 mode = 0;
2360 } else {
2361 drvp->PIO_mode = mode;
2362 drvp->DMA_mode = mode - 2;
2363 }
2364 datatim_reg |=
2365 APO_DATATIM_PULSE(chp->channel, drive,
2366 apollo_pio_set[mode]) |
2367 APO_DATATIM_RECOV(chp->channel, drive,
2368 apollo_pio_rec[mode]);
2369 }
2370 if (idedma_ctl != 0) {
2371 /* Add software bits in status register */
2372 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2373 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2374 idedma_ctl);
2375 }
2376 pciide_print_modes(cp);
2377 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2378 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2379 }
2380
2381 void
2382 cmd_channel_map(pa, sc, channel)
2383 struct pci_attach_args *pa;
2384 struct pciide_softc *sc;
2385 int channel;
2386 {
2387 struct pciide_channel *cp = &sc->pciide_channels[channel];
2388 bus_size_t cmdsize, ctlsize;
2389 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2390 int interface, one_channel;
2391
2392 /*
2393 * The 0648/0649 can be told to identify as a RAID controller.
2394 	 * In this case, we have to fake the interface.
2395 */
2396 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2397 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2398 PCIIDE_INTERFACE_SETTABLE(1);
2399 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2400 CMD_CONF_DSA1)
2401 interface |= PCIIDE_INTERFACE_PCI(0) |
2402 PCIIDE_INTERFACE_PCI(1);
2403 } else {
2404 interface = PCI_INTERFACE(pa->pa_class);
2405 }
2406
2407 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2408 cp->name = PCIIDE_CHANNEL_NAME(channel);
2409 cp->wdc_channel.channel = channel;
2410 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2411
2412 /*
2413 	 * Older CMD64x controllers don't have independent channels
2414 */
2415 switch (sc->sc_pp->ide_product) {
2416 case PCI_PRODUCT_CMDTECH_649:
2417 one_channel = 0;
2418 break;
2419 default:
2420 one_channel = 1;
2421 break;
2422 }
2423
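	/*
	 * Channels that are not independent share the first channel's
	 * command queue, so their transfers are serialized.
	 */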
2424 if (channel > 0 && one_channel) {
2425 cp->wdc_channel.ch_queue =
2426 sc->pciide_channels[0].wdc_channel.ch_queue;
2427 } else {
2428 cp->wdc_channel.ch_queue =
2429 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2430 }
2431 if (cp->wdc_channel.ch_queue == NULL) {
2432 printf("%s %s channel: "
2433 		    "can't allocate memory for command queue\n",
2434 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2435 return;
2436 }
2437
2438 printf("%s: %s channel %s to %s mode\n",
2439 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2440 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2441 "configured" : "wired",
2442 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2443 "native-PCI" : "compatibility");
2444
2445 /*
2446 	 * With a CMD PCI64x, if we get here the first channel is enabled:
2447 * there's no way to disable the first channel without disabling
2448 * the whole device
2449 */
2450 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2451 printf("%s: %s channel ignored (disabled)\n",
2452 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2453 return;
2454 }
2455
2456 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2457 if (cp->hw_ok == 0)
2458 return;
2459 if (channel == 1) {
2460 if (pciide_chan_candisable(cp)) {
2461 ctrl &= ~CMD_CTRL_2PORT;
2462 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2463 CMD_CTRL, ctrl);
2464 }
2465 }
2466 pciide_map_compat_intr(pa, cp, channel, interface);
2467 }
2468
2469 int
2470 cmd_pci_intr(arg)
2471 void *arg;
2472 {
2473 struct pciide_softc *sc = arg;
2474 struct pciide_channel *cp;
2475 struct channel_softc *wdc_cp;
2476 int i, rv, crv;
2477 u_int32_t priirq, secirq;
2478
2479 rv = 0;
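	/*
	 * Interrupt pending bits are read from CMD_CONF for the primary
	 * channel and from CMD_ARTTIM23 for the secondary channel.
	 */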
2480 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2481 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2482 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2483 cp = &sc->pciide_channels[i];
2484 wdc_cp = &cp->wdc_channel;
2485 		/* If this is a compat channel, skip it. */
2486 if (cp->compat)
2487 continue;
2488 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2489 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2490 crv = wdcintr(wdc_cp);
2491 if (crv == 0)
2492 printf("%s:%d: bogus intr\n",
2493 sc->sc_wdcdev.sc_dev.dv_xname, i);
2494 else
2495 rv = 1;
2496 }
2497 }
2498 return rv;
2499 }
2500
2501 void
2502 cmd_chip_map(sc, pa)
2503 struct pciide_softc *sc;
2504 struct pci_attach_args *pa;
2505 {
2506 int channel;
2507
2508 /*
2509 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2510 	 * and the base address registers can be disabled at the
2511 	 * hardware level. In this case, the device is wired
2512 * in compat mode and its first channel is always enabled,
2513 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2514 * In fact, it seems that the first channel of the CMD PCI0640
2515 * can't be disabled.
2516 */
2517
2518 #ifdef PCIIDE_CMD064x_DISABLE
2519 if (pciide_chipen(sc, pa) == 0)
2520 return;
2521 #endif
2522
2523 printf("%s: hardware does not support DMA\n",
2524 sc->sc_wdcdev.sc_dev.dv_xname);
2525 sc->sc_dma_ok = 0;
2526
2527 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2528 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2529 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2530
2531 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2532 cmd_channel_map(pa, sc, channel);
2533 }
2534 }
2535
2536 void
2537 cmd0643_9_chip_map(sc, pa)
2538 struct pciide_softc *sc;
2539 struct pci_attach_args *pa;
2540 {
2541 struct pciide_channel *cp;
2542 int channel;
2543 pcireg_t rev = PCI_REVISION(pa->pa_class);
2544
2545 /*
2546 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2547 	 * and the base address registers can be disabled at the
2548 	 * hardware level. In this case, the device is wired
2549 * in compat mode and its first channel is always enabled,
2550 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2551 * In fact, it seems that the first channel of the CMD PCI0640
2552 * can't be disabled.
2553 */
2554
2555 #ifdef PCIIDE_CMD064x_DISABLE
2556 if (pciide_chipen(sc, pa) == 0)
2557 return;
2558 #endif
2559 printf("%s: bus-master DMA support present",
2560 sc->sc_wdcdev.sc_dev.dv_xname);
2561 pciide_mapreg_dma(sc, pa);
2562 printf("\n");
2563 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2564 WDC_CAPABILITY_MODE;
2565 if (sc->sc_dma_ok) {
2566 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2567 switch (sc->sc_pp->ide_product) {
2568 case PCI_PRODUCT_CMDTECH_649:
2569 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2570 sc->sc_wdcdev.UDMA_cap = 5;
2571 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2572 break;
2573 case PCI_PRODUCT_CMDTECH_648:
2574 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2575 sc->sc_wdcdev.UDMA_cap = 4;
2576 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2577 break;
2578 case PCI_PRODUCT_CMDTECH_646:
2579 if (rev >= CMD0646U2_REV) {
2580 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2581 sc->sc_wdcdev.UDMA_cap = 2;
2582 } else if (rev >= CMD0646U_REV) {
2583 /*
2584 * Linux's driver claims that the 646U is broken
2585 * with UDMA. Only enable it if we know what we're
2586 			 * doing.
2587 */
2588 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2589 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2590 sc->sc_wdcdev.UDMA_cap = 2;
2591 #endif
2592 /* explicitly disable UDMA */
2593 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2594 CMD_UDMATIM(0), 0);
2595 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2596 CMD_UDMATIM(1), 0);
2597 }
2598 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2599 break;
2600 default:
2601 sc->sc_wdcdev.irqack = pciide_irqack;
2602 }
2603 }
2604
2605 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2606 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2607 sc->sc_wdcdev.PIO_cap = 4;
2608 sc->sc_wdcdev.DMA_cap = 2;
2609 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2610
2611 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2612 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2613 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2614 DEBUG_PROBE);
2615
2616 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2617 cp = &sc->pciide_channels[channel];
2618 cmd_channel_map(pa, sc, channel);
2619 if (cp->hw_ok == 0)
2620 continue;
2621 cmd0643_9_setup_channel(&cp->wdc_channel);
2622 }
2623 /*
2624 	 * Note: this also makes sure we clear the IRQ disable and reset
2625 	 * bits.
2626 */
2627 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2628 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2629 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2630 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2631 DEBUG_PROBE);
2632 }
2633
2634 void
2635 cmd0643_9_setup_channel(chp)
2636 struct channel_softc *chp;
2637 {
2638 struct ata_drive_datas *drvp;
2639 u_int8_t tim;
2640 u_int32_t idedma_ctl, udma_reg;
2641 int drive;
2642 struct pciide_channel *cp = (struct pciide_channel*)chp;
2643 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2644
2645 idedma_ctl = 0;
2646 /* setup DMA if needed */
2647 pciide_channel_dma_setup(cp);
2648
2649 for (drive = 0; drive < 2; drive++) {
2650 drvp = &chp->ch_drive[drive];
2651 /* If no drive, skip */
2652 if ((drvp->drive_flags & DRIVE) == 0)
2653 continue;
2654 /* add timing values, setup DMA if needed */
2655 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2656 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2657 if (drvp->drive_flags & DRIVE_UDMA) {
2658 /* UltraDMA on a 646U2, 0648 or 0649 */
2659 drvp->drive_flags &= ~DRIVE_DMA;
2660 udma_reg = pciide_pci_read(sc->sc_pc,
2661 sc->sc_tag, CMD_UDMATIM(chp->channel));
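				/*
				 * Limit to UDMA2 unless an 80-wire cable is
				 * reported for this channel.
				 */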
2662 if (drvp->UDMA_mode > 2 &&
2663 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2664 CMD_BICSR) &
2665 CMD_BICSR_80(chp->channel)) == 0)
2666 drvp->UDMA_mode = 2;
2667 if (drvp->UDMA_mode > 2)
2668 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2669 else if (sc->sc_wdcdev.UDMA_cap > 2)
2670 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2671 udma_reg |= CMD_UDMATIM_UDMA(drive);
2672 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2673 CMD_UDMATIM_TIM_OFF(drive));
2674 udma_reg |=
2675 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2676 CMD_UDMATIM_TIM_OFF(drive));
2677 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2678 CMD_UDMATIM(chp->channel), udma_reg);
2679 } else {
2680 /*
2681 				 * Use Multiword DMA.
2682 				 * Timings will be used for both PIO and DMA,
2683 				 * so adjust the DMA mode if needed.
2684 				 * If we have a 0646U2/8/9, turn off UDMA.
2685 */
2686 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2687 udma_reg = pciide_pci_read(sc->sc_pc,
2688 sc->sc_tag,
2689 CMD_UDMATIM(chp->channel));
2690 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2691 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2692 CMD_UDMATIM(chp->channel),
2693 udma_reg);
2694 }
2695 if (drvp->PIO_mode >= 3 &&
2696 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2697 drvp->DMA_mode = drvp->PIO_mode - 2;
2698 }
2699 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2700 }
2701 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2702 }
2703 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2704 CMD_DATA_TIM(chp->channel, drive), tim);
2705 }
2706 if (idedma_ctl != 0) {
2707 /* Add software bits in status register */
2708 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2709 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2710 idedma_ctl);
2711 }
2712 pciide_print_modes(cp);
2713 }
2714
2715 void
2716 cmd646_9_irqack(chp)
2717 struct channel_softc *chp;
2718 {
2719 u_int32_t priirq, secirq;
2720 struct pciide_channel *cp = (struct pciide_channel*)chp;
2721 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2722
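	/*
	 * Read the register holding the channel's interrupt pending bit
	 * and write the value back to acknowledge it.
	 */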
2723 if (chp->channel == 0) {
2724 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2725 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2726 } else {
2727 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2728 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2729 }
2730 pciide_irqack(chp);
2731 }
2732
2733 void
2734 cy693_chip_map(sc, pa)
2735 struct pciide_softc *sc;
2736 struct pci_attach_args *pa;
2737 {
2738 struct pciide_channel *cp;
2739 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2740 bus_size_t cmdsize, ctlsize;
2741
2742 if (pciide_chipen(sc, pa) == 0)
2743 return;
2744 /*
2745 	 * This chip has 2 PCI IDE functions, one for primary and one for
2746 	 * secondary, so we need to call pciide_mapregs_compat() with
2747 	 * the real channel.
2748 */
2749 if (pa->pa_function == 1) {
2750 sc->sc_cy_compatchan = 0;
2751 } else if (pa->pa_function == 2) {
2752 sc->sc_cy_compatchan = 1;
2753 } else {
2754 printf("%s: unexpected PCI function %d\n",
2755 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2756 return;
2757 }
2758 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2759 printf("%s: bus-master DMA support present",
2760 sc->sc_wdcdev.sc_dev.dv_xname);
2761 pciide_mapreg_dma(sc, pa);
2762 } else {
2763 printf("%s: hardware does not support DMA",
2764 sc->sc_wdcdev.sc_dev.dv_xname);
2765 sc->sc_dma_ok = 0;
2766 }
2767 printf("\n");
2768
2769 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2770 if (sc->sc_cy_handle == NULL) {
2771 printf("%s: unable to map hyperCache control registers\n",
2772 sc->sc_wdcdev.sc_dev.dv_xname);
2773 sc->sc_dma_ok = 0;
2774 }
2775
2776 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2777 WDC_CAPABILITY_MODE;
2778 if (sc->sc_dma_ok) {
2779 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2780 sc->sc_wdcdev.irqack = pciide_irqack;
2781 }
2782 sc->sc_wdcdev.PIO_cap = 4;
2783 sc->sc_wdcdev.DMA_cap = 2;
2784 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2785
2786 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2787 sc->sc_wdcdev.nchannels = 1;
2788
2789 /* Only one channel for this chip; if we are here it's enabled */
2790 cp = &sc->pciide_channels[0];
2791 sc->wdc_chanarray[0] = &cp->wdc_channel;
2792 cp->name = PCIIDE_CHANNEL_NAME(0);
2793 cp->wdc_channel.channel = 0;
2794 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2795 cp->wdc_channel.ch_queue =
2796 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2797 if (cp->wdc_channel.ch_queue == NULL) {
2798 printf("%s primary channel: "
2799 		    "can't allocate memory for command queue\n",
2800 sc->sc_wdcdev.sc_dev.dv_xname);
2801 return;
2802 }
2803 printf("%s: primary channel %s to ",
2804 sc->sc_wdcdev.sc_dev.dv_xname,
2805 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2806 "configured" : "wired");
2807 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2808 printf("native-PCI");
2809 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2810 pciide_pci_intr);
2811 } else {
2812 printf("compatibility");
2813 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2814 &cmdsize, &ctlsize);
2815 }
2816 printf(" mode\n");
2817 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2818 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2819 wdcattach(&cp->wdc_channel);
2820 if (pciide_chan_candisable(cp)) {
2821 pci_conf_write(sc->sc_pc, sc->sc_tag,
2822 PCI_COMMAND_STATUS_REG, 0);
2823 }
2824 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2825 if (cp->hw_ok == 0)
2826 return;
2827 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2828 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2829 cy693_setup_channel(&cp->wdc_channel);
2830 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2831 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2832 }
2833
2834 void
2835 cy693_setup_channel(chp)
2836 struct channel_softc *chp;
2837 {
2838 struct ata_drive_datas *drvp;
2839 int drive;
2840 u_int32_t cy_cmd_ctrl;
2841 u_int32_t idedma_ctl;
2842 struct pciide_channel *cp = (struct pciide_channel*)chp;
2843 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2844 int dma_mode = -1;
2845
2846 cy_cmd_ctrl = idedma_ctl = 0;
2847
2848 /* setup DMA if needed */
2849 pciide_channel_dma_setup(cp);
2850
2851 for (drive = 0; drive < 2; drive++) {
2852 drvp = &chp->ch_drive[drive];
2853 /* If no drive, skip */
2854 if ((drvp->drive_flags & DRIVE) == 0)
2855 continue;
2856 /* add timing values, setup DMA if needed */
2857 if (drvp->drive_flags & DRIVE_DMA) {
2858 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2859 /* use Multiword DMA */
2860 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2861 dma_mode = drvp->DMA_mode;
2862 }
2863 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2864 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2865 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2866 CY_CMD_CTRL_IOW_REC_OFF(drive));
2867 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2868 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2869 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2870 CY_CMD_CTRL_IOR_REC_OFF(drive));
2871 }
2872 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2873 chp->ch_drive[0].DMA_mode = dma_mode;
2874 chp->ch_drive[1].DMA_mode = dma_mode;
2875
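	/* If neither drive uses DMA, fall back to mode 0 for the write below. */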
2876 if (dma_mode == -1)
2877 dma_mode = 0;
2878
2879 if (sc->sc_cy_handle != NULL) {
2880 /* Note: `multiple' is implied. */
2881 cy82c693_write(sc->sc_cy_handle,
2882 (sc->sc_cy_compatchan == 0) ?
2883 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2884 }
2885
2886 pciide_print_modes(cp);
2887
2888 if (idedma_ctl != 0) {
2889 /* Add software bits in status register */
2890 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2891 IDEDMA_CTL, idedma_ctl);
2892 }
2893 }
2894
2895 static struct sis_hostbr_type {
2896 u_int16_t id;
2897 u_int8_t rev;
2898 u_int8_t udma_mode;
2899 char *name;
2900 u_int8_t type;
2901 #define SIS_TYPE_NOUDMA 0
2902 #define SIS_TYPE_66 1
2903 #define SIS_TYPE_100OLD 2
2904 #define SIS_TYPE_100NEW 3
2905 #define SIS_TYPE_133OLD 4
2906 #define SIS_TYPE_133NEW 5
2907 #define SIS_TYPE_SOUTH 6
2908 } sis_hostbr_type[] = {
2909 	/* Most of the information here is from sos (at) freebsd.org */
2910 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
2911 #if 0
2912 /*
2913 	 * controllers associated with a rev 0x2 530 Host to PCI Bridge
2914 * have problems with UDMA (info provided by Christos)
2915 */
2916 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
2917 #endif
2918 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
2919 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
2920 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
2921 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
2922 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
2923 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
2924 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
2925 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
2926 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
2927 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
2928 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
2929 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
2930 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
2931 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
2932 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
2933 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
2934 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
2935 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
2936 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
2937 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
2938 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
2939 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
2940 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
2941 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
2942 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
2943 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
2944 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
2945 /*
2946  * From sos (at) freebsd.org: the 0x961 ID will never be found in the real world
2947 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
2948 */
2949 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
2950 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
2951 };
2952
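/*
 * Set by sis_hostbr_match(); the scan keeps the last matching table entry,
 * so entries with higher revision requirements take precedence.
 */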
2953 static struct sis_hostbr_type *sis_hostbr_type_match;
2954
2955 static int
2956 sis_hostbr_match(pa)
2957 struct pci_attach_args *pa;
2958 {
2959 int i;
2960 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
2961 return 0;
2962 sis_hostbr_type_match = NULL;
2963 for (i = 0;
2964 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
2965 i++) {
2966 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
2967 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
2968 sis_hostbr_type_match = &sis_hostbr_type[i];
2969 }
2970 return (sis_hostbr_type_match != NULL);
2971 }
2972
2973 static int
sis_south_match(pa)
2974 struct pci_attach_args *pa;
2975 {
2976 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
2977 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
2978 PCI_REVISION(pa->pa_class) >= 0x10);
2979 }
2980
2981 void
2982 sis_chip_map(sc, pa)
2983 struct pciide_softc *sc;
2984 struct pci_attach_args *pa;
2985 {
2986 struct pciide_channel *cp;
2987 int channel;
2988 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2989 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2990 pcireg_t rev = PCI_REVISION(pa->pa_class);
2991 bus_size_t cmdsize, ctlsize;
2992
2993 if (pciide_chipen(sc, pa) == 0)
2994 return;
2995 printf(": Silicon Integrated System ");
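	/*
	 * Scan for the SiS host bridge to determine which IDE flavour
	 * (and UDMA capability) this controller provides.
	 */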
2996 pci_find_device(NULL, sis_hostbr_match);
2997 if (sis_hostbr_type_match) {
2998 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
2999 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3000 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3001 SIS_REG_57) & 0x7f);
3002 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3003 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3004 printf("96X UDMA%d",
3005 sis_hostbr_type_match->udma_mode);
3006 sc->sis_type = SIS_TYPE_133NEW;
3007 sc->sc_wdcdev.UDMA_cap =
3008 sis_hostbr_type_match->udma_mode;
3009 } else {
3010 if (pci_find_device(NULL, sis_south_match)) {
3011 sc->sis_type = SIS_TYPE_133OLD;
3012 sc->sc_wdcdev.UDMA_cap =
3013 sis_hostbr_type_match->udma_mode;
3014 } else {
3015 sc->sis_type = SIS_TYPE_100NEW;
3016 sc->sc_wdcdev.UDMA_cap =
3017 sis_hostbr_type_match->udma_mode;
3018 }
3019 }
3020 } else {
3021 sc->sis_type = sis_hostbr_type_match->type;
3022 sc->sc_wdcdev.UDMA_cap =
3023 sis_hostbr_type_match->udma_mode;
3024 }
3025 		printf("%s", sis_hostbr_type_match->name);
3026 } else {
3027 printf("5597/5598");
3028 if (rev >= 0xd0) {
3029 sc->sc_wdcdev.UDMA_cap = 2;
3030 sc->sis_type = SIS_TYPE_66;
3031 } else {
3032 sc->sc_wdcdev.UDMA_cap = 0;
3033 sc->sis_type = SIS_TYPE_NOUDMA;
3034 }
3035 }
3036 printf(" IDE controller (rev. 0x%02x)\n", PCI_REVISION(pa->pa_class));
3037 printf("%s: bus-master DMA support present",
3038 sc->sc_wdcdev.sc_dev.dv_xname);
3039 pciide_mapreg_dma(sc, pa);
3040 printf("\n");
3041
3042 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3043 WDC_CAPABILITY_MODE;
3044 if (sc->sc_dma_ok) {
3045 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3046 sc->sc_wdcdev.irqack = pciide_irqack;
3047 if (sc->sis_type >= SIS_TYPE_66)
3048 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3049 }
3050
3051 sc->sc_wdcdev.PIO_cap = 4;
3052 sc->sc_wdcdev.DMA_cap = 2;
3053
3054 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3055 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3056 	switch (sc->sis_type) {
3057 case SIS_TYPE_NOUDMA:
3058 case SIS_TYPE_66:
3059 case SIS_TYPE_100OLD:
3060 sc->sc_wdcdev.set_modes = sis_setup_channel;
3061 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3062 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3063 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3064 break;
3065 case SIS_TYPE_100NEW:
3066 case SIS_TYPE_133OLD:
3067 sc->sc_wdcdev.set_modes = sis_setup_channel;
3068 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3069 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3070 break;
3071 case SIS_TYPE_133NEW:
3072 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3073 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3074 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3075 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3076 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3077 break;
3078 }
3079
3080
3081 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3082 cp = &sc->pciide_channels[channel];
3083 if (pciide_chansetup(sc, channel, interface) == 0)
3084 continue;
3085 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3086 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3087 printf("%s: %s channel ignored (disabled)\n",
3088 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3089 continue;
3090 }
3091 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3092 pciide_pci_intr);
3093 if (cp->hw_ok == 0)
3094 continue;
3095 if (pciide_chan_candisable(cp)) {
3096 if (channel == 0)
3097 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3098 else
3099 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3100 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3101 sis_ctr0);
3102 }
3103 pciide_map_compat_intr(pa, cp, channel, interface);
3104 if (cp->hw_ok == 0)
3105 continue;
3106 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3107 }
3108 }
3109
3110 void
3111 sis96x_setup_channel(chp)
3112 struct channel_softc *chp;
3113 {
3114 struct ata_drive_datas *drvp;
3115 int drive;
3116 u_int32_t sis_tim;
3117 u_int32_t idedma_ctl;
3118 int regtim;
3119 struct pciide_channel *cp = (struct pciide_channel*)chp;
3120 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3121
3122 sis_tim = 0;
3123 idedma_ctl = 0;
3124 /* setup DMA if needed */
3125 pciide_channel_dma_setup(cp);
3126
3127 for (drive = 0; drive < 2; drive++) {
3128 regtim = SIS_TIM133(
3129 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3130 chp->channel, drive);
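		/* regtim is the config-space offset of this drive's timing register */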
3131 drvp = &chp->ch_drive[drive];
3132 /* If no drive, skip */
3133 if ((drvp->drive_flags & DRIVE) == 0)
3134 continue;
3135 /* add timing values, setup DMA if needed */
3136 if (drvp->drive_flags & DRIVE_UDMA) {
3137 /* use Ultra/DMA */
3138 drvp->drive_flags &= ~DRIVE_DMA;
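			/*
			 * If the cable register reports a UDMA33-only cable,
			 * limit UDMA to mode 2.
			 */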
3139 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3140 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3141 if (drvp->UDMA_mode > 2)
3142 drvp->UDMA_mode = 2;
3143 }
3144 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3145 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3146 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3147 } else if (drvp->drive_flags & DRIVE_DMA) {
3148 /*
3149 			 * Use Multiword DMA.
3150 			 * Timings will be used for both PIO and DMA,
3151 			 * so adjust the DMA mode if needed.
3152 */
3153 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3154 drvp->PIO_mode = drvp->DMA_mode + 2;
3155 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3156 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3157 drvp->PIO_mode - 2 : 0;
3158 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3159 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3160 } else {
3161 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3162 }
3163 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3164 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3165 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3166 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3167 }
3168 if (idedma_ctl != 0) {
3169 /* Add software bits in status register */
3170 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3171 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3172 idedma_ctl);
3173 }
3174 pciide_print_modes(cp);
3175 }
3176
3177 void
3178 sis_setup_channel(chp)
3179 struct channel_softc *chp;
3180 {
3181 struct ata_drive_datas *drvp;
3182 int drive;
3183 u_int32_t sis_tim;
3184 u_int32_t idedma_ctl;
3185 struct pciide_channel *cp = (struct pciide_channel*)chp;
3186 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3187
3188 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3189 "channel %d 0x%x\n", chp->channel,
3190 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3191 DEBUG_PROBE);
3192 sis_tim = 0;
3193 idedma_ctl = 0;
3194 /* setup DMA if needed */
3195 pciide_channel_dma_setup(cp);
3196
3197 for (drive = 0; drive < 2; drive++) {
3198 drvp = &chp->ch_drive[drive];
3199 /* If no drive, skip */
3200 if ((drvp->drive_flags & DRIVE) == 0)
3201 continue;
3202 /* add timing values, setup DMA if needed */
3203 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3204 (drvp->drive_flags & DRIVE_UDMA) == 0)
3205 goto pio;
3206
3207 if (drvp->drive_flags & DRIVE_UDMA) {
3208 /* use Ultra/DMA */
3209 drvp->drive_flags &= ~DRIVE_DMA;
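			/*
			 * If the cable register reports a UDMA33-only cable
			 * for this channel, limit UDMA to mode 2.
			 */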
3210 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3211 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3212 if (drvp->UDMA_mode > 2)
3213 drvp->UDMA_mode = 2;
3214 }
3215 switch (sc->sis_type) {
3216 case SIS_TYPE_66:
3217 case SIS_TYPE_100OLD:
3218 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3219 SIS_TIM66_UDMA_TIME_OFF(drive);
3220 break;
3221 case SIS_TYPE_100NEW:
3222 sis_tim |=
3223 sis_udma100new_tim[drvp->UDMA_mode] <<
3224 				    SIS_TIM100_UDMA_TIME_OFF(drive);
				break;
3225 case SIS_TYPE_133OLD:
3226 sis_tim |=
3227 sis_udma133old_tim[drvp->UDMA_mode] <<
3228 SIS_TIM100_UDMA_TIME_OFF(drive);
3229 break;
3230 default:
3231 printf("unknown SiS IDE type %d\n",
3232 sc->sis_type);
3233 }
3234 } else {
3235 /*
3236 				 * Use Multiword DMA.
3237 				 * Timings will be used for both PIO and DMA,
3238 				 * so adjust the DMA mode if needed.
3239 */
3240 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3241 drvp->PIO_mode = drvp->DMA_mode + 2;
3242 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3243 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3244 drvp->PIO_mode - 2 : 0;
3245 if (drvp->DMA_mode == 0)
3246 drvp->PIO_mode = 0;
3247 }
3248 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3249 pio: switch (sc->sis_type) {
3250 case SIS_TYPE_NOUDMA:
3251 case SIS_TYPE_66:
3252 case SIS_TYPE_100OLD:
3253 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3254 SIS_TIM66_ACT_OFF(drive);
3255 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3256 SIS_TIM66_REC_OFF(drive);
3257 break;
3258 case SIS_TYPE_100NEW:
3259 case SIS_TYPE_133OLD:
3260 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3261 SIS_TIM100_ACT_OFF(drive);
3262 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3263 SIS_TIM100_REC_OFF(drive);
3264 break;
3265 default:
3266 printf("unknown SiS IDE type %d\n",
3267 sc->sis_type);
3268 }
3269 }
3270 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3271 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3272 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3273 if (idedma_ctl != 0) {
3274 /* Add software bits in status register */
3275 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3276 IDEDMA_CTL, idedma_ctl);
3277 }
3278 pciide_print_modes(cp);
3279 }
3280
3281 void
3282 acer_chip_map(sc, pa)
3283 struct pciide_softc *sc;
3284 struct pci_attach_args *pa;
3285 {
3286 struct pciide_channel *cp;
3287 int channel;
3288 pcireg_t cr, interface;
3289 bus_size_t cmdsize, ctlsize;
3290 pcireg_t rev = PCI_REVISION(pa->pa_class);
3291
3292 if (pciide_chipen(sc, pa) == 0)
3293 return;
3294 printf("%s: bus-master DMA support present",
3295 sc->sc_wdcdev.sc_dev.dv_xname);
3296 pciide_mapreg_dma(sc, pa);
3297 printf("\n");
3298 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3299 WDC_CAPABILITY_MODE;
3300 if (sc->sc_dma_ok) {
3301 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3302 if (rev >= 0x20) {
3303 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3304 if (rev >= 0xC4)
3305 sc->sc_wdcdev.UDMA_cap = 5;
3306 else if (rev >= 0xC2)
3307 sc->sc_wdcdev.UDMA_cap = 4;
3308 else
3309 sc->sc_wdcdev.UDMA_cap = 2;
3310 }
3311 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3312 sc->sc_wdcdev.irqack = pciide_irqack;
3313 }
3314
3315 sc->sc_wdcdev.PIO_cap = 4;
3316 sc->sc_wdcdev.DMA_cap = 2;
3317 sc->sc_wdcdev.set_modes = acer_setup_channel;
3318 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3319 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3320
3321 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3322 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3323 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3324
3325 /* Enable "microsoft register bits" R/W. */
3326 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3327 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3328 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3329 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3330 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3331 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3332 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3333 ~ACER_CHANSTATUSREGS_RO);
3334 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3335 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3336 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3337 /* Don't use cr, re-read the real register content instead */
3338 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3339 PCI_CLASS_REG));
3340
3341 /* From linux: enable "Cable Detection" */
3342 if (rev >= 0xC2) {
3343 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3344 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3345 | ACER_0x4B_CDETECT);
3346 }
3347
3348 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3349 cp = &sc->pciide_channels[channel];
3350 if (pciide_chansetup(sc, channel, interface) == 0)
3351 continue;
3352 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3353 printf("%s: %s channel ignored (disabled)\n",
3354 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3355 continue;
3356 }
3357 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3358 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3359 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3360 if (cp->hw_ok == 0)
3361 continue;
3362 if (pciide_chan_candisable(cp)) {
3363 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3364 pci_conf_write(sc->sc_pc, sc->sc_tag,
3365 PCI_CLASS_REG, cr);
3366 }
3367 pciide_map_compat_intr(pa, cp, channel, interface);
3368 acer_setup_channel(&cp->wdc_channel);
3369 }
3370 }
3371
3372 void
3373 acer_setup_channel(chp)
3374 struct channel_softc *chp;
3375 {
3376 struct ata_drive_datas *drvp;
3377 int drive;
3378 u_int32_t acer_fifo_udma;
3379 u_int32_t idedma_ctl;
3380 struct pciide_channel *cp = (struct pciide_channel*)chp;
3381 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3382
3383 idedma_ctl = 0;
3384 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3385 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3386 acer_fifo_udma), DEBUG_PROBE);
3387 /* setup DMA if needed */
3388 pciide_channel_dma_setup(cp);
3389
3390 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3391 	    DRIVE_UDMA) {	/* check for an 80-pin cable */
3392 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3393 ACER_0x4A_80PIN(chp->channel)) {
3394 if (chp->ch_drive[0].UDMA_mode > 2)
3395 chp->ch_drive[0].UDMA_mode = 2;
3396 if (chp->ch_drive[1].UDMA_mode > 2)
3397 chp->ch_drive[1].UDMA_mode = 2;
3398 }
3399 }
3400
3401 for (drive = 0; drive < 2; drive++) {
3402 drvp = &chp->ch_drive[drive];
3403 /* If no drive, skip */
3404 if ((drvp->drive_flags & DRIVE) == 0)
3405 continue;
3406 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3407 "channel %d drive %d 0x%x\n", chp->channel, drive,
3408 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3409 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3410 /* clear FIFO/DMA mode */
3411 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3412 ACER_UDMA_EN(chp->channel, drive) |
3413 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3414
3415 /* add timing values, setup DMA if needed */
3416 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3417 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3418 acer_fifo_udma |=
3419 ACER_FTH_OPL(chp->channel, drive, 0x1);
3420 goto pio;
3421 }
3422
3423 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3424 if (drvp->drive_flags & DRIVE_UDMA) {
3425 /* use Ultra/DMA */
3426 drvp->drive_flags &= ~DRIVE_DMA;
3427 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3428 acer_fifo_udma |=
3429 ACER_UDMA_TIM(chp->channel, drive,
3430 acer_udma[drvp->UDMA_mode]);
3431 /* XXX disable if one drive < UDMA3 ? */
3432 if (drvp->UDMA_mode >= 3) {
3433 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3434 ACER_0x4B,
3435 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3436 ACER_0x4B) | ACER_0x4B_UDMA66);
3437 }
3438 } else {
3439 /*
3440 			 * Use Multiword DMA.
3441 			 * Timings will be used for both PIO and DMA,
3442 			 * so adjust the DMA mode if needed.
3443 */
3444 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3445 drvp->PIO_mode = drvp->DMA_mode + 2;
3446 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3447 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3448 drvp->PIO_mode - 2 : 0;
3449 if (drvp->DMA_mode == 0)
3450 drvp->PIO_mode = 0;
3451 }
3452 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3453 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3454 ACER_IDETIM(chp->channel, drive),
3455 acer_pio[drvp->PIO_mode]);
3456 }
3457 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3458 acer_fifo_udma), DEBUG_PROBE);
3459 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3460 if (idedma_ctl != 0) {
3461 /* Add software bits in status register */
3462 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3463 IDEDMA_CTL, idedma_ctl);
3464 }
3465 pciide_print_modes(cp);
3466 }
3467
3468 int
3469 acer_pci_intr(arg)
3470 void *arg;
3471 {
3472 struct pciide_softc *sc = arg;
3473 struct pciide_channel *cp;
3474 struct channel_softc *wdc_cp;
3475 int i, rv, crv;
3476 u_int32_t chids;
3477
3478 rv = 0;
3479 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3480 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3481 cp = &sc->pciide_channels[i];
3482 wdc_cp = &cp->wdc_channel;
3483 /* If a compat channel skip. */
3484 		/* If this is a compat channel, skip it. */
3485 continue;
3486 if (chids & ACER_CHIDS_INT(i)) {
3487 crv = wdcintr(wdc_cp);
3488 if (crv == 0)
3489 printf("%s:%d: bogus intr\n",
3490 sc->sc_wdcdev.sc_dev.dv_xname, i);
3491 else
3492 rv = 1;
3493 }
3494 }
3495 return rv;
3496 }
3497
3498 void
3499 hpt_chip_map(sc, pa)
3500 struct pciide_softc *sc;
3501 struct pci_attach_args *pa;
3502 {
3503 struct pciide_channel *cp;
3504 int i, compatchan, revision;
3505 pcireg_t interface;
3506 bus_size_t cmdsize, ctlsize;
3507
3508 if (pciide_chipen(sc, pa) == 0)
3509 return;
3510 revision = PCI_REVISION(pa->pa_class);
3511 printf(": Triones/Highpoint ");
3512 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3513 printf("HPT374 IDE Controller\n");
3514 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3515 printf("HPT372 IDE Controller\n");
3516 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3517 if (revision == HPT372_REV)
3518 printf("HPT372 IDE Controller\n");
3519 else if (revision == HPT370_REV)
3520 printf("HPT370 IDE Controller\n");
3521 else if (revision == HPT370A_REV)
3522 printf("HPT370A IDE Controller\n");
3523 else if (revision == HPT366_REV)
3524 printf("HPT366 IDE Controller\n");
3525 else
3526 printf("unknown HPT IDE controller rev %d\n", revision);
3527 } else
3528 printf("unknown HPT IDE controller 0x%x\n",
3529 sc->sc_pp->ide_product);
3530
3531 /*
3532 * When the chip is in native mode it identifies itself as
3533 * 'misc mass storage'. Fake the interface in this case.
3534 */
3535 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3536 interface = PCI_INTERFACE(pa->pa_class);
3537 } else {
3538 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3539 PCIIDE_INTERFACE_PCI(0);
3540 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3541 (revision == HPT370_REV || revision == HPT370A_REV ||
3542 revision == HPT372_REV)) ||
3543 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3544 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3545 interface |= PCIIDE_INTERFACE_PCI(1);
3546 }
3547
3548 printf("%s: bus-master DMA support present",
3549 sc->sc_wdcdev.sc_dev.dv_xname);
3550 pciide_mapreg_dma(sc, pa);
3551 printf("\n");
3552 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3553 WDC_CAPABILITY_MODE;
3554 if (sc->sc_dma_ok) {
3555 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3556 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3557 sc->sc_wdcdev.irqack = pciide_irqack;
3558 }
3559 sc->sc_wdcdev.PIO_cap = 4;
3560 sc->sc_wdcdev.DMA_cap = 2;
3561
3562 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3563 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3564 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3565 revision == HPT366_REV) {
3566 sc->sc_wdcdev.UDMA_cap = 4;
3567 /*
3568 * The 366 has 2 PCI IDE functions, one for primary and one
3569 * for secondary. So we need to call pciide_mapregs_compat()
3570 * with the real channel
3571 */
3572 if (pa->pa_function == 0) {
3573 compatchan = 0;
3574 } else if (pa->pa_function == 1) {
3575 compatchan = 1;
3576 } else {
3577 printf("%s: unexpected PCI function %d\n",
3578 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3579 return;
3580 }
3581 sc->sc_wdcdev.nchannels = 1;
3582 } else {
3583 sc->sc_wdcdev.nchannels = 2;
3584 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3585 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3586 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3587 revision == HPT372_REV))
3588 sc->sc_wdcdev.UDMA_cap = 6;
3589 else
3590 sc->sc_wdcdev.UDMA_cap = 5;
3591 }
3592 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3593 cp = &sc->pciide_channels[i];
3594 if (sc->sc_wdcdev.nchannels > 1) {
3595 compatchan = i;
3596 if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3597 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3598 printf("%s: %s channel ignored (disabled)\n",
3599 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3600 continue;
3601 }
3602 }
3603 if (pciide_chansetup(sc, i, interface) == 0)
3604 continue;
3605 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3606 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3607 &ctlsize, hpt_pci_intr);
3608 } else {
3609 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3610 &cmdsize, &ctlsize);
3611 }
3612 if (cp->hw_ok == 0)
3613 return;
3614 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3615 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3616 wdcattach(&cp->wdc_channel);
3617 hpt_setup_channel(&cp->wdc_channel);
3618 }
3619 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3620 (revision == HPT370_REV || revision == HPT370A_REV ||
3621 revision == HPT372_REV)) ||
3622 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3623 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3624 /*
3625 * HPT370_REV and higher have a bit to disable interrupts;
3626 * make sure to clear it.
3627 */
3628 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3629 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3630 ~HPT_CSEL_IRQDIS);
3631 }
3632 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
3633 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3634 revision == HPT372_REV ) ||
3635 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3636 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3637 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3638 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3639 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3640 return;
3641 }
3642
3643 void
3644 hpt_setup_channel(chp)
3645 struct channel_softc *chp;
3646 {
3647 struct ata_drive_datas *drvp;
3648 int drive;
3649 int cable;
3650 u_int32_t before, after;
3651 u_int32_t idedma_ctl;
3652 struct pciide_channel *cp = (struct pciide_channel*)chp;
3653 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3654 int revision =
3655 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3656
3657 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3658
3659 /* setup DMA if needed */
3660 pciide_channel_dma_setup(cp);
3661
3662 idedma_ctl = 0;
3663
3664 /* Per drive settings */
3665 for (drive = 0; drive < 2; drive++) {
3666 drvp = &chp->ch_drive[drive];
3667 /* If no drive, skip */
3668 if ((drvp->drive_flags & DRIVE) == 0)
3669 continue;
3670 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3671 HPT_IDETIM(chp->channel, drive));
3672
3673 /* add timing values, setup DMA if needed */
3674 if (drvp->drive_flags & DRIVE_UDMA) {
3675 /* use Ultra/DMA */
3676 drvp->drive_flags &= ~DRIVE_DMA;
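/*
 * CBLID set in HPT_CSEL presumably means no 80-wire cable;
 * cap at UDMA mode 2 (Ultra/33) in that case.
 */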
3677 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3678 drvp->UDMA_mode > 2)
3679 drvp->UDMA_mode = 2;
3680 switch (sc->sc_pp->ide_product) {
3681 case PCI_PRODUCT_TRIONES_HPT374:
3682 after = hpt374_udma[drvp->UDMA_mode];
3683 break;
3684 case PCI_PRODUCT_TRIONES_HPT372:
3685 after = hpt372_udma[drvp->UDMA_mode];
3686 break;
3687 case PCI_PRODUCT_TRIONES_HPT366:
3688 default:
3689 switch (revision) {
3690 case HPT372_REV:
3691 after = hpt372_udma[drvp->UDMA_mode];
3692 break;
3693 case HPT370_REV:
3694 case HPT370A_REV:
3695 after = hpt370_udma[drvp->UDMA_mode];
3696 break;
3697 case HPT366_REV:
3698 default:
3699 after = hpt366_udma[drvp->UDMA_mode];
3700 break;
3701 }
3702 }
3703 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3704 } else if (drvp->drive_flags & DRIVE_DMA) {
3705 /*
3706 * use Multiword DMA.
3707 * Timings will be used for both PIO and DMA, so adjust
3708 * DMA mode if needed
3709 */
3710 if (drvp->PIO_mode >= 3 &&
3711 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3712 drvp->DMA_mode = drvp->PIO_mode - 2;
3713 }
3714 switch (sc->sc_pp->ide_product) {
3715 case PCI_PRODUCT_TRIONES_HPT374:
3716 after = hpt374_dma[drvp->DMA_mode];
3717 break;
3718 case PCI_PRODUCT_TRIONES_HPT372:
3719 after = hpt372_dma[drvp->DMA_mode];
3720 break;
3721 case PCI_PRODUCT_TRIONES_HPT366:
3722 default:
3723 switch (revision) {
3724 case HPT372_REV:
3725 after = hpt372_dma[drvp->DMA_mode];
3726 break;
3727 case HPT370_REV:
3728 case HPT370A_REV:
3729 after = hpt370_dma[drvp->DMA_mode];
3730 break;
3731 case HPT366_REV:
3732 default:
3733 after = hpt366_dma[drvp->DMA_mode];
3734 break;
3735 }
3736 }
3737 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3738 } else {
3739 /* PIO only */
3740 switch (sc->sc_pp->ide_product) {
3741 case PCI_PRODUCT_TRIONES_HPT374:
3742 after = hpt374_pio[drvp->PIO_mode];
3743 break;
3744 case PCI_PRODUCT_TRIONES_HPT372:
3745 after = hpt372_pio[drvp->PIO_mode];
3746 break;
3747 case PCI_PRODUCT_TRIONES_HPT366:
3748 default:
3749 switch (revision) {
3750 case HPT372_REV:
3751 after = hpt372_pio[drvp->PIO_mode];
3752 break;
3753 case HPT370_REV:
3754 case HPT370A_REV:
3755 after = hpt370_pio[drvp->PIO_mode];
3756 break;
3757 case HPT366_REV:
3758 default:
3759 after = hpt366_pio[drvp->PIO_mode];
3760 break;
3761 }
3762 }
3763 }
3764 pci_conf_write(sc->sc_pc, sc->sc_tag,
3765 HPT_IDETIM(chp->channel, drive), after);
3766 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3767 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3768 after, before), DEBUG_PROBE);
3769 }
3770 if (idedma_ctl != 0) {
3771 /* Add software bits in status register */
3772 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3773 IDEDMA_CTL, idedma_ctl);
3774 }
3775 pciide_print_modes(cp);
3776 }
3777
3778 int
3779 hpt_pci_intr(arg)
3780 void *arg;
3781 {
3782 struct pciide_softc *sc = arg;
3783 struct pciide_channel *cp;
3784 struct channel_softc *wdc_cp;
3785 int rv = 0;
3786 int dmastat, i, crv;
3787
3788 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3789 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3790 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
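/* Only service a channel whose DMA engine reports INTR and is no longer active. */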
3791 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3792 IDEDMA_CTL_INTR)
3793 continue;
3794 cp = &sc->pciide_channels[i];
3795 wdc_cp = &cp->wdc_channel;
3796 crv = wdcintr(wdc_cp);
3797 if (crv == 0) {
3798 printf("%s:%d: bogus intr\n",
3799 sc->sc_wdcdev.sc_dev.dv_xname, i);
3800 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3801 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3802 } else
3803 rv = 1;
3804 }
3805 return rv;
3806 }
3807
3808
3809 /* Macros to test product */
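/*
 * Rough product groupings (consistent with the UDMA capabilities assigned
 * in pdc202xx_chip_map below): PDC_IS_262 covers Ultra/66 and later,
 * PDC_IS_265 Ultra/100 and later, PDC_IS_268 the TX2 generation and later,
 * PDC_IS_276 the Ultra/133 parts.
 */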
3810 #define PDC_IS_262(sc) \
3811 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3812 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3813 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3814 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3815 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3816 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3817 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3818 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3819 #define PDC_IS_265(sc) \
3820 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3821 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3822 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3823 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3824 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3825 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3826 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3827 #define PDC_IS_268(sc) \
3828 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3829 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3830 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3831 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3832 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3833 #define PDC_IS_276(sc) \
3834 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3835 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3836 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3837
3838 void
3839 pdc202xx_chip_map(sc, pa)
3840 struct pciide_softc *sc;
3841 struct pci_attach_args *pa;
3842 {
3843 struct pciide_channel *cp;
3844 int channel;
3845 pcireg_t interface, st, mode;
3846 bus_size_t cmdsize, ctlsize;
3847
3848 if (!PDC_IS_268(sc)) {
3849 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3850 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3851 st), DEBUG_PROBE);
3852 }
3853 if (pciide_chipen(sc, pa) == 0)
3854 return;
3855
3856 /* turn off RAID mode */
3857 if (!PDC_IS_268(sc))
3858 st &= ~PDC2xx_STATE_IDERAID;
3859
3860 /*
3861 * We can't rely on the PCI_CLASS_REG content if the chip was in
3862 * RAID mode; we have to fake the interface.
3863 */
3864 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3865 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3866 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3867
3868 printf("%s: bus-master DMA support present",
3869 sc->sc_wdcdev.sc_dev.dv_xname);
3870 pciide_mapreg_dma(sc, pa);
3871 printf("\n");
3872 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3873 WDC_CAPABILITY_MODE;
3874 if (sc->sc_dma_ok) {
3875 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3876 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3877 sc->sc_wdcdev.irqack = pciide_irqack;
3878 }
3879 sc->sc_wdcdev.PIO_cap = 4;
3880 sc->sc_wdcdev.DMA_cap = 2;
3881 if (PDC_IS_276(sc))
3882 sc->sc_wdcdev.UDMA_cap = 6;
3883 else if (PDC_IS_265(sc))
3884 sc->sc_wdcdev.UDMA_cap = 5;
3885 else if (PDC_IS_262(sc))
3886 sc->sc_wdcdev.UDMA_cap = 4;
3887 else
3888 sc->sc_wdcdev.UDMA_cap = 2;
3889 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3890 pdc20268_setup_channel : pdc202xx_setup_channel;
3891 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3892 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3893
3894 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
3895 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
3896 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
3897 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
3898 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
3899 }
3900
3901 if (!PDC_IS_268(sc)) {
3902 /* setup failsafe defaults */
3903 mode = 0;
3904 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3905 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3906 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3907 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3908 for (channel = 0;
3909 channel < sc->sc_wdcdev.nchannels;
3910 channel++) {
3911 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3912 "drive 0 initial timings 0x%x, now 0x%x\n",
3913 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3914 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3915 DEBUG_PROBE);
3916 pci_conf_write(sc->sc_pc, sc->sc_tag,
3917 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3918 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3919 "drive 1 initial timings 0x%x, now 0x%x\n",
3920 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3921 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3922 pci_conf_write(sc->sc_pc, sc->sc_tag,
3923 PDC2xx_TIM(channel, 1), mode);
3924 }
3925
3926 mode = PDC2xx_SCR_DMA;
3927 if (PDC_IS_265(sc)) {
3928 mode = PDC2xx_SCR_SET_GEN(mode, PDC265_SCR_GEN_LAT);
3929 } else if (PDC_IS_262(sc)) {
3930 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3931 } else {
3932 /* the BIOS set it up this way */
3933 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3934 }
3935 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3936 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3937 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3938 "now 0x%x\n",
3939 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3940 PDC2xx_SCR),
3941 mode), DEBUG_PROBE);
3942 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3943 PDC2xx_SCR, mode);
3944
3945 /* controller initial state register is OK even without BIOS */
3946 /* Set DMA mode to IDE DMA compatibility */
3947 mode =
3948 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3949 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3950 DEBUG_PROBE);
3951 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3952 mode | 0x1);
3953 mode =
3954 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3955 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3956 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3957 mode | 0x1);
3958 }
3959
3960 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3961 cp = &sc->pciide_channels[channel];
3962 if (pciide_chansetup(sc, channel, interface) == 0)
3963 continue;
3964 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3965 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3966 printf("%s: %s channel ignored (disabled)\n",
3967 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3968 continue;
3969 }
3970 if (PDC_IS_265(sc))
3971 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3972 pdc20265_pci_intr);
3973 else
3974 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3975 pdc202xx_pci_intr);
3976 if (cp->hw_ok == 0)
3977 continue;
3978 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3979 st &= ~(PDC_IS_262(sc) ?
3980 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3981 pciide_map_compat_intr(pa, cp, channel, interface);
3982 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3983 }
3984 if (!PDC_IS_268(sc)) {
3985 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3986 "0x%x\n", st), DEBUG_PROBE);
3987 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3988 }
3989 return;
3990 }
3991
3992 void
3993 pdc202xx_setup_channel(chp)
3994 struct channel_softc *chp;
3995 {
3996 struct ata_drive_datas *drvp;
3997 int drive;
3998 pcireg_t mode, st;
3999 u_int32_t idedma_ctl, scr, atapi;
4000 struct pciide_channel *cp = (struct pciide_channel*)chp;
4001 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4002 int channel = chp->channel;
4003
4004 /* setup DMA if needed */
4005 pciide_channel_dma_setup(cp);
4006
4007 idedma_ctl = 0;
4008 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4009 sc->sc_wdcdev.sc_dev.dv_xname,
4010 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4011 DEBUG_PROBE);
4012
4013 /* Per channel settings */
4014 if (PDC_IS_262(sc)) {
4015 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4016 PDC262_U66);
4017 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4018 /* Trim UDMA mode */
4019 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4020 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4021 chp->ch_drive[0].UDMA_mode <= 2) ||
4022 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4023 chp->ch_drive[1].UDMA_mode <= 2)) {
4024 if (chp->ch_drive[0].UDMA_mode > 2)
4025 chp->ch_drive[0].UDMA_mode = 2;
4026 if (chp->ch_drive[1].UDMA_mode > 2)
4027 chp->ch_drive[1].UDMA_mode = 2;
4028 }
4029 /* Set U66 if needed */
4030 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4031 chp->ch_drive[0].UDMA_mode > 2) ||
4032 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4033 chp->ch_drive[1].UDMA_mode > 2))
4034 scr |= PDC262_U66_EN(channel);
4035 else
4036 scr &= ~PDC262_U66_EN(channel);
4037 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4038 PDC262_U66, scr);
4039 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4040 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4041 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4042 PDC262_ATAPI(channel))), DEBUG_PROBE);
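/*
 * With an ATAPI device on the channel, write PDC262_ATAPI_UDMA to the
 * ATAPI register unless one drive uses UDMA while the other uses
 * multiword DMA, in which case write 0.
 */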
4043 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4044 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4045 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4046 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4047 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4048 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4049 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4050 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4051 atapi = 0;
4052 else
4053 atapi = PDC262_ATAPI_UDMA;
4054 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4055 PDC262_ATAPI(channel), atapi);
4056 }
4057 }
4058 for (drive = 0; drive < 2; drive++) {
4059 drvp = &chp->ch_drive[drive];
4060 /* If no drive, skip */
4061 if ((drvp->drive_flags & DRIVE) == 0)
4062 continue;
4063 mode = 0;
4064 if (drvp->drive_flags & DRIVE_UDMA) {
4065 /* use Ultra/DMA */
4066 drvp->drive_flags &= ~DRIVE_DMA;
4067 mode = PDC2xx_TIM_SET_MB(mode,
4068 pdc2xx_udma_mb[drvp->UDMA_mode]);
4069 mode = PDC2xx_TIM_SET_MC(mode,
4070 pdc2xx_udma_mc[drvp->UDMA_mode]);
4071 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4072 } else if (drvp->drive_flags & DRIVE_DMA) {
4073 mode = PDC2xx_TIM_SET_MB(mode,
4074 pdc2xx_dma_mb[drvp->DMA_mode]);
4075 mode = PDC2xx_TIM_SET_MC(mode,
4076 pdc2xx_dma_mc[drvp->DMA_mode]);
4077 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4078 } else {
4079 mode = PDC2xx_TIM_SET_MB(mode,
4080 pdc2xx_dma_mb[0]);
4081 mode = PDC2xx_TIM_SET_MC(mode,
4082 pdc2xx_dma_mc[0]);
4083 }
4084 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4085 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4086 if (drvp->drive_flags & DRIVE_ATA)
4087 mode |= PDC2xx_TIM_PRE;
4088 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4089 if (drvp->PIO_mode >= 3) {
4090 mode |= PDC2xx_TIM_IORDY;
4091 if (drive == 0)
4092 mode |= PDC2xx_TIM_IORDYp;
4093 }
4094 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4095 "timings 0x%x\n",
4096 sc->sc_wdcdev.sc_dev.dv_xname,
4097 chp->channel, drive, mode), DEBUG_PROBE);
4098 pci_conf_write(sc->sc_pc, sc->sc_tag,
4099 PDC2xx_TIM(chp->channel, drive), mode);
4100 }
4101 if (idedma_ctl != 0) {
4102 /* Add software bits in status register */
4103 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4104 IDEDMA_CTL, idedma_ctl);
4105 }
4106 pciide_print_modes(cp);
4107 }
4108
4109 void
4110 pdc20268_setup_channel(chp)
4111 struct channel_softc *chp;
4112 {
4113 struct ata_drive_datas *drvp;
4114 int drive;
4115 u_int32_t idedma_ctl;
4116 struct pciide_channel *cp = (struct pciide_channel*)chp;
4117 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4118 int u100;
4119
4120 /* setup DMA if needed */
4121 pciide_channel_dma_setup(cp);
4122
4123 idedma_ctl = 0;
4124
4125 /* I don't know what this is for, FreeBSD does it ... */
4126 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4127 IDEDMA_CMD + 0x1, 0x0b);
4128
4129 /*
4130 * I don't know what this is for; FreeBSD checks this ... this is not
4131 * cable type detect.
4132 */
4133 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4134 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
4135
4136 for (drive = 0; drive < 2; drive++) {
4137 drvp = &chp->ch_drive[drive];
4138 /* If no drive, skip */
4139 if ((drvp->drive_flags & DRIVE) == 0)
4140 continue;
4141 if (drvp->drive_flags & DRIVE_UDMA) {
4142 /* use Ultra/DMA */
4143 drvp->drive_flags &= ~DRIVE_DMA;
4144 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4145 if (drvp->UDMA_mode > 2 && u100 == 0)
4146 drvp->UDMA_mode = 2;
4147 } else if (drvp->drive_flags & DRIVE_DMA) {
4148 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4149 }
4150 }
4151 /* nothing to do to set up modes; the controller snoops the SET_FEATURE cmd */
4152 if (idedma_ctl != 0) {
4153 /* Add software bits in status register */
4154 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4155 IDEDMA_CTL, idedma_ctl);
4156 }
4157 pciide_print_modes(cp);
4158 }
4159
4160 int
4161 pdc202xx_pci_intr(arg)
4162 void *arg;
4163 {
4164 struct pciide_softc *sc = arg;
4165 struct pciide_channel *cp;
4166 struct channel_softc *wdc_cp;
4167 int i, rv, crv;
4168 u_int32_t scr;
4169
4170 rv = 0;
4171 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4172 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4173 cp = &sc->pciide_channels[i];
4174 wdc_cp = &cp->wdc_channel;
4175 /* If a compat channel, skip. */
4176 if (cp->compat)
4177 continue;
4178 if (scr & PDC2xx_SCR_INT(i)) {
4179 crv = wdcintr(wdc_cp);
4180 if (crv == 0)
4181 printf("%s:%d: bogus intr (reg 0x%x)\n",
4182 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4183 else
4184 rv = 1;
4185 }
4186 }
4187 return rv;
4188 }
4189
4190 int
4191 pdc20265_pci_intr(arg)
4192 void *arg;
4193 {
4194 struct pciide_softc *sc = arg;
4195 struct pciide_channel *cp;
4196 struct channel_softc *wdc_cp;
4197 int i, rv, crv;
4198 u_int32_t dmastat;
4199
4200 rv = 0;
4201 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4202 cp = &sc->pciide_channels[i];
4203 wdc_cp = &cp->wdc_channel;
4204 /* If a compat channel, skip. */
4205 if (cp->compat)
4206 continue;
4207 /*
4208 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4209 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
4210 * so use that instead (requires 2 reg reads instead of 1,
4211 * but we can't do it another way).
4212 */
4213 dmastat = bus_space_read_1(sc->sc_dma_iot,
4214 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4215 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4216 continue;
4217 crv = wdcintr(wdc_cp);
4218 if (crv == 0)
4219 printf("%s:%d: bogus intr\n",
4220 sc->sc_wdcdev.sc_dev.dv_xname, i);
4221 else
4222 rv = 1;
4223 }
4224 return rv;
4225 }
4226
4227 static void
4228 pdc20262_dma_start(v, channel, drive)
4229 void *v;
4230 int channel, drive;
4231 {
4232 struct pciide_softc *sc = v;
4233 struct pciide_dma_maps *dma_maps =
4234 &sc->pciide_channels[channel].dma_maps[drive];
4235 int atapi;
4236
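/*
 * For LBA48 transfers, program the channel's ATAPI register with the
 * LBA48 read or write value and the transfer size in 16-bit words.
 */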
4237 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4238 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4239 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4240 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4241 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4242 PDC262_ATAPI(channel), atapi);
4243 }
4244
4245 pciide_dma_start(v, channel, drive);
4246 }
4247
4248 int
4249 pdc20262_dma_finish(v, channel, drive, force)
4250 void *v;
4251 int channel, drive;
4252 int force;
4253 {
4254 struct pciide_softc *sc = v;
4255 struct pciide_dma_maps *dma_maps =
4256 &sc->pciide_channels[channel].dma_maps[drive];
4257 struct channel_softc *chp;
4258 int atapi, error;
4259
4260 error = pciide_dma_finish(v, channel, drive, force);
4261
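/*
 * After an LBA48 transfer, restore the channel's ATAPI register: write
 * back PDC262_ATAPI_UDMA when an ATAPI drive is present and the
 * UDMA/MW-DMA mix rule from pdc202xx_setup_channel allows it, 0 otherwise.
 */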
4262 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4263 chp = sc->wdc_chanarray[channel];
4264 atapi = 0;
4265 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4266 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4267 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4268 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4269 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4270 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4271 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4272 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4273 atapi = PDC262_ATAPI_UDMA;
4274 }
4275 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4276 PDC262_ATAPI(channel), atapi);
4277 }
4278
4279 return error;
4280 }
4281
4282 void
4283 opti_chip_map(sc, pa)
4284 struct pciide_softc *sc;
4285 struct pci_attach_args *pa;
4286 {
4287 struct pciide_channel *cp;
4288 bus_size_t cmdsize, ctlsize;
4289 pcireg_t interface;
4290 u_int8_t init_ctrl;
4291 int channel;
4292
4293 if (pciide_chipen(sc, pa) == 0)
4294 return;
4295 printf("%s: bus-master DMA support present",
4296 sc->sc_wdcdev.sc_dev.dv_xname);
4297
4298 /*
4299 * XXXSCW:
4300 * There seem to be a couple of buggy revisions/implementations
4301 * of the OPTi pciide chipset. This kludge seems to fix one of
4302 * the reported problems (PR/11644) but still fails for the
4303 * other (PR/13151), although the latter may be due to other
4304 * issues too...
4305 */
4306 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4307 printf(" but disabled due to chip rev. <= 0x12");
4308 sc->sc_dma_ok = 0;
4309 } else
4310 pciide_mapreg_dma(sc, pa);
4311
4312 printf("\n");
4313
4314 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4315 WDC_CAPABILITY_MODE;
4316 sc->sc_wdcdev.PIO_cap = 4;
4317 if (sc->sc_dma_ok) {
4318 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4319 sc->sc_wdcdev.irqack = pciide_irqack;
4320 sc->sc_wdcdev.DMA_cap = 2;
4321 }
4322 sc->sc_wdcdev.set_modes = opti_setup_channel;
4323
4324 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4325 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4326
4327 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4328 OPTI_REG_INIT_CONTROL);
4329
4330 interface = PCI_INTERFACE(pa->pa_class);
4331
4332 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4333 cp = &sc->pciide_channels[channel];
4334 if (pciide_chansetup(sc, channel, interface) == 0)
4335 continue;
4336 if (channel == 1 &&
4337 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4338 printf("%s: %s channel ignored (disabled)\n",
4339 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4340 continue;
4341 }
4342 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4343 pciide_pci_intr);
4344 if (cp->hw_ok == 0)
4345 continue;
4346 pciide_map_compat_intr(pa, cp, channel, interface);
4347 if (cp->hw_ok == 0)
4348 continue;
4349 opti_setup_channel(&cp->wdc_channel);
4350 }
4351 }
4352
4353 void
4354 opti_setup_channel(chp)
4355 struct channel_softc *chp;
4356 {
4357 struct ata_drive_datas *drvp;
4358 struct pciide_channel *cp = (struct pciide_channel*)chp;
4359 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4360 int drive, spd;
4361 int mode[2];
4362 u_int8_t rv, mr;
4363
4364 /*
4365 * The `Delay' and `Address Setup Time' fields of the
4366 * Miscellaneous Register are always zero initially.
4367 */
4368 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4369 mr &= ~(OPTI_MISC_DELAY_MASK |
4370 OPTI_MISC_ADDR_SETUP_MASK |
4371 OPTI_MISC_INDEX_MASK);
4372
4373 /* Prime the control register before setting timing values */
4374 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4375
4376 /* Determine the clockrate of the PCIbus the chip is attached to */
4377 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4378 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4379
4380 /* setup DMA if needed */
4381 pciide_channel_dma_setup(cp);
4382
4383 for (drive = 0; drive < 2; drive++) {
4384 drvp = &chp->ch_drive[drive];
4385 /* If no drive, skip */
4386 if ((drvp->drive_flags & DRIVE) == 0) {
4387 mode[drive] = -1;
4388 continue;
4389 }
4390
4391 if ((drvp->drive_flags & DRIVE_DMA)) {
4392 /*
4393 * Timings will be used for both PIO and DMA,
4394 * so adjust DMA mode if needed
4395 */
4396 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4397 drvp->PIO_mode = drvp->DMA_mode + 2;
4398 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4399 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4400 drvp->PIO_mode - 2 : 0;
4401 if (drvp->DMA_mode == 0)
4402 drvp->PIO_mode = 0;
4403
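/*
 * DMA entries presumably follow the five PIO entries in the
 * opti_tim_* tables, hence the "+ 5" below.
 */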
4404 mode[drive] = drvp->DMA_mode + 5;
4405 } else
4406 mode[drive] = drvp->PIO_mode;
4407
4408 if (drive && mode[0] >= 0 &&
4409 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4410 /*
4411 * Can't have two drives using different values
4412 * for `Address Setup Time'.
4413 * Slow down the faster drive to compensate.
4414 */
4415 int d = (opti_tim_as[spd][mode[0]] >
4416 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4417
4418 mode[d] = mode[1-d];
4419 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4420 chp->ch_drive[d].DMA_mode = 0;
4421 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4422 }
4423 }
4424
4425 for (drive = 0; drive < 2; drive++) {
4426 int m;
4427 if ((m = mode[drive]) < 0)
4428 continue;
4429
4430 /* Set the Address Setup Time and select appropriate index */
4431 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4432 rv |= OPTI_MISC_INDEX(drive);
4433 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4434
4435 /* Set the pulse width and recovery timing parameters */
4436 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4437 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4438 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4439 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4440
4441 /* Set the Enhanced Mode register appropriately */
4442 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4443 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4444 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4445 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4446 }
4447
4448 /* Finally, enable the timings */
4449 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4450
4451 pciide_print_modes(cp);
4452 }
4453
4454 #define ACARD_IS_850(sc) \
4455 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4456
4457 void
4458 acard_chip_map(sc, pa)
4459 struct pciide_softc *sc;
4460 struct pci_attach_args *pa;
4461 {
4462 struct pciide_channel *cp;
4463 int i;
4464 pcireg_t interface;
4465 bus_size_t cmdsize, ctlsize;
4466
4467 if (pciide_chipen(sc, pa) == 0)
4468 return;
4469
4470 /*
4471 * When the chip is in native mode it identifies itself as
4472 * 'misc mass storage'. Fake the interface in this case.
4473 */
4474 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4475 interface = PCI_INTERFACE(pa->pa_class);
4476 } else {
4477 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4478 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4479 }
4480
4481 printf("%s: bus-master DMA support present",
4482 sc->sc_wdcdev.sc_dev.dv_xname);
4483 pciide_mapreg_dma(sc, pa);
4484 printf("\n");
4485 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4486 WDC_CAPABILITY_MODE;
4487
4488 if (sc->sc_dma_ok) {
4489 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4490 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4491 sc->sc_wdcdev.irqack = pciide_irqack;
4492 }
4493 sc->sc_wdcdev.PIO_cap = 4;
4494 sc->sc_wdcdev.DMA_cap = 2;
4495 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4496
4497 sc->sc_wdcdev.set_modes = acard_setup_channel;
4498 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4499 sc->sc_wdcdev.nchannels = 2;
4500
4501 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4502 cp = &sc->pciide_channels[i];
4503 if (pciide_chansetup(sc, i, interface) == 0)
4504 continue;
4505 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4506 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4507 &ctlsize, pciide_pci_intr);
4508 } else {
4509 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4510 &cmdsize, &ctlsize);
4511 }
4512 if (cp->hw_ok == 0)
4513 return;
4514 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4515 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4516 wdcattach(&cp->wdc_channel);
4517 acard_setup_channel(&cp->wdc_channel);
4518 }
4519 if (!ACARD_IS_850(sc)) {
4520 u_int32_t reg;
4521 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4522 reg &= ~ATP860_CTRL_INT;
4523 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4524 }
4525 }
4526
4527 void
4528 acard_setup_channel(chp)
4529 struct channel_softc *chp;
4530 {
4531 struct ata_drive_datas *drvp;
4532 struct pciide_channel *cp = (struct pciide_channel*)chp;
4533 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4534 int channel = chp->channel;
4535 int drive;
4536 u_int32_t idetime, udma_mode;
4537 u_int32_t idedma_ctl;
4538
4539 /* setup DMA if needed */
4540 pciide_channel_dma_setup(cp);
4541
4542 if (ACARD_IS_850(sc)) {
4543 idetime = 0;
4544 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4545 udma_mode &= ~ATP850_UDMA_MASK(channel);
4546 } else {
4547 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4548 idetime &= ~ATP860_SETTIME_MASK(channel);
4549 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4550 udma_mode &= ~ATP860_UDMA_MASK(channel);
4551
4552 /* check 80-pin cable */
4553 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4554 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4555 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4556 & ATP860_CTRL_80P(chp->channel)) {
4557 if (chp->ch_drive[0].UDMA_mode > 2)
4558 chp->ch_drive[0].UDMA_mode = 2;
4559 if (chp->ch_drive[1].UDMA_mode > 2)
4560 chp->ch_drive[1].UDMA_mode = 2;
4561 }
4562 }
4563 }
4564
4565 idedma_ctl = 0;
4566
4567 /* Per drive settings */
4568 for (drive = 0; drive < 2; drive++) {
4569 drvp = &chp->ch_drive[drive];
4570 /* If no drive, skip */
4571 if ((drvp->drive_flags & DRIVE) == 0)
4572 continue;
4573 /* add timing values, setup DMA if needed */
4574 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4575 (drvp->drive_flags & DRIVE_UDMA)) {
4576 /* use Ultra/DMA */
4577 if (ACARD_IS_850(sc)) {
4578 idetime |= ATP850_SETTIME(drive,
4579 acard_act_udma[drvp->UDMA_mode],
4580 acard_rec_udma[drvp->UDMA_mode]);
4581 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4582 acard_udma_conf[drvp->UDMA_mode]);
4583 } else {
4584 idetime |= ATP860_SETTIME(channel, drive,
4585 acard_act_udma[drvp->UDMA_mode],
4586 acard_rec_udma[drvp->UDMA_mode]);
4587 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4588 acard_udma_conf[drvp->UDMA_mode]);
4589 }
4590 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4591 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4592 (drvp->drive_flags & DRIVE_DMA)) {
4593 /* use Multiword DMA */
4594 drvp->drive_flags &= ~DRIVE_UDMA;
4595 if (ACARD_IS_850(sc)) {
4596 idetime |= ATP850_SETTIME(drive,
4597 acard_act_dma[drvp->DMA_mode],
4598 acard_rec_dma[drvp->DMA_mode]);
4599 } else {
4600 idetime |= ATP860_SETTIME(channel, drive,
4601 acard_act_dma[drvp->DMA_mode],
4602 acard_rec_dma[drvp->DMA_mode]);
4603 }
4604 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4605 } else {
4606 /* PIO only */
4607 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4608 if (ACARD_IS_850(sc)) {
4609 idetime |= ATP850_SETTIME(drive,
4610 acard_act_pio[drvp->PIO_mode],
4611 acard_rec_pio[drvp->PIO_mode]);
4612 } else {
4613 idetime |= ATP860_SETTIME(channel, drive,
4614 acard_act_pio[drvp->PIO_mode],
4615 acard_rec_pio[drvp->PIO_mode]);
4616 }
4617 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4618 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4619 | ATP8x0_CTRL_EN(channel));
4620 }
4621 }
4622
4623 if (idedma_ctl != 0) {
4624 /* Add software bits in status register */
4625 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4626 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4627 }
4628 pciide_print_modes(cp);
4629
4630 if (ACARD_IS_850(sc)) {
4631 pci_conf_write(sc->sc_pc, sc->sc_tag,
4632 ATP850_IDETIME(channel), idetime);
4633 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4634 } else {
4635 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4636 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4637 }
4638 }
4639
4640 int
4641 acard_pci_intr(arg)
4642 void *arg;
4643 {
4644 struct pciide_softc *sc = arg;
4645 struct pciide_channel *cp;
4646 struct channel_softc *wdc_cp;
4647 int rv = 0;
4648 int dmastat, i, crv;
4649
4650 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4651 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4652 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4653 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4654 continue;
4655 cp = &sc->pciide_channels[i];
4656 wdc_cp = &cp->wdc_channel;
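/*
 * If no command is pending (WDCF_IRQ_WAIT clear), treat this as a stray
 * interrupt: run wdcintr() anyway and write the DMA status back to
 * acknowledge it, then move on.
 */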
4657 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4658 (void)wdcintr(wdc_cp);
4659 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4660 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4661 continue;
4662 }
4663 crv = wdcintr(wdc_cp);
4664 if (crv == 0)
4665 printf("%s:%d: bogus intr\n",
4666 sc->sc_wdcdev.sc_dev.dv_xname, i);
4667 else if (crv == 1)
4668 rv = 1;
4669 else if (rv == 0)
4670 rv = crv;
4671 }
4672 return rv;
4673 }
4674
4675 static int
4676 sl82c105_bugchk(struct pci_attach_args *pa)
4677 {
4678
4679 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4680 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4681 return (0);
4682
4683 if (PCI_REVISION(pa->pa_class) <= 0x05)
4684 return (1);
4685
4686 return (0);
4687 }
4688
4689 void
4690 sl82c105_chip_map(sc, pa)
4691 struct pciide_softc *sc;
4692 struct pci_attach_args *pa;
4693 {
4694 struct pciide_channel *cp;
4695 bus_size_t cmdsize, ctlsize;
4696 pcireg_t interface, idecr;
4697 int channel;
4698
4699 if (pciide_chipen(sc, pa) == 0)
4700 return;
4701
4702 printf("%s: bus-master DMA support present",
4703 sc->sc_wdcdev.sc_dev.dv_xname);
4704
4705 /*
4706 * Check to see if we're part of the Winbond 83c553 Southbridge.
4707 * If so, we need to disable DMA on rev. <= 5 of that chip.
4708 */
4709 if (pci_find_device(pa, sl82c105_bugchk)) {
4710 printf(" but disabled due to 83c553 rev. <= 0x05");
4711 sc->sc_dma_ok = 0;
4712 } else
4713 pciide_mapreg_dma(sc, pa);
4714 printf("\n");
4715
4716 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4717 WDC_CAPABILITY_MODE;
4718 sc->sc_wdcdev.PIO_cap = 4;
4719 if (sc->sc_dma_ok) {
4720 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4721 sc->sc_wdcdev.irqack = pciide_irqack;
4722 sc->sc_wdcdev.DMA_cap = 2;
4723 }
4724 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4725
4726 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4727 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4728
4729 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4730
4731 interface = PCI_INTERFACE(pa->pa_class);
4732
4733 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4734 cp = &sc->pciide_channels[channel];
4735 if (pciide_chansetup(sc, channel, interface) == 0)
4736 continue;
4737 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4738 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4739 printf("%s: %s channel ignored (disabled)\n",
4740 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4741 continue;
4742 }
4743 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4744 pciide_pci_intr);
4745 if (cp->hw_ok == 0)
4746 continue;
4747 pciide_map_compat_intr(pa, cp, channel, interface);
4748 if (cp->hw_ok == 0)
4749 continue;
4750 sl82c105_setup_channel(&cp->wdc_channel);
4751 }
4752 }
4753
4754 void
4755 sl82c105_setup_channel(chp)
4756 struct channel_softc *chp;
4757 {
4758 struct ata_drive_datas *drvp;
4759 struct pciide_channel *cp = (struct pciide_channel*)chp;
4760 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4761 int pxdx_reg, drive;
4762 pcireg_t pxdx;
4763
4764 /* Set up DMA if needed. */
4765 pciide_channel_dma_setup(cp);
4766
4767 for (drive = 0; drive < 2; drive++) {
4768 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4769 : SYMPH_P1D0CR) + (drive * 4);
4770
4771 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4772
4773 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4774 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4775
4776 drvp = &chp->ch_drive[drive];
4777 /* If no drive, skip. */
4778 if ((drvp->drive_flags & DRIVE) == 0) {
4779 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4780 continue;
4781 }
4782
4783 if (drvp->drive_flags & DRIVE_DMA) {
4784 /*
4785 * Timings will be used for both PIO and DMA,
4786 * so adjust DMA mode if needed.
4787 */
4788 if (drvp->PIO_mode >= 3) {
4789 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4790 drvp->DMA_mode = drvp->PIO_mode - 2;
4791 if (drvp->DMA_mode < 1) {
4792 /*
4793 * Can't mix both PIO and DMA.
4794 * Disable DMA.
4795 */
4796 drvp->drive_flags &= ~DRIVE_DMA;
4797 }
4798 } else {
4799 /*
4800 * Can't mix both PIO and DMA. Disable
4801 * DMA.
4802 */
4803 drvp->drive_flags &= ~DRIVE_DMA;
4804 }
4805 }
4806
4807 if (drvp->drive_flags & DRIVE_DMA) {
4808 /* Use multi-word DMA. */
4809 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4810 PxDx_CMD_ON_SHIFT;
4811 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4812 } else {
4813 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4814 PxDx_CMD_ON_SHIFT;
4815 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4816 }
4817
4818 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4819
4820 /* ...and set the mode for this drive. */
4821 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4822 }
4823
4824 pciide_print_modes(cp);
4825 }
4826
4827 void
4828 serverworks_chip_map(sc, pa)
4829 struct pciide_softc *sc;
4830 struct pci_attach_args *pa;
4831 {
4832 struct pciide_channel *cp;
4833 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4834 pcitag_t pcib_tag;
4835 int channel;
4836 bus_size_t cmdsize, ctlsize;
4837
4838 if (pciide_chipen(sc, pa) == 0)
4839 return;
4840
4841 printf("%s: bus-master DMA support present",
4842 sc->sc_wdcdev.sc_dev.dv_xname);
4843 pciide_mapreg_dma(sc, pa);
4844 printf("\n");
4845 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4846 WDC_CAPABILITY_MODE;
4847
4848 if (sc->sc_dma_ok) {
4849 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4850 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4851 sc->sc_wdcdev.irqack = pciide_irqack;
4852 }
4853 sc->sc_wdcdev.PIO_cap = 4;
4854 sc->sc_wdcdev.DMA_cap = 2;
4855 switch (sc->sc_pp->ide_product) {
4856 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4857 sc->sc_wdcdev.UDMA_cap = 2;
4858 break;
4859 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4860 if (PCI_REVISION(pa->pa_class) < 0x92)
4861 sc->sc_wdcdev.UDMA_cap = 4;
4862 else
4863 sc->sc_wdcdev.UDMA_cap = 5;
4864 break;
4865 }
4866
4867 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4868 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4869 sc->sc_wdcdev.nchannels = 2;
4870
4871 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4872 cp = &sc->pciide_channels[channel];
4873 if (pciide_chansetup(sc, channel, interface) == 0)
4874 continue;
4875 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4876 serverworks_pci_intr);
4877 if (cp->hw_ok == 0)
4878 return;
4879 pciide_map_compat_intr(pa, cp, channel, interface);
4880 if (cp->hw_ok == 0)
4881 return;
4882 serverworks_setup_channel(&cp->wdc_channel);
4883 }
4884
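/*
 * Fiddle bits in config register 0x64 of PCI function 0 of this device:
 * clear bit 13 (0x2000) and set bit 14 (0x4000); the purpose is not
 * documented here.
 */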
4885 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4886 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4887 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4888 }
4889
4890 void
4891 serverworks_setup_channel(chp)
4892 struct channel_softc *chp;
4893 {
4894 struct ata_drive_datas *drvp;
4895 struct pciide_channel *cp = (struct pciide_channel*)chp;
4896 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4897 int channel = chp->channel;
4898 int drive, unit;
4899 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4900 u_int32_t idedma_ctl;
4901 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4902 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4903
4904 /* setup DMA if needed */
4905 pciide_channel_dma_setup(cp);
4906
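/*
 * Per-channel timing/mode registers in PCI config space (names inferred
 * from the usage below): 0x40 PIO timings, 0x44 MW-DMA timings,
 * 0x48 PIO modes, 0x54 UDMA modes and enables.
 */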
4907 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4908 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4909 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4910 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4911
4912 pio_time &= ~(0xffff << (16 * channel));
4913 dma_time &= ~(0xffff << (16 * channel));
4914 pio_mode &= ~(0xff << (8 * channel + 16));
4915 udma_mode &= ~(0xff << (8 * channel + 16));
4916 udma_mode &= ~(3 << (2 * channel));
4917
4918 idedma_ctl = 0;
4919
4920 /* Per drive settings */
4921 for (drive = 0; drive < 2; drive++) {
4922 drvp = &chp->ch_drive[drive];
4923 /* If no drive, skip */
4924 if ((drvp->drive_flags & DRIVE) == 0)
4925 continue;
4926 unit = drive + 2 * channel;
4927 /* add timing values, setup DMA if needed */
4928 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4929 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4930 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4931 (drvp->drive_flags & DRIVE_UDMA)) {
4932 /* use Ultra/DMA, check for 80-pin cable */
4933 if (drvp->UDMA_mode > 2 &&
4934 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4935 drvp->UDMA_mode = 2;
4936 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4937 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4938 udma_mode |= 1 << unit;
4939 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4940 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4941 (drvp->drive_flags & DRIVE_DMA)) {
4942 /* use Multiword DMA */
4943 drvp->drive_flags &= ~DRIVE_UDMA;
4944 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4945 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4946 } else {
4947 /* PIO only */
4948 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4949 }
4950 }
4951
4952 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4953 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4954 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4955 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4956 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4957
4958 if (idedma_ctl != 0) {
4959 /* Add software bits in status register */
4960 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4961 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4962 }
4963 pciide_print_modes(cp);
4964 }
4965
4966 int
4967 serverworks_pci_intr(arg)
4968 void *arg;
4969 {
4970 struct pciide_softc *sc = arg;
4971 struct pciide_channel *cp;
4972 struct channel_softc *wdc_cp;
4973 int rv = 0;
4974 int dmastat, i, crv;
4975
4976 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4977 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4978 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4979 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4980 IDEDMA_CTL_INTR)
4981 continue;
4982 cp = &sc->pciide_channels[i];
4983 wdc_cp = &cp->wdc_channel;
4984 crv = wdcintr(wdc_cp);
4985 if (crv == 0) {
4986 printf("%s:%d: bogus intr\n",
4987 sc->sc_wdcdev.sc_dev.dv_xname, i);
4988 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4989 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4990 } else
4991 rv = 1;
4992 }
4993 return rv;
4994 }
4995