1 /* $NetBSD: pciide.c,v 1.165 2002/08/23 16:02:32 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.165 2002/08/23 16:02:32 bouyer Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
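/*
 * Illustrative note: to get probe-time and DMA debug output, set the
 * mask above before the driver attaches, e.g. from ddb(4) or by
 * patching the kernel:
 *
 *	wdcdebug_pciide_mask = DEBUG_PROBE | DEBUG_DMA;
 *
 * Any WDCDEBUG_PRINT() whose level bit is set in the mask is printed.
 */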
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
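/*
 * Worked example (illustrative only): pciide_pci_read(pc, tag, 0x41)
 * reads the aligned config dword at offset 0x40 and returns bits 15:8,
 * i.e. (pci_conf_read(pc, tag, 0x40) >> 8) & 0xff.  pciide_pci_write()
 * performs the matching read-modify-write on the same byte lane,
 * leaving the other three bytes of the dword untouched.
 */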
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd680_setup_channel __P((struct channel_softc*));
182 void cmd680_channel_map __P((struct pci_attach_args *,
183 struct pciide_softc *, int));
184
185 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void cy693_setup_channel __P((struct channel_softc*));
187
188 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void sis_setup_channel __P((struct channel_softc*));
190 static int sis_hostbr_match __P(( struct pci_attach_args *));
191
192 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void acer_setup_channel __P((struct channel_softc*));
194 int acer_pci_intr __P((void *));
195
196 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void pdc202xx_setup_channel __P((struct channel_softc*));
198 void pdc20268_setup_channel __P((struct channel_softc*));
199 int pdc202xx_pci_intr __P((void *));
200 int pdc20265_pci_intr __P((void *));
201
202 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void opti_setup_channel __P((struct channel_softc*));
204
205 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void hpt_setup_channel __P((struct channel_softc*));
207 int hpt_pci_intr __P((void *));
208
209 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void acard_setup_channel __P((struct channel_softc*));
211 int acard_pci_intr __P((void *));
212
213 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void serverworks_setup_channel __P((struct channel_softc*));
215 int serverworks_pci_intr __P((void *));
216
217 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void sl82c105_setup_channel __P((struct channel_softc*));
219
220 void pciide_channel_dma_setup __P((struct pciide_channel *));
221 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
222 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
223 void pciide_dma_start __P((void*, int, int));
224 int pciide_dma_finish __P((void*, int, int, int));
225 void pciide_irqack __P((struct channel_softc *));
226 void pciide_print_modes __P((struct pciide_channel *));
227
228 struct pciide_product_desc {
229 u_int32_t ide_product;
230 int ide_flags;
231 const char *ide_name;
232 /* map and setup chip, probe drives */
233 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
234 };
235
236 /* Flags for ide_flags */
237 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
238 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
239
240 /* Default product description for devices not known to this driver */
241 const struct pciide_product_desc default_product_desc = {
242 0,
243 0,
244 "Generic PCI IDE controller",
245 default_chip_map,
246 };
247
248 const struct pciide_product_desc pciide_intel_products[] = {
249 { PCI_PRODUCT_INTEL_82092AA,
250 0,
251 "Intel 82092AA IDE controller",
252 default_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82371FB_IDE,
255 0,
256 "Intel 82371FB IDE controller (PIIX)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82371SB_IDE,
260 0,
261 "Intel 82371SB IDE Interface (PIIX3)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82371AB_IDE,
265 0,
266 "Intel 82371AB IDE controller (PIIX4)",
267 piix_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82440MX_IDE,
270 0,
271 "Intel 82440MX IDE controller",
272 piix_chip_map
273 },
274 { PCI_PRODUCT_INTEL_82801AA_IDE,
275 0,
276 "Intel 82801AA IDE Controller (ICH)",
277 piix_chip_map,
278 },
279 { PCI_PRODUCT_INTEL_82801AB_IDE,
280 0,
281 "Intel 82801AB IDE Controller (ICH0)",
282 piix_chip_map,
283 },
284 { PCI_PRODUCT_INTEL_82801BA_IDE,
285 0,
286 "Intel 82801BA IDE Controller (ICH2)",
287 piix_chip_map,
288 },
289 { PCI_PRODUCT_INTEL_82801BAM_IDE,
290 0,
291 "Intel 82801BAM IDE Controller (ICH2)",
292 piix_chip_map,
293 },
294 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
295 0,
296 "Intel 82801CA IDE Controller",
297 piix_chip_map,
298 },
299 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
300 0,
301 "Intel 82801CA IDE Controller",
302 piix_chip_map,
303 },
304 { PCI_PRODUCT_INTEL_82801DB_IDE,
305 0,
306 "Intel 82801DB IDE Controller (ICH4)",
307 piix_chip_map,
308 },
309 { 0,
310 0,
311 NULL,
312 NULL
313 }
314 };
315
316 const struct pciide_product_desc pciide_amd_products[] = {
317 { PCI_PRODUCT_AMD_PBC756_IDE,
318 0,
319 "Advanced Micro Devices AMD756 IDE Controller",
320 amd7x6_chip_map
321 },
322 { PCI_PRODUCT_AMD_PBC766_IDE,
323 0,
324 "Advanced Micro Devices AMD766 IDE Controller",
325 amd7x6_chip_map
326 },
327 { PCI_PRODUCT_AMD_PBC768_IDE,
328 0,
329 "Advanced Micro Devices AMD768 IDE Controller",
330 amd7x6_chip_map
331 },
332 { PCI_PRODUCT_AMD_PBC8111_IDE,
333 0,
334 "Advanced Micro Devices AMD8111 IDE Controller",
335 amd7x6_chip_map
336 },
337 { 0,
338 0,
339 NULL,
340 NULL
341 }
342 };
343
344 const struct pciide_product_desc pciide_cmd_products[] = {
345 { PCI_PRODUCT_CMDTECH_640,
346 0,
347 "CMD Technology PCI0640",
348 cmd_chip_map
349 },
350 { PCI_PRODUCT_CMDTECH_643,
351 0,
352 "CMD Technology PCI0643",
353 cmd0643_9_chip_map,
354 },
355 { PCI_PRODUCT_CMDTECH_646,
356 0,
357 "CMD Technology PCI0646",
358 cmd0643_9_chip_map,
359 },
360 { PCI_PRODUCT_CMDTECH_648,
361 IDE_PCI_CLASS_OVERRIDE,
362 "CMD Technology PCI0648",
363 cmd0643_9_chip_map,
364 },
365 { PCI_PRODUCT_CMDTECH_649,
366 IDE_PCI_CLASS_OVERRIDE,
367 "CMD Technology PCI0649",
368 cmd0643_9_chip_map,
369 },
370 { PCI_PRODUCT_CMDTECH_680,
371 IDE_PCI_CLASS_OVERRIDE,
372 "Silicon Image 0680",
373 cmd680_chip_map,
374 },
375 { 0,
376 0,
377 NULL,
378 NULL
379 }
380 };
381
382 const struct pciide_product_desc pciide_via_products[] = {
383 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
384 0,
385 NULL,
386 apollo_chip_map,
387 },
388 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
389 0,
390 NULL,
391 apollo_chip_map,
392 },
393 { 0,
394 0,
395 NULL,
396 NULL
397 }
398 };
399
400 const struct pciide_product_desc pciide_cypress_products[] = {
401 { PCI_PRODUCT_CONTAQ_82C693,
402 IDE_16BIT_IOSPACE,
403 "Cypress 82C693 IDE Controller",
404 cy693_chip_map,
405 },
406 { 0,
407 0,
408 NULL,
409 NULL
410 }
411 };
412
413 const struct pciide_product_desc pciide_sis_products[] = {
414 { PCI_PRODUCT_SIS_5597_IDE,
415 0,
416 "Silicon Integrated System 5597/5598 IDE controller",
417 sis_chip_map,
418 },
419 { 0,
420 0,
421 NULL,
422 NULL
423 }
424 };
425
426 const struct pciide_product_desc pciide_acer_products[] = {
427 { PCI_PRODUCT_ALI_M5229,
428 0,
429 "Acer Labs M5229 UDMA IDE Controller",
430 acer_chip_map,
431 },
432 { 0,
433 0,
434 NULL,
435 NULL
436 }
437 };
438
439 const struct pciide_product_desc pciide_promise_products[] = {
440 { PCI_PRODUCT_PROMISE_ULTRA33,
441 IDE_PCI_CLASS_OVERRIDE,
442 "Promise Ultra33/ATA Bus Master IDE Accelerator",
443 pdc202xx_chip_map,
444 },
445 { PCI_PRODUCT_PROMISE_ULTRA66,
446 IDE_PCI_CLASS_OVERRIDE,
447 "Promise Ultra66/ATA Bus Master IDE Accelerator",
448 pdc202xx_chip_map,
449 },
450 { PCI_PRODUCT_PROMISE_ULTRA100,
451 IDE_PCI_CLASS_OVERRIDE,
452 "Promise Ultra100/ATA Bus Master IDE Accelerator",
453 pdc202xx_chip_map,
454 },
455 { PCI_PRODUCT_PROMISE_ULTRA100X,
456 IDE_PCI_CLASS_OVERRIDE,
457 "Promise Ultra100/ATA Bus Master IDE Accelerator",
458 pdc202xx_chip_map,
459 },
460 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
461 IDE_PCI_CLASS_OVERRIDE,
462 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
463 pdc202xx_chip_map,
464 },
465 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
466 IDE_PCI_CLASS_OVERRIDE,
467 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
468 pdc202xx_chip_map,
469 },
470 { PCI_PRODUCT_PROMISE_ULTRA133,
471 IDE_PCI_CLASS_OVERRIDE,
472 "Promise Ultra133/ATA Bus Master IDE Accelerator",
473 pdc202xx_chip_map,
474 },
475 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
476 IDE_PCI_CLASS_OVERRIDE,
477 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
478 pdc202xx_chip_map,
479 },
480 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
481 IDE_PCI_CLASS_OVERRIDE,
482 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
483 pdc202xx_chip_map,
484 },
485 { 0,
486 0,
487 NULL,
488 NULL
489 }
490 };
491
492 const struct pciide_product_desc pciide_opti_products[] = {
493 { PCI_PRODUCT_OPTI_82C621,
494 0,
495 "OPTi 82c621 PCI IDE controller",
496 opti_chip_map,
497 },
498 { PCI_PRODUCT_OPTI_82C568,
499 0,
500 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
501 opti_chip_map,
502 },
503 { PCI_PRODUCT_OPTI_82D568,
504 0,
505 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
506 opti_chip_map,
507 },
508 { 0,
509 0,
510 NULL,
511 NULL
512 }
513 };
514
515 const struct pciide_product_desc pciide_triones_products[] = {
516 { PCI_PRODUCT_TRIONES_HPT366,
517 IDE_PCI_CLASS_OVERRIDE,
518 NULL,
519 hpt_chip_map,
520 },
521 { PCI_PRODUCT_TRIONES_HPT374,
522 IDE_PCI_CLASS_OVERRIDE,
523 NULL,
524 hpt_chip_map
525 },
526 { 0,
527 0,
528 NULL,
529 NULL
530 }
531 };
532
533 const struct pciide_product_desc pciide_acard_products[] = {
534 { PCI_PRODUCT_ACARD_ATP850U,
535 IDE_PCI_CLASS_OVERRIDE,
536 "Acard ATP850U Ultra33 IDE Controller",
537 acard_chip_map,
538 },
539 { PCI_PRODUCT_ACARD_ATP860,
540 IDE_PCI_CLASS_OVERRIDE,
541 "Acard ATP860 Ultra66 IDE Controller",
542 acard_chip_map,
543 },
544 { PCI_PRODUCT_ACARD_ATP860A,
545 IDE_PCI_CLASS_OVERRIDE,
546 "Acard ATP860-A Ultra66 IDE Controller",
547 acard_chip_map,
548 },
549 { 0,
550 0,
551 NULL,
552 NULL
553 }
554 };
555
556 const struct pciide_product_desc pciide_serverworks_products[] = {
557 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
558 0,
559 "ServerWorks OSB4 IDE Controller",
560 serverworks_chip_map,
561 },
562 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
563 0,
564 "ServerWorks CSB5 IDE Controller",
565 serverworks_chip_map,
566 },
567 { 0,
568 0,
569 NULL,
570 }
571 };
572
573 const struct pciide_product_desc pciide_symphony_products[] = {
574 { PCI_PRODUCT_SYMPHONY_82C105,
575 0,
576 "Symphony Labs 82C105 IDE controller",
577 sl82c105_chip_map,
578 },
579 { 0,
580 0,
581 NULL,
582 }
583 };
584
585 const struct pciide_product_desc pciide_winbond_products[] = {
586 { PCI_PRODUCT_WINBOND_W83C553F_1,
587 0,
588 "Winbond W83C553F IDE controller",
589 sl82c105_chip_map,
590 },
591 { 0,
592 0,
593 NULL,
594 }
595 };
596
597 struct pciide_vendor_desc {
598 u_int32_t ide_vendor;
599 const struct pciide_product_desc *ide_products;
600 };
601
602 const struct pciide_vendor_desc pciide_vendors[] = {
603 { PCI_VENDOR_INTEL, pciide_intel_products },
604 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
605 { PCI_VENDOR_VIATECH, pciide_via_products },
606 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
607 { PCI_VENDOR_SIS, pciide_sis_products },
608 { PCI_VENDOR_ALI, pciide_acer_products },
609 { PCI_VENDOR_PROMISE, pciide_promise_products },
610 { PCI_VENDOR_AMD, pciide_amd_products },
611 { PCI_VENDOR_OPTI, pciide_opti_products },
612 { PCI_VENDOR_TRIONES, pciide_triones_products },
613 { PCI_VENDOR_ACARD, pciide_acard_products },
614 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
615 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
616 { PCI_VENDOR_WINBOND, pciide_winbond_products },
617 { 0, NULL }
618 };
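/*
 * Note: each product table above is terminated by an entry with a NULL
 * chip_map, and the vendor table by an entry with a NULL ide_products;
 * pciide_lookup_product() relies on these sentinels.  Devices that are
 * not found fall back to default_product_desc in pciide_attach().
 */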
619
620 /* options passed via the 'flags' config keyword */
621 #define PCIIDE_OPTIONS_DMA 0x01
622 #define PCIIDE_OPTIONS_NODMA 0x02
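/*
 * Example kernel config usage (illustrative only):
 *
 *	pciide* at pci? dev ? function ? flags 0x0001	# try DMA on unknown chips
 *	pciide* at pci? dev ? function ? flags 0x0002	# never use DMA
 *
 * The flags value ends up in cf_flags and is checked against the
 * PCIIDE_OPTIONS_* bits above.
 */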
623
624 int pciide_match __P((struct device *, struct cfdata *, void *));
625 void pciide_attach __P((struct device *, struct device *, void *));
626
627 struct cfattach pciide_ca = {
628 sizeof(struct pciide_softc), pciide_match, pciide_attach
629 };
630 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
631 int pciide_mapregs_compat __P(( struct pci_attach_args *,
632 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
633 int pciide_mapregs_native __P((struct pci_attach_args *,
634 struct pciide_channel *, bus_size_t *, bus_size_t *,
635 int (*pci_intr) __P((void *))));
636 void pciide_mapreg_dma __P((struct pciide_softc *,
637 struct pci_attach_args *));
638 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
639 void pciide_mapchan __P((struct pci_attach_args *,
640 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
641 int (*pci_intr) __P((void *))));
642 int pciide_chan_candisable __P((struct pciide_channel *));
643 void pciide_map_compat_intr __P(( struct pci_attach_args *,
644 struct pciide_channel *, int, int));
645 int pciide_compat_intr __P((void *));
646 int pciide_pci_intr __P((void *));
647 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
648
649 const struct pciide_product_desc *
650 pciide_lookup_product(id)
651 u_int32_t id;
652 {
653 const struct pciide_product_desc *pp;
654 const struct pciide_vendor_desc *vp;
655
656 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
657 if (PCI_VENDOR(id) == vp->ide_vendor)
658 break;
659
660 if ((pp = vp->ide_products) == NULL)
661 return NULL;
662
663 for (; pp->chip_map != NULL; pp++)
664 if (PCI_PRODUCT(id) == pp->ide_product)
665 break;
666
667 if (pp->chip_map == NULL)
668 return NULL;
669 return pp;
670 }
671
672 int
673 pciide_match(parent, match, aux)
674 struct device *parent;
675 struct cfdata *match;
676 void *aux;
677 {
678 struct pci_attach_args *pa = aux;
679 const struct pciide_product_desc *pp;
680
681 /*
682 * Check the ID register to see that it's a PCI IDE controller.
683 * If it is, we assume that we can deal with it; it _should_
684 * work in a standardized way...
685 */
686 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
687 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
688 return (1);
689 }
690
691 /*
692 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
693 * controllers. Let's see if we can deal with them anyway.
694 */
695 pp = pciide_lookup_product(pa->pa_id);
696 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
697 return (1);
698 }
699
700 return (0);
701 }
702
703 void
704 pciide_attach(parent, self, aux)
705 struct device *parent, *self;
706 void *aux;
707 {
708 struct pci_attach_args *pa = aux;
709 pci_chipset_tag_t pc = pa->pa_pc;
710 pcitag_t tag = pa->pa_tag;
711 struct pciide_softc *sc = (struct pciide_softc *)self;
712 pcireg_t csr;
713 char devinfo[256];
714 const char *displaydev;
715
716 sc->sc_pp = pciide_lookup_product(pa->pa_id);
717 if (sc->sc_pp == NULL) {
718 sc->sc_pp = &default_product_desc;
719 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
720 displaydev = devinfo;
721 } else
722 displaydev = sc->sc_pp->ide_name;
723
724 /* if displaydev == NULL, printf is done in chip-specific map */
725 if (displaydev)
726 printf(": %s (rev. 0x%02x)\n", displaydev,
727 PCI_REVISION(pa->pa_class));
728
729 sc->sc_pc = pa->pa_pc;
730 sc->sc_tag = pa->pa_tag;
731 #ifdef WDCDEBUG
732 if (wdcdebug_pciide_mask & DEBUG_PROBE)
733 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
734 #endif
735 sc->sc_pp->chip_map(sc, pa);
736
737 if (sc->sc_dma_ok) {
738 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
739 csr |= PCI_COMMAND_MASTER_ENABLE;
740 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
741 }
742 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
743 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
744 }
745
746 /* tell whether the chip is enabled or not */
747 int
748 pciide_chipen(sc, pa)
749 struct pciide_softc *sc;
750 struct pci_attach_args *pa;
751 {
752 pcireg_t csr;
753 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
754 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
755 PCI_COMMAND_STATUS_REG);
756 printf("%s: device disabled (at %s)\n",
757 sc->sc_wdcdev.sc_dev.dv_xname,
758 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
759 "device" : "bridge");
760 return 0;
761 }
762 return 1;
763 }
764
765 int
766 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
767 struct pci_attach_args *pa;
768 struct pciide_channel *cp;
769 int compatchan;
770 bus_size_t *cmdsizep, *ctlsizep;
771 {
772 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
773 struct channel_softc *wdc_cp = &cp->wdc_channel;
774
775 cp->compat = 1;
776 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
777 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
778
779 wdc_cp->cmd_iot = pa->pa_iot;
780 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
781 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
782 printf("%s: couldn't map %s channel cmd regs\n",
783 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
784 return (0);
785 }
786
787 wdc_cp->ctl_iot = pa->pa_iot;
788 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
789 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
790 printf("%s: couldn't map %s channel ctl regs\n",
791 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
792 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
793 PCIIDE_COMPAT_CMD_SIZE);
794 return (0);
795 }
796
797 return (1);
798 }
799
800 int
801 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
802 struct pci_attach_args * pa;
803 struct pciide_channel *cp;
804 bus_size_t *cmdsizep, *ctlsizep;
805 int (*pci_intr) __P((void *));
806 {
807 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
808 struct channel_softc *wdc_cp = &cp->wdc_channel;
809 const char *intrstr;
810 pci_intr_handle_t intrhandle;
811
812 cp->compat = 0;
813
814 if (sc->sc_pci_ih == NULL) {
815 if (pci_intr_map(pa, &intrhandle) != 0) {
816 printf("%s: couldn't map native-PCI interrupt\n",
817 sc->sc_wdcdev.sc_dev.dv_xname);
818 return 0;
819 }
820 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
821 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
822 intrhandle, IPL_BIO, pci_intr, sc);
823 if (sc->sc_pci_ih != NULL) {
824 printf("%s: using %s for native-PCI interrupt\n",
825 sc->sc_wdcdev.sc_dev.dv_xname,
826 intrstr ? intrstr : "unknown interrupt");
827 } else {
828 printf("%s: couldn't establish native-PCI interrupt",
829 sc->sc_wdcdev.sc_dev.dv_xname);
830 if (intrstr != NULL)
831 printf(" at %s", intrstr);
832 printf("\n");
833 return 0;
834 }
835 }
836 cp->ih = sc->sc_pci_ih;
837 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
838 PCI_MAPREG_TYPE_IO, 0,
839 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
840 printf("%s: couldn't map %s channel cmd regs\n",
841 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
842 return 0;
843 }
844
845 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
846 PCI_MAPREG_TYPE_IO, 0,
847 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
848 printf("%s: couldn't map %s channel ctl regs\n",
849 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
850 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
851 return 0;
852 }
853 /*
854 * In native mode, 4 bytes of I/O space are mapped for the control
855 * register block; the control register itself is at offset 2. Pass
856 * the generic code a handle for only one byte at the right offset.
857 */
858 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
859 &wdc_cp->ctl_ioh) != 0) {
860 printf("%s: unable to subregion %s channel ctl regs\n",
861 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
862 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
863 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
864 return 0;
865 }
866 return (1);
867 }
868
869 void
870 pciide_mapreg_dma(sc, pa)
871 struct pciide_softc *sc;
872 struct pci_attach_args *pa;
873 {
874 pcireg_t maptype;
875 bus_addr_t addr;
876
877 /*
878 * Map DMA registers
879 *
880 * Note that sc_dma_ok is the right variable to test to see if
881 * DMA can be done. If the interface doesn't support DMA,
882 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
883 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
884 * non-zero if the interface supports DMA and the registers
885 * could be mapped.
886 *
887 * XXX Note that despite the fact that the Bus Master IDE specs
888 * XXX say that "The bus master IDE function uses 16 bytes of IO
889 * XXX space," some controllers (at least the United
890 * XXX Microelectronics UM8886BF) place it in memory space.
891 */
892 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
893 PCIIDE_REG_BUS_MASTER_DMA);
894
895 switch (maptype) {
896 case PCI_MAPREG_TYPE_IO:
897 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
898 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
899 &addr, NULL, NULL) == 0);
900 if (sc->sc_dma_ok == 0) {
901 printf(", but unused (couldn't query registers)");
902 break;
903 }
904 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
905 && addr >= 0x10000) {
906 sc->sc_dma_ok = 0;
907 printf(", but unused (registers at unsafe address "
908 "%#lx)", (unsigned long)addr);
909 break;
910 }
911 /* FALLTHROUGH */
912
913 case PCI_MAPREG_MEM_TYPE_32BIT:
914 sc->sc_dma_ok = (pci_mapreg_map(pa,
915 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
916 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
917 sc->sc_dmat = pa->pa_dmat;
918 if (sc->sc_dma_ok == 0) {
919 printf(", but unused (couldn't map registers)");
920 } else {
921 sc->sc_wdcdev.dma_arg = sc;
922 sc->sc_wdcdev.dma_init = pciide_dma_init;
923 sc->sc_wdcdev.dma_start = pciide_dma_start;
924 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
925 }
926
927 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
928 PCIIDE_OPTIONS_NODMA) {
929 printf(", but unused (forced off by config file)");
930 sc->sc_dma_ok = 0;
931 }
932 break;
933
934 default:
935 sc->sc_dma_ok = 0;
936 printf(", but unsupported register maptype (0x%x)", maptype);
937 }
938 }
939
940 int
941 pciide_compat_intr(arg)
942 void *arg;
943 {
944 struct pciide_channel *cp = arg;
945
946 #ifdef DIAGNOSTIC
947 /* should only be called for a compat channel */
948 if (cp->compat == 0)
949 panic("pciide compat intr called for non-compat chan %p", cp);
950 #endif
951 return (wdcintr(&cp->wdc_channel));
952 }
953
954 int
955 pciide_pci_intr(arg)
956 void *arg;
957 {
958 struct pciide_softc *sc = arg;
959 struct pciide_channel *cp;
960 struct channel_softc *wdc_cp;
961 int i, rv, crv;
962
963 rv = 0;
964 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
965 cp = &sc->pciide_channels[i];
966 wdc_cp = &cp->wdc_channel;
967
968 /* If this is a compat channel, skip it. */
969 if (cp->compat)
970 continue;
971 /* if this channel is not waiting for an interrupt, skip it */
972 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
973 continue;
974
975 crv = wdcintr(wdc_cp);
976 if (crv == 0)
977 ; /* leave rv alone */
978 else if (crv == 1)
979 rv = 1; /* claim the intr */
980 else if (rv == 0) /* crv should be -1 in this case */
981 rv = crv; /* if we've done no better, take it */
982 }
983 return (rv);
984 }
985
986 void
987 pciide_channel_dma_setup(cp)
988 struct pciide_channel *cp;
989 {
990 int drive;
991 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
992 struct ata_drive_datas *drvp;
993
994 for (drive = 0; drive < 2; drive++) {
995 drvp = &cp->wdc_channel.ch_drive[drive];
996 /* If no drive, skip */
997 if ((drvp->drive_flags & DRIVE) == 0)
998 continue;
999 /* setup DMA if needed */
1000 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1001 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1002 sc->sc_dma_ok == 0) {
1003 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1004 continue;
1005 }
1006 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1007 != 0) {
1008 /* Abort DMA setup */
1009 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1010 continue;
1011 }
1012 }
1013 }
1014
1015 int
1016 pciide_dma_table_setup(sc, channel, drive)
1017 struct pciide_softc *sc;
1018 int channel, drive;
1019 {
1020 bus_dma_segment_t seg;
1021 int error, rseg;
1022 const bus_size_t dma_table_size =
1023 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1024 struct pciide_dma_maps *dma_maps =
1025 &sc->pciide_channels[channel].dma_maps[drive];
1026
1027 /* If table was already allocated, just return */
1028 if (dma_maps->dma_table)
1029 return 0;
1030
1031 /* Allocate memory for the DMA tables and map it */
1032 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1033 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1034 BUS_DMA_NOWAIT)) != 0) {
1035 printf("%s:%d: unable to allocate table DMA for "
1036 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1037 channel, drive, error);
1038 return error;
1039 }
1040 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1041 dma_table_size,
1042 (caddr_t *)&dma_maps->dma_table,
1043 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1044 printf("%s:%d: unable to map table DMA for "
1045 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1046 channel, drive, error);
1047 return error;
1048 }
1049 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1050 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1051 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1052
1053 /* Create and load table DMA map for this disk */
1054 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1055 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1056 &dma_maps->dmamap_table)) != 0) {
1057 printf("%s:%d: unable to create table DMA map for "
1058 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1059 channel, drive, error);
1060 return error;
1061 }
1062 if ((error = bus_dmamap_load(sc->sc_dmat,
1063 dma_maps->dmamap_table,
1064 dma_maps->dma_table,
1065 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1066 printf("%s:%d: unable to load table DMA map for "
1067 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1068 channel, drive, error);
1069 return error;
1070 }
1071 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1072 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1073 DEBUG_PROBE);
1074 /* Create an xfer DMA map for this drive */
1075 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1076 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1077 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1078 &dma_maps->dmamap_xfer)) != 0) {
1079 printf("%s:%d: unable to create xfer DMA map for "
1080 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1081 channel, drive, error);
1082 return error;
1083 }
1084 return 0;
1085 }
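/*
 * Illustrative note: dma_table is the "physical region descriptor"
 * (PRD) list from the Bus Master IDE spec cited at the top of this
 * file.  Each entry holds a 32-bit physical base address and a byte
 * count (0 meaning 64KB); the last entry must have the end-of-table
 * bit (IDEDMA_BYTE_COUNT_EOT) set, as done in pciide_dma_init() below.
 */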
1086
1087 int
1088 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1089 void *v;
1090 int channel, drive;
1091 void *databuf;
1092 size_t datalen;
1093 int flags;
1094 {
1095 struct pciide_softc *sc = v;
1096 int error, seg;
1097 struct pciide_dma_maps *dma_maps =
1098 &sc->pciide_channels[channel].dma_maps[drive];
1099
1100 error = bus_dmamap_load(sc->sc_dmat,
1101 dma_maps->dmamap_xfer,
1102 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1103 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1104 if (error) {
1105 printf("%s:%d: unable to load xfer DMA map for "
1106 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1107 channel, drive, error);
1108 return error;
1109 }
1110
1111 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1112 dma_maps->dmamap_xfer->dm_mapsize,
1113 (flags & WDC_DMA_READ) ?
1114 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1115
1116 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1117 #ifdef DIAGNOSTIC
1118 /* A segment must not cross a 64k boundary (PRD byte counts are 16-bit) */
1119 {
1120 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1121 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1122 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1123 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1124 printf("pciide_dma: segment %d physical addr 0x%lx"
1125 " len 0x%lx not properly aligned\n",
1126 seg, phys, len);
1127 panic("pciide_dma: buf align");
1128 }
1129 }
1130 #endif
1131 dma_maps->dma_table[seg].base_addr =
1132 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1133 dma_maps->dma_table[seg].byte_count =
1134 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1135 IDEDMA_BYTE_COUNT_MASK);
1136 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1137 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1138 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1139
1140 }
1141 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1142 htole32(IDEDMA_BYTE_COUNT_EOT);
1143
1144 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1145 dma_maps->dmamap_table->dm_mapsize,
1146 BUS_DMASYNC_PREWRITE);
1147
1148 /* Maps are ready; program the DMA engine (started later by pciide_dma_start()) */
1149 #ifdef DIAGNOSTIC
1150 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1151 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1152 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1153 panic("pciide_dma_init: table align");
1154 }
1155 #endif
1156
1157 /* Clear status bits */
1158 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1159 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1160 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1161 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1162 /* Write table addr */
1163 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1164 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1165 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1166 /* set read/write */
1167 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1168 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1169 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1170 /* remember flags */
1171 dma_maps->dma_flags = flags;
1172 return 0;
1173 }
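/*
 * For reference, the sequence above follows the Bus Master IDE
 * programming model: clear the status register, load the PRD table
 * pointer, set the transfer direction in the command register.  The
 * START bit is only set later, in pciide_dma_start().
 */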
1174
1175 void
1176 pciide_dma_start(v, channel, drive)
1177 void *v;
1178 int channel, drive;
1179 {
1180 struct pciide_softc *sc = v;
1181
1182 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1183 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1184 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1185 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1186 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1187 }
1188
1189 int
1190 pciide_dma_finish(v, channel, drive, force)
1191 void *v;
1192 int channel, drive;
1193 int force;
1194 {
1195 struct pciide_softc *sc = v;
1196 u_int8_t status;
1197 int error = 0;
1198 struct pciide_dma_maps *dma_maps =
1199 &sc->pciide_channels[channel].dma_maps[drive];
1200
1201 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1202 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1203 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1204 DEBUG_XFERS);
1205
1206 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1207 return WDC_DMAST_NOIRQ;
1208
1209 /* stop DMA channel */
1210 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1211 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1212 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1213 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1214
1215 /* Unload the map of the data buffer */
1216 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1217 dma_maps->dmamap_xfer->dm_mapsize,
1218 (dma_maps->dma_flags & WDC_DMA_READ) ?
1219 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1220 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1221
1222 if ((status & IDEDMA_CTL_ERR) != 0) {
1223 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1224 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1225 error |= WDC_DMAST_ERR;
1226 }
1227
1228 if ((status & IDEDMA_CTL_INTR) == 0) {
1229 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1230 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1231 drive, status);
1232 error |= WDC_DMAST_NOIRQ;
1233 }
1234
1235 if ((status & IDEDMA_CTL_ACT) != 0) {
1236 /* data underrun, may be a valid condition for ATAPI */
1237 error |= WDC_DMAST_UNDER;
1238 }
1239 return error;
1240 }
1241
1242 void
1243 pciide_irqack(chp)
1244 struct channel_softc *chp;
1245 {
1246 struct pciide_channel *cp = (struct pciide_channel*)chp;
1247 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1248
1249 /* clear status bits in IDE DMA registers */
1250 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1251 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1252 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1253 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1254 }
1255
1256 /* common code used by several chip_map functions */
1257 int
1258 pciide_chansetup(sc, channel, interface)
1259 struct pciide_softc *sc;
1260 int channel;
1261 pcireg_t interface;
1262 {
1263 struct pciide_channel *cp = &sc->pciide_channels[channel];
1264 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1265 cp->name = PCIIDE_CHANNEL_NAME(channel);
1266 cp->wdc_channel.channel = channel;
1267 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1268 cp->wdc_channel.ch_queue =
1269 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1270 if (cp->wdc_channel.ch_queue == NULL) {
1271 printf("%s: %s channel: "
1272 "can't allocate memory for command queue\n",
1273 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1274 return 0;
1275 }
1276 printf("%s: %s channel %s to %s mode\n",
1277 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1278 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1279 "configured" : "wired",
1280 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1281 "native-PCI" : "compatibility");
1282 return 1;
1283 }
1284
1285 /* common code used by several chip_map functions to map a channel */
1286 void
1287 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1288 struct pci_attach_args *pa;
1289 struct pciide_channel *cp;
1290 pcireg_t interface;
1291 bus_size_t *cmdsizep, *ctlsizep;
1292 int (*pci_intr) __P((void *));
1293 {
1294 struct channel_softc *wdc_cp = &cp->wdc_channel;
1295
1296 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1297 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1298 pci_intr);
1299 else
1300 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1301 wdc_cp->channel, cmdsizep, ctlsizep);
1302
1303 if (cp->hw_ok == 0)
1304 return;
1305 wdc_cp->data32iot = wdc_cp->cmd_iot;
1306 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1307 wdcattach(wdc_cp);
1308 }
1309
1310 /*
1311 * Generic code to determine whether a channel can be disabled. Returns 1
1312 * if the channel can be disabled, 0 if not
1313 */
1314 int
1315 pciide_chan_candisable(cp)
1316 struct pciide_channel *cp;
1317 {
1318 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1319 struct channel_softc *wdc_cp = &cp->wdc_channel;
1320
1321 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1322 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1323 printf("%s: disabling %s channel (no drives)\n",
1324 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1325 cp->hw_ok = 0;
1326 return 1;
1327 }
1328 return 0;
1329 }
1330
1331 /*
1332 * Generic code to map the compat interrupt if hw_ok=1 and this is a
1333 * compat channel. Sets hw_ok=0 on failure.
1334 */
1335 void
1336 pciide_map_compat_intr(pa, cp, compatchan, interface)
1337 struct pci_attach_args *pa;
1338 struct pciide_channel *cp;
1339 int compatchan, interface;
1340 {
1341 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1342 struct channel_softc *wdc_cp = &cp->wdc_channel;
1343
1344 if (cp->hw_ok == 0)
1345 return;
1346 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1347 return;
1348
1349 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1350 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1351 pa, compatchan, pciide_compat_intr, cp);
1352 if (cp->ih == NULL) {
1353 #endif
1354 printf("%s: no compatibility interrupt for use by %s "
1355 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1356 cp->hw_ok = 0;
1357 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1358 }
1359 #endif
1360 }
1361
1362 void
1363 pciide_print_modes(cp)
1364 struct pciide_channel *cp;
1365 {
1366 wdc_print_modes(&cp->wdc_channel);
1367 }
1368
1369 void
1370 default_chip_map(sc, pa)
1371 struct pciide_softc *sc;
1372 struct pci_attach_args *pa;
1373 {
1374 struct pciide_channel *cp;
1375 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1376 pcireg_t csr;
1377 int channel, drive;
1378 struct ata_drive_datas *drvp;
1379 u_int8_t idedma_ctl;
1380 bus_size_t cmdsize, ctlsize;
1381 char *failreason;
1382
1383 if (pciide_chipen(sc, pa) == 0)
1384 return;
1385
1386 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1387 printf("%s: bus-master DMA support present",
1388 sc->sc_wdcdev.sc_dev.dv_xname);
1389 if (sc->sc_pp == &default_product_desc &&
1390 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1391 PCIIDE_OPTIONS_DMA) == 0) {
1392 printf(", but unused (no driver support)");
1393 sc->sc_dma_ok = 0;
1394 } else {
1395 pciide_mapreg_dma(sc, pa);
1396 if (sc->sc_dma_ok != 0)
1397 printf(", used without full driver "
1398 "support");
1399 }
1400 } else {
1401 printf("%s: hardware does not support DMA",
1402 sc->sc_wdcdev.sc_dev.dv_xname);
1403 sc->sc_dma_ok = 0;
1404 }
1405 printf("\n");
1406 if (sc->sc_dma_ok) {
1407 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1408 sc->sc_wdcdev.irqack = pciide_irqack;
1409 }
1410 sc->sc_wdcdev.PIO_cap = 0;
1411 sc->sc_wdcdev.DMA_cap = 0;
1412
1413 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1414 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1415 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1416
1417 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1418 cp = &sc->pciide_channels[channel];
1419 if (pciide_chansetup(sc, channel, interface) == 0)
1420 continue;
1421 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1422 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1423 &ctlsize, pciide_pci_intr);
1424 } else {
1425 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1426 channel, &cmdsize, &ctlsize);
1427 }
1428 if (cp->hw_ok == 0)
1429 continue;
1430 /*
1431 * Check to see if something appears to be there.
1432 */
1433 failreason = NULL;
1434 if (!wdcprobe(&cp->wdc_channel)) {
1435 failreason = "not responding; disabled or no drives?";
1436 goto next;
1437 }
1438 /*
1439 * Now, make sure it's actually attributable to this PCI IDE
1440 * channel by trying to access the channel again while the
1441 * PCI IDE controller's I/O space is disabled. (If the
1442 * channel no longer appears to be there, it belongs to
1443 * this controller.) YUCK!
1444 */
1445 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1446 PCI_COMMAND_STATUS_REG);
1447 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1448 csr & ~PCI_COMMAND_IO_ENABLE);
1449 if (wdcprobe(&cp->wdc_channel))
1450 failreason = "other hardware responding at addresses";
1451 pci_conf_write(sc->sc_pc, sc->sc_tag,
1452 PCI_COMMAND_STATUS_REG, csr);
1453 next:
1454 if (failreason) {
1455 printf("%s: %s channel ignored (%s)\n",
1456 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1457 failreason);
1458 cp->hw_ok = 0;
1459 bus_space_unmap(cp->wdc_channel.cmd_iot,
1460 cp->wdc_channel.cmd_ioh, cmdsize);
1461 if (interface & PCIIDE_INTERFACE_PCI(channel))
1462 bus_space_unmap(cp->wdc_channel.ctl_iot,
1463 cp->ctl_baseioh, ctlsize);
1464 else
1465 bus_space_unmap(cp->wdc_channel.ctl_iot,
1466 cp->wdc_channel.ctl_ioh, ctlsize);
1467 } else {
1468 pciide_map_compat_intr(pa, cp, channel, interface);
1469 }
1470 if (cp->hw_ok) {
1471 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1472 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1473 wdcattach(&cp->wdc_channel);
1474 }
1475 }
1476
1477 if (sc->sc_dma_ok == 0)
1478 return;
1479
1480 /* Allocate DMA maps */
1481 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1482 idedma_ctl = 0;
1483 cp = &sc->pciide_channels[channel];
1484 for (drive = 0; drive < 2; drive++) {
1485 drvp = &cp->wdc_channel.ch_drive[drive];
1486 /* If no drive, skip */
1487 if ((drvp->drive_flags & DRIVE) == 0)
1488 continue;
1489 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1490 continue;
1491 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1492 /* Abort DMA setup */
1493 printf("%s:%d:%d: can't allocate DMA maps, "
1494 "using PIO transfers\n",
1495 sc->sc_wdcdev.sc_dev.dv_xname,
1496 channel, drive);
1497 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1498 }
1499 printf("%s:%d:%d: using DMA data transfers\n",
1500 sc->sc_wdcdev.sc_dev.dv_xname,
1501 channel, drive);
1502 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1503 }
1504 if (idedma_ctl != 0) {
1505 /* Add software bits in status register */
1506 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1507 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1508 idedma_ctl);
1509 }
1510 }
1511 }
1512
1513 void
1514 piix_chip_map(sc, pa)
1515 struct pciide_softc *sc;
1516 struct pci_attach_args *pa;
1517 {
1518 struct pciide_channel *cp;
1519 int channel;
1520 u_int32_t idetim;
1521 bus_size_t cmdsize, ctlsize;
1522
1523 if (pciide_chipen(sc, pa) == 0)
1524 return;
1525
1526 printf("%s: bus-master DMA support present",
1527 sc->sc_wdcdev.sc_dev.dv_xname);
1528 pciide_mapreg_dma(sc, pa);
1529 printf("\n");
1530 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1531 WDC_CAPABILITY_MODE;
1532 if (sc->sc_dma_ok) {
1533 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1534 sc->sc_wdcdev.irqack = pciide_irqack;
1535 switch(sc->sc_pp->ide_product) {
1536 case PCI_PRODUCT_INTEL_82371AB_IDE:
1537 case PCI_PRODUCT_INTEL_82440MX_IDE:
1538 case PCI_PRODUCT_INTEL_82801AA_IDE:
1539 case PCI_PRODUCT_INTEL_82801AB_IDE:
1540 case PCI_PRODUCT_INTEL_82801BA_IDE:
1541 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1542 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1543 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1544 case PCI_PRODUCT_INTEL_82801DB_IDE:
1545 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1546 }
1547 }
1548 sc->sc_wdcdev.PIO_cap = 4;
1549 sc->sc_wdcdev.DMA_cap = 2;
1550 switch(sc->sc_pp->ide_product) {
1551 case PCI_PRODUCT_INTEL_82801AA_IDE:
1552 sc->sc_wdcdev.UDMA_cap = 4;
1553 break;
1554 case PCI_PRODUCT_INTEL_82801BA_IDE:
1555 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1556 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1557 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1558 case PCI_PRODUCT_INTEL_82801DB_IDE:
1559 sc->sc_wdcdev.UDMA_cap = 5;
1560 break;
1561 default:
1562 sc->sc_wdcdev.UDMA_cap = 2;
1563 }
1564 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1565 sc->sc_wdcdev.set_modes = piix_setup_channel;
1566 else
1567 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1568 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1569 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1570
1571 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1572 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1573 DEBUG_PROBE);
1574 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1575 WDCDEBUG_PRINT((", sidetim=0x%x",
1576 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1577 DEBUG_PROBE);
1578 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1579 WDCDEBUG_PRINT((", udmareg 0x%x",
1580 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1581 DEBUG_PROBE);
1582 }
1583 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1584 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1585 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1586 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1587 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1588 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1589 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1590 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1591 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1592 DEBUG_PROBE);
1593 }
1594
1595 }
1596 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1597
1598 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1599 cp = &sc->pciide_channels[channel];
1600 /* PIIX is compat-only */
1601 if (pciide_chansetup(sc, channel, 0) == 0)
1602 continue;
1603 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1604 if ((PIIX_IDETIM_READ(idetim, channel) &
1605 PIIX_IDETIM_IDE) == 0) {
1606 printf("%s: %s channel ignored (disabled)\n",
1607 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1608 continue;
1609 }
1610 /* PIIX are compat-only pciide devices */
1611 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1612 if (cp->hw_ok == 0)
1613 continue;
1614 if (pciide_chan_candisable(cp)) {
1615 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1616 channel);
1617 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1618 idetim);
1619 }
1620 pciide_map_compat_intr(pa, cp, channel, 0);
1621 if (cp->hw_ok == 0)
1622 continue;
1623 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1624 }
1625
1626 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1627 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1628 DEBUG_PROBE);
1629 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1630 WDCDEBUG_PRINT((", sidetim=0x%x",
1631 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1632 DEBUG_PROBE);
1633 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1634 WDCDEBUG_PRINT((", udmareg 0x%x",
1635 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1636 DEBUG_PROBE);
1637 }
1638 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1639 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1640 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1641 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1642 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1643 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1644 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1645 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1646 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1647 DEBUG_PROBE);
1648 }
1649 }
1650 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1651 }
1652
1653 void
1654 piix_setup_channel(chp)
1655 struct channel_softc *chp;
1656 {
1657 u_int8_t mode[2], drive;
1658 u_int32_t oidetim, idetim, idedma_ctl;
1659 struct pciide_channel *cp = (struct pciide_channel*)chp;
1660 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1661 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1662
1663 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1664 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1665 idedma_ctl = 0;
1666
1667 /* set up new idetim: Enable IDE registers decode */
1668 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1669 chp->channel);
1670
1671 /* setup DMA */
1672 pciide_channel_dma_setup(cp);
1673
1674 /*
1675 * Here we have to mess with the drive modes: the PIIX can't have
1676 * different timings for the master and slave drives.
1677 * We need to find the best combination.
1678 */
1679
1680 /* If both drives support DMA, take the lower mode */
1681 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1682 (drvp[1].drive_flags & DRIVE_DMA)) {
1683 mode[0] = mode[1] =
1684 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1685 drvp[0].DMA_mode = mode[0];
1686 drvp[1].DMA_mode = mode[1];
1687 goto ok;
1688 }
1689 /*
1690 * If only one drive supports DMA, use its mode, and
1691 * put the other one in PIO mode 0 if its mode is not compatible
1692 */
1693 if (drvp[0].drive_flags & DRIVE_DMA) {
1694 mode[0] = drvp[0].DMA_mode;
1695 mode[1] = drvp[1].PIO_mode;
1696 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1697 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1698 mode[1] = drvp[1].PIO_mode = 0;
1699 goto ok;
1700 }
1701 if (drvp[1].drive_flags & DRIVE_DMA) {
1702 mode[1] = drvp[1].DMA_mode;
1703 mode[0] = drvp[0].PIO_mode;
1704 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1705 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1706 mode[0] = drvp[0].PIO_mode = 0;
1707 goto ok;
1708 }
1709 /*
1710 * If neither drive uses DMA, take the lower mode, unless
1711 * one of them is PIO mode < 2
1712 */
1713 if (drvp[0].PIO_mode < 2) {
1714 mode[0] = drvp[0].PIO_mode = 0;
1715 mode[1] = drvp[1].PIO_mode;
1716 } else if (drvp[1].PIO_mode < 2) {
1717 mode[1] = drvp[1].PIO_mode = 0;
1718 mode[0] = drvp[0].PIO_mode;
1719 } else {
1720 mode[0] = mode[1] =
1721 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1722 drvp[0].PIO_mode = mode[0];
1723 drvp[1].PIO_mode = mode[1];
1724 }
1725 ok: /* The modes are now set up */
1726 for (drive = 0; drive < 2; drive++) {
1727 if (drvp[drive].drive_flags & DRIVE_DMA) {
1728 idetim |= piix_setup_idetim_timings(
1729 mode[drive], 1, chp->channel);
1730 goto end;
1731 }
1732 }
1733 /* If we get here, neither drive uses DMA */
1734 if (mode[0] >= 2)
1735 idetim |= piix_setup_idetim_timings(
1736 mode[0], 0, chp->channel);
1737 else
1738 idetim |= piix_setup_idetim_timings(
1739 mode[1], 0, chp->channel);
1740 end: /*
1741 * The timing mode is now set up in the controller. Enable
1742 * it per drive.
1743 */
1744 for (drive = 0; drive < 2; drive++) {
1745 /* If no drive, skip */
1746 if ((drvp[drive].drive_flags & DRIVE) == 0)
1747 continue;
1748 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1749 if (drvp[drive].drive_flags & DRIVE_DMA)
1750 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1751 }
1752 if (idedma_ctl != 0) {
1753 /* Add software bits in status register */
1754 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1755 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1756 idedma_ctl);
1757 }
1758 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1759 pciide_print_modes(cp);
1760 }
1761
1762 void
1763 piix3_4_setup_channel(chp)
1764 struct channel_softc *chp;
1765 {
1766 struct ata_drive_datas *drvp;
1767 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1768 struct pciide_channel *cp = (struct pciide_channel*)chp;
1769 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1770 int drive;
1771 int channel = chp->channel;
1772
1773 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1774 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1775 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1776 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1777 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1778 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1779 PIIX_SIDETIM_RTC_MASK(channel));
1780
1781 idedma_ctl = 0;
1782 /* If channel disabled, no need to go further */
1783 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1784 return;
1785 /* set up new idetim: Enable IDE registers decode */
1786 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1787
1788 /* setup DMA if needed */
1789 pciide_channel_dma_setup(cp);
1790
1791 for (drive = 0; drive < 2; drive++) {
1792 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1793 PIIX_UDMATIM_SET(0x3, channel, drive));
1794 drvp = &chp->ch_drive[drive];
1795 /* If no drive, skip */
1796 if ((drvp->drive_flags & DRIVE) == 0)
1797 continue;
1798 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1799 (drvp->drive_flags & DRIVE_UDMA) == 0))
1800 goto pio;
1801
1802 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1803 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1804 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1805 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1806 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1807 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1808 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1809 ideconf |= PIIX_CONFIG_PINGPONG;
1810 }
1811 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1812 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1813 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1814 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1815 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1816 /* setup Ultra/100 */
1817 if (drvp->UDMA_mode > 2 &&
1818 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1819 drvp->UDMA_mode = 2;
1820 if (drvp->UDMA_mode > 4) {
1821 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1822 } else {
1823 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1824 if (drvp->UDMA_mode > 2) {
1825 ideconf |= PIIX_CONFIG_UDMA66(channel,
1826 drive);
1827 } else {
1828 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1829 drive);
1830 }
1831 }
1832 }
1833 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1834 /* setup Ultra/66 */
1835 if (drvp->UDMA_mode > 2 &&
1836 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1837 drvp->UDMA_mode = 2;
1838 if (drvp->UDMA_mode > 2)
1839 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1840 else
1841 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1842 }
1843 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1844 (drvp->drive_flags & DRIVE_UDMA)) {
1845 /* use Ultra/DMA */
1846 drvp->drive_flags &= ~DRIVE_DMA;
1847 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1848 udmareg |= PIIX_UDMATIM_SET(
1849 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1850 } else {
1851 /* use Multiword DMA */
1852 drvp->drive_flags &= ~DRIVE_UDMA;
1853 if (drive == 0) {
1854 idetim |= piix_setup_idetim_timings(
1855 drvp->DMA_mode, 1, channel);
1856 } else {
1857 sidetim |= piix_setup_sidetim_timings(
1858 drvp->DMA_mode, 1, channel);
1859 				idetim = PIIX_IDETIM_SET(idetim,
1860 PIIX_IDETIM_SITRE, channel);
1861 }
1862 }
1863 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1864
1865 pio: /* use PIO mode */
1866 idetim |= piix_setup_idetim_drvs(drvp);
1867 if (drive == 0) {
1868 idetim |= piix_setup_idetim_timings(
1869 drvp->PIO_mode, 0, channel);
1870 } else {
1871 sidetim |= piix_setup_sidetim_timings(
1872 drvp->PIO_mode, 0, channel);
1873 			idetim = PIIX_IDETIM_SET(idetim,
1874 PIIX_IDETIM_SITRE, channel);
1875 }
1876 }
1877 if (idedma_ctl != 0) {
1878 /* Add software bits in status register */
1879 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1880 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1881 idedma_ctl);
1882 }
1883 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1884 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1885 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1886 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1887 pciide_print_modes(cp);
1888 }
1889
1890
1891 /* setup ISP and RTC fields, based on mode */
1892 static u_int32_t
1893 piix_setup_idetim_timings(mode, dma, channel)
1894 u_int8_t mode;
1895 u_int8_t dma;
1896 u_int8_t channel;
1897 {
1898
1899 if (dma)
1900 return PIIX_IDETIM_SET(0,
1901 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1902 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1903 channel);
1904 else
1905 return PIIX_IDETIM_SET(0,
1906 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1907 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1908 channel);
1909 }
1910
1911 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1912 static u_int32_t
1913 piix_setup_idetim_drvs(drvp)
1914 struct ata_drive_datas *drvp;
1915 {
1916 u_int32_t ret = 0;
1917 struct channel_softc *chp = drvp->chnl_softc;
1918 u_int8_t channel = chp->channel;
1919 u_int8_t drive = drvp->drive;
1920
1921 /*
1922 	 * If the drive is using UDMA, the timing setup is independent,
1923 	 * so just check DMA and PIO here.
1924 */
1925 if (drvp->drive_flags & DRIVE_DMA) {
1926 		/* if the drive is in DMA mode 0, use compat timings */
1927 if ((drvp->drive_flags & DRIVE_DMA) &&
1928 drvp->DMA_mode == 0) {
1929 drvp->PIO_mode = 0;
1930 return ret;
1931 }
1932 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1933 /*
1934 		 * If the PIO and DMA timings are the same, use fast timings
1935 		 * for PIO too, else fall back to compat timings for PIO.
1936 */
1937 if ((piix_isp_pio[drvp->PIO_mode] !=
1938 piix_isp_dma[drvp->DMA_mode]) ||
1939 (piix_rtc_pio[drvp->PIO_mode] !=
1940 piix_rtc_dma[drvp->DMA_mode]))
1941 drvp->PIO_mode = 0;
1942 /* if PIO mode <= 2, use compat timings for PIO */
1943 if (drvp->PIO_mode <= 2) {
1944 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1945 channel);
1946 return ret;
1947 }
1948 }
1949
1950 /*
1951 * Now setup PIO modes. If mode < 2, use compat timings.
1952 * Else enable fast timings. Enable IORDY and prefetch/post
1953 * if PIO mode >= 3.
1954 */
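	/*
	 * For illustration: a drive at PIO mode 2 (without DMA) gets just
	 * its TIME bit set below (fast timings), while PIO modes 3 and 4
	 * also get IE (IORDY sampling) and PPE (prefetch/posting) enabled.
	 */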
1955
1956 if (drvp->PIO_mode < 2)
1957 return ret;
1958
1959 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1960 if (drvp->PIO_mode >= 3) {
1961 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1962 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1963 }
1964 return ret;
1965 }
1966
1967 /* setup values in SIDETIM registers, based on mode */
1968 static u_int32_t
1969 piix_setup_sidetim_timings(mode, dma, channel)
1970 u_int8_t mode;
1971 u_int8_t dma;
1972 u_int8_t channel;
1973 {
1974 if (dma)
1975 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1976 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1977 else
1978 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1979 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1980 }
1981
1982 void
1983 amd7x6_chip_map(sc, pa)
1984 struct pciide_softc *sc;
1985 struct pci_attach_args *pa;
1986 {
1987 struct pciide_channel *cp;
1988 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1989 int channel;
1990 pcireg_t chanenable;
1991 bus_size_t cmdsize, ctlsize;
1992
1993 if (pciide_chipen(sc, pa) == 0)
1994 return;
1995 printf("%s: bus-master DMA support present",
1996 sc->sc_wdcdev.sc_dev.dv_xname);
1997 pciide_mapreg_dma(sc, pa);
1998 printf("\n");
1999 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2000 WDC_CAPABILITY_MODE;
2001 if (sc->sc_dma_ok) {
2002 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2003 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2004 sc->sc_wdcdev.irqack = pciide_irqack;
2005 }
2006 sc->sc_wdcdev.PIO_cap = 4;
2007 sc->sc_wdcdev.DMA_cap = 2;
2008
2009 switch (sc->sc_pp->ide_product) {
2010 case PCI_PRODUCT_AMD_PBC766_IDE:
2011 case PCI_PRODUCT_AMD_PBC768_IDE:
2012 case PCI_PRODUCT_AMD_PBC8111_IDE:
2013 sc->sc_wdcdev.UDMA_cap = 5;
2014 break;
2015 default:
2016 sc->sc_wdcdev.UDMA_cap = 4;
2017 }
2018 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2019 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2020 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2021 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
2022
2023 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2024 DEBUG_PROBE);
2025 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2026 cp = &sc->pciide_channels[channel];
2027 if (pciide_chansetup(sc, channel, interface) == 0)
2028 continue;
2029
2030 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2031 printf("%s: %s channel ignored (disabled)\n",
2032 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2033 continue;
2034 }
2035 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2036 pciide_pci_intr);
2037
2038 if (pciide_chan_candisable(cp))
2039 chanenable &= ~AMD7X6_CHAN_EN(channel);
2040 pciide_map_compat_intr(pa, cp, channel, interface);
2041 if (cp->hw_ok == 0)
2042 continue;
2043
2044 amd7x6_setup_channel(&cp->wdc_channel);
2045 }
2046 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2047 chanenable);
2048 return;
2049 }
2050
2051 void
2052 amd7x6_setup_channel(chp)
2053 struct channel_softc *chp;
2054 {
2055 u_int32_t udmatim_reg, datatim_reg;
2056 u_int8_t idedma_ctl;
2057 int mode, drive;
2058 struct ata_drive_datas *drvp;
2059 struct pciide_channel *cp = (struct pciide_channel*)chp;
2060 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2061 #ifndef PCIIDE_AMD756_ENABLEDMA
2062 int rev = PCI_REVISION(
2063 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2064 #endif
2065
2066 idedma_ctl = 0;
2067 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2068 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2069 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2070 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2071
2072 /* setup DMA if needed */
2073 pciide_channel_dma_setup(cp);
2074
2075 for (drive = 0; drive < 2; drive++) {
2076 drvp = &chp->ch_drive[drive];
2077 /* If no drive, skip */
2078 if ((drvp->drive_flags & DRIVE) == 0)
2079 continue;
2080 /* add timing values, setup DMA if needed */
2081 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2082 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2083 mode = drvp->PIO_mode;
2084 goto pio;
2085 }
2086 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2087 (drvp->drive_flags & DRIVE_UDMA)) {
2088 /* use Ultra/DMA */
2089 drvp->drive_flags &= ~DRIVE_DMA;
2090 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2091 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2092 AMD7X6_UDMA_TIME(chp->channel, drive,
2093 amd7x6_udma_tim[drvp->UDMA_mode]);
2094 /* can use PIO timings, MW DMA unused */
2095 mode = drvp->PIO_mode;
2096 } else {
2097 /* use Multiword DMA, but only if revision is OK */
2098 drvp->drive_flags &= ~DRIVE_UDMA;
2099 #ifndef PCIIDE_AMD756_ENABLEDMA
2100 /*
2101 			 * The workaround doesn't seem to be necessary
2102 			 * with all drives, so it can be disabled by
2103 			 * defining PCIIDE_AMD756_ENABLEDMA. The bug causes
2104 			 * a hard hang if triggered.
2105 */
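			/*
			 * For illustration: building a kernel with
			 * PCIIDE_AMD756_ENABLEDMA defined (typically an
			 * "options" line in the kernel config) compiles out
			 * the check below and leaves multiword DMA enabled
			 * even on the affected AMD756 revisions.
			 */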
2106 if (sc->sc_pp->ide_product ==
2107 PCI_PRODUCT_AMD_PBC756_IDE &&
2108 AMD756_CHIPREV_DISABLEDMA(rev)) {
2109 printf("%s:%d:%d: multi-word DMA disabled due "
2110 "to chip revision\n",
2111 sc->sc_wdcdev.sc_dev.dv_xname,
2112 chp->channel, drive);
2113 mode = drvp->PIO_mode;
2114 drvp->drive_flags &= ~DRIVE_DMA;
2115 goto pio;
2116 }
2117 #endif
2118 /* mode = min(pio, dma+2) */
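			/*
			 * For illustration: PIO 4 with multiword DMA 2
			 * gives mode 4, while PIO 4 with DMA 1 gives mode 3
			 * (programmed as PIO 3 / DMA 1 below).
			 */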
2119 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2120 mode = drvp->PIO_mode;
2121 else
2122 mode = drvp->DMA_mode + 2;
2123 }
2124 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2125
2126 pio: /* setup PIO mode */
2127 if (mode <= 2) {
2128 drvp->DMA_mode = 0;
2129 drvp->PIO_mode = 0;
2130 mode = 0;
2131 } else {
2132 drvp->PIO_mode = mode;
2133 drvp->DMA_mode = mode - 2;
2134 }
2135 datatim_reg |=
2136 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2137 amd7x6_pio_set[mode]) |
2138 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2139 amd7x6_pio_rec[mode]);
2140 }
2141 if (idedma_ctl != 0) {
2142 /* Add software bits in status register */
2143 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2144 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2145 idedma_ctl);
2146 }
2147 pciide_print_modes(cp);
2148 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2149 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2150 }
2151
2152 void
2153 apollo_chip_map(sc, pa)
2154 struct pciide_softc *sc;
2155 struct pci_attach_args *pa;
2156 {
2157 struct pciide_channel *cp;
2158 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2159 int channel;
2160 u_int32_t ideconf;
2161 bus_size_t cmdsize, ctlsize;
2162 pcitag_t pcib_tag;
2163 pcireg_t pcib_id, pcib_class;
2164
2165 if (pciide_chipen(sc, pa) == 0)
2166 return;
2167 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2168 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2169 /* and read ID and rev of the ISA bridge */
2170 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2171 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2172 printf(": VIA Technologies ");
2173 switch (PCI_PRODUCT(pcib_id)) {
2174 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2175 printf("VT82C586 (Apollo VP) ");
2176 		if (PCI_REVISION(pcib_class) >= 0x02) {
2177 printf("ATA33 controller\n");
2178 sc->sc_wdcdev.UDMA_cap = 2;
2179 } else {
2180 printf("controller\n");
2181 sc->sc_wdcdev.UDMA_cap = 0;
2182 }
2183 break;
2184 case PCI_PRODUCT_VIATECH_VT82C596A:
2185 printf("VT82C596A (Apollo Pro) ");
2186 if (PCI_REVISION(pcib_class) >= 0x12) {
2187 printf("ATA66 controller\n");
2188 sc->sc_wdcdev.UDMA_cap = 4;
2189 } else {
2190 printf("ATA33 controller\n");
2191 sc->sc_wdcdev.UDMA_cap = 2;
2192 }
2193 break;
2194 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2195 printf("VT82C686A (Apollo KX133) ");
2196 if (PCI_REVISION(pcib_class) >= 0x40) {
2197 printf("ATA100 controller\n");
2198 sc->sc_wdcdev.UDMA_cap = 5;
2199 } else {
2200 printf("ATA66 controller\n");
2201 sc->sc_wdcdev.UDMA_cap = 4;
2202 }
2203 break;
2204 case PCI_PRODUCT_VIATECH_VT8231:
2205 printf("VT8231 ATA100 controller\n");
2206 sc->sc_wdcdev.UDMA_cap = 5;
2207 break;
2208 case PCI_PRODUCT_VIATECH_VT8233:
2209 printf("VT8233 ATA100 controller\n");
2210 sc->sc_wdcdev.UDMA_cap = 5;
2211 break;
2212 case PCI_PRODUCT_VIATECH_VT8233A:
2213 printf("VT8233A ATA133 controller\n");
2214 		/* XXX use ATA100 until ATA133 is supported */
2215 sc->sc_wdcdev.UDMA_cap = 5;
2216 break;
2217 default:
2218 printf("unknown ATA controller\n");
2219 sc->sc_wdcdev.UDMA_cap = 0;
2220 }
2221
2222 printf("%s: bus-master DMA support present",
2223 sc->sc_wdcdev.sc_dev.dv_xname);
2224 pciide_mapreg_dma(sc, pa);
2225 printf("\n");
2226 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2227 WDC_CAPABILITY_MODE;
2228 if (sc->sc_dma_ok) {
2229 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2230 sc->sc_wdcdev.irqack = pciide_irqack;
2231 if (sc->sc_wdcdev.UDMA_cap > 0)
2232 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2233 }
2234 sc->sc_wdcdev.PIO_cap = 4;
2235 sc->sc_wdcdev.DMA_cap = 2;
2236 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2237 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2238 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2239
2240 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2241 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2242 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2243 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2244 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2245 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2246 DEBUG_PROBE);
2247
2248 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2249 cp = &sc->pciide_channels[channel];
2250 if (pciide_chansetup(sc, channel, interface) == 0)
2251 continue;
2252
2253 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2254 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2255 printf("%s: %s channel ignored (disabled)\n",
2256 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2257 continue;
2258 }
2259 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2260 pciide_pci_intr);
2261 if (cp->hw_ok == 0)
2262 continue;
2263 if (pciide_chan_candisable(cp)) {
2264 ideconf &= ~APO_IDECONF_EN(channel);
2265 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2266 ideconf);
2267 }
2268 pciide_map_compat_intr(pa, cp, channel, interface);
2269
2270 if (cp->hw_ok == 0)
2271 continue;
2272 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2273 }
2274 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2275 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2276 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2277 }
2278
2279 void
2280 apollo_setup_channel(chp)
2281 struct channel_softc *chp;
2282 {
2283 u_int32_t udmatim_reg, datatim_reg;
2284 u_int8_t idedma_ctl;
2285 int mode, drive;
2286 struct ata_drive_datas *drvp;
2287 struct pciide_channel *cp = (struct pciide_channel*)chp;
2288 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2289
2290 idedma_ctl = 0;
2291 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2292 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2293 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2294 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2295
2296 /* setup DMA if needed */
2297 pciide_channel_dma_setup(cp);
2298
2299 for (drive = 0; drive < 2; drive++) {
2300 drvp = &chp->ch_drive[drive];
2301 /* If no drive, skip */
2302 if ((drvp->drive_flags & DRIVE) == 0)
2303 continue;
2304 /* add timing values, setup DMA if needed */
2305 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2306 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2307 mode = drvp->PIO_mode;
2308 goto pio;
2309 }
2310 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2311 (drvp->drive_flags & DRIVE_UDMA)) {
2312 /* use Ultra/DMA */
2313 drvp->drive_flags &= ~DRIVE_DMA;
2314 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2315 APO_UDMA_EN_MTH(chp->channel, drive);
2316 if (sc->sc_wdcdev.UDMA_cap == 5) {
2317 /* 686b */
2318 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2319 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2320 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2321 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2322 /* 596b or 686a */
2323 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2324 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2325 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2326 } else {
2327 /* 596a or 586b */
2328 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2329 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2330 }
2331 /* can use PIO timings, MW DMA unused */
2332 mode = drvp->PIO_mode;
2333 } else {
2334 /* use Multiword DMA */
2335 drvp->drive_flags &= ~DRIVE_UDMA;
2336 /* mode = min(pio, dma+2) */
2337 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2338 mode = drvp->PIO_mode;
2339 else
2340 mode = drvp->DMA_mode + 2;
2341 }
2342 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2343
2344 pio: /* setup PIO mode */
2345 if (mode <= 2) {
2346 drvp->DMA_mode = 0;
2347 drvp->PIO_mode = 0;
2348 mode = 0;
2349 } else {
2350 drvp->PIO_mode = mode;
2351 drvp->DMA_mode = mode - 2;
2352 }
2353 datatim_reg |=
2354 APO_DATATIM_PULSE(chp->channel, drive,
2355 apollo_pio_set[mode]) |
2356 APO_DATATIM_RECOV(chp->channel, drive,
2357 apollo_pio_rec[mode]);
2358 }
2359 if (idedma_ctl != 0) {
2360 /* Add software bits in status register */
2361 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2362 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2363 idedma_ctl);
2364 }
2365 pciide_print_modes(cp);
2366 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2367 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2368 }
2369
2370 void
2371 cmd_channel_map(pa, sc, channel)
2372 struct pci_attach_args *pa;
2373 struct pciide_softc *sc;
2374 int channel;
2375 {
2376 struct pciide_channel *cp = &sc->pciide_channels[channel];
2377 bus_size_t cmdsize, ctlsize;
2378 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2379 int interface, one_channel;
2380
2381 /*
2382 * The 0648/0649 can be told to identify as a RAID controller.
2383 	 * In this case, we have to fake the interface.
2384 */
2385 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2386 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2387 PCIIDE_INTERFACE_SETTABLE(1);
2388 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2389 CMD_CONF_DSA1)
2390 interface |= PCIIDE_INTERFACE_PCI(0) |
2391 PCIIDE_INTERFACE_PCI(1);
2392 } else {
2393 interface = PCI_INTERFACE(pa->pa_class);
2394 }
2395
2396 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2397 cp->name = PCIIDE_CHANNEL_NAME(channel);
2398 cp->wdc_channel.channel = channel;
2399 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2400
2401 /*
2402 	 * Older CMD64x chips don't have independent channels
2403 */
2404 switch (sc->sc_pp->ide_product) {
2405 case PCI_PRODUCT_CMDTECH_649:
2406 one_channel = 0;
2407 break;
2408 default:
2409 one_channel = 1;
2410 break;
2411 }
2412
2413 if (channel > 0 && one_channel) {
2414 cp->wdc_channel.ch_queue =
2415 sc->pciide_channels[0].wdc_channel.ch_queue;
2416 } else {
2417 cp->wdc_channel.ch_queue =
2418 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2419 }
2420 if (cp->wdc_channel.ch_queue == NULL) {
2421 printf("%s %s channel: "
2422 "can't allocate memory for command queue",
2423 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2424 return;
2425 }
2426
2427 printf("%s: %s channel %s to %s mode\n",
2428 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2429 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2430 "configured" : "wired",
2431 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2432 "native-PCI" : "compatibility");
2433
2434 /*
2435 * with a CMD PCI64x, if we get here, the first channel is enabled:
2436 * there's no way to disable the first channel without disabling
2437 * the whole device
2438 */
2439 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2440 printf("%s: %s channel ignored (disabled)\n",
2441 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2442 return;
2443 }
2444
2445 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2446 if (cp->hw_ok == 0)
2447 return;
2448 if (channel == 1) {
2449 if (pciide_chan_candisable(cp)) {
2450 ctrl &= ~CMD_CTRL_2PORT;
2451 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2452 CMD_CTRL, ctrl);
2453 }
2454 }
2455 pciide_map_compat_intr(pa, cp, channel, interface);
2456 }
2457
2458 int
2459 cmd_pci_intr(arg)
2460 void *arg;
2461 {
2462 struct pciide_softc *sc = arg;
2463 struct pciide_channel *cp;
2464 struct channel_softc *wdc_cp;
2465 int i, rv, crv;
2466 u_int32_t priirq, secirq;
2467
2468 rv = 0;
2469 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2470 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2471 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2472 cp = &sc->pciide_channels[i];
2473 wdc_cp = &cp->wdc_channel;
2474 		/* If a compat channel, skip. */
2475 if (cp->compat)
2476 continue;
2477 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2478 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2479 crv = wdcintr(wdc_cp);
2480 if (crv == 0)
2481 printf("%s:%d: bogus intr\n",
2482 sc->sc_wdcdev.sc_dev.dv_xname, i);
2483 else
2484 rv = 1;
2485 }
2486 }
2487 return rv;
2488 }
2489
2490 void
2491 cmd_chip_map(sc, pa)
2492 struct pciide_softc *sc;
2493 struct pci_attach_args *pa;
2494 {
2495 int channel;
2496
2497 /*
2498 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2499 	 * and base address registers can be disabled at
2500 * hardware level. In this case, the device is wired
2501 * in compat mode and its first channel is always enabled,
2502 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2503 * In fact, it seems that the first channel of the CMD PCI0640
2504 * can't be disabled.
2505 */
2506
2507 #ifdef PCIIDE_CMD064x_DISABLE
2508 if (pciide_chipen(sc, pa) == 0)
2509 return;
2510 #endif
2511
2512 printf("%s: hardware does not support DMA\n",
2513 sc->sc_wdcdev.sc_dev.dv_xname);
2514 sc->sc_dma_ok = 0;
2515
2516 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2517 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2518 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2519
2520 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2521 cmd_channel_map(pa, sc, channel);
2522 }
2523 }
2524
2525 void
2526 cmd0643_9_chip_map(sc, pa)
2527 struct pciide_softc *sc;
2528 struct pci_attach_args *pa;
2529 {
2530 struct pciide_channel *cp;
2531 int channel;
2532 pcireg_t rev = PCI_REVISION(pa->pa_class);
2533
2534 /*
2535 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2536 	 * and base address registers can be disabled at
2537 * hardware level. In this case, the device is wired
2538 * in compat mode and its first channel is always enabled,
2539 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2540 * In fact, it seems that the first channel of the CMD PCI0640
2541 * can't be disabled.
2542 */
2543
2544 #ifdef PCIIDE_CMD064x_DISABLE
2545 if (pciide_chipen(sc, pa) == 0)
2546 return;
2547 #endif
2548 printf("%s: bus-master DMA support present",
2549 sc->sc_wdcdev.sc_dev.dv_xname);
2550 pciide_mapreg_dma(sc, pa);
2551 printf("\n");
2552 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2553 WDC_CAPABILITY_MODE;
2554 if (sc->sc_dma_ok) {
2555 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2556 switch (sc->sc_pp->ide_product) {
2557 case PCI_PRODUCT_CMDTECH_649:
2558 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2559 sc->sc_wdcdev.UDMA_cap = 5;
2560 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2561 break;
2562 case PCI_PRODUCT_CMDTECH_648:
2563 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2564 sc->sc_wdcdev.UDMA_cap = 4;
2565 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2566 break;
2567 case PCI_PRODUCT_CMDTECH_646:
2568 if (rev >= CMD0646U2_REV) {
2569 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2570 sc->sc_wdcdev.UDMA_cap = 2;
2571 } else if (rev >= CMD0646U_REV) {
2572 /*
2573 * Linux's driver claims that the 646U is broken
2574 * with UDMA. Only enable it if we know what we're
2575 * doing
2576 */
2577 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2578 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2579 sc->sc_wdcdev.UDMA_cap = 2;
2580 #endif
2581 /* explicitly disable UDMA */
2582 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2583 CMD_UDMATIM(0), 0);
2584 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2585 CMD_UDMATIM(1), 0);
2586 }
2587 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2588 break;
2589 default:
2590 sc->sc_wdcdev.irqack = pciide_irqack;
2591 }
2592 }
2593
2594 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2595 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2596 sc->sc_wdcdev.PIO_cap = 4;
2597 sc->sc_wdcdev.DMA_cap = 2;
2598 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2599
2600 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2601 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2602 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2603 DEBUG_PROBE);
2604
2605 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2606 cp = &sc->pciide_channels[channel];
2607 cmd_channel_map(pa, sc, channel);
2608 if (cp->hw_ok == 0)
2609 continue;
2610 cmd0643_9_setup_channel(&cp->wdc_channel);
2611 }
2612 /*
2613 	 * Note: this also makes sure we clear the IRQ disable and reset
2614 	 * bits.
2615 */
2616 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2617 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2618 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2619 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2620 DEBUG_PROBE);
2621 }
2622
2623 void
2624 cmd0643_9_setup_channel(chp)
2625 struct channel_softc *chp;
2626 {
2627 struct ata_drive_datas *drvp;
2628 u_int8_t tim;
2629 u_int32_t idedma_ctl, udma_reg;
2630 int drive;
2631 struct pciide_channel *cp = (struct pciide_channel*)chp;
2632 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2633
2634 idedma_ctl = 0;
2635 /* setup DMA if needed */
2636 pciide_channel_dma_setup(cp);
2637
2638 for (drive = 0; drive < 2; drive++) {
2639 drvp = &chp->ch_drive[drive];
2640 /* If no drive, skip */
2641 if ((drvp->drive_flags & DRIVE) == 0)
2642 continue;
2643 /* add timing values, setup DMA if needed */
2644 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2645 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2646 if (drvp->drive_flags & DRIVE_UDMA) {
2647 /* UltraDMA on a 646U2, 0648 or 0649 */
2648 drvp->drive_flags &= ~DRIVE_DMA;
2649 udma_reg = pciide_pci_read(sc->sc_pc,
2650 sc->sc_tag, CMD_UDMATIM(chp->channel));
2651 if (drvp->UDMA_mode > 2 &&
2652 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2653 CMD_BICSR) &
2654 CMD_BICSR_80(chp->channel)) == 0)
2655 drvp->UDMA_mode = 2;
2656 if (drvp->UDMA_mode > 2)
2657 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2658 else if (sc->sc_wdcdev.UDMA_cap > 2)
2659 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2660 udma_reg |= CMD_UDMATIM_UDMA(drive);
2661 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2662 CMD_UDMATIM_TIM_OFF(drive));
2663 udma_reg |=
2664 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2665 CMD_UDMATIM_TIM_OFF(drive));
2666 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2667 CMD_UDMATIM(chp->channel), udma_reg);
2668 } else {
2669 /*
2670 				 * Use Multiword DMA.
2671 				 * Timings will be used for both PIO and DMA,
2672 				 * so adjust the DMA mode if needed.
2673 				 * If we have a 0646U2/8/9, turn off UDMA.
2674 */
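				/*
				 * For illustration: a drive reporting PIO 3
				 * with multiword DMA 2 is dropped to DMA 1
				 * here, so the shared timing value is not
				 * faster than its PIO mode allows.
				 */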
2675 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2676 udma_reg = pciide_pci_read(sc->sc_pc,
2677 sc->sc_tag,
2678 CMD_UDMATIM(chp->channel));
2679 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2680 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2681 CMD_UDMATIM(chp->channel),
2682 udma_reg);
2683 }
2684 if (drvp->PIO_mode >= 3 &&
2685 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2686 drvp->DMA_mode = drvp->PIO_mode - 2;
2687 }
2688 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2689 }
2690 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2691 }
2692 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2693 CMD_DATA_TIM(chp->channel, drive), tim);
2694 }
2695 if (idedma_ctl != 0) {
2696 /* Add software bits in status register */
2697 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2698 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2699 idedma_ctl);
2700 }
2701 pciide_print_modes(cp);
2702 }
2703
2704 void
2705 cmd646_9_irqack(chp)
2706 struct channel_softc *chp;
2707 {
2708 u_int32_t priirq, secirq;
2709 struct pciide_channel *cp = (struct pciide_channel*)chp;
2710 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2711
2712 if (chp->channel == 0) {
2713 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2714 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2715 } else {
2716 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2717 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2718 }
2719 pciide_irqack(chp);
2720 }
2721
2722 void
2723 cmd680_chip_map(sc, pa)
2724 struct pciide_softc *sc;
2725 struct pci_attach_args *pa;
2726 {
2727 struct pciide_channel *cp;
2728 int channel;
2729
2730 if (pciide_chipen(sc, pa) == 0)
2731 return;
2732 printf("%s: bus-master DMA support present",
2733 sc->sc_wdcdev.sc_dev.dv_xname);
2734 pciide_mapreg_dma(sc, pa);
2735 printf("\n");
2736 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2737 WDC_CAPABILITY_MODE;
2738 if (sc->sc_dma_ok) {
2739 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2740 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2741 sc->sc_wdcdev.UDMA_cap = 6;
2742 sc->sc_wdcdev.irqack = pciide_irqack;
2743 }
2744
2745 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2746 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2747 sc->sc_wdcdev.PIO_cap = 4;
2748 sc->sc_wdcdev.DMA_cap = 2;
2749 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2750
2751 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2752 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2753 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2754 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2755 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2756 cp = &sc->pciide_channels[channel];
2757 cmd680_channel_map(pa, sc, channel);
2758 if (cp->hw_ok == 0)
2759 continue;
2760 cmd680_setup_channel(&cp->wdc_channel);
2761 }
2762 }
2763
2764 void
2765 cmd680_channel_map(pa, sc, channel)
2766 struct pci_attach_args *pa;
2767 struct pciide_softc *sc;
2768 int channel;
2769 {
2770 struct pciide_channel *cp = &sc->pciide_channels[channel];
2771 bus_size_t cmdsize, ctlsize;
2772 int interface, i, reg;
2773 static const u_int8_t init_val[] =
2774 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2775 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2776
2777 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2778 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2779 PCIIDE_INTERFACE_SETTABLE(1);
2780 interface |= PCIIDE_INTERFACE_PCI(0) |
2781 PCIIDE_INTERFACE_PCI(1);
2782 } else {
2783 interface = PCI_INTERFACE(pa->pa_class);
2784 }
2785
2786 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2787 cp->name = PCIIDE_CHANNEL_NAME(channel);
2788 cp->wdc_channel.channel = channel;
2789 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2790
2791 cp->wdc_channel.ch_queue =
2792 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2793 if (cp->wdc_channel.ch_queue == NULL) {
2794 printf("%s %s channel: "
2795 "can't allocate memory for command queue",
2796 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2797 return;
2798 }
2799
2800 /* XXX */
2801 reg = 0xa2 + channel * 16;
2802 for (i = 0; i < sizeof(init_val); i++)
2803 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2804
2805 printf("%s: %s channel %s to %s mode\n",
2806 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2807 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2808 "configured" : "wired",
2809 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2810 "native-PCI" : "compatibility");
2811
2812 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2813 if (cp->hw_ok == 0)
2814 return;
2815 pciide_map_compat_intr(pa, cp, channel, interface);
2816 }
2817
2818 void
2819 cmd680_setup_channel(chp)
2820 struct channel_softc *chp;
2821 {
2822 struct ata_drive_datas *drvp;
2823 u_int8_t mode, off, scsc;
2824 u_int16_t val;
2825 u_int32_t idedma_ctl;
2826 int drive;
2827 struct pciide_channel *cp = (struct pciide_channel*)chp;
2828 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2829 pci_chipset_tag_t pc = sc->sc_pc;
2830 pcitag_t pa = sc->sc_tag;
2831 static const u_int8_t udma2_tbl[] =
2832 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2833 static const u_int8_t udma_tbl[] =
2834 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2835 static const u_int16_t dma_tbl[] =
2836 { 0x2208, 0x10c2, 0x10c1 };
2837 static const u_int16_t pio_tbl[] =
2838 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2839
2840 idedma_ctl = 0;
2841 pciide_channel_dma_setup(cp);
2842 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
2843
2844 for (drive = 0; drive < 2; drive++) {
2845 drvp = &chp->ch_drive[drive];
2846 /* If no drive, skip */
2847 if ((drvp->drive_flags & DRIVE) == 0)
2848 continue;
2849 mode &= ~(0x03 << (drive * 4));
2850 if (drvp->drive_flags & DRIVE_UDMA) {
2851 drvp->drive_flags &= ~DRIVE_DMA;
2852 off = 0xa0 + chp->channel * 16;
2853 if (drvp->UDMA_mode > 2 &&
2854 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
2855 drvp->UDMA_mode = 2;
2856 scsc = pciide_pci_read(pc, pa, 0x8a);
2857 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
2858 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
2859 scsc = pciide_pci_read(pc, pa, 0x8a);
2860 if ((scsc & 0x30) == 0)
2861 drvp->UDMA_mode = 5;
2862 }
2863 mode |= 0x03 << (drive * 4);
2864 off = 0xac + chp->channel * 16 + drive * 2;
2865 val = pciide_pci_read(pc, pa, off) & ~0x3f;
2866 if (scsc & 0x30)
2867 val |= udma2_tbl[drvp->UDMA_mode];
2868 else
2869 val |= udma_tbl[drvp->UDMA_mode];
2870 pciide_pci_write(pc, pa, off, val);
2871 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2872 } else if (drvp->drive_flags & DRIVE_DMA) {
2873 mode |= 0x02 << (drive * 4);
2874 off = 0xa8 + chp->channel * 16 + drive * 2;
2875 val = dma_tbl[drvp->DMA_mode];
2876 pciide_pci_write(pc, pa, off, val & 0xff);
2877 pciide_pci_write(pc, pa, off, val >> 8);
2878 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2879 } else {
2880 mode |= 0x01 << (drive * 4);
2881 off = 0xa4 + chp->channel * 16 + drive * 2;
2882 val = pio_tbl[drvp->PIO_mode];
2883 pciide_pci_write(pc, pa, off, val & 0xff);
2884 pciide_pci_write(pc, pa, off, val >> 8);
2885 }
2886 }
2887
2888 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
2889 if (idedma_ctl != 0) {
2890 /* Add software bits in status register */
2891 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2892 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2893 idedma_ctl);
2894 }
2895 pciide_print_modes(cp);
2896 }
2897
2898 void
2899 cy693_chip_map(sc, pa)
2900 struct pciide_softc *sc;
2901 struct pci_attach_args *pa;
2902 {
2903 struct pciide_channel *cp;
2904 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2905 bus_size_t cmdsize, ctlsize;
2906
2907 if (pciide_chipen(sc, pa) == 0)
2908 return;
2909 /*
2910 * this chip has 2 PCI IDE functions, one for primary and one for
2911 * secondary. So we need to call pciide_mapregs_compat() with
2912 * the real channel
2913 */
2914 if (pa->pa_function == 1) {
2915 sc->sc_cy_compatchan = 0;
2916 } else if (pa->pa_function == 2) {
2917 sc->sc_cy_compatchan = 1;
2918 } else {
2919 printf("%s: unexpected PCI function %d\n",
2920 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2921 return;
2922 }
2923 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2924 printf("%s: bus-master DMA support present",
2925 sc->sc_wdcdev.sc_dev.dv_xname);
2926 pciide_mapreg_dma(sc, pa);
2927 } else {
2928 printf("%s: hardware does not support DMA",
2929 sc->sc_wdcdev.sc_dev.dv_xname);
2930 sc->sc_dma_ok = 0;
2931 }
2932 printf("\n");
2933
2934 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2935 if (sc->sc_cy_handle == NULL) {
2936 printf("%s: unable to map hyperCache control registers\n",
2937 sc->sc_wdcdev.sc_dev.dv_xname);
2938 sc->sc_dma_ok = 0;
2939 }
2940
2941 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2942 WDC_CAPABILITY_MODE;
2943 if (sc->sc_dma_ok) {
2944 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2945 sc->sc_wdcdev.irqack = pciide_irqack;
2946 }
2947 sc->sc_wdcdev.PIO_cap = 4;
2948 sc->sc_wdcdev.DMA_cap = 2;
2949 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2950
2951 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2952 sc->sc_wdcdev.nchannels = 1;
2953
2954 /* Only one channel for this chip; if we are here it's enabled */
2955 cp = &sc->pciide_channels[0];
2956 sc->wdc_chanarray[0] = &cp->wdc_channel;
2957 cp->name = PCIIDE_CHANNEL_NAME(0);
2958 cp->wdc_channel.channel = 0;
2959 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2960 cp->wdc_channel.ch_queue =
2961 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2962 if (cp->wdc_channel.ch_queue == NULL) {
2963 printf("%s primary channel: "
2964 "can't allocate memory for command queue",
2965 sc->sc_wdcdev.sc_dev.dv_xname);
2966 return;
2967 }
2968 printf("%s: primary channel %s to ",
2969 sc->sc_wdcdev.sc_dev.dv_xname,
2970 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2971 "configured" : "wired");
2972 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2973 printf("native-PCI");
2974 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2975 pciide_pci_intr);
2976 } else {
2977 printf("compatibility");
2978 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2979 &cmdsize, &ctlsize);
2980 }
2981 printf(" mode\n");
2982 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2983 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2984 wdcattach(&cp->wdc_channel);
2985 if (pciide_chan_candisable(cp)) {
2986 pci_conf_write(sc->sc_pc, sc->sc_tag,
2987 PCI_COMMAND_STATUS_REG, 0);
2988 }
2989 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2990 if (cp->hw_ok == 0)
2991 return;
2992 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2993 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2994 cy693_setup_channel(&cp->wdc_channel);
2995 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2996 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2997 }
2998
2999 void
3000 cy693_setup_channel(chp)
3001 struct channel_softc *chp;
3002 {
3003 struct ata_drive_datas *drvp;
3004 int drive;
3005 u_int32_t cy_cmd_ctrl;
3006 u_int32_t idedma_ctl;
3007 struct pciide_channel *cp = (struct pciide_channel*)chp;
3008 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3009 int dma_mode = -1;
3010
3011 cy_cmd_ctrl = idedma_ctl = 0;
3012
3013 /* setup DMA if needed */
3014 pciide_channel_dma_setup(cp);
3015
3016 for (drive = 0; drive < 2; drive++) {
3017 drvp = &chp->ch_drive[drive];
3018 /* If no drive, skip */
3019 if ((drvp->drive_flags & DRIVE) == 0)
3020 continue;
3021 /* add timing values, setup DMA if needed */
3022 if (drvp->drive_flags & DRIVE_DMA) {
3023 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3024 /* use Multiword DMA */
3025 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3026 dma_mode = drvp->DMA_mode;
3027 }
3028 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3029 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3030 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3031 CY_CMD_CTRL_IOW_REC_OFF(drive));
3032 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3033 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3034 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3035 CY_CMD_CTRL_IOR_REC_OFF(drive));
3036 }
3037 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
3038 chp->ch_drive[0].DMA_mode = dma_mode;
3039 chp->ch_drive[1].DMA_mode = dma_mode;
3040
3041 if (dma_mode == -1)
3042 dma_mode = 0;
3043
3044 if (sc->sc_cy_handle != NULL) {
3045 /* Note: `multiple' is implied. */
3046 cy82c693_write(sc->sc_cy_handle,
3047 (sc->sc_cy_compatchan == 0) ?
3048 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3049 }
3050
3051 pciide_print_modes(cp);
3052
3053 if (idedma_ctl != 0) {
3054 /* Add software bits in status register */
3055 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3056 IDEDMA_CTL, idedma_ctl);
3057 }
3058 }
3059
3060 static int
3061 sis_hostbr_match(pa)
3062 struct pci_attach_args *pa;
3063 {
3064 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
3065 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
3066 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
3067 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
3068 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735) ||
3069 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_745)));
3070 }
3071
3072 void
3073 sis_chip_map(sc, pa)
3074 struct pciide_softc *sc;
3075 struct pci_attach_args *pa;
3076 {
3077 struct pciide_channel *cp;
3078 int channel;
3079 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3080 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3081 pcireg_t rev = PCI_REVISION(pa->pa_class);
3082 bus_size_t cmdsize, ctlsize;
3083 pcitag_t pchb_tag;
3084 pcireg_t pchb_id, pchb_class;
3085
3086 if (pciide_chipen(sc, pa) == 0)
3087 return;
3088 printf("%s: bus-master DMA support present",
3089 sc->sc_wdcdev.sc_dev.dv_xname);
3090 pciide_mapreg_dma(sc, pa);
3091 printf("\n");
3092
3093 /* get a PCI tag for the host bridge (function 0 of the same device) */
3094 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
3095 	/* and read the ID and revision of the host bridge */
3096 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
3097 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
3098
3099 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3100 WDC_CAPABILITY_MODE;
3101 if (sc->sc_dma_ok) {
3102 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3103 sc->sc_wdcdev.irqack = pciide_irqack;
3104 /*
3105 		 * controllers associated with a rev 0x2 530 Host to PCI Bridge
3106 * have problems with UDMA (info provided by Christos)
3107 */
3108 if (rev >= 0xd0 &&
3109 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
3110 PCI_REVISION(pchb_class) >= 0x03))
3111 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3112 }
3113
3114 sc->sc_wdcdev.PIO_cap = 4;
3115 sc->sc_wdcdev.DMA_cap = 2;
3116 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
3117 /*
3118 		 * Use UDMA/100 if one of the host bridges matched by
3119 		 * sis_hostbr_match() is present, UDMA/33 on other chipsets.
3120 */
3121 sc->sc_wdcdev.UDMA_cap =
3122 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
3123 sc->sc_wdcdev.set_modes = sis_setup_channel;
3124
3125 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3126 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3127
3128 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3129 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3130 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
3131
3132 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3133 cp = &sc->pciide_channels[channel];
3134 if (pciide_chansetup(sc, channel, interface) == 0)
3135 continue;
3136 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3137 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3138 printf("%s: %s channel ignored (disabled)\n",
3139 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3140 continue;
3141 }
3142 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3143 pciide_pci_intr);
3144 if (cp->hw_ok == 0)
3145 continue;
3146 if (pciide_chan_candisable(cp)) {
3147 if (channel == 0)
3148 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3149 else
3150 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3151 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3152 sis_ctr0);
3153 }
3154 pciide_map_compat_intr(pa, cp, channel, interface);
3155 if (cp->hw_ok == 0)
3156 continue;
3157 sis_setup_channel(&cp->wdc_channel);
3158 }
3159 }
3160
3161 void
3162 sis_setup_channel(chp)
3163 struct channel_softc *chp;
3164 {
3165 struct ata_drive_datas *drvp;
3166 int drive;
3167 u_int32_t sis_tim;
3168 u_int32_t idedma_ctl;
3169 struct pciide_channel *cp = (struct pciide_channel*)chp;
3170 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3171
3172 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3173 "channel %d 0x%x\n", chp->channel,
3174 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3175 DEBUG_PROBE);
3176 sis_tim = 0;
3177 idedma_ctl = 0;
3178 /* setup DMA if needed */
3179 pciide_channel_dma_setup(cp);
3180
3181 for (drive = 0; drive < 2; drive++) {
3182 drvp = &chp->ch_drive[drive];
3183 /* If no drive, skip */
3184 if ((drvp->drive_flags & DRIVE) == 0)
3185 continue;
3186 /* add timing values, setup DMA if needed */
3187 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3188 (drvp->drive_flags & DRIVE_UDMA) == 0)
3189 goto pio;
3190
3191 if (drvp->drive_flags & DRIVE_UDMA) {
3192 /* use Ultra/DMA */
3193 drvp->drive_flags &= ~DRIVE_DMA;
3194 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3195 SIS_TIM_UDMA_TIME_OFF(drive);
3196 sis_tim |= SIS_TIM_UDMA_EN(drive);
3197 } else {
3198 /*
3199 * use Multiword DMA
3200 * Timings will be used for both PIO and DMA,
3201 * so adjust DMA mode if needed
3202 */
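			/*
			 * For illustration: PIO 4 with multiword DMA 1 is
			 * clamped to PIO 3 / DMA 1 here, and a drive left
			 * with DMA mode 0 is dropped to PIO mode 0 as well.
			 */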
3203 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3204 drvp->PIO_mode = drvp->DMA_mode + 2;
3205 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3206 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3207 drvp->PIO_mode - 2 : 0;
3208 if (drvp->DMA_mode == 0)
3209 drvp->PIO_mode = 0;
3210 }
3211 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3212 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3213 SIS_TIM_ACT_OFF(drive);
3214 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3215 SIS_TIM_REC_OFF(drive);
3216 }
3217 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3218 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3219 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3220 if (idedma_ctl != 0) {
3221 /* Add software bits in status register */
3222 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3223 IDEDMA_CTL, idedma_ctl);
3224 }
3225 pciide_print_modes(cp);
3226 }
3227
3228 void
3229 acer_chip_map(sc, pa)
3230 struct pciide_softc *sc;
3231 struct pci_attach_args *pa;
3232 {
3233 struct pciide_channel *cp;
3234 int channel;
3235 pcireg_t cr, interface;
3236 bus_size_t cmdsize, ctlsize;
3237 pcireg_t rev = PCI_REVISION(pa->pa_class);
3238
3239 if (pciide_chipen(sc, pa) == 0)
3240 return;
3241 printf("%s: bus-master DMA support present",
3242 sc->sc_wdcdev.sc_dev.dv_xname);
3243 pciide_mapreg_dma(sc, pa);
3244 printf("\n");
3245 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3246 WDC_CAPABILITY_MODE;
3247 if (sc->sc_dma_ok) {
3248 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3249 if (rev >= 0x20) {
3250 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3251 if (rev >= 0xC4)
3252 sc->sc_wdcdev.UDMA_cap = 5;
3253 else if (rev >= 0xC2)
3254 sc->sc_wdcdev.UDMA_cap = 4;
3255 else
3256 sc->sc_wdcdev.UDMA_cap = 2;
3257 }
3258 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3259 sc->sc_wdcdev.irqack = pciide_irqack;
3260 }
3261
3262 sc->sc_wdcdev.PIO_cap = 4;
3263 sc->sc_wdcdev.DMA_cap = 2;
3264 sc->sc_wdcdev.set_modes = acer_setup_channel;
3265 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3266 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3267
3268 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3269 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3270 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3271
3272 /* Enable "microsoft register bits" R/W. */
3273 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3274 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3275 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3276 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3277 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3278 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3279 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3280 ~ACER_CHANSTATUSREGS_RO);
3281 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3282 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3283 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3284 /* Don't use cr, re-read the real register content instead */
3285 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3286 PCI_CLASS_REG));
3287
3288 /* From linux: enable "Cable Detection" */
3289 if (rev >= 0xC2) {
3290 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3291 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3292 | ACER_0x4B_CDETECT);
3293 }
3294
3295 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3296 cp = &sc->pciide_channels[channel];
3297 if (pciide_chansetup(sc, channel, interface) == 0)
3298 continue;
3299 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3300 printf("%s: %s channel ignored (disabled)\n",
3301 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3302 continue;
3303 }
3304 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3305 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3306 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3307 if (cp->hw_ok == 0)
3308 continue;
3309 if (pciide_chan_candisable(cp)) {
3310 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3311 pci_conf_write(sc->sc_pc, sc->sc_tag,
3312 PCI_CLASS_REG, cr);
3313 }
3314 pciide_map_compat_intr(pa, cp, channel, interface);
3315 acer_setup_channel(&cp->wdc_channel);
3316 }
3317 }
3318
3319 void
3320 acer_setup_channel(chp)
3321 struct channel_softc *chp;
3322 {
3323 struct ata_drive_datas *drvp;
3324 int drive;
3325 u_int32_t acer_fifo_udma;
3326 u_int32_t idedma_ctl;
3327 struct pciide_channel *cp = (struct pciide_channel*)chp;
3328 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3329
3330 idedma_ctl = 0;
3331 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3332 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3333 acer_fifo_udma), DEBUG_PROBE);
3334 /* setup DMA if needed */
3335 pciide_channel_dma_setup(cp);
3336
3337 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3338 DRIVE_UDMA) { /* check 80 pins cable */
3339 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3340 ACER_0x4A_80PIN(chp->channel)) {
3341 if (chp->ch_drive[0].UDMA_mode > 2)
3342 chp->ch_drive[0].UDMA_mode = 2;
3343 if (chp->ch_drive[1].UDMA_mode > 2)
3344 chp->ch_drive[1].UDMA_mode = 2;
3345 }
3346 }
3347
3348 for (drive = 0; drive < 2; drive++) {
3349 drvp = &chp->ch_drive[drive];
3350 /* If no drive, skip */
3351 if ((drvp->drive_flags & DRIVE) == 0)
3352 continue;
3353 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3354 "channel %d drive %d 0x%x\n", chp->channel, drive,
3355 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3356 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3357 /* clear FIFO/DMA mode */
3358 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3359 ACER_UDMA_EN(chp->channel, drive) |
3360 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3361
3362 /* add timing values, setup DMA if needed */
3363 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3364 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3365 acer_fifo_udma |=
3366 ACER_FTH_OPL(chp->channel, drive, 0x1);
3367 goto pio;
3368 }
3369
3370 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3371 if (drvp->drive_flags & DRIVE_UDMA) {
3372 /* use Ultra/DMA */
3373 drvp->drive_flags &= ~DRIVE_DMA;
3374 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3375 acer_fifo_udma |=
3376 ACER_UDMA_TIM(chp->channel, drive,
3377 acer_udma[drvp->UDMA_mode]);
3378 /* XXX disable if one drive < UDMA3 ? */
3379 if (drvp->UDMA_mode >= 3) {
3380 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3381 ACER_0x4B,
3382 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3383 ACER_0x4B) | ACER_0x4B_UDMA66);
3384 }
3385 } else {
3386 /*
3387 * use Multiword DMA
3388 * Timings will be used for both PIO and DMA,
3389 * so adjust DMA mode if needed
3390 */
3391 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3392 drvp->PIO_mode = drvp->DMA_mode + 2;
3393 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3394 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3395 drvp->PIO_mode - 2 : 0;
3396 if (drvp->DMA_mode == 0)
3397 drvp->PIO_mode = 0;
3398 }
3399 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3400 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3401 ACER_IDETIM(chp->channel, drive),
3402 acer_pio[drvp->PIO_mode]);
3403 }
3404 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3405 acer_fifo_udma), DEBUG_PROBE);
3406 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3407 if (idedma_ctl != 0) {
3408 /* Add software bits in status register */
3409 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3410 IDEDMA_CTL, idedma_ctl);
3411 }
3412 pciide_print_modes(cp);
3413 }
3414
3415 int
3416 acer_pci_intr(arg)
3417 void *arg;
3418 {
3419 struct pciide_softc *sc = arg;
3420 struct pciide_channel *cp;
3421 struct channel_softc *wdc_cp;
3422 int i, rv, crv;
3423 u_int32_t chids;
3424
3425 rv = 0;
3426 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3427 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3428 cp = &sc->pciide_channels[i];
3429 wdc_cp = &cp->wdc_channel;
3430 		/* If a compat channel, skip. */
3431 if (cp->compat)
3432 continue;
3433 if (chids & ACER_CHIDS_INT(i)) {
3434 crv = wdcintr(wdc_cp);
3435 if (crv == 0)
3436 printf("%s:%d: bogus intr\n",
3437 sc->sc_wdcdev.sc_dev.dv_xname, i);
3438 else
3439 rv = 1;
3440 }
3441 }
3442 return rv;
3443 }
3444
3445 void
3446 hpt_chip_map(sc, pa)
3447 struct pciide_softc *sc;
3448 struct pci_attach_args *pa;
3449 {
3450 struct pciide_channel *cp;
3451 int i, compatchan, revision;
3452 pcireg_t interface;
3453 bus_size_t cmdsize, ctlsize;
3454
3455 if (pciide_chipen(sc, pa) == 0)
3456 return;
3457 revision = PCI_REVISION(pa->pa_class);
3458 printf(": Triones/Highpoint ");
3459 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3460 printf("HPT374 IDE Controller\n");
3461 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3462 if (revision == HPT370_REV)
3463 printf("HPT370 IDE Controller\n");
3464 else if (revision == HPT370A_REV)
3465 printf("HPT370A IDE Controller\n");
3466 else if (revision == HPT366_REV)
3467 printf("HPT366 IDE Controller\n");
3468 else
3469 printf("unknown HPT IDE controller rev %d\n", revision);
3470 } else
3471 printf("unknown HPT IDE controller 0x%x\n",
3472 sc->sc_pp->ide_product);
3473
3474 /*
3475 	 * When the chip is in native mode it identifies itself as a
3476 	 * 'misc mass storage' device. Fake the interface in this case.
3477 */
3478 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3479 interface = PCI_INTERFACE(pa->pa_class);
3480 } else {
3481 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3482 PCIIDE_INTERFACE_PCI(0);
3483 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3484 (revision == HPT370_REV || revision == HPT370A_REV)) ||
3485 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3486 interface |= PCIIDE_INTERFACE_PCI(1);
3487 }
3488
3489 printf("%s: bus-master DMA support present",
3490 sc->sc_wdcdev.sc_dev.dv_xname);
3491 pciide_mapreg_dma(sc, pa);
3492 printf("\n");
3493 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3494 WDC_CAPABILITY_MODE;
3495 if (sc->sc_dma_ok) {
3496 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3497 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3498 sc->sc_wdcdev.irqack = pciide_irqack;
3499 }
3500 sc->sc_wdcdev.PIO_cap = 4;
3501 sc->sc_wdcdev.DMA_cap = 2;
3502
3503 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3504 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3505 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3506 revision == HPT366_REV) {
3507 sc->sc_wdcdev.UDMA_cap = 4;
3508 /*
3509 		 * The 366 has 2 PCI IDE functions, one for the primary and
3510 		 * one for the secondary channel, so we need to call
3511 		 * pciide_mapregs_compat() with the real channel number.
3512 */
3513 if (pa->pa_function == 0) {
3514 compatchan = 0;
3515 } else if (pa->pa_function == 1) {
3516 compatchan = 1;
3517 } else {
3518 printf("%s: unexpected PCI function %d\n",
3519 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3520 return;
3521 }
3522 sc->sc_wdcdev.nchannels = 1;
3523 } else {
3524 sc->sc_wdcdev.nchannels = 2;
3525 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3526 sc->sc_wdcdev.UDMA_cap = 6;
3527 else
3528 sc->sc_wdcdev.UDMA_cap = 5;
3529 }
3530 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3531 cp = &sc->pciide_channels[i];
3532 if (sc->sc_wdcdev.nchannels > 1) {
3533 compatchan = i;
3534 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3535 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3536 printf("%s: %s channel ignored (disabled)\n",
3537 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3538 continue;
3539 }
3540 }
3541 if (pciide_chansetup(sc, i, interface) == 0)
3542 continue;
3543 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3544 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3545 &ctlsize, hpt_pci_intr);
3546 } else {
3547 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3548 &cmdsize, &ctlsize);
3549 }
3550 if (cp->hw_ok == 0)
3551 return;
3552 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3553 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3554 wdcattach(&cp->wdc_channel);
3555 hpt_setup_channel(&cp->wdc_channel);
3556 }
3557 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3558 (revision == HPT370_REV || revision == HPT370A_REV)) ||
3559 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3560 /*
3561 		 * HPT370_REV and higher have a bit to disable interrupts;
3562 		 * make sure to clear it.
3563 */
3564 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3565 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3566 ~HPT_CSEL_IRQDIS);
3567 }
3568 /* set clocks, etc (mandatory on 374, optional otherwise) */
3569 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3570 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3571 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3572 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3573 return;
3574 }
3575
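/*
 * Per-channel timing setup for the HPT controllers.  The value written
 * to HPT_IDETIM() comes from the hpt366/hpt370/hpt374 PIO/DMA/UDMA
 * lookup tables, selected by the channel count and UDMA capability
 * established in hpt_chip_map(); UDMA modes above 2 are clamped
 * according to the cable-ID bit read from HPT_CSEL.
 */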
3576 void
3577 hpt_setup_channel(chp)
3578 struct channel_softc *chp;
3579 {
3580 struct ata_drive_datas *drvp;
3581 int drive;
3582 int cable;
3583 u_int32_t before, after;
3584 u_int32_t idedma_ctl;
3585 struct pciide_channel *cp = (struct pciide_channel*)chp;
3586 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3587
3588 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3589
3590 /* setup DMA if needed */
3591 pciide_channel_dma_setup(cp);
3592
3593 idedma_ctl = 0;
3594
3595 /* Per drive settings */
3596 for (drive = 0; drive < 2; drive++) {
3597 drvp = &chp->ch_drive[drive];
3598 /* If no drive, skip */
3599 if ((drvp->drive_flags & DRIVE) == 0)
3600 continue;
3601 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3602 HPT_IDETIM(chp->channel, drive));
3603
3604 /* add timing values, setup DMA if needed */
3605 if (drvp->drive_flags & DRIVE_UDMA) {
3606 /* use Ultra/DMA */
3607 drvp->drive_flags &= ~DRIVE_DMA;
3608 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3609 drvp->UDMA_mode > 2)
3610 drvp->UDMA_mode = 2;
3611 after = (sc->sc_wdcdev.nchannels == 2) ?
3612 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3613 hpt374_udma[drvp->UDMA_mode] :
3614 hpt370_udma[drvp->UDMA_mode]) :
3615 hpt366_udma[drvp->UDMA_mode];
3616 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3617 } else if (drvp->drive_flags & DRIVE_DMA) {
3618 /*
3619 * use Multiword DMA.
3620 * Timings will be used for both PIO and DMA, so adjust
3621 * DMA mode if needed
3622 */
3623 if (drvp->PIO_mode >= 3 &&
3624 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3625 drvp->DMA_mode = drvp->PIO_mode - 2;
3626 }
3627 after = (sc->sc_wdcdev.nchannels == 2) ?
3628 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3629 hpt374_dma[drvp->DMA_mode] :
3630 hpt370_dma[drvp->DMA_mode]) :
3631 hpt366_dma[drvp->DMA_mode];
3632 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3633 } else {
3634 /* PIO only */
3635 after = (sc->sc_wdcdev.nchannels == 2) ?
3636 ( (sc->sc_wdcdev.UDMA_cap == 6) ?
3637 hpt374_pio[drvp->PIO_mode] :
3638 hpt370_pio[drvp->PIO_mode]) :
3639 hpt366_pio[drvp->PIO_mode];
3640 }
3641 pci_conf_write(sc->sc_pc, sc->sc_tag,
3642 HPT_IDETIM(chp->channel, drive), after);
3643 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3644 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3645 after, before), DEBUG_PROBE);
3646 }
3647 if (idedma_ctl != 0) {
3648 /* Add software bits in status register */
3649 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3650 IDEDMA_CTL, idedma_ctl);
3651 }
3652 pciide_print_modes(cp);
3653 }
3654
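/*
 * Interrupt handler for the HPT controllers: a channel is serviced
 * only when its IDEDMA status shows an interrupt pending and the DMA
 * engine idle; bogus interrupts are cleared by writing the status back.
 */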
3655 int
3656 hpt_pci_intr(arg)
3657 void *arg;
3658 {
3659 struct pciide_softc *sc = arg;
3660 struct pciide_channel *cp;
3661 struct channel_softc *wdc_cp;
3662 int rv = 0;
3663 int dmastat, i, crv;
3664
3665 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3666 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3667 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3668 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3669 IDEDMA_CTL_INTR)
3670 continue;
3671 cp = &sc->pciide_channels[i];
3672 wdc_cp = &cp->wdc_channel;
3673 crv = wdcintr(wdc_cp);
3674 if (crv == 0) {
3675 printf("%s:%d: bogus intr\n",
3676 sc->sc_wdcdev.sc_dev.dv_xname, i);
3677 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3678 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3679 } else
3680 rv = 1;
3681 }
3682 return rv;
3683 }
3684
3685
3686 /* Macros to test product */
3687 #define PDC_IS_262(sc) \
3688 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3689 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3690 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3691 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3692 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3693 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3694 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3695 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3696 #define PDC_IS_265(sc) \
3697 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3698 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3699 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3700 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3701 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3702 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3703 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3704 #define PDC_IS_268(sc) \
3705 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3706 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3707 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3708 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3709 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3710
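/*
 * Map a Promise PDC202xx controller.  On the pre-20268 chips the
 * PDC2xx_STATE register is read first: it supplies the native-mode bit
 * used to fake the interface, the per-channel enable bits, and the
 * IDE/RAID mode bit which is cleared here.  The 20268 and later chips
 * need none of this.
 */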
3711 void
3712 pdc202xx_chip_map(sc, pa)
3713 struct pciide_softc *sc;
3714 struct pci_attach_args *pa;
3715 {
3716 struct pciide_channel *cp;
3717 int channel;
3718 pcireg_t interface, st, mode;
3719 bus_size_t cmdsize, ctlsize;
3720
3721 if (!PDC_IS_268(sc)) {
3722 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3723 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3724 st), DEBUG_PROBE);
3725 }
3726 if (pciide_chipen(sc, pa) == 0)
3727 return;
3728
3729 /* turn off RAID mode */
3730 if (!PDC_IS_268(sc))
3731 st &= ~PDC2xx_STATE_IDERAID;
3732
3733 /*
3734 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3735 	 * RAID mode; we have to fake the interface.
3736 */
3737 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3738 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3739 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3740
3741 printf("%s: bus-master DMA support present",
3742 sc->sc_wdcdev.sc_dev.dv_xname);
3743 pciide_mapreg_dma(sc, pa);
3744 printf("\n");
3745 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3746 WDC_CAPABILITY_MODE;
3747 if (sc->sc_dma_ok) {
3748 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3749 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3750 sc->sc_wdcdev.irqack = pciide_irqack;
3751 }
3752 sc->sc_wdcdev.PIO_cap = 4;
3753 sc->sc_wdcdev.DMA_cap = 2;
3754 if (PDC_IS_265(sc))
3755 sc->sc_wdcdev.UDMA_cap = 5;
3756 else if (PDC_IS_262(sc))
3757 sc->sc_wdcdev.UDMA_cap = 4;
3758 else
3759 sc->sc_wdcdev.UDMA_cap = 2;
3760 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3761 pdc20268_setup_channel : pdc202xx_setup_channel;
3762 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3763 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3764
3765 if (!PDC_IS_268(sc)) {
3766 /* setup failsafe defaults */
3767 mode = 0;
3768 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3769 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3770 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3771 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3772 for (channel = 0;
3773 channel < sc->sc_wdcdev.nchannels;
3774 channel++) {
3775 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3776 "drive 0 initial timings 0x%x, now 0x%x\n",
3777 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3778 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3779 DEBUG_PROBE);
3780 pci_conf_write(sc->sc_pc, sc->sc_tag,
3781 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3782 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3783 "drive 1 initial timings 0x%x, now 0x%x\n",
3784 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3785 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3786 pci_conf_write(sc->sc_pc, sc->sc_tag,
3787 PDC2xx_TIM(channel, 1), mode);
3788 }
3789
3790 mode = PDC2xx_SCR_DMA;
3791 if (PDC_IS_262(sc)) {
3792 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3793 } else {
3794 /* the BIOS set it up this way */
3795 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3796 }
3797 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3798 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3799 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3800 "now 0x%x\n",
3801 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3802 PDC2xx_SCR),
3803 mode), DEBUG_PROBE);
3804 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3805 PDC2xx_SCR, mode);
3806
3807 /* controller initial state register is OK even without BIOS */
3808 /* Set DMA mode to IDE DMA compatibility */
3809 mode =
3810 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3811 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3812 DEBUG_PROBE);
3813 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3814 mode | 0x1);
3815 mode =
3816 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3817 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3818 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3819 mode | 0x1);
3820 }
3821
3822 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3823 cp = &sc->pciide_channels[channel];
3824 if (pciide_chansetup(sc, channel, interface) == 0)
3825 continue;
3826 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3827 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3828 printf("%s: %s channel ignored (disabled)\n",
3829 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3830 continue;
3831 }
3832 if (PDC_IS_265(sc))
3833 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3834 pdc20265_pci_intr);
3835 else
3836 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3837 pdc202xx_pci_intr);
3838 if (cp->hw_ok == 0)
3839 continue;
3840 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3841 st &= ~(PDC_IS_262(sc) ?
3842 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3843 pciide_map_compat_intr(pa, cp, channel, interface);
3844 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3845 }
3846 if (!PDC_IS_268(sc)) {
3847 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3848 "0x%x\n", st), DEBUG_PROBE);
3849 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3850 }
3851 return;
3852 }
3853
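/*
 * Timing setup for the pre-20268 Promise chips.  Per-drive timings are
 * assembled from the pdc2xx_pa/pb/mb/mc tables and written to
 * PDC2xx_TIM(); on the 262-class chips the U66 clock enable and the
 * per-channel ATAPI register are also adjusted according to the
 * negotiated UDMA modes and cable state.
 */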
3854 void
3855 pdc202xx_setup_channel(chp)
3856 struct channel_softc *chp;
3857 {
3858 struct ata_drive_datas *drvp;
3859 int drive;
3860 pcireg_t mode, st;
3861 u_int32_t idedma_ctl, scr, atapi;
3862 struct pciide_channel *cp = (struct pciide_channel*)chp;
3863 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3864 int channel = chp->channel;
3865
3866 /* setup DMA if needed */
3867 pciide_channel_dma_setup(cp);
3868
3869 idedma_ctl = 0;
3870 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3871 sc->sc_wdcdev.sc_dev.dv_xname,
3872 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3873 DEBUG_PROBE);
3874
3875 /* Per channel settings */
3876 if (PDC_IS_262(sc)) {
3877 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3878 PDC262_U66);
3879 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3880 /* Trim UDMA mode */
3881 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3882 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3883 chp->ch_drive[0].UDMA_mode <= 2) ||
3884 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3885 chp->ch_drive[1].UDMA_mode <= 2)) {
3886 if (chp->ch_drive[0].UDMA_mode > 2)
3887 chp->ch_drive[0].UDMA_mode = 2;
3888 if (chp->ch_drive[1].UDMA_mode > 2)
3889 chp->ch_drive[1].UDMA_mode = 2;
3890 }
3891 /* Set U66 if needed */
3892 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3893 chp->ch_drive[0].UDMA_mode > 2) ||
3894 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3895 chp->ch_drive[1].UDMA_mode > 2))
3896 scr |= PDC262_U66_EN(channel);
3897 else
3898 scr &= ~PDC262_U66_EN(channel);
3899 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3900 PDC262_U66, scr);
3901 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3902 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3903 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3904 PDC262_ATAPI(channel))), DEBUG_PROBE);
3905 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3906 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3907 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3908 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3909 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3910 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3911 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3912 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3913 atapi = 0;
3914 else
3915 atapi = PDC262_ATAPI_UDMA;
3916 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3917 PDC262_ATAPI(channel), atapi);
3918 }
3919 }
3920 for (drive = 0; drive < 2; drive++) {
3921 drvp = &chp->ch_drive[drive];
3922 /* If no drive, skip */
3923 if ((drvp->drive_flags & DRIVE) == 0)
3924 continue;
3925 mode = 0;
3926 if (drvp->drive_flags & DRIVE_UDMA) {
3927 /* use Ultra/DMA */
3928 drvp->drive_flags &= ~DRIVE_DMA;
3929 mode = PDC2xx_TIM_SET_MB(mode,
3930 pdc2xx_udma_mb[drvp->UDMA_mode]);
3931 mode = PDC2xx_TIM_SET_MC(mode,
3932 pdc2xx_udma_mc[drvp->UDMA_mode]);
3933 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3934 } else if (drvp->drive_flags & DRIVE_DMA) {
3935 mode = PDC2xx_TIM_SET_MB(mode,
3936 pdc2xx_dma_mb[drvp->DMA_mode]);
3937 mode = PDC2xx_TIM_SET_MC(mode,
3938 pdc2xx_dma_mc[drvp->DMA_mode]);
3939 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3940 } else {
3941 mode = PDC2xx_TIM_SET_MB(mode,
3942 pdc2xx_dma_mb[0]);
3943 mode = PDC2xx_TIM_SET_MC(mode,
3944 pdc2xx_dma_mc[0]);
3945 }
3946 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3947 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3948 if (drvp->drive_flags & DRIVE_ATA)
3949 mode |= PDC2xx_TIM_PRE;
3950 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3951 if (drvp->PIO_mode >= 3) {
3952 mode |= PDC2xx_TIM_IORDY;
3953 if (drive == 0)
3954 mode |= PDC2xx_TIM_IORDYp;
3955 }
3956 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3957 "timings 0x%x\n",
3958 sc->sc_wdcdev.sc_dev.dv_xname,
3959 chp->channel, drive, mode), DEBUG_PROBE);
3960 pci_conf_write(sc->sc_pc, sc->sc_tag,
3961 PDC2xx_TIM(chp->channel, drive), mode);
3962 }
3963 if (idedma_ctl != 0) {
3964 /* Add software bits in status register */
3965 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3966 IDEDMA_CTL, idedma_ctl);
3967 }
3968 pciide_print_modes(cp);
3969 }
3970
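/*
 * The 20268 and later chips need no timing registers: the controller
 * snoops the drive's SET_FEATURES command.  All that is left to do is
 * flag which drives use DMA and clamp UDMA above mode 2 when the
 * (undocumented) bit tested below is set.
 */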
3971 void
3972 pdc20268_setup_channel(chp)
3973 struct channel_softc *chp;
3974 {
3975 struct ata_drive_datas *drvp;
3976 int drive;
3977 u_int32_t idedma_ctl;
3978 struct pciide_channel *cp = (struct pciide_channel*)chp;
3979 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3980 int u100;
3981
3982 /* setup DMA if needed */
3983 pciide_channel_dma_setup(cp);
3984
3985 idedma_ctl = 0;
3986
3987 	/* I don't know what this is for; FreeBSD does it ... */
3988 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3989 IDEDMA_CMD + 0x1, 0x0b);
3990
3991 /*
3992 	 * I don't know what this is for; FreeBSD checks this ... it is
3993 	 * not the cable type detect.
3994 */
3995 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3996 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
3997
3998 for (drive = 0; drive < 2; drive++) {
3999 drvp = &chp->ch_drive[drive];
4000 /* If no drive, skip */
4001 if ((drvp->drive_flags & DRIVE) == 0)
4002 continue;
4003 if (drvp->drive_flags & DRIVE_UDMA) {
4004 /* use Ultra/DMA */
4005 drvp->drive_flags &= ~DRIVE_DMA;
4006 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4007 if (drvp->UDMA_mode > 2 && u100 == 0)
4008 drvp->UDMA_mode = 2;
4009 } else if (drvp->drive_flags & DRIVE_DMA) {
4010 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4011 }
4012 }
4013 	/* Nothing to do to set up modes: the controller snoops the SET_FEATURES cmd */
4014 if (idedma_ctl != 0) {
4015 /* Add software bits in status register */
4016 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4017 IDEDMA_CTL, idedma_ctl);
4018 }
4019 pciide_print_modes(cp);
4020 }
4021
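/*
 * Interrupt handler for the older Promise chips (the 20265 and later
 * use pdc20265_pci_intr below): PDC2xx_SCR has one interrupt-pending
 * bit per channel.
 */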
4022 int
4023 pdc202xx_pci_intr(arg)
4024 void *arg;
4025 {
4026 struct pciide_softc *sc = arg;
4027 struct pciide_channel *cp;
4028 struct channel_softc *wdc_cp;
4029 int i, rv, crv;
4030 u_int32_t scr;
4031
4032 rv = 0;
4033 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4034 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4035 cp = &sc->pciide_channels[i];
4036 wdc_cp = &cp->wdc_channel;
4037 		/* If a compat channel, skip. */
4038 if (cp->compat)
4039 continue;
4040 if (scr & PDC2xx_SCR_INT(i)) {
4041 crv = wdcintr(wdc_cp);
4042 if (crv == 0)
4043 printf("%s:%d: bogus intr (reg 0x%x)\n",
4044 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4045 else
4046 rv = 1;
4047 }
4048 }
4049 return rv;
4050 }
4051
4052 int
4053 pdc20265_pci_intr(arg)
4054 void *arg;
4055 {
4056 struct pciide_softc *sc = arg;
4057 struct pciide_channel *cp;
4058 struct channel_softc *wdc_cp;
4059 int i, rv, crv;
4060 u_int32_t dmastat;
4061
4062 rv = 0;
4063 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4064 cp = &sc->pciide_channels[i];
4065 wdc_cp = &cp->wdc_channel;
4066 		/* If a compat channel, skip. */
4067 if (cp->compat)
4068 continue;
4069 /*
4070 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4071 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
4072 		 * so use that instead (this requires 2 register reads instead
4073 		 * of 1, but we can't do it another way).
4074 */
4075 dmastat = bus_space_read_1(sc->sc_dma_iot,
4076 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4077 if((dmastat & IDEDMA_CTL_INTR) == 0)
4078 continue;
4079 crv = wdcintr(wdc_cp);
4080 if (crv == 0)
4081 printf("%s:%d: bogus intr\n",
4082 sc->sc_wdcdev.sc_dev.dv_xname, i);
4083 else
4084 rv = 1;
4085 }
4086 return rv;
4087 }
4088
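/*
 * Map an OPTi controller.  Bus-master DMA is left disabled on chip
 * revisions <= 0x12 (see the XXXSCW note below), and the second
 * channel is skipped if the init control register marks it disabled.
 */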
4089 void
4090 opti_chip_map(sc, pa)
4091 struct pciide_softc *sc;
4092 struct pci_attach_args *pa;
4093 {
4094 struct pciide_channel *cp;
4095 bus_size_t cmdsize, ctlsize;
4096 pcireg_t interface;
4097 u_int8_t init_ctrl;
4098 int channel;
4099
4100 if (pciide_chipen(sc, pa) == 0)
4101 return;
4102 printf("%s: bus-master DMA support present",
4103 sc->sc_wdcdev.sc_dev.dv_xname);
4104
4105 /*
4106 * XXXSCW:
4107 * There seem to be a couple of buggy revisions/implementations
4108 * of the OPTi pciide chipset. This kludge seems to fix one of
4109 * the reported problems (PR/11644) but still fails for the
4110 * other (PR/13151), although the latter may be due to other
4111 * issues too...
4112 */
4113 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4114 printf(" but disabled due to chip rev. <= 0x12");
4115 sc->sc_dma_ok = 0;
4116 } else
4117 pciide_mapreg_dma(sc, pa);
4118
4119 printf("\n");
4120
4121 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4122 WDC_CAPABILITY_MODE;
4123 sc->sc_wdcdev.PIO_cap = 4;
4124 if (sc->sc_dma_ok) {
4125 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4126 sc->sc_wdcdev.irqack = pciide_irqack;
4127 sc->sc_wdcdev.DMA_cap = 2;
4128 }
4129 sc->sc_wdcdev.set_modes = opti_setup_channel;
4130
4131 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4132 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4133
4134 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4135 OPTI_REG_INIT_CONTROL);
4136
4137 interface = PCI_INTERFACE(pa->pa_class);
4138
4139 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4140 cp = &sc->pciide_channels[channel];
4141 if (pciide_chansetup(sc, channel, interface) == 0)
4142 continue;
4143 if (channel == 1 &&
4144 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4145 printf("%s: %s channel ignored (disabled)\n",
4146 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4147 continue;
4148 }
4149 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4150 pciide_pci_intr);
4151 if (cp->hw_ok == 0)
4152 continue;
4153 pciide_map_compat_intr(pa, cp, channel, interface);
4154 if (cp->hw_ok == 0)
4155 continue;
4156 opti_setup_channel(&cp->wdc_channel);
4157 }
4158 }
4159
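/*
 * Per-channel setup for the OPTi chip.  Timings are indexed by the PCI
 * bus speed (from the strap register) and a per-drive mode value (the
 * PIO mode, or DMA mode + 5 for multiword DMA).  Both drives must
 * share the same address setup time, so the faster drive is slowed
 * down to match when they differ.
 */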
4160 void
4161 opti_setup_channel(chp)
4162 struct channel_softc *chp;
4163 {
4164 struct ata_drive_datas *drvp;
4165 struct pciide_channel *cp = (struct pciide_channel*)chp;
4166 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4167 int drive, spd;
4168 int mode[2];
4169 u_int8_t rv, mr;
4170
4171 /*
4172 * The `Delay' and `Address Setup Time' fields of the
4173 * Miscellaneous Register are always zero initially.
4174 */
4175 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4176 mr &= ~(OPTI_MISC_DELAY_MASK |
4177 OPTI_MISC_ADDR_SETUP_MASK |
4178 OPTI_MISC_INDEX_MASK);
4179
4180 /* Prime the control register before setting timing values */
4181 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4182
4183 /* Determine the clockrate of the PCIbus the chip is attached to */
4184 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4185 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4186
4187 /* setup DMA if needed */
4188 pciide_channel_dma_setup(cp);
4189
4190 for (drive = 0; drive < 2; drive++) {
4191 drvp = &chp->ch_drive[drive];
4192 /* If no drive, skip */
4193 if ((drvp->drive_flags & DRIVE) == 0) {
4194 mode[drive] = -1;
4195 continue;
4196 }
4197
4198 if ((drvp->drive_flags & DRIVE_DMA)) {
4199 /*
4200 * Timings will be used for both PIO and DMA,
4201 * so adjust DMA mode if needed
4202 */
4203 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4204 drvp->PIO_mode = drvp->DMA_mode + 2;
4205 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4206 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4207 drvp->PIO_mode - 2 : 0;
4208 if (drvp->DMA_mode == 0)
4209 drvp->PIO_mode = 0;
4210
4211 mode[drive] = drvp->DMA_mode + 5;
4212 } else
4213 mode[drive] = drvp->PIO_mode;
4214
4215 if (drive && mode[0] >= 0 &&
4216 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4217 /*
4218 * Can't have two drives using different values
4219 * for `Address Setup Time'.
4220 * Slow down the faster drive to compensate.
4221 */
4222 int d = (opti_tim_as[spd][mode[0]] >
4223 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4224
4225 mode[d] = mode[1-d];
4226 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4227 chp->ch_drive[d].DMA_mode = 0;
4228 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4229 }
4230 }
4231
4232 for (drive = 0; drive < 2; drive++) {
4233 int m;
4234 if ((m = mode[drive]) < 0)
4235 continue;
4236
4237 /* Set the Address Setup Time and select appropriate index */
4238 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4239 rv |= OPTI_MISC_INDEX(drive);
4240 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4241
4242 /* Set the pulse width and recovery timing parameters */
4243 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4244 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4245 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4246 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4247
4248 /* Set the Enhanced Mode register appropriately */
4249 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4250 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4251 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4252 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4253 }
4254
4255 /* Finally, enable the timings */
4256 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4257
4258 pciide_print_modes(cp);
4259 }
4260
4261 #define ACARD_IS_850(sc) \
4262 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4263
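/*
 * Map an Acard controller.  The ATP850U is limited to UDMA2, the later
 * ATP86x parts to UDMA4; on the ATP86x the 80-pin cable detect and the
 * interrupt control bit live in the ATP8x0_CTRL register.
 */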
4264 void
4265 acard_chip_map(sc, pa)
4266 struct pciide_softc *sc;
4267 struct pci_attach_args *pa;
4268 {
4269 struct pciide_channel *cp;
4270 int i;
4271 pcireg_t interface;
4272 bus_size_t cmdsize, ctlsize;
4273
4274 if (pciide_chipen(sc, pa) == 0)
4275 return;
4276
4277 /*
4278 	 * When the chip is in native mode it identifies itself as a
4279 	 * 'misc mass storage' device. Fake the interface in this case.
4280 */
4281 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4282 interface = PCI_INTERFACE(pa->pa_class);
4283 } else {
4284 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4285 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4286 }
4287
4288 printf("%s: bus-master DMA support present",
4289 sc->sc_wdcdev.sc_dev.dv_xname);
4290 pciide_mapreg_dma(sc, pa);
4291 printf("\n");
4292 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4293 WDC_CAPABILITY_MODE;
4294
4295 if (sc->sc_dma_ok) {
4296 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4297 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4298 sc->sc_wdcdev.irqack = pciide_irqack;
4299 }
4300 sc->sc_wdcdev.PIO_cap = 4;
4301 sc->sc_wdcdev.DMA_cap = 2;
4302 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4303
4304 sc->sc_wdcdev.set_modes = acard_setup_channel;
4305 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4306 sc->sc_wdcdev.nchannels = 2;
4307
4308 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4309 cp = &sc->pciide_channels[i];
4310 if (pciide_chansetup(sc, i, interface) == 0)
4311 continue;
4312 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4313 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4314 &ctlsize, pciide_pci_intr);
4315 } else {
4316 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4317 &cmdsize, &ctlsize);
4318 }
4319 if (cp->hw_ok == 0)
4320 return;
4321 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4322 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4323 wdcattach(&cp->wdc_channel);
4324 acard_setup_channel(&cp->wdc_channel);
4325 }
4326 if (!ACARD_IS_850(sc)) {
4327 u_int32_t reg;
4328 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4329 reg &= ~ATP860_CTRL_INT;
4330 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4331 }
4332 }
4333
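/*
 * Per-channel timing setup for the Acard chips.  Active/recovery
 * timings from the acard_act_xxx/acard_rec_xxx tables are merged into
 * the IDETIME register and the UDMA configuration into the UDMA
 * register; UDMA modes above 2 are clamped on the ATP86x according to
 * the cable-detect bit in ATP8x0_CTRL.
 */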
4334 void
4335 acard_setup_channel(chp)
4336 struct channel_softc *chp;
4337 {
4338 struct ata_drive_datas *drvp;
4339 struct pciide_channel *cp = (struct pciide_channel*)chp;
4340 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4341 int channel = chp->channel;
4342 int drive;
4343 u_int32_t idetime, udma_mode;
4344 u_int32_t idedma_ctl;
4345
4346 /* setup DMA if needed */
4347 pciide_channel_dma_setup(cp);
4348
4349 if (ACARD_IS_850(sc)) {
4350 idetime = 0;
4351 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4352 udma_mode &= ~ATP850_UDMA_MASK(channel);
4353 } else {
4354 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4355 idetime &= ~ATP860_SETTIME_MASK(channel);
4356 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4357 udma_mode &= ~ATP860_UDMA_MASK(channel);
4358
4359 		/* check for 80-pin cable */
4360 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4361 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4362 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4363 & ATP860_CTRL_80P(chp->channel)) {
4364 if (chp->ch_drive[0].UDMA_mode > 2)
4365 chp->ch_drive[0].UDMA_mode = 2;
4366 if (chp->ch_drive[1].UDMA_mode > 2)
4367 chp->ch_drive[1].UDMA_mode = 2;
4368 }
4369 }
4370 }
4371
4372 idedma_ctl = 0;
4373
4374 /* Per drive settings */
4375 for (drive = 0; drive < 2; drive++) {
4376 drvp = &chp->ch_drive[drive];
4377 /* If no drive, skip */
4378 if ((drvp->drive_flags & DRIVE) == 0)
4379 continue;
4380 /* add timing values, setup DMA if needed */
4381 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4382 (drvp->drive_flags & DRIVE_UDMA)) {
4383 /* use Ultra/DMA */
4384 if (ACARD_IS_850(sc)) {
4385 idetime |= ATP850_SETTIME(drive,
4386 acard_act_udma[drvp->UDMA_mode],
4387 acard_rec_udma[drvp->UDMA_mode]);
4388 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4389 acard_udma_conf[drvp->UDMA_mode]);
4390 } else {
4391 idetime |= ATP860_SETTIME(channel, drive,
4392 acard_act_udma[drvp->UDMA_mode],
4393 acard_rec_udma[drvp->UDMA_mode]);
4394 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4395 acard_udma_conf[drvp->UDMA_mode]);
4396 }
4397 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4398 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4399 (drvp->drive_flags & DRIVE_DMA)) {
4400 /* use Multiword DMA */
4401 drvp->drive_flags &= ~DRIVE_UDMA;
4402 if (ACARD_IS_850(sc)) {
4403 idetime |= ATP850_SETTIME(drive,
4404 acard_act_dma[drvp->DMA_mode],
4405 acard_rec_dma[drvp->DMA_mode]);
4406 } else {
4407 idetime |= ATP860_SETTIME(channel, drive,
4408 acard_act_dma[drvp->DMA_mode],
4409 acard_rec_dma[drvp->DMA_mode]);
4410 }
4411 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4412 } else {
4413 /* PIO only */
4414 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4415 if (ACARD_IS_850(sc)) {
4416 idetime |= ATP850_SETTIME(drive,
4417 acard_act_pio[drvp->PIO_mode],
4418 acard_rec_pio[drvp->PIO_mode]);
4419 } else {
4420 idetime |= ATP860_SETTIME(channel, drive,
4421 acard_act_pio[drvp->PIO_mode],
4422 acard_rec_pio[drvp->PIO_mode]);
4423 }
4424 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4425 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4426 | ATP8x0_CTRL_EN(channel));
4427 }
4428 }
4429
4430 if (idedma_ctl != 0) {
4431 /* Add software bits in status register */
4432 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4433 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4434 }
4435 pciide_print_modes(cp);
4436
4437 if (ACARD_IS_850(sc)) {
4438 pci_conf_write(sc->sc_pc, sc->sc_tag,
4439 ATP850_IDETIME(channel), idetime);
4440 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4441 } else {
4442 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4443 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4444 }
4445 }
4446
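/*
 * Interrupt handler for the Acard chips.  Interrupts that arrive while
 * no command is outstanding (WDCF_IRQ_WAIT clear) are handed to
 * wdcintr() and then acknowledged in the DMA status register rather
 * than being reported as bogus.
 */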
4447 int
4448 acard_pci_intr(arg)
4449 void *arg;
4450 {
4451 struct pciide_softc *sc = arg;
4452 struct pciide_channel *cp;
4453 struct channel_softc *wdc_cp;
4454 int rv = 0;
4455 int dmastat, i, crv;
4456
4457 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4458 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4459 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4460 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4461 continue;
4462 cp = &sc->pciide_channels[i];
4463 wdc_cp = &cp->wdc_channel;
4464 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4465 (void)wdcintr(wdc_cp);
4466 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4467 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4468 continue;
4469 }
4470 crv = wdcintr(wdc_cp);
4471 if (crv == 0)
4472 printf("%s:%d: bogus intr\n",
4473 sc->sc_wdcdev.sc_dev.dv_xname, i);
4474 else if (crv == 1)
4475 rv = 1;
4476 else if (rv == 0)
4477 rv = crv;
4478 }
4479 return rv;
4480 }
4481
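/*
 * Helper for pci_find_device(): matches the buggy revisions (<= 0x05)
 * of the Winbond 83c553 southbridge, on which IDE DMA must remain
 * disabled.
 */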
4482 static int
4483 sl82c105_bugchk(struct pci_attach_args *pa)
4484 {
4485
4486 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4487 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4488 return (0);
4489
4490 if (PCI_REVISION(pa->pa_class) <= 0x05)
4491 return (1);
4492
4493 return (0);
4494 }
4495
4496 void
4497 sl82c105_chip_map(sc, pa)
4498 struct pciide_softc *sc;
4499 struct pci_attach_args *pa;
4500 {
4501 struct pciide_channel *cp;
4502 bus_size_t cmdsize, ctlsize;
4503 pcireg_t interface, idecr;
4504 int channel;
4505
4506 if (pciide_chipen(sc, pa) == 0)
4507 return;
4508
4509 printf("%s: bus-master DMA support present",
4510 sc->sc_wdcdev.sc_dev.dv_xname);
4511
4512 /*
4513 * Check to see if we're part of the Winbond 83c553 Southbridge.
4514 * If so, we need to disable DMA on rev. <= 5 of that chip.
4515 */
4516 if (pci_find_device(pa, sl82c105_bugchk)) {
4517 printf(" but disabled due to 83c553 rev. <= 0x05");
4518 sc->sc_dma_ok = 0;
4519 } else
4520 pciide_mapreg_dma(sc, pa);
4521 printf("\n");
4522
4523 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4524 WDC_CAPABILITY_MODE;
4525 sc->sc_wdcdev.PIO_cap = 4;
4526 if (sc->sc_dma_ok) {
4527 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4528 sc->sc_wdcdev.irqack = pciide_irqack;
4529 sc->sc_wdcdev.DMA_cap = 2;
4530 }
4531 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4532
4533 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4534 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4535
4536 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4537
4538 interface = PCI_INTERFACE(pa->pa_class);
4539
4540 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4541 cp = &sc->pciide_channels[channel];
4542 if (pciide_chansetup(sc, channel, interface) == 0)
4543 continue;
4544 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4545 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4546 printf("%s: %s channel ignored (disabled)\n",
4547 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4548 continue;
4549 }
4550 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4551 pciide_pci_intr);
4552 if (cp->hw_ok == 0)
4553 continue;
4554 pciide_map_compat_intr(pa, cp, channel, interface);
4555 if (cp->hw_ok == 0)
4556 continue;
4557 sl82c105_setup_channel(&cp->wdc_channel);
4558 }
4559 }
4560
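/*
 * Per-drive setup for the Symphony/Winbond SL82C105.  Command on/off
 * times from the symph_mw_dma_times or symph_pio_times tables are
 * written to the per-drive PxDxCR register; since the timings are
 * shared between PIO and DMA, DMA is disabled when it cannot coexist
 * with the selected PIO mode.
 */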
4561 void
4562 sl82c105_setup_channel(chp)
4563 struct channel_softc *chp;
4564 {
4565 struct ata_drive_datas *drvp;
4566 struct pciide_channel *cp = (struct pciide_channel*)chp;
4567 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4568 int pxdx_reg, drive;
4569 pcireg_t pxdx;
4570
4571 /* Set up DMA if needed. */
4572 pciide_channel_dma_setup(cp);
4573
4574 for (drive = 0; drive < 2; drive++) {
4575 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4576 : SYMPH_P1D0CR) + (drive * 4);
4577
4578 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4579
4580 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4581 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4582
4583 drvp = &chp->ch_drive[drive];
4584 /* If no drive, skip. */
4585 if ((drvp->drive_flags & DRIVE) == 0) {
4586 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4587 continue;
4588 }
4589
4590 if (drvp->drive_flags & DRIVE_DMA) {
4591 /*
4592 * Timings will be used for both PIO and DMA,
4593 * so adjust DMA mode if needed.
4594 */
4595 if (drvp->PIO_mode >= 3) {
4596 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4597 drvp->DMA_mode = drvp->PIO_mode - 2;
4598 if (drvp->DMA_mode < 1) {
4599 /*
4600 * Can't mix both PIO and DMA.
4601 * Disable DMA.
4602 */
4603 drvp->drive_flags &= ~DRIVE_DMA;
4604 }
4605 } else {
4606 /*
4607 * Can't mix both PIO and DMA. Disable
4608 * DMA.
4609 */
4610 drvp->drive_flags &= ~DRIVE_DMA;
4611 }
4612 }
4613
4614 if (drvp->drive_flags & DRIVE_DMA) {
4615 /* Use multi-word DMA. */
4616 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4617 PxDx_CMD_ON_SHIFT;
4618 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4619 } else {
4620 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4621 PxDx_CMD_ON_SHIFT;
4622 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4623 }
4624
4625 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4626
4627 /* ...and set the mode for this drive. */
4628 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4629 }
4630
4631 pciide_print_modes(cp);
4632 }
4633
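/*
 * Map a ServerWorks OSB4/CSB5 controller.  The UDMA capability depends
 * on the product and, for the CSB5, on the revision; a bit in register
 * 0x64 of the companion function 0 device is also adjusted once the
 * channels are attached.
 */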
4634 void
4635 serverworks_chip_map(sc, pa)
4636 struct pciide_softc *sc;
4637 struct pci_attach_args *pa;
4638 {
4639 struct pciide_channel *cp;
4640 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4641 pcitag_t pcib_tag;
4642 int channel;
4643 bus_size_t cmdsize, ctlsize;
4644
4645 if (pciide_chipen(sc, pa) == 0)
4646 return;
4647
4648 printf("%s: bus-master DMA support present",
4649 sc->sc_wdcdev.sc_dev.dv_xname);
4650 pciide_mapreg_dma(sc, pa);
4651 printf("\n");
4652 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4653 WDC_CAPABILITY_MODE;
4654
4655 if (sc->sc_dma_ok) {
4656 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4657 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4658 sc->sc_wdcdev.irqack = pciide_irqack;
4659 }
4660 sc->sc_wdcdev.PIO_cap = 4;
4661 sc->sc_wdcdev.DMA_cap = 2;
4662 switch (sc->sc_pp->ide_product) {
4663 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4664 sc->sc_wdcdev.UDMA_cap = 2;
4665 break;
4666 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4667 if (PCI_REVISION(pa->pa_class) < 0x92)
4668 sc->sc_wdcdev.UDMA_cap = 4;
4669 else
4670 sc->sc_wdcdev.UDMA_cap = 5;
4671 break;
4672 }
4673
4674 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4675 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4676 sc->sc_wdcdev.nchannels = 2;
4677
4678 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4679 cp = &sc->pciide_channels[channel];
4680 if (pciide_chansetup(sc, channel, interface) == 0)
4681 continue;
4682 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4683 serverworks_pci_intr);
4684 if (cp->hw_ok == 0)
4685 return;
4686 pciide_map_compat_intr(pa, cp, channel, interface);
4687 if (cp->hw_ok == 0)
4688 return;
4689 serverworks_setup_channel(&cp->wdc_channel);
4690 }
4691
4692 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4693 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4694 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4695 }
4696
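/*
 * Per-channel setup for the ServerWorks chips.  PIO/DMA timing bytes
 * and mode numbers are packed per drive into config registers 0x40,
 * 0x44, 0x48 and 0x54; UDMA above mode 2 is only allowed when the
 * 80-pin cable bit in the subsystem ID register is set for the channel.
 */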
4697 void
4698 serverworks_setup_channel(chp)
4699 struct channel_softc *chp;
4700 {
4701 struct ata_drive_datas *drvp;
4702 struct pciide_channel *cp = (struct pciide_channel*)chp;
4703 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4704 int channel = chp->channel;
4705 int drive, unit;
4706 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4707 u_int32_t idedma_ctl;
4708 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4709 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4710
4711 /* setup DMA if needed */
4712 pciide_channel_dma_setup(cp);
4713
4714 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4715 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4716 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4717 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4718
4719 pio_time &= ~(0xffff << (16 * channel));
4720 dma_time &= ~(0xffff << (16 * channel));
4721 pio_mode &= ~(0xff << (8 * channel + 16));
4722 udma_mode &= ~(0xff << (8 * channel + 16));
4723 udma_mode &= ~(3 << (2 * channel));
4724
4725 idedma_ctl = 0;
4726
4727 /* Per drive settings */
4728 for (drive = 0; drive < 2; drive++) {
4729 drvp = &chp->ch_drive[drive];
4730 /* If no drive, skip */
4731 if ((drvp->drive_flags & DRIVE) == 0)
4732 continue;
4733 unit = drive + 2 * channel;
4734 /* add timing values, setup DMA if needed */
4735 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4736 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4737 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4738 (drvp->drive_flags & DRIVE_UDMA)) {
4739 /* use Ultra/DMA, check for 80-pin cable */
4740 if (drvp->UDMA_mode > 2 &&
4741 			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4742 drvp->UDMA_mode = 2;
4743 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4744 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4745 udma_mode |= 1 << unit;
4746 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4747 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4748 (drvp->drive_flags & DRIVE_DMA)) {
4749 /* use Multiword DMA */
4750 drvp->drive_flags &= ~DRIVE_UDMA;
4751 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4752 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4753 } else {
4754 /* PIO only */
4755 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4756 }
4757 }
4758
4759 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4760 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4761 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4762 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4763 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4764
4765 if (idedma_ctl != 0) {
4766 /* Add software bits in status register */
4767 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4768 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4769 }
4770 pciide_print_modes(cp);
4771 }
4772
4773 int
4774 serverworks_pci_intr(arg)
4775 void *arg;
4776 {
4777 struct pciide_softc *sc = arg;
4778 struct pciide_channel *cp;
4779 struct channel_softc *wdc_cp;
4780 int rv = 0;
4781 int dmastat, i, crv;
4782
4783 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4784 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4785 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4786 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4787 IDEDMA_CTL_INTR)
4788 continue;
4789 cp = &sc->pciide_channels[i];
4790 wdc_cp = &cp->wdc_channel;
4791 crv = wdcintr(wdc_cp);
4792 if (crv == 0) {
4793 printf("%s:%d: bogus intr\n",
4794 sc->sc_wdcdev.sc_dev.dv_xname, i);
4795 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4796 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4797 } else
4798 rv = 1;
4799 }
4800 return rv;
4801 }
4802