pciide.c revision 1.153.2.11 1 /* $NetBSD: pciide.c,v 1.153.2.11 2003/04/28 06:25:40 tron Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.153.2.11 2003/04/28 06:25:40 tron Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 static int sis_hostbr_match __P(( struct pci_attach_args *));
187
188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void acer_setup_channel __P((struct channel_softc*));
190 int acer_pci_intr __P((void *));
191
192 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void pdc202xx_setup_channel __P((struct channel_softc*));
194 void pdc20268_setup_channel __P((struct channel_softc*));
195 int pdc202xx_pci_intr __P((void *));
196 int pdc20265_pci_intr __P((void *));
197 static void pdc20262_dma_start __P((void*, int, int));
198 static int pdc20262_dma_finish __P((void*, int, int, int));
199
200 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
201 void opti_setup_channel __P((struct channel_softc*));
202
203 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
204 void hpt_setup_channel __P((struct channel_softc*));
205 int hpt_pci_intr __P((void *));
206
207 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
208 void acard_setup_channel __P((struct channel_softc*));
209 int acard_pci_intr __P((void *));
210
211 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
212 void serverworks_setup_channel __P((struct channel_softc*));
213 int serverworks_pci_intr __P((void *));
214
215 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
216 void sl82c105_setup_channel __P((struct channel_softc*));
217
218 void pciide_channel_dma_setup __P((struct pciide_channel *));
219 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
220 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
221 void pciide_dma_start __P((void*, int, int));
222 int pciide_dma_finish __P((void*, int, int, int));
223 void pciide_irqack __P((struct channel_softc *));
224 void pciide_print_modes __P((struct pciide_channel *));
225
226 struct pciide_product_desc {
227 u_int32_t ide_product;
228 int ide_flags;
229 const char *ide_name;
230 /* map and setup chip, probe drives */
231 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
232 };
233
234 /* Flags for ide_flags */
235 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
236 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
237
238 /* Default product description for devices not known from this controller */
239 const struct pciide_product_desc default_product_desc = {
240 0,
241 0,
242 "Generic PCI IDE controller",
243 default_chip_map,
244 };
245
246 const struct pciide_product_desc pciide_intel_products[] = {
247 { PCI_PRODUCT_INTEL_82092AA,
248 0,
249 "Intel 82092AA IDE controller",
250 default_chip_map,
251 },
252 { PCI_PRODUCT_INTEL_82371FB_IDE,
253 0,
254 "Intel 82371FB IDE controller (PIIX)",
255 piix_chip_map,
256 },
257 { PCI_PRODUCT_INTEL_82371SB_IDE,
258 0,
259 "Intel 82371SB IDE Interface (PIIX3)",
260 piix_chip_map,
261 },
262 { PCI_PRODUCT_INTEL_82371AB_IDE,
263 0,
264 "Intel 82371AB IDE controller (PIIX4)",
265 piix_chip_map,
266 },
267 { PCI_PRODUCT_INTEL_82440MX_IDE,
268 0,
269 "Intel 82440MX IDE controller",
270 piix_chip_map
271 },
272 { PCI_PRODUCT_INTEL_82801AA_IDE,
273 0,
274 "Intel 82801AA IDE Controller (ICH)",
275 piix_chip_map,
276 },
277 { PCI_PRODUCT_INTEL_82801AB_IDE,
278 0,
279 "Intel 82801AB IDE Controller (ICH0)",
280 piix_chip_map,
281 },
282 { PCI_PRODUCT_INTEL_82801BA_IDE,
283 0,
284 "Intel 82801BA IDE Controller (ICH2)",
285 piix_chip_map,
286 },
287 { PCI_PRODUCT_INTEL_82801BAM_IDE,
288 0,
289 "Intel 82801BAM IDE Controller (ICH2)",
290 piix_chip_map,
291 },
292 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
293 0,
294 "Intel 82801CA IDE Controller",
295 piix_chip_map,
296 },
297 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
298 0,
299 "Intel 82801CA IDE Controller",
300 piix_chip_map,
301 },
302 { PCI_PRODUCT_INTEL_82801DB_IDE,
303 0,
304 "Intel 82801DB IDE Controller (ICH4)",
305 piix_chip_map,
306 },
307 { 0,
308 0,
309 NULL,
310 NULL
311 }
312 };
313
314 const struct pciide_product_desc pciide_amd_products[] = {
315 { PCI_PRODUCT_AMD_PBC756_IDE,
316 0,
317 "Advanced Micro Devices AMD756 IDE Controller",
318 amd7x6_chip_map
319 },
320 { PCI_PRODUCT_AMD_PBC766_IDE,
321 0,
322 "Advanced Micro Devices AMD766 IDE Controller",
323 amd7x6_chip_map
324 },
325 { PCI_PRODUCT_AMD_PBC768_IDE,
326 0,
327 "Advanced Micro Devices AMD768 IDE Controller",
328 amd7x6_chip_map
329 },
330 { 0,
331 0,
332 NULL,
333 NULL
334 }
335 };
336
337 const struct pciide_product_desc pciide_cmd_products[] = {
338 { PCI_PRODUCT_CMDTECH_640,
339 0,
340 "CMD Technology PCI0640",
341 cmd_chip_map
342 },
343 { PCI_PRODUCT_CMDTECH_643,
344 0,
345 "CMD Technology PCI0643",
346 cmd0643_9_chip_map,
347 },
348 { PCI_PRODUCT_CMDTECH_646,
349 0,
350 "CMD Technology PCI0646",
351 cmd0643_9_chip_map,
352 },
353 { PCI_PRODUCT_CMDTECH_648,
354 IDE_PCI_CLASS_OVERRIDE,
355 "CMD Technology PCI0648",
356 cmd0643_9_chip_map,
357 },
358 { PCI_PRODUCT_CMDTECH_649,
359 IDE_PCI_CLASS_OVERRIDE,
360 "CMD Technology PCI0649",
361 cmd0643_9_chip_map,
362 },
363 { 0,
364 0,
365 NULL,
366 NULL
367 }
368 };
369
370 const struct pciide_product_desc pciide_via_products[] = {
371 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
372 0,
373 NULL,
374 apollo_chip_map,
375 },
376 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
377 0,
378 NULL,
379 apollo_chip_map,
380 },
381 { 0,
382 0,
383 NULL,
384 NULL
385 }
386 };
387
388 const struct pciide_product_desc pciide_cypress_products[] = {
389 { PCI_PRODUCT_CONTAQ_82C693,
390 IDE_16BIT_IOSPACE,
391 "Cypress 82C693 IDE Controller",
392 cy693_chip_map,
393 },
394 { 0,
395 0,
396 NULL,
397 NULL
398 }
399 };
400
401 const struct pciide_product_desc pciide_sis_products[] = {
402 { PCI_PRODUCT_SIS_5597_IDE,
403 0,
404 "Silicon Integrated System 5597/5598 IDE controller",
405 sis_chip_map,
406 },
407 { 0,
408 0,
409 NULL,
410 NULL
411 }
412 };
413
414 const struct pciide_product_desc pciide_acer_products[] = {
415 { PCI_PRODUCT_ALI_M5229,
416 0,
417 "Acer Labs M5229 UDMA IDE Controller",
418 acer_chip_map,
419 },
420 { 0,
421 0,
422 NULL,
423 NULL
424 }
425 };
426
427 const struct pciide_product_desc pciide_promise_products[] = {
428 { PCI_PRODUCT_PROMISE_ULTRA33,
429 IDE_PCI_CLASS_OVERRIDE,
430 "Promise Ultra33/ATA Bus Master IDE Accelerator",
431 pdc202xx_chip_map,
432 },
433 { PCI_PRODUCT_PROMISE_ULTRA66,
434 IDE_PCI_CLASS_OVERRIDE,
435 "Promise Ultra66/ATA Bus Master IDE Accelerator",
436 pdc202xx_chip_map,
437 },
438 { PCI_PRODUCT_PROMISE_ULTRA100,
439 IDE_PCI_CLASS_OVERRIDE,
440 "Promise Ultra100/ATA Bus Master IDE Accelerator",
441 pdc202xx_chip_map,
442 },
443 { PCI_PRODUCT_PROMISE_ULTRA100X,
444 IDE_PCI_CLASS_OVERRIDE,
445 "Promise Ultra100/ATA Bus Master IDE Accelerator",
446 pdc202xx_chip_map,
447 },
448 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
449 IDE_PCI_CLASS_OVERRIDE,
450 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
451 pdc202xx_chip_map,
452 },
453 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
454 IDE_PCI_CLASS_OVERRIDE,
455 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
456 pdc202xx_chip_map,
457 },
458 { PCI_PRODUCT_PROMISE_ULTRA133,
459 IDE_PCI_CLASS_OVERRIDE,
460 "Promise Ultra133/ATA Bus Master IDE Accelerator",
461 pdc202xx_chip_map,
462 },
463 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
464 IDE_PCI_CLASS_OVERRIDE,
465 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
466 pdc202xx_chip_map,
467 },
468 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
469 IDE_PCI_CLASS_OVERRIDE,
470 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
471 pdc202xx_chip_map,
472 },
473 { 0,
474 0,
475 NULL,
476 NULL
477 }
478 };
479
480 const struct pciide_product_desc pciide_opti_products[] = {
481 { PCI_PRODUCT_OPTI_82C621,
482 0,
483 "OPTi 82c621 PCI IDE controller",
484 opti_chip_map,
485 },
486 { PCI_PRODUCT_OPTI_82C568,
487 0,
488 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
489 opti_chip_map,
490 },
491 { PCI_PRODUCT_OPTI_82D568,
492 0,
493 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
494 opti_chip_map,
495 },
496 { 0,
497 0,
498 NULL,
499 NULL
500 }
501 };
502
503 const struct pciide_product_desc pciide_triones_products[] = {
504 { PCI_PRODUCT_TRIONES_HPT366,
505 IDE_PCI_CLASS_OVERRIDE,
506 NULL,
507 hpt_chip_map,
508 },
509 { PCI_PRODUCT_TRIONES_HPT372,
510 IDE_PCI_CLASS_OVERRIDE,
511 NULL,
512 hpt_chip_map
513 },
514 { PCI_PRODUCT_TRIONES_HPT374,
515 IDE_PCI_CLASS_OVERRIDE,
516 NULL,
517 hpt_chip_map
518 },
519 { 0,
520 0,
521 NULL,
522 NULL
523 }
524 };
525
526 const struct pciide_product_desc pciide_acard_products[] = {
527 { PCI_PRODUCT_ACARD_ATP850U,
528 IDE_PCI_CLASS_OVERRIDE,
529 "Acard ATP850U Ultra33 IDE Controller",
530 acard_chip_map,
531 },
532 { PCI_PRODUCT_ACARD_ATP860,
533 IDE_PCI_CLASS_OVERRIDE,
534 "Acard ATP860 Ultra66 IDE Controller",
535 acard_chip_map,
536 },
537 { PCI_PRODUCT_ACARD_ATP860A,
538 IDE_PCI_CLASS_OVERRIDE,
539 "Acard ATP860-A Ultra66 IDE Controller",
540 acard_chip_map,
541 },
542 { 0,
543 0,
544 NULL,
545 NULL
546 }
547 };
548
549 const struct pciide_product_desc pciide_serverworks_products[] = {
550 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
551 0,
552 "ServerWorks OSB4 IDE Controller",
553 serverworks_chip_map,
554 },
555 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
556 0,
557 "ServerWorks CSB5 IDE Controller",
558 serverworks_chip_map,
559 },
560 { 0,
561 0,
562 NULL,
563 }
564 };
565
566 const struct pciide_product_desc pciide_symphony_products[] = {
567 { PCI_PRODUCT_SYMPHONY_82C105,
568 0,
569 "Symphony Labs 82C105 IDE controller",
570 sl82c105_chip_map,
571 },
572 { 0,
573 0,
574 NULL,
575 }
576 };
577
578 const struct pciide_product_desc pciide_winbond_products[] = {
579 { PCI_PRODUCT_WINBOND_W83C553F_1,
580 0,
581 "Winbond W83C553F IDE controller",
582 sl82c105_chip_map,
583 },
584 { 0,
585 0,
586 NULL,
587 }
588 };
589
590 struct pciide_vendor_desc {
591 u_int32_t ide_vendor;
592 const struct pciide_product_desc *ide_products;
593 };
594
595 const struct pciide_vendor_desc pciide_vendors[] = {
596 { PCI_VENDOR_INTEL, pciide_intel_products },
597 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
598 { PCI_VENDOR_VIATECH, pciide_via_products },
599 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
600 { PCI_VENDOR_SIS, pciide_sis_products },
601 { PCI_VENDOR_ALI, pciide_acer_products },
602 { PCI_VENDOR_PROMISE, pciide_promise_products },
603 { PCI_VENDOR_AMD, pciide_amd_products },
604 { PCI_VENDOR_OPTI, pciide_opti_products },
605 { PCI_VENDOR_TRIONES, pciide_triones_products },
606 { PCI_VENDOR_ACARD, pciide_acard_products },
607 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
608 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
609 { PCI_VENDOR_WINBOND, pciide_winbond_products },
610 { 0, NULL }
611 };
612
613 /* options passed via the 'flags' config keyword */
614 #define PCIIDE_OPTIONS_DMA 0x01
615 #define PCIIDE_OPTIONS_NODMA 0x02
616
617 int pciide_match __P((struct device *, struct cfdata *, void *));
618 void pciide_attach __P((struct device *, struct device *, void *));
619
620 struct cfattach pciide_ca = {
621 sizeof(struct pciide_softc), pciide_match, pciide_attach
622 };
623 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
624 int pciide_mapregs_compat __P(( struct pci_attach_args *,
625 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
626 int pciide_mapregs_native __P((struct pci_attach_args *,
627 struct pciide_channel *, bus_size_t *, bus_size_t *,
628 int (*pci_intr) __P((void *))));
629 void pciide_mapreg_dma __P((struct pciide_softc *,
630 struct pci_attach_args *));
631 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
632 void pciide_mapchan __P((struct pci_attach_args *,
633 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
634 int (*pci_intr) __P((void *))));
635 int pciide_chan_candisable __P((struct pciide_channel *));
636 void pciide_map_compat_intr __P(( struct pci_attach_args *,
637 struct pciide_channel *, int, int));
638 int pciide_compat_intr __P((void *));
639 int pciide_pci_intr __P((void *));
640 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
641
642 const struct pciide_product_desc *
643 pciide_lookup_product(id)
644 u_int32_t id;
645 {
646 const struct pciide_product_desc *pp;
647 const struct pciide_vendor_desc *vp;
648
649 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
650 if (PCI_VENDOR(id) == vp->ide_vendor)
651 break;
652
653 if ((pp = vp->ide_products) == NULL)
654 return NULL;
655
656 for (; pp->chip_map != NULL; pp++)
657 if (PCI_PRODUCT(id) == pp->ide_product)
658 break;
659
660 if (pp->chip_map == NULL)
661 return NULL;
662 return pp;
663 }
664
665 int
666 pciide_match(parent, match, aux)
667 struct device *parent;
668 struct cfdata *match;
669 void *aux;
670 {
671 struct pci_attach_args *pa = aux;
672 const struct pciide_product_desc *pp;
673
674 /*
675 * Check the ID register to see that it's a PCI IDE controller.
676 * If it is, we assume that we can deal with it; it _should_
677 * work in a standardized way...
678 */
679 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
680 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
681 return (1);
682 }
683
684 /*
685 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE
686 * controllers. Let see if we can deal with it anyway.
687 */
688 pp = pciide_lookup_product(pa->pa_id);
689 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
690 return (1);
691 }
692
693 return (0);
694 }
695
696 void
697 pciide_attach(parent, self, aux)
698 struct device *parent, *self;
699 void *aux;
700 {
701 struct pci_attach_args *pa = aux;
702 pci_chipset_tag_t pc = pa->pa_pc;
703 pcitag_t tag = pa->pa_tag;
704 struct pciide_softc *sc = (struct pciide_softc *)self;
705 pcireg_t csr;
706 char devinfo[256];
707 const char *displaydev;
708
709 sc->sc_pp = pciide_lookup_product(pa->pa_id);
710 if (sc->sc_pp == NULL) {
711 sc->sc_pp = &default_product_desc;
712 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
713 displaydev = devinfo;
714 } else
715 displaydev = sc->sc_pp->ide_name;
716
717 /* if displaydev == NULL, printf is done in chip-specific map */
718 if (displaydev)
719 printf(": %s (rev. 0x%02x)\n", displaydev,
720 PCI_REVISION(pa->pa_class));
721
722 sc->sc_pc = pa->pa_pc;
723 sc->sc_tag = pa->pa_tag;
724 #ifdef WDCDEBUG
725 if (wdcdebug_pciide_mask & DEBUG_PROBE)
726 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
727 #endif
728 sc->sc_pp->chip_map(sc, pa);
729
730 if (sc->sc_dma_ok) {
731 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
732 csr |= PCI_COMMAND_MASTER_ENABLE;
733 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
734 }
735 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
736 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
737 }
738
739 /* tell wether the chip is enabled or not */
740 int
741 pciide_chipen(sc, pa)
742 struct pciide_softc *sc;
743 struct pci_attach_args *pa;
744 {
745 pcireg_t csr;
746 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
747 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
748 PCI_COMMAND_STATUS_REG);
749 printf("%s: device disabled (at %s)\n",
750 sc->sc_wdcdev.sc_dev.dv_xname,
751 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
752 "device" : "bridge");
753 return 0;
754 }
755 return 1;
756 }
757
758 int
759 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
760 struct pci_attach_args *pa;
761 struct pciide_channel *cp;
762 int compatchan;
763 bus_size_t *cmdsizep, *ctlsizep;
764 {
765 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
766 struct channel_softc *wdc_cp = &cp->wdc_channel;
767
768 cp->compat = 1;
769 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
770 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
771
772 wdc_cp->cmd_iot = pa->pa_iot;
773 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
774 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
775 printf("%s: couldn't map %s channel cmd regs\n",
776 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
777 return (0);
778 }
779
780 wdc_cp->ctl_iot = pa->pa_iot;
781 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
782 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
783 printf("%s: couldn't map %s channel ctl regs\n",
784 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
785 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
786 PCIIDE_COMPAT_CMD_SIZE);
787 return (0);
788 }
789
790 return (1);
791 }
792
793 int
794 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
795 struct pci_attach_args * pa;
796 struct pciide_channel *cp;
797 bus_size_t *cmdsizep, *ctlsizep;
798 int (*pci_intr) __P((void *));
799 {
800 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
801 struct channel_softc *wdc_cp = &cp->wdc_channel;
802 const char *intrstr;
803 pci_intr_handle_t intrhandle;
804
805 cp->compat = 0;
806
807 if (sc->sc_pci_ih == NULL) {
808 if (pci_intr_map(pa, &intrhandle) != 0) {
809 printf("%s: couldn't map native-PCI interrupt\n",
810 sc->sc_wdcdev.sc_dev.dv_xname);
811 return 0;
812 }
813 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
814 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
815 intrhandle, IPL_BIO, pci_intr, sc);
816 if (sc->sc_pci_ih != NULL) {
817 printf("%s: using %s for native-PCI interrupt\n",
818 sc->sc_wdcdev.sc_dev.dv_xname,
819 intrstr ? intrstr : "unknown interrupt");
820 } else {
821 printf("%s: couldn't establish native-PCI interrupt",
822 sc->sc_wdcdev.sc_dev.dv_xname);
823 if (intrstr != NULL)
824 printf(" at %s", intrstr);
825 printf("\n");
826 return 0;
827 }
828 }
829 cp->ih = sc->sc_pci_ih;
830 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
831 PCI_MAPREG_TYPE_IO, 0,
832 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
833 printf("%s: couldn't map %s channel cmd regs\n",
834 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
835 return 0;
836 }
837
838 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
839 PCI_MAPREG_TYPE_IO, 0,
840 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
841 printf("%s: couldn't map %s channel ctl regs\n",
842 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
843 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
844 return 0;
845 }
846 /*
847 * In native mode, 4 bytes of I/O space are mapped for the control
848 * register, the control register is at offset 2. Pass the generic
849 * code a handle for only one byte at the rigth offset.
850 */
851 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
852 &wdc_cp->ctl_ioh) != 0) {
853 printf("%s: unable to subregion %s channel ctl regs\n",
854 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
855 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
856 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
857 return 0;
858 }
859 return (1);
860 }
861
862 void
863 pciide_mapreg_dma(sc, pa)
864 struct pciide_softc *sc;
865 struct pci_attach_args *pa;
866 {
867 pcireg_t maptype;
868 bus_addr_t addr;
869
870 /*
871 * Map DMA registers
872 *
873 * Note that sc_dma_ok is the right variable to test to see if
874 * DMA can be done. If the interface doesn't support DMA,
875 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
876 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
877 * non-zero if the interface supports DMA and the registers
878 * could be mapped.
879 *
880 * XXX Note that despite the fact that the Bus Master IDE specs
881 * XXX say that "The bus master IDE function uses 16 bytes of IO
882 * XXX space," some controllers (at least the United
883 * XXX Microelectronics UM8886BF) place it in memory space.
884 */
885 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
886 PCIIDE_REG_BUS_MASTER_DMA);
887
888 switch (maptype) {
889 case PCI_MAPREG_TYPE_IO:
890 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
891 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
892 &addr, NULL, NULL) == 0);
893 if (sc->sc_dma_ok == 0) {
894 printf(", but unused (couldn't query registers)");
895 break;
896 }
897 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
898 && addr >= 0x10000) {
899 sc->sc_dma_ok = 0;
900 printf(", but unused (registers at unsafe address "
901 "%#lx)", (unsigned long)addr);
902 break;
903 }
904 /* FALLTHROUGH */
905
906 case PCI_MAPREG_MEM_TYPE_32BIT:
907 sc->sc_dma_ok = (pci_mapreg_map(pa,
908 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
909 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
910 sc->sc_dmat = pa->pa_dmat;
911 if (sc->sc_dma_ok == 0) {
912 printf(", but unused (couldn't map registers)");
913 } else {
914 sc->sc_wdcdev.dma_arg = sc;
915 sc->sc_wdcdev.dma_init = pciide_dma_init;
916 sc->sc_wdcdev.dma_start = pciide_dma_start;
917 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
918 }
919
920 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
921 PCIIDE_OPTIONS_NODMA) {
922 printf(", but unused (forced off by config file)");
923 sc->sc_dma_ok = 0;
924 }
925 break;
926
927 default:
928 sc->sc_dma_ok = 0;
929 printf(", but unsupported register maptype (0x%x)", maptype);
930 }
931 }
932
933 int
934 pciide_compat_intr(arg)
935 void *arg;
936 {
937 struct pciide_channel *cp = arg;
938
939 #ifdef DIAGNOSTIC
940 /* should only be called for a compat channel */
941 if (cp->compat == 0)
942 panic("pciide compat intr called for non-compat chan %p\n", cp);
943 #endif
944 return (wdcintr(&cp->wdc_channel));
945 }
946
947 int
948 pciide_pci_intr(arg)
949 void *arg;
950 {
951 struct pciide_softc *sc = arg;
952 struct pciide_channel *cp;
953 struct channel_softc *wdc_cp;
954 int i, rv, crv;
955
956 rv = 0;
957 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
958 cp = &sc->pciide_channels[i];
959 wdc_cp = &cp->wdc_channel;
960
961 /* If a compat channel skip. */
962 if (cp->compat)
963 continue;
964 /* if this channel not waiting for intr, skip */
965 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
966 continue;
967
968 crv = wdcintr(wdc_cp);
969 if (crv == 0)
970 ; /* leave rv alone */
971 else if (crv == 1)
972 rv = 1; /* claim the intr */
973 else if (rv == 0) /* crv should be -1 in this case */
974 rv = crv; /* if we've done no better, take it */
975 }
976 return (rv);
977 }
978
979 void
980 pciide_channel_dma_setup(cp)
981 struct pciide_channel *cp;
982 {
983 int drive;
984 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
985 struct ata_drive_datas *drvp;
986
987 for (drive = 0; drive < 2; drive++) {
988 drvp = &cp->wdc_channel.ch_drive[drive];
989 /* If no drive, skip */
990 if ((drvp->drive_flags & DRIVE) == 0)
991 continue;
992 /* setup DMA if needed */
993 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
994 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
995 sc->sc_dma_ok == 0) {
996 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
997 continue;
998 }
999 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1000 != 0) {
1001 /* Abort DMA setup */
1002 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1003 continue;
1004 }
1005 }
1006 }
1007
1008 int
1009 pciide_dma_table_setup(sc, channel, drive)
1010 struct pciide_softc *sc;
1011 int channel, drive;
1012 {
1013 bus_dma_segment_t seg;
1014 int error, rseg;
1015 const bus_size_t dma_table_size =
1016 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1017 struct pciide_dma_maps *dma_maps =
1018 &sc->pciide_channels[channel].dma_maps[drive];
1019
1020 /* If table was already allocated, just return */
1021 if (dma_maps->dma_table)
1022 return 0;
1023
1024 /* Allocate memory for the DMA tables and map it */
1025 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1026 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1027 BUS_DMA_NOWAIT)) != 0) {
1028 printf("%s:%d: unable to allocate table DMA for "
1029 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1030 channel, drive, error);
1031 return error;
1032 }
1033 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1034 dma_table_size,
1035 (caddr_t *)&dma_maps->dma_table,
1036 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1037 printf("%s:%d: unable to map table DMA for"
1038 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1039 channel, drive, error);
1040 return error;
1041 }
1042 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1043 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1044 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1045
1046 /* Create and load table DMA map for this disk */
1047 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1048 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1049 &dma_maps->dmamap_table)) != 0) {
1050 printf("%s:%d: unable to create table DMA map for "
1051 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1052 channel, drive, error);
1053 return error;
1054 }
1055 if ((error = bus_dmamap_load(sc->sc_dmat,
1056 dma_maps->dmamap_table,
1057 dma_maps->dma_table,
1058 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1059 printf("%s:%d: unable to load table DMA map for "
1060 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1061 channel, drive, error);
1062 return error;
1063 }
1064 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1065 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1066 DEBUG_PROBE);
1067 /* Create a xfer DMA map for this drive */
1068 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1069 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1070 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1071 &dma_maps->dmamap_xfer)) != 0) {
1072 printf("%s:%d: unable to create xfer DMA map for "
1073 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1074 channel, drive, error);
1075 return error;
1076 }
1077 return 0;
1078 }
1079
1080 int
1081 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1082 void *v;
1083 int channel, drive;
1084 void *databuf;
1085 size_t datalen;
1086 int flags;
1087 {
1088 struct pciide_softc *sc = v;
1089 int error, seg;
1090 struct pciide_dma_maps *dma_maps =
1091 &sc->pciide_channels[channel].dma_maps[drive];
1092
1093 error = bus_dmamap_load(sc->sc_dmat,
1094 dma_maps->dmamap_xfer,
1095 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1096 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1097 if (error) {
1098 printf("%s:%d: unable to load xfer DMA map for"
1099 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1100 channel, drive, error);
1101 return error;
1102 }
1103
1104 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1105 dma_maps->dmamap_xfer->dm_mapsize,
1106 (flags & WDC_DMA_READ) ?
1107 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1108
1109 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1110 #ifdef DIAGNOSTIC
1111 /* A segment must not cross a 64k boundary */
1112 {
1113 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1114 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1115 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1116 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1117 printf("pciide_dma: segment %d physical addr 0x%lx"
1118 " len 0x%lx not properly aligned\n",
1119 seg, phys, len);
1120 panic("pciide_dma: buf align");
1121 }
1122 }
1123 #endif
1124 dma_maps->dma_table[seg].base_addr =
1125 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1126 dma_maps->dma_table[seg].byte_count =
1127 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1128 IDEDMA_BYTE_COUNT_MASK);
1129 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1130 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1131 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1132
1133 }
1134 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1135 htole32(IDEDMA_BYTE_COUNT_EOT);
1136
1137 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1138 dma_maps->dmamap_table->dm_mapsize,
1139 BUS_DMASYNC_PREWRITE);
1140
1141 /* Maps are ready. Start DMA function */
1142 #ifdef DIAGNOSTIC
1143 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1144 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1145 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1146 panic("pciide_dma_init: table align");
1147 }
1148 #endif
1149
1150 /* Clear status bits */
1151 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1152 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1153 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1154 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1155 /* Write table addr */
1156 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1157 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1158 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1159 /* set read/write */
1160 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1161 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1162 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1163 /* remember flags */
1164 dma_maps->dma_flags = flags;
1165 return 0;
1166 }
1167
1168 void
1169 pciide_dma_start(v, channel, drive)
1170 void *v;
1171 int channel, drive;
1172 {
1173 struct pciide_softc *sc = v;
1174
1175 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1176 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1177 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1178 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1179 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1180 }
1181
1182 int
1183 pciide_dma_finish(v, channel, drive, force)
1184 void *v;
1185 int channel, drive;
1186 int force;
1187 {
1188 struct pciide_softc *sc = v;
1189 u_int8_t status;
1190 int error = 0;
1191 struct pciide_dma_maps *dma_maps =
1192 &sc->pciide_channels[channel].dma_maps[drive];
1193
1194 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1195 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1196 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1197 DEBUG_XFERS);
1198
1199 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1200 return WDC_DMAST_NOIRQ;
1201
1202 /* stop DMA channel */
1203 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1204 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1205 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1206 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1207
1208 /* Unload the map of the data buffer */
1209 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1210 dma_maps->dmamap_xfer->dm_mapsize,
1211 (dma_maps->dma_flags & WDC_DMA_READ) ?
1212 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1213 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1214
1215 if ((status & IDEDMA_CTL_ERR) != 0) {
1216 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1217 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1218 error |= WDC_DMAST_ERR;
1219 }
1220
1221 if ((status & IDEDMA_CTL_INTR) == 0) {
1222 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1223 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1224 drive, status);
1225 error |= WDC_DMAST_NOIRQ;
1226 }
1227
1228 if ((status & IDEDMA_CTL_ACT) != 0) {
1229 /* data underrun, may be a valid condition for ATAPI */
1230 error |= WDC_DMAST_UNDER;
1231 }
1232 return error;
1233 }
1234
1235 void
1236 pciide_irqack(chp)
1237 struct channel_softc *chp;
1238 {
1239 struct pciide_channel *cp = (struct pciide_channel*)chp;
1240 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1241
1242 /* clear status bits in IDE DMA registers */
1243 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1244 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1245 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1246 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1247 }
1248
1249 /* some common code used by several chip_map */
1250 int
1251 pciide_chansetup(sc, channel, interface)
1252 struct pciide_softc *sc;
1253 int channel;
1254 pcireg_t interface;
1255 {
1256 struct pciide_channel *cp = &sc->pciide_channels[channel];
1257 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1258 cp->name = PCIIDE_CHANNEL_NAME(channel);
1259 cp->wdc_channel.channel = channel;
1260 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1261 cp->wdc_channel.ch_queue =
1262 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1263 if (cp->wdc_channel.ch_queue == NULL) {
1264 printf("%s %s channel: "
1265 "can't allocate memory for command queue",
1266 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1267 return 0;
1268 }
1269 printf("%s: %s channel %s to %s mode\n",
1270 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1271 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1272 "configured" : "wired",
1273 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1274 "native-PCI" : "compatibility");
1275 return 1;
1276 }
1277
1278 /* some common code used by several chip channel_map */
1279 void
1280 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1281 struct pci_attach_args *pa;
1282 struct pciide_channel *cp;
1283 pcireg_t interface;
1284 bus_size_t *cmdsizep, *ctlsizep;
1285 int (*pci_intr) __P((void *));
1286 {
1287 struct channel_softc *wdc_cp = &cp->wdc_channel;
1288
1289 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1290 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1291 pci_intr);
1292 else
1293 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1294 wdc_cp->channel, cmdsizep, ctlsizep);
1295
1296 if (cp->hw_ok == 0)
1297 return;
1298 wdc_cp->data32iot = wdc_cp->cmd_iot;
1299 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1300 wdcattach(wdc_cp);
1301 }
1302
1303 /*
1304 * Generic code to call to know if a channel can be disabled. Return 1
1305 * if channel can be disabled, 0 if not
1306 */
1307 int
1308 pciide_chan_candisable(cp)
1309 struct pciide_channel *cp;
1310 {
1311 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1312 struct channel_softc *wdc_cp = &cp->wdc_channel;
1313
1314 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1315 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1316 printf("%s: disabling %s channel (no drives)\n",
1317 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1318 cp->hw_ok = 0;
1319 return 1;
1320 }
1321 return 0;
1322 }
1323
1324 /*
1325 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1326 * Set hw_ok=0 on failure
1327 */
1328 void
1329 pciide_map_compat_intr(pa, cp, compatchan, interface)
1330 struct pci_attach_args *pa;
1331 struct pciide_channel *cp;
1332 int compatchan, interface;
1333 {
1334 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1335 struct channel_softc *wdc_cp = &cp->wdc_channel;
1336
1337 if (cp->hw_ok == 0)
1338 return;
1339 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1340 return;
1341
1342 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1343 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1344 pa, compatchan, pciide_compat_intr, cp);
1345 if (cp->ih == NULL) {
1346 #endif
1347 printf("%s: no compatibility interrupt for use by %s "
1348 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1349 cp->hw_ok = 0;
1350 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1351 }
1352 #endif
1353 }
1354
1355 void
1356 pciide_print_modes(cp)
1357 struct pciide_channel *cp;
1358 {
1359 wdc_print_modes(&cp->wdc_channel);
1360 }
1361
1362 void
1363 default_chip_map(sc, pa)
1364 struct pciide_softc *sc;
1365 struct pci_attach_args *pa;
1366 {
1367 struct pciide_channel *cp;
1368 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1369 pcireg_t csr;
1370 int channel, drive;
1371 struct ata_drive_datas *drvp;
1372 u_int8_t idedma_ctl;
1373 bus_size_t cmdsize, ctlsize;
1374 char *failreason;
1375
1376 if (pciide_chipen(sc, pa) == 0)
1377 return;
1378
1379 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1380 printf("%s: bus-master DMA support present",
1381 sc->sc_wdcdev.sc_dev.dv_xname);
1382 if (sc->sc_pp == &default_product_desc &&
1383 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1384 PCIIDE_OPTIONS_DMA) == 0) {
1385 printf(", but unused (no driver support)");
1386 sc->sc_dma_ok = 0;
1387 } else {
1388 pciide_mapreg_dma(sc, pa);
1389 if (sc->sc_dma_ok != 0)
1390 printf(", used without full driver "
1391 "support");
1392 }
1393 } else {
1394 printf("%s: hardware does not support DMA",
1395 sc->sc_wdcdev.sc_dev.dv_xname);
1396 sc->sc_dma_ok = 0;
1397 }
1398 printf("\n");
1399 if (sc->sc_dma_ok) {
1400 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1401 sc->sc_wdcdev.irqack = pciide_irqack;
1402 }
1403 sc->sc_wdcdev.PIO_cap = 0;
1404 sc->sc_wdcdev.DMA_cap = 0;
1405
1406 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1407 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1408 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1409
1410 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1411 cp = &sc->pciide_channels[channel];
1412 if (pciide_chansetup(sc, channel, interface) == 0)
1413 continue;
1414 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1415 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1416 &ctlsize, pciide_pci_intr);
1417 } else {
1418 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1419 channel, &cmdsize, &ctlsize);
1420 }
1421 if (cp->hw_ok == 0)
1422 continue;
1423 /*
1424 * Check to see if something appears to be there.
1425 */
1426 failreason = NULL;
1427 if (!wdcprobe(&cp->wdc_channel)) {
1428 failreason = "not responding; disabled or no drives?";
1429 goto next;
1430 }
1431 /*
1432 * Now, make sure it's actually attributable to this PCI IDE
1433 * channel by trying to access the channel again while the
1434 * PCI IDE controller's I/O space is disabled. (If the
1435 * channel no longer appears to be there, it belongs to
1436 * this controller.) YUCK!
1437 */
1438 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1439 PCI_COMMAND_STATUS_REG);
1440 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1441 csr & ~PCI_COMMAND_IO_ENABLE);
1442 if (wdcprobe(&cp->wdc_channel))
1443 failreason = "other hardware responding at addresses";
1444 pci_conf_write(sc->sc_pc, sc->sc_tag,
1445 PCI_COMMAND_STATUS_REG, csr);
1446 next:
1447 if (failreason) {
1448 printf("%s: %s channel ignored (%s)\n",
1449 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1450 failreason);
1451 cp->hw_ok = 0;
1452 bus_space_unmap(cp->wdc_channel.cmd_iot,
1453 cp->wdc_channel.cmd_ioh, cmdsize);
1454 if (interface & PCIIDE_INTERFACE_PCI(channel))
1455 bus_space_unmap(cp->wdc_channel.ctl_iot,
1456 cp->ctl_baseioh, ctlsize);
1457 else
1458 bus_space_unmap(cp->wdc_channel.ctl_iot,
1459 cp->wdc_channel.ctl_ioh, ctlsize);
1460 } else {
1461 pciide_map_compat_intr(pa, cp, channel, interface);
1462 }
1463 if (cp->hw_ok) {
1464 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1465 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1466 wdcattach(&cp->wdc_channel);
1467 }
1468 }
1469
1470 if (sc->sc_dma_ok == 0)
1471 return;
1472
1473 /* Allocate DMA maps */
1474 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1475 idedma_ctl = 0;
1476 cp = &sc->pciide_channels[channel];
1477 for (drive = 0; drive < 2; drive++) {
1478 drvp = &cp->wdc_channel.ch_drive[drive];
1479 /* If no drive, skip */
1480 if ((drvp->drive_flags & DRIVE) == 0)
1481 continue;
1482 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1483 continue;
1484 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1485 /* Abort DMA setup */
1486 printf("%s:%d:%d: can't allocate DMA maps, "
1487 "using PIO transfers\n",
1488 sc->sc_wdcdev.sc_dev.dv_xname,
1489 channel, drive);
1490 drvp->drive_flags &= ~DRIVE_DMA;
1491 }
1492 printf("%s:%d:%d: using DMA data transfers\n",
1493 sc->sc_wdcdev.sc_dev.dv_xname,
1494 channel, drive);
1495 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1496 }
1497 if (idedma_ctl != 0) {
1498 /* Add software bits in status register */
1499 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1500 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1501 idedma_ctl);
1502 }
1503 }
1504 }
1505
1506 void
1507 piix_chip_map(sc, pa)
1508 struct pciide_softc *sc;
1509 struct pci_attach_args *pa;
1510 {
1511 struct pciide_channel *cp;
1512 int channel;
1513 u_int32_t idetim;
1514 bus_size_t cmdsize, ctlsize;
1515
1516 if (pciide_chipen(sc, pa) == 0)
1517 return;
1518
1519 printf("%s: bus-master DMA support present",
1520 sc->sc_wdcdev.sc_dev.dv_xname);
1521 pciide_mapreg_dma(sc, pa);
1522 printf("\n");
1523 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1524 WDC_CAPABILITY_MODE;
1525 if (sc->sc_dma_ok) {
1526 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1527 sc->sc_wdcdev.irqack = pciide_irqack;
1528 switch(sc->sc_pp->ide_product) {
1529 case PCI_PRODUCT_INTEL_82371AB_IDE:
1530 case PCI_PRODUCT_INTEL_82440MX_IDE:
1531 case PCI_PRODUCT_INTEL_82801AA_IDE:
1532 case PCI_PRODUCT_INTEL_82801AB_IDE:
1533 case PCI_PRODUCT_INTEL_82801BA_IDE:
1534 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1535 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1536 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1537 case PCI_PRODUCT_INTEL_82801DB_IDE:
1538 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1539 }
1540 }
1541 sc->sc_wdcdev.PIO_cap = 4;
1542 sc->sc_wdcdev.DMA_cap = 2;
1543 switch(sc->sc_pp->ide_product) {
1544 case PCI_PRODUCT_INTEL_82801AA_IDE:
1545 sc->sc_wdcdev.UDMA_cap = 4;
1546 break;
1547 case PCI_PRODUCT_INTEL_82801BA_IDE:
1548 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1549 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1550 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1551 case PCI_PRODUCT_INTEL_82801DB_IDE:
1552 sc->sc_wdcdev.UDMA_cap = 5;
1553 break;
1554 default:
1555 sc->sc_wdcdev.UDMA_cap = 2;
1556 }
1557 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1558 sc->sc_wdcdev.set_modes = piix_setup_channel;
1559 else
1560 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1561 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1562 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1563
1564 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1565 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1566 DEBUG_PROBE);
1567 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1568 WDCDEBUG_PRINT((", sidetim=0x%x",
1569 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1570 DEBUG_PROBE);
1571 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1572 WDCDEBUG_PRINT((", udamreg 0x%x",
1573 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1574 DEBUG_PROBE);
1575 }
1576 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1577 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1578 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1579 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1580 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1581 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1582 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1583 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1584 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1585 DEBUG_PROBE);
1586 }
1587
1588 }
1589 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1590
1591 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1592 cp = &sc->pciide_channels[channel];
1593 /* PIIX is compat-only */
1594 if (pciide_chansetup(sc, channel, 0) == 0)
1595 continue;
1596 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1597 if ((PIIX_IDETIM_READ(idetim, channel) &
1598 PIIX_IDETIM_IDE) == 0) {
1599 printf("%s: %s channel ignored (disabled)\n",
1600 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1601 continue;
1602 }
1603 /* PIIX are compat-only pciide devices */
1604 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1605 if (cp->hw_ok == 0)
1606 continue;
1607 if (pciide_chan_candisable(cp)) {
1608 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1609 channel);
1610 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1611 idetim);
1612 }
1613 pciide_map_compat_intr(pa, cp, channel, 0);
1614 if (cp->hw_ok == 0)
1615 continue;
1616 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1617 }
1618
1619 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1620 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1621 DEBUG_PROBE);
1622 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1623 WDCDEBUG_PRINT((", sidetim=0x%x",
1624 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1625 DEBUG_PROBE);
1626 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1627 WDCDEBUG_PRINT((", udamreg 0x%x",
1628 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1629 DEBUG_PROBE);
1630 }
1631 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1632 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1633 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1634 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1635 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1636 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1637 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1638 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1639 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1640 DEBUG_PROBE);
1641 }
1642 }
1643 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1644 }
1645
1646 void
1647 piix_setup_channel(chp)
1648 struct channel_softc *chp;
1649 {
1650 u_int8_t mode[2], drive;
1651 u_int32_t oidetim, idetim, idedma_ctl;
1652 struct pciide_channel *cp = (struct pciide_channel*)chp;
1653 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1654 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1655
1656 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1657 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1658 idedma_ctl = 0;
1659
1660 /* set up new idetim: Enable IDE registers decode */
1661 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1662 chp->channel);
1663
1664 /* setup DMA */
1665 pciide_channel_dma_setup(cp);
1666
1667 /*
1668 * Here we have to mess up with drives mode: PIIX can't have
1669 * different timings for master and slave drives.
1670 * We need to find the best combination.
1671 */
1672
1673 /* If both drives supports DMA, take the lower mode */
1674 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1675 (drvp[1].drive_flags & DRIVE_DMA)) {
1676 mode[0] = mode[1] =
1677 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1678 drvp[0].DMA_mode = mode[0];
1679 drvp[1].DMA_mode = mode[1];
1680 goto ok;
1681 }
1682 /*
1683 * If only one drive supports DMA, use its mode, and
1684 * put the other one in PIO mode 0 if mode not compatible
1685 */
1686 if (drvp[0].drive_flags & DRIVE_DMA) {
1687 mode[0] = drvp[0].DMA_mode;
1688 mode[1] = drvp[1].PIO_mode;
1689 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1690 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1691 mode[1] = drvp[1].PIO_mode = 0;
1692 goto ok;
1693 }
1694 if (drvp[1].drive_flags & DRIVE_DMA) {
1695 mode[1] = drvp[1].DMA_mode;
1696 mode[0] = drvp[0].PIO_mode;
1697 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1698 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1699 mode[0] = drvp[0].PIO_mode = 0;
1700 goto ok;
1701 }
1702 /*
1703 * If both drives are not DMA, takes the lower mode, unless
1704 * one of them is PIO mode < 2
1705 */
1706 if (drvp[0].PIO_mode < 2) {
1707 mode[0] = drvp[0].PIO_mode = 0;
1708 mode[1] = drvp[1].PIO_mode;
1709 } else if (drvp[1].PIO_mode < 2) {
1710 mode[1] = drvp[1].PIO_mode = 0;
1711 mode[0] = drvp[0].PIO_mode;
1712 } else {
1713 mode[0] = mode[1] =
1714 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1715 drvp[0].PIO_mode = mode[0];
1716 drvp[1].PIO_mode = mode[1];
1717 }
1718 ok: /* The modes are setup */
1719 for (drive = 0; drive < 2; drive++) {
1720 if (drvp[drive].drive_flags & DRIVE_DMA) {
1721 idetim |= piix_setup_idetim_timings(
1722 mode[drive], 1, chp->channel);
1723 goto end;
1724 }
1725 }
1726 /* If we are there, none of the drives are DMA */
1727 if (mode[0] >= 2)
1728 idetim |= piix_setup_idetim_timings(
1729 mode[0], 0, chp->channel);
1730 else
1731 idetim |= piix_setup_idetim_timings(
1732 mode[1], 0, chp->channel);
1733 end: /*
1734 * timing mode is now set up in the controller. Enable
1735 * it per-drive
1736 */
1737 for (drive = 0; drive < 2; drive++) {
1738 /* If no drive, skip */
1739 if ((drvp[drive].drive_flags & DRIVE) == 0)
1740 continue;
1741 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1742 if (drvp[drive].drive_flags & DRIVE_DMA)
1743 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1744 }
1745 if (idedma_ctl != 0) {
1746 /* Add software bits in status register */
1747 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1748 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1749 idedma_ctl);
1750 }
1751 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1752 pciide_print_modes(cp);
1753 }
1754
1755 void
1756 piix3_4_setup_channel(chp)
1757 struct channel_softc *chp;
1758 {
1759 struct ata_drive_datas *drvp;
1760 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1761 struct pciide_channel *cp = (struct pciide_channel*)chp;
1762 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1763 int drive;
1764 int channel = chp->channel;
1765
1766 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1767 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1768 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1769 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1770 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1771 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1772 PIIX_SIDETIM_RTC_MASK(channel));
1773
1774 idedma_ctl = 0;
1775 /* If channel disabled, no need to go further */
1776 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1777 return;
1778 /* set up new idetim: Enable IDE registers decode */
1779 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1780
1781 /* setup DMA if needed */
1782 pciide_channel_dma_setup(cp);
1783
1784 for (drive = 0; drive < 2; drive++) {
1785 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1786 PIIX_UDMATIM_SET(0x3, channel, drive));
1787 drvp = &chp->ch_drive[drive];
1788 /* If no drive, skip */
1789 if ((drvp->drive_flags & DRIVE) == 0)
1790 continue;
1791 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1792 (drvp->drive_flags & DRIVE_UDMA) == 0))
1793 goto pio;
1794
1795 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1796 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1797 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1798 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1799 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1800 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1801 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1802 ideconf |= PIIX_CONFIG_PINGPONG;
1803 }
1804 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1805 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1806 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1807 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1808 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1809 /* setup Ultra/100 */
1810 if (drvp->UDMA_mode > 2 &&
1811 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1812 drvp->UDMA_mode = 2;
1813 if (drvp->UDMA_mode > 4) {
1814 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1815 } else {
1816 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1817 if (drvp->UDMA_mode > 2) {
1818 ideconf |= PIIX_CONFIG_UDMA66(channel,
1819 drive);
1820 } else {
1821 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1822 drive);
1823 }
1824 }
1825 }
1826 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1827 /* setup Ultra/66 */
1828 if (drvp->UDMA_mode > 2 &&
1829 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1830 drvp->UDMA_mode = 2;
1831 if (drvp->UDMA_mode > 2)
1832 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1833 else
1834 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1835 }
1836 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1837 (drvp->drive_flags & DRIVE_UDMA)) {
1838 /* use Ultra/DMA */
1839 drvp->drive_flags &= ~DRIVE_DMA;
1840 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1841 udmareg |= PIIX_UDMATIM_SET(
1842 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1843 } else {
1844 /* use Multiword DMA */
1845 drvp->drive_flags &= ~DRIVE_UDMA;
1846 if (drive == 0) {
1847 idetim |= piix_setup_idetim_timings(
1848 drvp->DMA_mode, 1, channel);
1849 } else {
1850 sidetim |= piix_setup_sidetim_timings(
1851 drvp->DMA_mode, 1, channel);
1852 				idetim = PIIX_IDETIM_SET(idetim,
1853 PIIX_IDETIM_SITRE, channel);
1854 }
1855 }
1856 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1857
1858 pio: /* use PIO mode */
1859 idetim |= piix_setup_idetim_drvs(drvp);
1860 if (drive == 0) {
1861 idetim |= piix_setup_idetim_timings(
1862 drvp->PIO_mode, 0, channel);
1863 } else {
1864 sidetim |= piix_setup_sidetim_timings(
1865 drvp->PIO_mode, 0, channel);
1866 			idetim = PIIX_IDETIM_SET(idetim,
1867 PIIX_IDETIM_SITRE, channel);
1868 }
1869 }
1870 if (idedma_ctl != 0) {
1871 /* Add software bits in status register */
1872 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1873 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1874 idedma_ctl);
1875 }
1876 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1877 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1878 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1879 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1880 pciide_print_modes(cp);
1881 }
1882
1883
1884 /* setup ISP and RTC fields, based on mode */
1885 static u_int32_t
1886 piix_setup_idetim_timings(mode, dma, channel)
1887 u_int8_t mode;
1888 u_int8_t dma;
1889 u_int8_t channel;
1890 {
1891
1892 if (dma)
1893 return PIIX_IDETIM_SET(0,
1894 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1895 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1896 channel);
1897 else
1898 return PIIX_IDETIM_SET(0,
1899 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1900 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1901 channel);
1902 }
1903
1904 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1905 static u_int32_t
1906 piix_setup_idetim_drvs(drvp)
1907 struct ata_drive_datas *drvp;
1908 {
1909 u_int32_t ret = 0;
1910 struct channel_softc *chp = drvp->chnl_softc;
1911 u_int8_t channel = chp->channel;
1912 u_int8_t drive = drvp->drive;
1913
1914 	/*
1915 	 * If the drive is using UDMA, the timing setup is independent,
1916 	 * so just check DMA and PIO here.
1917 	 */
1918 if (drvp->drive_flags & DRIVE_DMA) {
1919 /* if mode = DMA mode 0, use compatible timings */
1920 if ((drvp->drive_flags & DRIVE_DMA) &&
1921 drvp->DMA_mode == 0) {
1922 drvp->PIO_mode = 0;
1923 return ret;
1924 }
1925 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1926 		/*
1927 		 * If the PIO and DMA timings are the same, use fast timings
1928 		 * for PIO too; otherwise fall back to compatible timings.
1929 		 */
1930 if ((piix_isp_pio[drvp->PIO_mode] !=
1931 piix_isp_dma[drvp->DMA_mode]) ||
1932 (piix_rtc_pio[drvp->PIO_mode] !=
1933 piix_rtc_dma[drvp->DMA_mode]))
1934 drvp->PIO_mode = 0;
1935 /* if PIO mode <= 2, use compat timings for PIO */
1936 if (drvp->PIO_mode <= 2) {
1937 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1938 channel);
1939 return ret;
1940 }
1941 }
1942
1943 	/*
1944 	 * Now set up PIO modes. If the mode is < 2, use compatible
1945 	 * timings; else enable fast timings. Enable IORDY and
1946 	 * prefetch/post if PIO mode >= 3.
1947 	 */
1948
1949 if (drvp->PIO_mode < 2)
1950 return ret;
1951
1952 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1953 if (drvp->PIO_mode >= 3) {
1954 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1955 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1956 }
1957 return ret;
1958 }
1959
1960 /* setup values in SIDETIM registers, based on mode */
1961 static u_int32_t
1962 piix_setup_sidetim_timings(mode, dma, channel)
1963 u_int8_t mode;
1964 u_int8_t dma;
1965 u_int8_t channel;
1966 {
1967 if (dma)
1968 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1969 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1970 else
1971 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1972 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1973 }
1974
1975 void
1976 amd7x6_chip_map(sc, pa)
1977 struct pciide_softc *sc;
1978 struct pci_attach_args *pa;
1979 {
1980 struct pciide_channel *cp;
1981 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1982 int channel;
1983 pcireg_t chanenable;
1984 bus_size_t cmdsize, ctlsize;
1985
1986 if (pciide_chipen(sc, pa) == 0)
1987 return;
1988 printf("%s: bus-master DMA support present",
1989 sc->sc_wdcdev.sc_dev.dv_xname);
1990 pciide_mapreg_dma(sc, pa);
1991 printf("\n");
1992 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1993 WDC_CAPABILITY_MODE;
1994 if (sc->sc_dma_ok) {
1995 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1996 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1997 sc->sc_wdcdev.irqack = pciide_irqack;
1998 }
1999 sc->sc_wdcdev.PIO_cap = 4;
2000 sc->sc_wdcdev.DMA_cap = 2;
2001
2002 switch (sc->sc_pp->ide_product) {
2003 case PCI_PRODUCT_AMD_PBC766_IDE:
2004 case PCI_PRODUCT_AMD_PBC768_IDE:
2005 sc->sc_wdcdev.UDMA_cap = 5;
2006 break;
2007 default:
2008 sc->sc_wdcdev.UDMA_cap = 4;
2009 }
2010 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2011 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2012 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2013 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
2014
2015 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2016 DEBUG_PROBE);
2017 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2018 cp = &sc->pciide_channels[channel];
2019 if (pciide_chansetup(sc, channel, interface) == 0)
2020 continue;
2021
2022 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2023 printf("%s: %s channel ignored (disabled)\n",
2024 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2025 continue;
2026 }
2027 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2028 pciide_pci_intr);
2029
2030 if (pciide_chan_candisable(cp))
2031 chanenable &= ~AMD7X6_CHAN_EN(channel);
2032 pciide_map_compat_intr(pa, cp, channel, interface);
2033 if (cp->hw_ok == 0)
2034 continue;
2035
2036 amd7x6_setup_channel(&cp->wdc_channel);
2037 }
2038 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2039 chanenable);
2040 return;
2041 }
2042
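/*
 * Set up per-drive timings for one channel of an AMD 756/766/768
 * controller: pick Ultra-DMA, multiword DMA or PIO for each drive and
 * program the DATATIM and UDMA timing registers accordingly.
 */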
2043 void
2044 amd7x6_setup_channel(chp)
2045 struct channel_softc *chp;
2046 {
2047 u_int32_t udmatim_reg, datatim_reg;
2048 u_int8_t idedma_ctl;
2049 int mode, drive;
2050 struct ata_drive_datas *drvp;
2051 struct pciide_channel *cp = (struct pciide_channel*)chp;
2052 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2053 #ifndef PCIIDE_AMD756_ENABLEDMA
2054 int rev = PCI_REVISION(
2055 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2056 #endif
2057
2058 idedma_ctl = 0;
2059 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2060 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2061 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2062 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2063
2064 /* setup DMA if needed */
2065 pciide_channel_dma_setup(cp);
2066
2067 for (drive = 0; drive < 2; drive++) {
2068 drvp = &chp->ch_drive[drive];
2069 /* If no drive, skip */
2070 if ((drvp->drive_flags & DRIVE) == 0)
2071 continue;
2072 /* add timing values, setup DMA if needed */
2073 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2074 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2075 mode = drvp->PIO_mode;
2076 goto pio;
2077 }
2078 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2079 (drvp->drive_flags & DRIVE_UDMA)) {
2080 /* use Ultra/DMA */
2081 drvp->drive_flags &= ~DRIVE_DMA;
2082 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2083 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2084 AMD7X6_UDMA_TIME(chp->channel, drive,
2085 amd7x6_udma_tim[drvp->UDMA_mode]);
2086 /* can use PIO timings, MW DMA unused */
2087 mode = drvp->PIO_mode;
2088 } else {
2089 /* use Multiword DMA, but only if revision is OK */
2090 drvp->drive_flags &= ~DRIVE_UDMA;
2091 #ifndef PCIIDE_AMD756_ENABLEDMA
2092 			/*
2093 			 * The workaround doesn't seem to be necessary with
2094 			 * all drives, so it can be disabled by defining
2095 			 * PCIIDE_AMD756_ENABLEDMA.  The underlying bug causes
2096 			 * a hard hang if it is triggered.
2097 			 */
2098 if (sc->sc_pp->ide_product ==
2099 PCI_PRODUCT_AMD_PBC756_IDE &&
2100 AMD756_CHIPREV_DISABLEDMA(rev)) {
2101 printf("%s:%d:%d: multi-word DMA disabled due "
2102 "to chip revision\n",
2103 sc->sc_wdcdev.sc_dev.dv_xname,
2104 chp->channel, drive);
2105 mode = drvp->PIO_mode;
2106 drvp->drive_flags &= ~DRIVE_DMA;
2107 goto pio;
2108 }
2109 #endif
2110 /* mode = min(pio, dma+2) */
2111 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2112 mode = drvp->PIO_mode;
2113 else
2114 mode = drvp->DMA_mode + 2;
2115 }
2116 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2117
2118 pio: /* setup PIO mode */
2119 if (mode <= 2) {
2120 drvp->DMA_mode = 0;
2121 drvp->PIO_mode = 0;
2122 mode = 0;
2123 } else {
2124 drvp->PIO_mode = mode;
2125 drvp->DMA_mode = mode - 2;
2126 }
2127 datatim_reg |=
2128 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2129 amd7x6_pio_set[mode]) |
2130 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2131 amd7x6_pio_rec[mode]);
2132 }
2133 if (idedma_ctl != 0) {
2134 /* Add software bits in status register */
2135 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2136 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2137 idedma_ctl);
2138 }
2139 pciide_print_modes(cp);
2140 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2141 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2142 }
2143
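/*
 * Attach a VIA Apollo-family controller. The ID and revision of the
 * ISA bridge (function 0 of the same device) determine which UDMA
 * capability is advertised.
 */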
2144 void
2145 apollo_chip_map(sc, pa)
2146 struct pciide_softc *sc;
2147 struct pci_attach_args *pa;
2148 {
2149 struct pciide_channel *cp;
2150 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2151 int channel;
2152 u_int32_t ideconf;
2153 bus_size_t cmdsize, ctlsize;
2154 pcitag_t pcib_tag;
2155 pcireg_t pcib_id, pcib_class;
2156
2157 if (pciide_chipen(sc, pa) == 0)
2158 return;
2159 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2160 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2161 /* and read ID and rev of the ISA bridge */
2162 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2163 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2164 printf(": VIA Technologies ");
2165 switch (PCI_PRODUCT(pcib_id)) {
2166 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2167 printf("VT82C586 (Apollo VP) ");
2168 		if (PCI_REVISION(pcib_class) >= 0x02) {
2169 printf("ATA33 controller\n");
2170 sc->sc_wdcdev.UDMA_cap = 2;
2171 } else {
2172 printf("controller\n");
2173 sc->sc_wdcdev.UDMA_cap = 0;
2174 }
2175 break;
2176 case PCI_PRODUCT_VIATECH_VT82C596A:
2177 printf("VT82C596A (Apollo Pro) ");
2178 if (PCI_REVISION(pcib_class) >= 0x12) {
2179 printf("ATA66 controller\n");
2180 sc->sc_wdcdev.UDMA_cap = 4;
2181 } else {
2182 printf("ATA33 controller\n");
2183 sc->sc_wdcdev.UDMA_cap = 2;
2184 }
2185 break;
2186 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2187 printf("VT82C686A (Apollo KX133) ");
2188 if (PCI_REVISION(pcib_class) >= 0x40) {
2189 printf("ATA100 controller\n");
2190 sc->sc_wdcdev.UDMA_cap = 5;
2191 } else {
2192 printf("ATA66 controller\n");
2193 sc->sc_wdcdev.UDMA_cap = 4;
2194 }
2195 break;
2196 case PCI_PRODUCT_VIATECH_VT8231:
2197 printf("VT8231 ATA100 controller\n");
2198 sc->sc_wdcdev.UDMA_cap = 5;
2199 break;
2200 case PCI_PRODUCT_VIATECH_VT8233:
2201 printf("VT8233 ATA100 controller\n");
2202 sc->sc_wdcdev.UDMA_cap = 5;
2203 break;
2204 case PCI_PRODUCT_VIATECH_VT8233A:
2205 printf("VT8233A ATA133 controller\n");
2206 sc->sc_wdcdev.UDMA_cap = 6;
2207 break;
2208 case PCI_PRODUCT_VIATECH_VT8235:
2209 printf("VT8235 ATA133 controller\n");
2210 sc->sc_wdcdev.UDMA_cap = 6;
2211 break;
2212 default:
2213 printf("unknown ATA controller\n");
2214 sc->sc_wdcdev.UDMA_cap = 0;
2215 }
2216
2217 printf("%s: bus-master DMA support present",
2218 sc->sc_wdcdev.sc_dev.dv_xname);
2219 pciide_mapreg_dma(sc, pa);
2220 printf("\n");
2221 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2222 WDC_CAPABILITY_MODE;
2223 if (sc->sc_dma_ok) {
2224 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2225 sc->sc_wdcdev.irqack = pciide_irqack;
2226 if (sc->sc_wdcdev.UDMA_cap > 0)
2227 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2228 }
2229 sc->sc_wdcdev.PIO_cap = 4;
2230 sc->sc_wdcdev.DMA_cap = 2;
2231 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2232 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2233 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2234
2235 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2236 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2237 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2238 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2239 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2240 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2241 DEBUG_PROBE);
2242
2243 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2244 cp = &sc->pciide_channels[channel];
2245 if (pciide_chansetup(sc, channel, interface) == 0)
2246 continue;
2247
2248 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2249 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2250 printf("%s: %s channel ignored (disabled)\n",
2251 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2252 continue;
2253 }
2254 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2255 pciide_pci_intr);
2256 if (cp->hw_ok == 0)
2257 continue;
2258 if (pciide_chan_candisable(cp)) {
2259 ideconf &= ~APO_IDECONF_EN(channel);
2260 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2261 ideconf);
2262 }
2263 pciide_map_compat_intr(pa, cp, channel, interface);
2264
2265 if (cp->hw_ok == 0)
2266 continue;
2267 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2268 }
2269 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2270 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2271 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2272 }
2273
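/*
 * Set up per-drive timings for one channel of a VIA Apollo controller,
 * selecting the UDMA timing table that matches the controller's
 * UDMA capability.
 */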
2274 void
2275 apollo_setup_channel(chp)
2276 struct channel_softc *chp;
2277 {
2278 u_int32_t udmatim_reg, datatim_reg;
2279 u_int8_t idedma_ctl;
2280 int mode, drive;
2281 struct ata_drive_datas *drvp;
2282 struct pciide_channel *cp = (struct pciide_channel*)chp;
2283 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2284
2285 idedma_ctl = 0;
2286 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2287 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2288 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2289 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2290
2291 /* setup DMA if needed */
2292 pciide_channel_dma_setup(cp);
2293
2294 for (drive = 0; drive < 2; drive++) {
2295 drvp = &chp->ch_drive[drive];
2296 /* If no drive, skip */
2297 if ((drvp->drive_flags & DRIVE) == 0)
2298 continue;
2299 /* add timing values, setup DMA if needed */
2300 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2301 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2302 mode = drvp->PIO_mode;
2303 goto pio;
2304 }
2305 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2306 (drvp->drive_flags & DRIVE_UDMA)) {
2307 /* use Ultra/DMA */
2308 drvp->drive_flags &= ~DRIVE_DMA;
2309 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2310 APO_UDMA_EN_MTH(chp->channel, drive);
2311 if (sc->sc_wdcdev.UDMA_cap == 6) {
2312 /* 8233a */
2313 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2314 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2315 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2316 /* 686b */
2317 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2318 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2319 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2320 /* 596b or 686a */
2321 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2322 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2323 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2324 } else {
2325 /* 596a or 586b */
2326 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2327 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2328 }
2329 /* can use PIO timings, MW DMA unused */
2330 mode = drvp->PIO_mode;
2331 } else {
2332 /* use Multiword DMA */
2333 drvp->drive_flags &= ~DRIVE_UDMA;
2334 /* mode = min(pio, dma+2) */
2335 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2336 mode = drvp->PIO_mode;
2337 else
2338 mode = drvp->DMA_mode + 2;
2339 }
2340 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2341
2342 pio: /* setup PIO mode */
2343 if (mode <= 2) {
2344 drvp->DMA_mode = 0;
2345 drvp->PIO_mode = 0;
2346 mode = 0;
2347 } else {
2348 drvp->PIO_mode = mode;
2349 drvp->DMA_mode = mode - 2;
2350 }
2351 datatim_reg |=
2352 APO_DATATIM_PULSE(chp->channel, drive,
2353 apollo_pio_set[mode]) |
2354 APO_DATATIM_RECOV(chp->channel, drive,
2355 apollo_pio_rec[mode]);
2356 }
2357 if (idedma_ctl != 0) {
2358 /* Add software bits in status register */
2359 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2360 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2361 idedma_ctl);
2362 }
2363 pciide_print_modes(cp);
2364 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2365 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2366 }
2367
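/*
 * Map one channel of a CMD 064x controller. The command queue is shared
 * with channel 0 on chips without independent channels, and a second
 * channel is only usable when CMD_CTRL_2PORT is set.
 */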
2368 void
2369 cmd_channel_map(pa, sc, channel)
2370 struct pci_attach_args *pa;
2371 struct pciide_softc *sc;
2372 int channel;
2373 {
2374 struct pciide_channel *cp = &sc->pciide_channels[channel];
2375 bus_size_t cmdsize, ctlsize;
2376 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2377 int interface, one_channel;
2378
2379 	/*
2380 	 * The 0648/0649 can be told to identify as a RAID controller.
2381 	 * In this case, we have to fake the interface value.
2382 	 */
2383 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2384 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2385 PCIIDE_INTERFACE_SETTABLE(1);
2386 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2387 CMD_CONF_DSA1)
2388 interface |= PCIIDE_INTERFACE_PCI(0) |
2389 PCIIDE_INTERFACE_PCI(1);
2390 } else {
2391 interface = PCI_INTERFACE(pa->pa_class);
2392 }
2393
2394 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2395 cp->name = PCIIDE_CHANNEL_NAME(channel);
2396 cp->wdc_channel.channel = channel;
2397 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2398
2399 	/*
2400 	 * Older CMD64x chips don't have independent channels.
2401 	 */
2402 switch (sc->sc_pp->ide_product) {
2403 case PCI_PRODUCT_CMDTECH_649:
2404 one_channel = 0;
2405 break;
2406 default:
2407 one_channel = 1;
2408 break;
2409 }
2410
2411 if (channel > 0 && one_channel) {
2412 cp->wdc_channel.ch_queue =
2413 sc->pciide_channels[0].wdc_channel.ch_queue;
2414 } else {
2415 cp->wdc_channel.ch_queue =
2416 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2417 }
2418 if (cp->wdc_channel.ch_queue == NULL) {
2419 printf("%s %s channel: "
2420 		    "can't allocate memory for command queue\n",
2421 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2422 return;
2423 }
2424
2425 printf("%s: %s channel %s to %s mode\n",
2426 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2427 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2428 "configured" : "wired",
2429 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2430 "native-PCI" : "compatibility");
2431
2432 /*
2433 	 * With a CMD PCI64x, if we get here, the first channel is enabled:
2434 	 * there's no way to disable the first channel without disabling
2435 	 * the whole device.
2436 */
2437 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2438 printf("%s: %s channel ignored (disabled)\n",
2439 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2440 return;
2441 }
2442
2443 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2444 if (cp->hw_ok == 0)
2445 return;
2446 if (channel == 1) {
2447 if (pciide_chan_candisable(cp)) {
2448 ctrl &= ~CMD_CTRL_2PORT;
2449 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2450 CMD_CTRL, ctrl);
2451 }
2452 }
2453 pciide_map_compat_intr(pa, cp, channel, interface);
2454 }
2455
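/*
 * PCI interrupt handler for CMD controllers: check the per-channel
 * interrupt bits in CMD_CONF and CMD_ARTTIM23 before dispatching
 * to wdcintr().
 */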
2456 int
2457 cmd_pci_intr(arg)
2458 void *arg;
2459 {
2460 struct pciide_softc *sc = arg;
2461 struct pciide_channel *cp;
2462 struct channel_softc *wdc_cp;
2463 int i, rv, crv;
2464 u_int32_t priirq, secirq;
2465
2466 rv = 0;
2467 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2468 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2469 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2470 cp = &sc->pciide_channels[i];
2471 wdc_cp = &cp->wdc_channel;
2472 /* If a compat channel skip. */
2473 if (cp->compat)
2474 continue;
2475 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2476 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2477 crv = wdcintr(wdc_cp);
2478 if (crv == 0)
2479 printf("%s:%d: bogus intr\n",
2480 sc->sc_wdcdev.sc_dev.dv_xname, i);
2481 else
2482 rv = 1;
2483 }
2484 }
2485 return rv;
2486 }
2487
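/*
 * Generic attach for CMD PCI064x controllers: DMA is not supported
 * here, so both channels are mapped for PIO only.
 */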
2488 void
2489 cmd_chip_map(sc, pa)
2490 struct pciide_softc *sc;
2491 struct pci_attach_args *pa;
2492 {
2493 int channel;
2494
2495 	/*
2496 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2497 	 * and the base address registers can be disabled at the
2498 	 * hardware level. In this case, the device is wired
2499 	 * in compat mode and its first channel is always enabled,
2500 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2501 	 * In fact, it seems that the first channel of the CMD PCI0640
2502 	 * can't be disabled.
2503 	 */
2504
2505 #ifdef PCIIDE_CMD064x_DISABLE
2506 if (pciide_chipen(sc, pa) == 0)
2507 return;
2508 #endif
2509
2510 printf("%s: hardware does not support DMA\n",
2511 sc->sc_wdcdev.sc_dev.dv_xname);
2512 sc->sc_dma_ok = 0;
2513
2514 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2515 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2516 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2517
2518 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2519 cmd_channel_map(pa, sc, channel);
2520 }
2521 }
2522
2523 void
2524 cmd0643_9_chip_map(sc, pa)
2525 struct pciide_softc *sc;
2526 struct pci_attach_args *pa;
2527 {
2528 struct pciide_channel *cp;
2529 int channel;
2530 pcireg_t rev = PCI_REVISION(pa->pa_class);
2531
2532 	/*
2533 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2534 	 * and the base address registers can be disabled at the
2535 	 * hardware level. In this case, the device is wired
2536 	 * in compat mode and its first channel is always enabled,
2537 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2538 	 * In fact, it seems that the first channel of the CMD PCI0640
2539 	 * can't be disabled.
2540 	 */
2541
2542 #ifdef PCIIDE_CMD064x_DISABLE
2543 if (pciide_chipen(sc, pa) == 0)
2544 return;
2545 #endif
2546 printf("%s: bus-master DMA support present",
2547 sc->sc_wdcdev.sc_dev.dv_xname);
2548 pciide_mapreg_dma(sc, pa);
2549 printf("\n");
2550 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2551 WDC_CAPABILITY_MODE;
2552 if (sc->sc_dma_ok) {
2553 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2554 switch (sc->sc_pp->ide_product) {
2555 case PCI_PRODUCT_CMDTECH_649:
2556 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2557 sc->sc_wdcdev.UDMA_cap = 5;
2558 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2559 break;
2560 case PCI_PRODUCT_CMDTECH_648:
2561 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2562 sc->sc_wdcdev.UDMA_cap = 4;
2563 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2564 break;
2565 case PCI_PRODUCT_CMDTECH_646:
2566 if (rev >= CMD0646U2_REV) {
2567 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2568 sc->sc_wdcdev.UDMA_cap = 2;
2569 } else if (rev >= CMD0646U_REV) {
2570 /*
2571 * Linux's driver claims that the 646U is broken
2572 * with UDMA. Only enable it if we know what we're
2573 				 * doing.
2574 */
2575 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2576 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2577 sc->sc_wdcdev.UDMA_cap = 2;
2578 #endif
2579 /* explicitly disable UDMA */
2580 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2581 CMD_UDMATIM(0), 0);
2582 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2583 CMD_UDMATIM(1), 0);
2584 }
2585 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2586 break;
2587 default:
2588 sc->sc_wdcdev.irqack = pciide_irqack;
2589 }
2590 }
2591
2592 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2593 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2594 sc->sc_wdcdev.PIO_cap = 4;
2595 sc->sc_wdcdev.DMA_cap = 2;
2596 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2597
2598 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2599 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2600 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2601 DEBUG_PROBE);
2602
2603 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2604 cp = &sc->pciide_channels[channel];
2605 cmd_channel_map(pa, sc, channel);
2606 if (cp->hw_ok == 0)
2607 continue;
2608 cmd0643_9_setup_channel(&cp->wdc_channel);
2609 }
2610 	/*
2611 	 * Note: this also makes sure we clear the IRQ disable and reset
2612 	 * bits.
2613 	 */
2614 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2615 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2616 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2617 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2618 DEBUG_PROBE);
2619 }
2620
2621 void
2622 cmd0643_9_setup_channel(chp)
2623 struct channel_softc *chp;
2624 {
2625 struct ata_drive_datas *drvp;
2626 u_int8_t tim;
2627 u_int32_t idedma_ctl, udma_reg;
2628 int drive;
2629 struct pciide_channel *cp = (struct pciide_channel*)chp;
2630 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2631
2632 idedma_ctl = 0;
2633 /* setup DMA if needed */
2634 pciide_channel_dma_setup(cp);
2635
2636 for (drive = 0; drive < 2; drive++) {
2637 drvp = &chp->ch_drive[drive];
2638 /* If no drive, skip */
2639 if ((drvp->drive_flags & DRIVE) == 0)
2640 continue;
2641 /* add timing values, setup DMA if needed */
2642 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2643 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2644 if (drvp->drive_flags & DRIVE_UDMA) {
2645 /* UltraDMA on a 646U2, 0648 or 0649 */
2646 drvp->drive_flags &= ~DRIVE_DMA;
2647 udma_reg = pciide_pci_read(sc->sc_pc,
2648 sc->sc_tag, CMD_UDMATIM(chp->channel));
2649 if (drvp->UDMA_mode > 2 &&
2650 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2651 CMD_BICSR) &
2652 CMD_BICSR_80(chp->channel)) == 0)
2653 drvp->UDMA_mode = 2;
2654 if (drvp->UDMA_mode > 2)
2655 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2656 else if (sc->sc_wdcdev.UDMA_cap > 2)
2657 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2658 udma_reg |= CMD_UDMATIM_UDMA(drive);
2659 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2660 CMD_UDMATIM_TIM_OFF(drive));
2661 udma_reg |=
2662 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2663 CMD_UDMATIM_TIM_OFF(drive));
2664 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2665 CMD_UDMATIM(chp->channel), udma_reg);
2666 } else {
2667 				/*
2668 				 * Use multiword DMA.
2669 				 * Timings will be used for both PIO and DMA,
2670 				 * so adjust the DMA mode if needed.
2671 				 * If we have a 0646U2/8/9, turn off UDMA.
2672 				 */
2673 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2674 udma_reg = pciide_pci_read(sc->sc_pc,
2675 sc->sc_tag,
2676 CMD_UDMATIM(chp->channel));
2677 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2678 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2679 CMD_UDMATIM(chp->channel),
2680 udma_reg);
2681 }
2682 if (drvp->PIO_mode >= 3 &&
2683 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2684 drvp->DMA_mode = drvp->PIO_mode - 2;
2685 }
2686 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2687 }
2688 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2689 }
2690 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2691 CMD_DATA_TIM(chp->channel, drive), tim);
2692 }
2693 if (idedma_ctl != 0) {
2694 /* Add software bits in status register */
2695 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2696 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2697 idedma_ctl);
2698 }
2699 pciide_print_modes(cp);
2700 }
2701
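/*
 * Interrupt acknowledge for CMD 0646/0648/0649: write back the value
 * read from CMD_CONF (channel 0) or CMD_ARTTIM23 (channel 1) to clear
 * the latched interrupt, then do the generic pciide acknowledge.
 */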
2702 void
2703 cmd646_9_irqack(chp)
2704 struct channel_softc *chp;
2705 {
2706 u_int32_t priirq, secirq;
2707 struct pciide_channel *cp = (struct pciide_channel*)chp;
2708 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2709
2710 if (chp->channel == 0) {
2711 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2712 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2713 } else {
2714 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2715 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2716 }
2717 pciide_irqack(chp);
2718 }
2719
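/*
 * Attach a Cypress CY82C693. Each IDE channel is a separate PCI
 * function, so sc_cy_compatchan records which compat channel this
 * function drives and only one wdc channel is attached per instance.
 */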
2720 void
2721 cy693_chip_map(sc, pa)
2722 struct pciide_softc *sc;
2723 struct pci_attach_args *pa;
2724 {
2725 struct pciide_channel *cp;
2726 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2727 bus_size_t cmdsize, ctlsize;
2728
2729 if (pciide_chipen(sc, pa) == 0)
2730 return;
2731 	/*
2732 	 * This chip has 2 PCI IDE functions, one for the primary and one
2733 	 * for the secondary channel, so we need to call
2734 	 * pciide_mapregs_compat() with the real channel.
2735 	 */
2736 if (pa->pa_function == 1) {
2737 sc->sc_cy_compatchan = 0;
2738 } else if (pa->pa_function == 2) {
2739 sc->sc_cy_compatchan = 1;
2740 } else {
2741 printf("%s: unexpected PCI function %d\n",
2742 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2743 return;
2744 }
2745 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2746 printf("%s: bus-master DMA support present",
2747 sc->sc_wdcdev.sc_dev.dv_xname);
2748 pciide_mapreg_dma(sc, pa);
2749 } else {
2750 printf("%s: hardware does not support DMA",
2751 sc->sc_wdcdev.sc_dev.dv_xname);
2752 sc->sc_dma_ok = 0;
2753 }
2754 printf("\n");
2755
2756 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2757 if (sc->sc_cy_handle == NULL) {
2758 printf("%s: unable to map hyperCache control registers\n",
2759 sc->sc_wdcdev.sc_dev.dv_xname);
2760 sc->sc_dma_ok = 0;
2761 }
2762
2763 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2764 WDC_CAPABILITY_MODE;
2765 if (sc->sc_dma_ok) {
2766 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2767 sc->sc_wdcdev.irqack = pciide_irqack;
2768 }
2769 sc->sc_wdcdev.PIO_cap = 4;
2770 sc->sc_wdcdev.DMA_cap = 2;
2771 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2772
2773 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2774 sc->sc_wdcdev.nchannels = 1;
2775
2776 /* Only one channel for this chip; if we are here it's enabled */
2777 cp = &sc->pciide_channels[0];
2778 sc->wdc_chanarray[0] = &cp->wdc_channel;
2779 cp->name = PCIIDE_CHANNEL_NAME(0);
2780 cp->wdc_channel.channel = 0;
2781 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2782 cp->wdc_channel.ch_queue =
2783 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2784 if (cp->wdc_channel.ch_queue == NULL) {
2785 printf("%s primary channel: "
2786 		    "can't allocate memory for command queue\n",
2787 sc->sc_wdcdev.sc_dev.dv_xname);
2788 return;
2789 }
2790 printf("%s: primary channel %s to ",
2791 sc->sc_wdcdev.sc_dev.dv_xname,
2792 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2793 "configured" : "wired");
2794 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2795 printf("native-PCI");
2796 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2797 pciide_pci_intr);
2798 } else {
2799 printf("compatibility");
2800 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2801 &cmdsize, &ctlsize);
2802 }
2803 printf(" mode\n");
2804 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2805 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2806 wdcattach(&cp->wdc_channel);
2807 if (pciide_chan_candisable(cp)) {
2808 pci_conf_write(sc->sc_pc, sc->sc_tag,
2809 PCI_COMMAND_STATUS_REG, 0);
2810 }
2811 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2812 if (cp->hw_ok == 0)
2813 return;
2814 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2815 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2816 cy693_setup_channel(&cp->wdc_channel);
2817 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2818 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2819 }
2820
2821 void
2822 cy693_setup_channel(chp)
2823 struct channel_softc *chp;
2824 {
2825 struct ata_drive_datas *drvp;
2826 int drive;
2827 u_int32_t cy_cmd_ctrl;
2828 u_int32_t idedma_ctl;
2829 struct pciide_channel *cp = (struct pciide_channel*)chp;
2830 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2831 int dma_mode = -1;
2832
2833 cy_cmd_ctrl = idedma_ctl = 0;
2834
2835 /* setup DMA if needed */
2836 pciide_channel_dma_setup(cp);
2837
2838 for (drive = 0; drive < 2; drive++) {
2839 drvp = &chp->ch_drive[drive];
2840 /* If no drive, skip */
2841 if ((drvp->drive_flags & DRIVE) == 0)
2842 continue;
2843 /* add timing values, setup DMA if needed */
2844 if (drvp->drive_flags & DRIVE_DMA) {
2845 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2846 /* use Multiword DMA */
2847 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2848 dma_mode = drvp->DMA_mode;
2849 }
2850 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2851 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2852 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2853 CY_CMD_CTRL_IOW_REC_OFF(drive));
2854 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2855 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2856 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2857 CY_CMD_CTRL_IOR_REC_OFF(drive));
2858 }
2859 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2860 chp->ch_drive[0].DMA_mode = dma_mode;
2861 chp->ch_drive[1].DMA_mode = dma_mode;
2862
2863 if (dma_mode == -1)
2864 dma_mode = 0;
2865
2866 if (sc->sc_cy_handle != NULL) {
2867 /* Note: `multiple' is implied. */
2868 cy82c693_write(sc->sc_cy_handle,
2869 (sc->sc_cy_compatchan == 0) ?
2870 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2871 }
2872
2873 pciide_print_modes(cp);
2874
2875 if (idedma_ctl != 0) {
2876 /* Add software bits in status register */
2877 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2878 IDEDMA_CTL, idedma_ctl);
2879 }
2880 }
2881
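/*
 * Match function passed to pci_find_device(): true for SiS 645/650/730/735
 * host bridges. sis_chip_map() uses this to pick UDMA/100 over UDMA/33.
 */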
2882 static int
2883 sis_hostbr_match(pa)
2884 struct pci_attach_args *pa;
2885 {
2886 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2887 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2888 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2889 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2890 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2891 }
2892
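/*
 * Attach a SiS IDE controller. UDMA is only enabled on revision 0xd0
 * and later, and the UDMA capability depends on which host bridge
 * pci_find_device() finds.
 */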
2893 void
2894 sis_chip_map(sc, pa)
2895 struct pciide_softc *sc;
2896 struct pci_attach_args *pa;
2897 {
2898 struct pciide_channel *cp;
2899 int channel;
2900 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2901 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2902 pcireg_t rev = PCI_REVISION(pa->pa_class);
2903 bus_size_t cmdsize, ctlsize;
2904 pcitag_t pchb_tag;
2905 pcireg_t pchb_id, pchb_class;
2906
2907 if (pciide_chipen(sc, pa) == 0)
2908 return;
2909 printf("%s: bus-master DMA support present",
2910 sc->sc_wdcdev.sc_dev.dv_xname);
2911 pciide_mapreg_dma(sc, pa);
2912 printf("\n");
2913
2914 /* get a PCI tag for the host bridge (function 0 of the same device) */
2915 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2916 /* and read ID and rev of the ISA bridge */
2917 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2918 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2919
2920 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2921 WDC_CAPABILITY_MODE;
2922 if (sc->sc_dma_ok) {
2923 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2924 sc->sc_wdcdev.irqack = pciide_irqack;
2925 		/*
2926 		 * Controllers associated with a rev 0x2 SiS 530 host-to-PCI
2927 		 * bridge have problems with UDMA (info provided by Christos).
2928 		 */
2929 if (rev >= 0xd0 &&
2930 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2931 PCI_REVISION(pchb_class) >= 0x03))
2932 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2933 }
2934
2935 sc->sc_wdcdev.PIO_cap = 4;
2936 sc->sc_wdcdev.DMA_cap = 2;
2937 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2938 		/*
2939 		 * Use UDMA/100 if the host bridge is a SiS 645/650/730/735
2940 		 * and UDMA/33 on other chipsets.
2941 		 */
2942 sc->sc_wdcdev.UDMA_cap =
2943 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2944 sc->sc_wdcdev.set_modes = sis_setup_channel;
2945
2946 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2947 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2948
2949 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2950 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2951 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2952
2953 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2954 cp = &sc->pciide_channels[channel];
2955 if (pciide_chansetup(sc, channel, interface) == 0)
2956 continue;
2957 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2958 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2959 printf("%s: %s channel ignored (disabled)\n",
2960 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2961 continue;
2962 }
2963 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2964 pciide_pci_intr);
2965 if (cp->hw_ok == 0)
2966 continue;
2967 if (pciide_chan_candisable(cp)) {
2968 if (channel == 0)
2969 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2970 else
2971 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2972 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2973 sis_ctr0);
2974 }
2975 pciide_map_compat_intr(pa, cp, channel, interface);
2976 if (cp->hw_ok == 0)
2977 continue;
2978 sis_setup_channel(&cp->wdc_channel);
2979 }
2980 }
2981
2982 void
2983 sis_setup_channel(chp)
2984 struct channel_softc *chp;
2985 {
2986 struct ata_drive_datas *drvp;
2987 int drive;
2988 u_int32_t sis_tim;
2989 u_int32_t idedma_ctl;
2990 struct pciide_channel *cp = (struct pciide_channel*)chp;
2991 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2992
2993 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2994 "channel %d 0x%x\n", chp->channel,
2995 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2996 DEBUG_PROBE);
2997 sis_tim = 0;
2998 idedma_ctl = 0;
2999 /* setup DMA if needed */
3000 pciide_channel_dma_setup(cp);
3001
3002 for (drive = 0; drive < 2; drive++) {
3003 drvp = &chp->ch_drive[drive];
3004 /* If no drive, skip */
3005 if ((drvp->drive_flags & DRIVE) == 0)
3006 continue;
3007 /* add timing values, setup DMA if needed */
3008 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3009 (drvp->drive_flags & DRIVE_UDMA) == 0)
3010 goto pio;
3011
3012 if (drvp->drive_flags & DRIVE_UDMA) {
3013 /* use Ultra/DMA */
3014 drvp->drive_flags &= ~DRIVE_DMA;
3015 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3016 SIS_TIM_UDMA_TIME_OFF(drive);
3017 sis_tim |= SIS_TIM_UDMA_EN(drive);
3018 } else {
3019 			/*
3020 			 * Use multiword DMA.
3021 			 * Timings will be used for both PIO and DMA,
3022 			 * so adjust the DMA mode if needed.
3023 			 */
3024 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3025 drvp->PIO_mode = drvp->DMA_mode + 2;
3026 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3027 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3028 drvp->PIO_mode - 2 : 0;
3029 if (drvp->DMA_mode == 0)
3030 drvp->PIO_mode = 0;
3031 }
3032 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3033 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3034 SIS_TIM_ACT_OFF(drive);
3035 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3036 SIS_TIM_REC_OFF(drive);
3037 }
3038 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3039 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3040 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3041 if (idedma_ctl != 0) {
3042 /* Add software bits in status register */
3043 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3044 IDEDMA_CTL, idedma_ctl);
3045 }
3046 pciide_print_modes(cp);
3047 }
3048
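/*
 * Attach an Acer Labs IDE controller. The chip revision selects the
 * UDMA capability and whether cable detection is available.
 */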
3049 void
3050 acer_chip_map(sc, pa)
3051 struct pciide_softc *sc;
3052 struct pci_attach_args *pa;
3053 {
3054 struct pciide_channel *cp;
3055 int channel;
3056 pcireg_t cr, interface;
3057 bus_size_t cmdsize, ctlsize;
3058 pcireg_t rev = PCI_REVISION(pa->pa_class);
3059
3060 if (pciide_chipen(sc, pa) == 0)
3061 return;
3062 printf("%s: bus-master DMA support present",
3063 sc->sc_wdcdev.sc_dev.dv_xname);
3064 pciide_mapreg_dma(sc, pa);
3065 printf("\n");
3066 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3067 WDC_CAPABILITY_MODE;
3068 if (sc->sc_dma_ok) {
3069 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3070 if (rev >= 0x20) {
3071 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3072 if (rev >= 0xC4)
3073 sc->sc_wdcdev.UDMA_cap = 5;
3074 else if (rev >= 0xC2)
3075 sc->sc_wdcdev.UDMA_cap = 4;
3076 else
3077 sc->sc_wdcdev.UDMA_cap = 2;
3078 }
3079 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3080 sc->sc_wdcdev.irqack = pciide_irqack;
3081 }
3082
3083 sc->sc_wdcdev.PIO_cap = 4;
3084 sc->sc_wdcdev.DMA_cap = 2;
3085 sc->sc_wdcdev.set_modes = acer_setup_channel;
3086 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3087 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3088
3089 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3090 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3091 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3092
3093 /* Enable "microsoft register bits" R/W. */
3094 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3095 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3096 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3097 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3098 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3099 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3100 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3101 ~ACER_CHANSTATUSREGS_RO);
3102 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3103 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3104 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3105 /* Don't use cr, re-read the real register content instead */
3106 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3107 PCI_CLASS_REG));
3108
3109 /* From linux: enable "Cable Detection" */
3110 if (rev >= 0xC2) {
3111 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3112 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3113 | ACER_0x4B_CDETECT);
3114 }
3115
3116 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3117 cp = &sc->pciide_channels[channel];
3118 if (pciide_chansetup(sc, channel, interface) == 0)
3119 continue;
3120 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3121 printf("%s: %s channel ignored (disabled)\n",
3122 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3123 continue;
3124 }
3125 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3126 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3127 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3128 if (cp->hw_ok == 0)
3129 continue;
3130 if (pciide_chan_candisable(cp)) {
3131 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3132 pci_conf_write(sc->sc_pc, sc->sc_tag,
3133 PCI_CLASS_REG, cr);
3134 }
3135 pciide_map_compat_intr(pa, cp, channel, interface);
3136 acer_setup_channel(&cp->wdc_channel);
3137 }
3138 }
3139
3140 void
3141 acer_setup_channel(chp)
3142 struct channel_softc *chp;
3143 {
3144 struct ata_drive_datas *drvp;
3145 int drive;
3146 u_int32_t acer_fifo_udma;
3147 u_int32_t idedma_ctl;
3148 struct pciide_channel *cp = (struct pciide_channel*)chp;
3149 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3150
3151 idedma_ctl = 0;
3152 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3153 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3154 acer_fifo_udma), DEBUG_PROBE);
3155 /* setup DMA if needed */
3156 pciide_channel_dma_setup(cp);
3157
3158 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3159 	    DRIVE_UDMA) { /* check for 80-pin cable */
3160 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3161 ACER_0x4A_80PIN(chp->channel)) {
3162 if (chp->ch_drive[0].UDMA_mode > 2)
3163 chp->ch_drive[0].UDMA_mode = 2;
3164 if (chp->ch_drive[1].UDMA_mode > 2)
3165 chp->ch_drive[1].UDMA_mode = 2;
3166 }
3167 }
3168
3169 for (drive = 0; drive < 2; drive++) {
3170 drvp = &chp->ch_drive[drive];
3171 /* If no drive, skip */
3172 if ((drvp->drive_flags & DRIVE) == 0)
3173 continue;
3174 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3175 "channel %d drive %d 0x%x\n", chp->channel, drive,
3176 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3177 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3178 /* clear FIFO/DMA mode */
3179 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3180 ACER_UDMA_EN(chp->channel, drive) |
3181 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3182
3183 /* add timing values, setup DMA if needed */
3184 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3185 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3186 acer_fifo_udma |=
3187 ACER_FTH_OPL(chp->channel, drive, 0x1);
3188 goto pio;
3189 }
3190
3191 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3192 if (drvp->drive_flags & DRIVE_UDMA) {
3193 /* use Ultra/DMA */
3194 drvp->drive_flags &= ~DRIVE_DMA;
3195 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3196 acer_fifo_udma |=
3197 ACER_UDMA_TIM(chp->channel, drive,
3198 acer_udma[drvp->UDMA_mode]);
3199 /* XXX disable if one drive < UDMA3 ? */
3200 if (drvp->UDMA_mode >= 3) {
3201 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3202 ACER_0x4B,
3203 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3204 ACER_0x4B) | ACER_0x4B_UDMA66);
3205 }
3206 } else {
3207 			/*
3208 			 * Use multiword DMA.
3209 			 * Timings will be used for both PIO and DMA,
3210 			 * so adjust the DMA mode if needed.
3211 			 */
3212 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3213 drvp->PIO_mode = drvp->DMA_mode + 2;
3214 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3215 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3216 drvp->PIO_mode - 2 : 0;
3217 if (drvp->DMA_mode == 0)
3218 drvp->PIO_mode = 0;
3219 }
3220 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3221 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3222 ACER_IDETIM(chp->channel, drive),
3223 acer_pio[drvp->PIO_mode]);
3224 }
3225 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3226 acer_fifo_udma), DEBUG_PROBE);
3227 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3228 if (idedma_ctl != 0) {
3229 /* Add software bits in status register */
3230 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3231 IDEDMA_CTL, idedma_ctl);
3232 }
3233 pciide_print_modes(cp);
3234 }
3235
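/*
 * PCI interrupt handler for Acer controllers: the ACER_CHIDS register
 * reports which channel raised the interrupt.
 */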
3236 int
3237 acer_pci_intr(arg)
3238 void *arg;
3239 {
3240 struct pciide_softc *sc = arg;
3241 struct pciide_channel *cp;
3242 struct channel_softc *wdc_cp;
3243 int i, rv, crv;
3244 u_int32_t chids;
3245
3246 rv = 0;
3247 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3248 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3249 cp = &sc->pciide_channels[i];
3250 wdc_cp = &cp->wdc_channel;
3251 /* If a compat channel skip. */
3252 if (cp->compat)
3253 continue;
3254 if (chids & ACER_CHIDS_INT(i)) {
3255 crv = wdcintr(wdc_cp);
3256 if (crv == 0)
3257 printf("%s:%d: bogus intr\n",
3258 sc->sc_wdcdev.sc_dev.dv_xname, i);
3259 else
3260 rv = 1;
3261 }
3262 }
3263 return rv;
3264 }
3265
3266 void
3267 hpt_chip_map(sc, pa)
3268 struct pciide_softc *sc;
3269 struct pci_attach_args *pa;
3270 {
3271 struct pciide_channel *cp;
3272 int i, compatchan, revision;
3273 pcireg_t interface;
3274 bus_size_t cmdsize, ctlsize;
3275
3276 if (pciide_chipen(sc, pa) == 0)
3277 return;
3278 revision = PCI_REVISION(pa->pa_class);
3279 printf(": Triones/Highpoint ");
3280 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3281 printf("HPT374 IDE Controller\n");
3282 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3283 printf("HPT372 IDE Controller\n");
3284 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3285 if (revision == HPT372_REV)
3286 printf("HPT372 IDE Controller\n");
3287 else if (revision == HPT370_REV)
3288 printf("HPT370 IDE Controller\n");
3289 else if (revision == HPT370A_REV)
3290 printf("HPT370A IDE Controller\n");
3291 else if (revision == HPT366_REV)
3292 printf("HPT366 IDE Controller\n");
3293 else
3294 printf("unknown HPT IDE controller rev %d\n", revision);
3295 } else
3296 printf("unknown HPT IDE controller 0x%x\n",
3297 sc->sc_pp->ide_product);
3298
3299 	/*
3300 	 * When the chip is in native mode it identifies itself as a
3301 	 * 'misc mass storage' device. Fake the interface value in this case.
3302 	 */
3303 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3304 interface = PCI_INTERFACE(pa->pa_class);
3305 } else {
3306 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3307 PCIIDE_INTERFACE_PCI(0);
3308 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3309 (revision == HPT370_REV || revision == HPT370A_REV ||
3310 revision == HPT372_REV)) ||
3311 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3312 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3313 interface |= PCIIDE_INTERFACE_PCI(1);
3314 }
3315
3316 printf("%s: bus-master DMA support present",
3317 sc->sc_wdcdev.sc_dev.dv_xname);
3318 pciide_mapreg_dma(sc, pa);
3319 printf("\n");
3320 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3321 WDC_CAPABILITY_MODE;
3322 if (sc->sc_dma_ok) {
3323 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3324 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3325 sc->sc_wdcdev.irqack = pciide_irqack;
3326 }
3327 sc->sc_wdcdev.PIO_cap = 4;
3328 sc->sc_wdcdev.DMA_cap = 2;
3329
3330 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3331 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3332 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3333 revision == HPT366_REV) {
3334 sc->sc_wdcdev.UDMA_cap = 4;
3335 		/*
3336 		 * The 366 has 2 PCI IDE functions, one for the primary and
3337 		 * one for the secondary channel, so we need to call
3338 		 * pciide_mapregs_compat() with the real channel.
3339 		 */
3340 if (pa->pa_function == 0) {
3341 compatchan = 0;
3342 } else if (pa->pa_function == 1) {
3343 compatchan = 1;
3344 } else {
3345 printf("%s: unexpected PCI function %d\n",
3346 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3347 return;
3348 }
3349 sc->sc_wdcdev.nchannels = 1;
3350 } else {
3351 sc->sc_wdcdev.nchannels = 2;
3352 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3353 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3354 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3355 revision == HPT372_REV))
3356 sc->sc_wdcdev.UDMA_cap = 6;
3357 else
3358 sc->sc_wdcdev.UDMA_cap = 5;
3359 }
3360 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3361 cp = &sc->pciide_channels[i];
3362 if (sc->sc_wdcdev.nchannels > 1) {
3363 compatchan = i;
3364 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3365 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3366 printf("%s: %s channel ignored (disabled)\n",
3367 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3368 continue;
3369 }
3370 }
3371 if (pciide_chansetup(sc, i, interface) == 0)
3372 continue;
3373 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3374 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3375 &ctlsize, hpt_pci_intr);
3376 } else {
3377 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3378 &cmdsize, &ctlsize);
3379 }
3380 if (cp->hw_ok == 0)
3381 return;
3382 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3383 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3384 wdcattach(&cp->wdc_channel);
3385 hpt_setup_channel(&cp->wdc_channel);
3386 }
3387 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3388 (revision == HPT370_REV || revision == HPT370A_REV ||
3389 revision == HPT372_REV)) ||
3390 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3391 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3392 		/*
3393 		 * HPT370_REV and higher have a bit to disable interrupts;
3394 		 * make sure to clear it.
3395 		 */
3396 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3397 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3398 ~HPT_CSEL_IRQDIS);
3399 }
3400 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
3401 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3402 	    revision == HPT372_REV) ||
3403 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3404 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3405 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3406 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3407 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3408 return;
3409 }
3410
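/*
 * Set up per-drive timings for an HPT channel. The value written to
 * HPT_IDETIM comes from a table selected by product ID and chip
 * revision, separately for Ultra-DMA, multiword DMA and PIO.
 */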
3411 void
3412 hpt_setup_channel(chp)
3413 struct channel_softc *chp;
3414 {
3415 struct ata_drive_datas *drvp;
3416 int drive;
3417 int cable;
3418 u_int32_t before, after;
3419 u_int32_t idedma_ctl;
3420 struct pciide_channel *cp = (struct pciide_channel*)chp;
3421 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3422 int revision =
3423 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3424
3425 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3426
3427 /* setup DMA if needed */
3428 pciide_channel_dma_setup(cp);
3429
3430 idedma_ctl = 0;
3431
3432 /* Per drive settings */
3433 for (drive = 0; drive < 2; drive++) {
3434 drvp = &chp->ch_drive[drive];
3435 /* If no drive, skip */
3436 if ((drvp->drive_flags & DRIVE) == 0)
3437 continue;
3438 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3439 HPT_IDETIM(chp->channel, drive));
3440
3441 /* add timing values, setup DMA if needed */
3442 if (drvp->drive_flags & DRIVE_UDMA) {
3443 /* use Ultra/DMA */
3444 drvp->drive_flags &= ~DRIVE_DMA;
3445 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3446 drvp->UDMA_mode > 2)
3447 drvp->UDMA_mode = 2;
3448 switch (sc->sc_pp->ide_product) {
3449 case PCI_PRODUCT_TRIONES_HPT374:
3450 after = hpt374_udma[drvp->UDMA_mode];
3451 break;
3452 case PCI_PRODUCT_TRIONES_HPT372:
3453 after = hpt372_udma[drvp->UDMA_mode];
3454 break;
3455 case PCI_PRODUCT_TRIONES_HPT366:
3456 default:
3457 				switch (revision) {
3458 case HPT372_REV:
3459 after = hpt372_udma[drvp->UDMA_mode];
3460 break;
3461 case HPT370_REV:
3462 case HPT370A_REV:
3463 after = hpt370_udma[drvp->UDMA_mode];
3464 break;
3465 case HPT366_REV:
3466 default:
3467 after = hpt366_udma[drvp->UDMA_mode];
3468 break;
3469 }
3470 }
3471 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3472 } else if (drvp->drive_flags & DRIVE_DMA) {
3473 /*
3474 * use Multiword DMA.
3475 * Timings will be used for both PIO and DMA, so adjust
3476 * DMA mode if needed
3477 */
3478 if (drvp->PIO_mode >= 3 &&
3479 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3480 drvp->DMA_mode = drvp->PIO_mode - 2;
3481 }
3482 switch (sc->sc_pp->ide_product) {
3483 case PCI_PRODUCT_TRIONES_HPT374:
3484 after = hpt374_dma[drvp->DMA_mode];
3485 break;
3486 case PCI_PRODUCT_TRIONES_HPT372:
3487 after = hpt372_dma[drvp->DMA_mode];
3488 break;
3489 case PCI_PRODUCT_TRIONES_HPT366:
3490 default:
3491 				switch (revision) {
3492 case HPT372_REV:
3493 after = hpt372_dma[drvp->DMA_mode];
3494 break;
3495 case HPT370_REV:
3496 case HPT370A_REV:
3497 after = hpt370_dma[drvp->DMA_mode];
3498 break;
3499 case HPT366_REV:
3500 default:
3501 after = hpt366_dma[drvp->DMA_mode];
3502 break;
3503 }
3504 }
3505 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3506 } else {
3507 /* PIO only */
3508 switch (sc->sc_pp->ide_product) {
3509 case PCI_PRODUCT_TRIONES_HPT374:
3510 after = hpt374_pio[drvp->PIO_mode];
3511 break;
3512 case PCI_PRODUCT_TRIONES_HPT372:
3513 after = hpt372_pio[drvp->PIO_mode];
3514 break;
3515 case PCI_PRODUCT_TRIONES_HPT366:
3516 default:
3517 				switch (revision) {
3518 case HPT372_REV:
3519 after = hpt372_pio[drvp->PIO_mode];
3520 break;
3521 case HPT370_REV:
3522 case HPT370A_REV:
3523 after = hpt370_pio[drvp->PIO_mode];
3524 break;
3525 case HPT366_REV:
3526 default:
3527 after = hpt366_pio[drvp->PIO_mode];
3528 break;
3529 }
3530 }
3531 }
3532 pci_conf_write(sc->sc_pc, sc->sc_tag,
3533 HPT_IDETIM(chp->channel, drive), after);
3534 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3535 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3536 after, before), DEBUG_PROBE);
3537 }
3538 if (idedma_ctl != 0) {
3539 /* Add software bits in status register */
3540 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3541 IDEDMA_CTL, idedma_ctl);
3542 }
3543 pciide_print_modes(cp);
3544 }
3545
3546 int
3547 hpt_pci_intr(arg)
3548 void *arg;
3549 {
3550 struct pciide_softc *sc = arg;
3551 struct pciide_channel *cp;
3552 struct channel_softc *wdc_cp;
3553 int rv = 0;
3554 int dmastat, i, crv;
3555
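	/*
	 * Poll each channel's bus-master status and claim the interrupt
	 * only when INTR is set and the DMA engine is no longer active.
	 */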
3556 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3557 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3558 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3559 		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3560 IDEDMA_CTL_INTR)
3561 continue;
3562 cp = &sc->pciide_channels[i];
3563 wdc_cp = &cp->wdc_channel;
3564 crv = wdcintr(wdc_cp);
3565 if (crv == 0) {
3566 printf("%s:%d: bogus intr\n",
3567 sc->sc_wdcdev.sc_dev.dv_xname, i);
3568 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3569 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3570 } else
3571 rv = 1;
3572 }
3573 return rv;
3574 }
3575
3576
3577 /* Macros to test product */
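/*
 * These groups nest: each macro matches the first product listed and every
 * later one, so e.g. PDC_IS_262() is true for all Ultra/66 and newer parts,
 * while PDC_IS_276() only matches the Ultra/133 family.
 */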
3578 #define PDC_IS_262(sc) \
3579 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3580 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3581 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3582 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3583 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3584 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3585 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3586 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3587 #define PDC_IS_265(sc) \
3588 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3589 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3590 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3591 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3592 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3593 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3594 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3595 #define PDC_IS_268(sc) \
3596 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3597 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3598 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3599 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3600 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3601 #define PDC_IS_276(sc) \
3602 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3603 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3604 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3605
3606 void
3607 pdc202xx_chip_map(sc, pa)
3608 struct pciide_softc *sc;
3609 struct pci_attach_args *pa;
3610 {
3611 struct pciide_channel *cp;
3612 int channel;
3613 pcireg_t interface, st, mode;
3614 bus_size_t cmdsize, ctlsize;
3615
3616 if (!PDC_IS_268(sc)) {
3617 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3618 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3619 st), DEBUG_PROBE);
3620 }
3621 if (pciide_chipen(sc, pa) == 0)
3622 return;
3623
3624 /* turn off RAID mode */
3625 if (!PDC_IS_268(sc))
3626 st &= ~PDC2xx_STATE_IDERAID;
3627
3628 /*
3629 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3630 	 * RAID mode; we have to fake the interface.
3631 */
3632 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3633 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3634 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3635
3636 printf("%s: bus-master DMA support present",
3637 sc->sc_wdcdev.sc_dev.dv_xname);
3638 pciide_mapreg_dma(sc, pa);
3639 printf("\n");
3640 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3641 WDC_CAPABILITY_MODE;
3642 if (sc->sc_dma_ok) {
3643 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3644 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3645 sc->sc_wdcdev.irqack = pciide_irqack;
3646 }
3647 sc->sc_wdcdev.PIO_cap = 4;
3648 sc->sc_wdcdev.DMA_cap = 2;
3649 if (PDC_IS_276(sc))
3650 sc->sc_wdcdev.UDMA_cap = 6;
3651 else if (PDC_IS_265(sc))
3652 sc->sc_wdcdev.UDMA_cap = 5;
3653 else if (PDC_IS_262(sc))
3654 sc->sc_wdcdev.UDMA_cap = 4;
3655 else
3656 sc->sc_wdcdev.UDMA_cap = 2;
3657 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3658 pdc20268_setup_channel : pdc202xx_setup_channel;
3659 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3660 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3661
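	/*
	 * The Ultra/66 and Ultra/100(X) boards need PDC262_ATAPI reprogrammed
	 * around LBA48 transfers, so hook the DMA start/finish routines
	 * (see pdc20262_dma_start()/pdc20262_dma_finish() below).
	 */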
3662 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
3663 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
3664 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
3665 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
3666 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
3667 }
3668
3669 if (!PDC_IS_268(sc)) {
3670 /* setup failsafe defaults */
3671 mode = 0;
3672 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3673 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3674 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3675 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3676 for (channel = 0;
3677 channel < sc->sc_wdcdev.nchannels;
3678 channel++) {
3679 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3680 "drive 0 initial timings 0x%x, now 0x%x\n",
3681 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3682 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3683 DEBUG_PROBE);
3684 pci_conf_write(sc->sc_pc, sc->sc_tag,
3685 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3686 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3687 "drive 1 initial timings 0x%x, now 0x%x\n",
3688 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3689 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3690 pci_conf_write(sc->sc_pc, sc->sc_tag,
3691 PDC2xx_TIM(channel, 1), mode);
3692 }
3693
3694 mode = PDC2xx_SCR_DMA;
3695 if (PDC_IS_262(sc)) {
3696 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3697 } else {
3698 /* the BIOS set it up this way */
3699 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3700 }
3701 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3702 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3703 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3704 "now 0x%x\n",
3705 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3706 PDC2xx_SCR),
3707 mode), DEBUG_PROBE);
3708 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3709 PDC2xx_SCR, mode);
3710
3711 /* controller initial state register is OK even without BIOS */
3712 /* Set DMA mode to IDE DMA compatibility */
3713 mode =
3714 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3715 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3716 DEBUG_PROBE);
3717 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3718 mode | 0x1);
3719 mode =
3720 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3721 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3722 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3723 mode | 0x1);
3724 }
3725
3726 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3727 cp = &sc->pciide_channels[channel];
3728 if (pciide_chansetup(sc, channel, interface) == 0)
3729 continue;
3730 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3731 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3732 printf("%s: %s channel ignored (disabled)\n",
3733 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3734 continue;
3735 }
3736 if (PDC_IS_265(sc))
3737 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3738 pdc20265_pci_intr);
3739 else
3740 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3741 pdc202xx_pci_intr);
3742 if (cp->hw_ok == 0)
3743 continue;
3744 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3745 st &= ~(PDC_IS_262(sc) ?
3746 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3747 pciide_map_compat_intr(pa, cp, channel, interface);
3748 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3749 }
3750 if (!PDC_IS_268(sc)) {
3751 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3752 "0x%x\n", st), DEBUG_PROBE);
3753 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3754 }
3755 return;
3756 }
3757
3758 void
3759 pdc202xx_setup_channel(chp)
3760 struct channel_softc *chp;
3761 {
3762 struct ata_drive_datas *drvp;
3763 int drive;
3764 pcireg_t mode, st;
3765 u_int32_t idedma_ctl, scr, atapi;
3766 struct pciide_channel *cp = (struct pciide_channel*)chp;
3767 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3768 int channel = chp->channel;
3769
3770 /* setup DMA if needed */
3771 pciide_channel_dma_setup(cp);
3772
3773 idedma_ctl = 0;
3774 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3775 sc->sc_wdcdev.sc_dev.dv_xname,
3776 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3777 DEBUG_PROBE);
3778
3779 /* Per channel settings */
3780 if (PDC_IS_262(sc)) {
3781 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3782 PDC262_U66);
3783 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3784 /* Trim UDMA mode */
3785 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3786 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3787 chp->ch_drive[0].UDMA_mode <= 2) ||
3788 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3789 chp->ch_drive[1].UDMA_mode <= 2)) {
3790 if (chp->ch_drive[0].UDMA_mode > 2)
3791 chp->ch_drive[0].UDMA_mode = 2;
3792 if (chp->ch_drive[1].UDMA_mode > 2)
3793 chp->ch_drive[1].UDMA_mode = 2;
3794 }
3795 /* Set U66 if needed */
3796 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3797 chp->ch_drive[0].UDMA_mode > 2) ||
3798 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3799 chp->ch_drive[1].UDMA_mode > 2))
3800 scr |= PDC262_U66_EN(channel);
3801 else
3802 scr &= ~PDC262_U66_EN(channel);
3803 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3804 PDC262_U66, scr);
3805 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3806 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3807 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3808 PDC262_ATAPI(channel))), DEBUG_PROBE);
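		/*
		 * With an ATAPI device on the channel, clear the channel's
		 * UDMA bit when one drive runs UDMA and the other only
		 * multiword DMA; otherwise leave UDMA enabled.
		 */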
3809 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3810 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3811 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3812 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3813 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3814 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3815 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3816 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3817 atapi = 0;
3818 else
3819 atapi = PDC262_ATAPI_UDMA;
3820 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3821 PDC262_ATAPI(channel), atapi);
3822 }
3823 }
3824 for (drive = 0; drive < 2; drive++) {
3825 drvp = &chp->ch_drive[drive];
3826 /* If no drive, skip */
3827 if ((drvp->drive_flags & DRIVE) == 0)
3828 continue;
3829 mode = 0;
3830 if (drvp->drive_flags & DRIVE_UDMA) {
3831 /* use Ultra/DMA */
3832 drvp->drive_flags &= ~DRIVE_DMA;
3833 mode = PDC2xx_TIM_SET_MB(mode,
3834 pdc2xx_udma_mb[drvp->UDMA_mode]);
3835 mode = PDC2xx_TIM_SET_MC(mode,
3836 pdc2xx_udma_mc[drvp->UDMA_mode]);
3837 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3838 } else if (drvp->drive_flags & DRIVE_DMA) {
3839 mode = PDC2xx_TIM_SET_MB(mode,
3840 pdc2xx_dma_mb[drvp->DMA_mode]);
3841 mode = PDC2xx_TIM_SET_MC(mode,
3842 pdc2xx_dma_mc[drvp->DMA_mode]);
3843 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3844 } else {
3845 mode = PDC2xx_TIM_SET_MB(mode,
3846 pdc2xx_dma_mb[0]);
3847 mode = PDC2xx_TIM_SET_MC(mode,
3848 pdc2xx_dma_mc[0]);
3849 }
3850 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3851 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3852 if (drvp->drive_flags & DRIVE_ATA)
3853 mode |= PDC2xx_TIM_PRE;
3854 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3855 if (drvp->PIO_mode >= 3) {
3856 mode |= PDC2xx_TIM_IORDY;
3857 if (drive == 0)
3858 mode |= PDC2xx_TIM_IORDYp;
3859 }
3860 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3861 "timings 0x%x\n",
3862 sc->sc_wdcdev.sc_dev.dv_xname,
3863 chp->channel, drive, mode), DEBUG_PROBE);
3864 pci_conf_write(sc->sc_pc, sc->sc_tag,
3865 PDC2xx_TIM(chp->channel, drive), mode);
3866 }
3867 if (idedma_ctl != 0) {
3868 /* Add software bits in status register */
3869 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3870 IDEDMA_CTL, idedma_ctl);
3871 }
3872 pciide_print_modes(cp);
3873 }
3874
3875 void
3876 pdc20268_setup_channel(chp)
3877 struct channel_softc *chp;
3878 {
3879 struct ata_drive_datas *drvp;
3880 int drive;
3881 u_int32_t idedma_ctl;
3882 struct pciide_channel *cp = (struct pciide_channel*)chp;
3883 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3884 int u100;
3885
3886 /* setup DMA if needed */
3887 pciide_channel_dma_setup(cp);
3888
3889 idedma_ctl = 0;
3890
3891 	/* I don't know what this is for; FreeBSD does it ... */
3892 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3893 IDEDMA_CMD + 0x1, 0x0b);
3894
3895 /*
3896 	 * I don't know what this is for; FreeBSD checks it, but it is not
3897 	 * cable-type detection.
3898 */
3899 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3900 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
3901
3902 for (drive = 0; drive < 2; drive++) {
3903 drvp = &chp->ch_drive[drive];
3904 /* If no drive, skip */
3905 if ((drvp->drive_flags & DRIVE) == 0)
3906 continue;
3907 if (drvp->drive_flags & DRIVE_UDMA) {
3908 /* use Ultra/DMA */
3909 drvp->drive_flags &= ~DRIVE_DMA;
3910 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3911 if (drvp->UDMA_mode > 2 && u100 == 0)
3912 drvp->UDMA_mode = 2;
3913 } else if (drvp->drive_flags & DRIVE_DMA) {
3914 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3915 }
3916 }
3917 	/* Nothing to set up for modes; the controller snoops SET_FEATURES. */
3918 if (idedma_ctl != 0) {
3919 /* Add software bits in status register */
3920 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3921 IDEDMA_CTL, idedma_ctl);
3922 }
3923 pciide_print_modes(cp);
3924 }
3925
3926 int
3927 pdc202xx_pci_intr(arg)
3928 void *arg;
3929 {
3930 struct pciide_softc *sc = arg;
3931 struct pciide_channel *cp;
3932 struct channel_softc *wdc_cp;
3933 int i, rv, crv;
3934 u_int32_t scr;
3935
3936 rv = 0;
3937 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3938 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3939 cp = &sc->pciide_channels[i];
3940 wdc_cp = &cp->wdc_channel;
3941 		/* If it's a compat channel, skip. */
3942 if (cp->compat)
3943 continue;
3944 if (scr & PDC2xx_SCR_INT(i)) {
3945 crv = wdcintr(wdc_cp);
3946 if (crv == 0)
3947 printf("%s:%d: bogus intr (reg 0x%x)\n",
3948 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3949 else
3950 rv = 1;
3951 }
3952 }
3953 return rv;
3954 }
3955
3956 int
3957 pdc20265_pci_intr(arg)
3958 void *arg;
3959 {
3960 struct pciide_softc *sc = arg;
3961 struct pciide_channel *cp;
3962 struct channel_softc *wdc_cp;
3963 int i, rv, crv;
3964 u_int32_t dmastat;
3965
3966 rv = 0;
3967 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3968 cp = &sc->pciide_channels[i];
3969 wdc_cp = &cp->wdc_channel;
3970 		/* If it's a compat channel, skip. */
3971 if (cp->compat)
3972 continue;
3973 /*
3974 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
3975 		 * but it does assert INT in IDEDMA_CTL even for non-DMA ops.
3976 		 * So use the latter instead (requires 2 register reads
3977 		 * instead of 1, but we can't do it any other way).
3978 */
3979 dmastat = bus_space_read_1(sc->sc_dma_iot,
3980 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3981 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3982 continue;
3983 crv = wdcintr(wdc_cp);
3984 if (crv == 0)
3985 printf("%s:%d: bogus intr\n",
3986 sc->sc_wdcdev.sc_dev.dv_xname, i);
3987 else
3988 rv = 1;
3989 }
3990 return rv;
3991 }
3992
3993 static void
3994 pdc20262_dma_start(v, channel, drive)
3995 void *v;
3996 int channel, drive;
3997 {
3998 struct pciide_softc *sc = v;
3999 struct pciide_dma_maps *dma_maps =
4000 &sc->pciide_channels[channel].dma_maps[drive];
4001 int atapi;
4002
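	/*
	 * For LBA48 transfers, load the transfer direction and the size
	 * (in 16-bit words) into PDC262_ATAPI before starting the DMA
	 * engine.
	 */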
4003 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4004 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4005 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4006 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4007 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4008 PDC262_ATAPI(channel), atapi);
4009 }
4010
4011 pciide_dma_start(v, channel, drive);
4012 }
4013
4014 int
4015 pdc20262_dma_finish(v, channel, drive, force)
4016 void *v;
4017 int channel, drive;
4018 int force;
4019 {
4020 struct pciide_softc *sc = v;
4021 struct pciide_dma_maps *dma_maps =
4022 &sc->pciide_channels[channel].dma_maps[drive];
4023 struct channel_softc *chp;
4024 int atapi, error;
4025
4026 error = pciide_dma_finish(v, channel, drive, force);
4027
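	/*
	 * After an LBA48 transfer, restore PDC262_ATAPI: re-enable the
	 * channel UDMA bit if an ATAPI device is present and the drives
	 * are not a mixed UDMA/multiword-DMA pair, otherwise clear it.
	 */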
4028 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4029 chp = sc->wdc_chanarray[channel];
4030 atapi = 0;
4031 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4032 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4033 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4034 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4035 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4036 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4037 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4038 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4039 atapi = PDC262_ATAPI_UDMA;
4040 }
4041 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4042 PDC262_ATAPI(channel), atapi);
4043 }
4044
4045 return error;
4046 }
4047
4048 void
4049 opti_chip_map(sc, pa)
4050 struct pciide_softc *sc;
4051 struct pci_attach_args *pa;
4052 {
4053 struct pciide_channel *cp;
4054 bus_size_t cmdsize, ctlsize;
4055 pcireg_t interface;
4056 u_int8_t init_ctrl;
4057 int channel;
4058
4059 if (pciide_chipen(sc, pa) == 0)
4060 return;
4061 printf("%s: bus-master DMA support present",
4062 sc->sc_wdcdev.sc_dev.dv_xname);
4063
4064 /*
4065 * XXXSCW:
4066 * There seem to be a couple of buggy revisions/implementations
4067 * of the OPTi pciide chipset. This kludge seems to fix one of
4068 * the reported problems (PR/11644) but still fails for the
4069 * other (PR/13151), although the latter may be due to other
4070 * issues too...
4071 */
4072 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4073 printf(" but disabled due to chip rev. <= 0x12");
4074 sc->sc_dma_ok = 0;
4075 } else
4076 pciide_mapreg_dma(sc, pa);
4077
4078 printf("\n");
4079
4080 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4081 WDC_CAPABILITY_MODE;
4082 sc->sc_wdcdev.PIO_cap = 4;
4083 if (sc->sc_dma_ok) {
4084 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4085 sc->sc_wdcdev.irqack = pciide_irqack;
4086 sc->sc_wdcdev.DMA_cap = 2;
4087 }
4088 sc->sc_wdcdev.set_modes = opti_setup_channel;
4089
4090 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4091 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4092
4093 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4094 OPTI_REG_INIT_CONTROL);
4095
4096 interface = PCI_INTERFACE(pa->pa_class);
4097
4098 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4099 cp = &sc->pciide_channels[channel];
4100 if (pciide_chansetup(sc, channel, interface) == 0)
4101 continue;
4102 if (channel == 1 &&
4103 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4104 printf("%s: %s channel ignored (disabled)\n",
4105 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4106 continue;
4107 }
4108 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4109 pciide_pci_intr);
4110 if (cp->hw_ok == 0)
4111 continue;
4112 pciide_map_compat_intr(pa, cp, channel, interface);
4113 if (cp->hw_ok == 0)
4114 continue;
4115 opti_setup_channel(&cp->wdc_channel);
4116 }
4117 }
4118
4119 void
4120 opti_setup_channel(chp)
4121 struct channel_softc *chp;
4122 {
4123 struct ata_drive_datas *drvp;
4124 struct pciide_channel *cp = (struct pciide_channel*)chp;
4125 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4126 int drive, spd;
4127 int mode[2];
4128 u_int8_t rv, mr;
4129
4130 /*
4131 * The `Delay' and `Address Setup Time' fields of the
4132 * Miscellaneous Register are always zero initially.
4133 */
4134 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4135 mr &= ~(OPTI_MISC_DELAY_MASK |
4136 OPTI_MISC_ADDR_SETUP_MASK |
4137 OPTI_MISC_INDEX_MASK);
4138
4139 /* Prime the control register before setting timing values */
4140 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4141
4142 /* Determine the clockrate of the PCIbus the chip is attached to */
4143 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4144 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4145
4146 /* setup DMA if needed */
4147 pciide_channel_dma_setup(cp);
4148
4149 for (drive = 0; drive < 2; drive++) {
4150 drvp = &chp->ch_drive[drive];
4151 /* If no drive, skip */
4152 if ((drvp->drive_flags & DRIVE) == 0) {
4153 mode[drive] = -1;
4154 continue;
4155 }
4156
4157 if ((drvp->drive_flags & DRIVE_DMA)) {
4158 /*
4159 * Timings will be used for both PIO and DMA,
4160 * so adjust DMA mode if needed
4161 */
4162 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4163 drvp->PIO_mode = drvp->DMA_mode + 2;
4164 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4165 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4166 drvp->PIO_mode - 2 : 0;
4167 if (drvp->DMA_mode == 0)
4168 drvp->PIO_mode = 0;
4169
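			/*
			 * The opti_tim_* tables apparently list the five PIO
			 * modes first, so multiword DMA modes index them at
			 * offset 5.
			 */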
4170 mode[drive] = drvp->DMA_mode + 5;
4171 } else
4172 mode[drive] = drvp->PIO_mode;
4173
4174 if (drive && mode[0] >= 0 &&
4175 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4176 /*
4177 * Can't have two drives using different values
4178 * for `Address Setup Time'.
4179 * Slow down the faster drive to compensate.
4180 */
4181 int d = (opti_tim_as[spd][mode[0]] >
4182 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4183
4184 mode[d] = mode[1-d];
4185 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4186 chp->ch_drive[d].DMA_mode = 0;
4187 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4188 }
4189 }
4190
4191 for (drive = 0; drive < 2; drive++) {
4192 int m;
4193 if ((m = mode[drive]) < 0)
4194 continue;
4195
4196 /* Set the Address Setup Time and select appropriate index */
4197 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4198 rv |= OPTI_MISC_INDEX(drive);
4199 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4200
4201 /* Set the pulse width and recovery timing parameters */
4202 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4203 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4204 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4205 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4206
4207 /* Set the Enhanced Mode register appropriately */
4208 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4209 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4210 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4211 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4212 }
4213
4214 /* Finally, enable the timings */
4215 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4216
4217 pciide_print_modes(cp);
4218 }
4219
4220 #define ACARD_IS_850(sc) \
4221 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4222
4223 void
4224 acard_chip_map(sc, pa)
4225 struct pciide_softc *sc;
4226 struct pci_attach_args *pa;
4227 {
4228 struct pciide_channel *cp;
4229 int i;
4230 pcireg_t interface;
4231 bus_size_t cmdsize, ctlsize;
4232
4233 if (pciide_chipen(sc, pa) == 0)
4234 return;
4235
4236 /*
4237 	 * When the chip is in native mode it identifies itself as a
4238 	 * 'misc mass storage' device, so we have to fake the interface.
4239 */
4240 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4241 interface = PCI_INTERFACE(pa->pa_class);
4242 } else {
4243 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4244 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4245 }
4246
4247 printf("%s: bus-master DMA support present",
4248 sc->sc_wdcdev.sc_dev.dv_xname);
4249 pciide_mapreg_dma(sc, pa);
4250 printf("\n");
4251 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4252 WDC_CAPABILITY_MODE;
4253
4254 if (sc->sc_dma_ok) {
4255 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4256 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4257 sc->sc_wdcdev.irqack = pciide_irqack;
4258 }
4259 sc->sc_wdcdev.PIO_cap = 4;
4260 sc->sc_wdcdev.DMA_cap = 2;
4261 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4262
4263 sc->sc_wdcdev.set_modes = acard_setup_channel;
4264 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4265 sc->sc_wdcdev.nchannels = 2;
4266
4267 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4268 cp = &sc->pciide_channels[i];
4269 if (pciide_chansetup(sc, i, interface) == 0)
4270 continue;
4271 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4272 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4273 &ctlsize, pciide_pci_intr);
4274 } else {
4275 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4276 &cmdsize, &ctlsize);
4277 }
4278 if (cp->hw_ok == 0)
4279 return;
4280 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4281 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4282 wdcattach(&cp->wdc_channel);
4283 acard_setup_channel(&cp->wdc_channel);
4284 }
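	/*
	 * On the ATP860/865-class chips, clear ATP860_CTRL_INT, which
	 * presumably ungates the controller interrupt; nothing similar is
	 * done for the ATP850.
	 */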
4285 if (!ACARD_IS_850(sc)) {
4286 u_int32_t reg;
4287 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4288 reg &= ~ATP860_CTRL_INT;
4289 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4290 }
4291 }
4292
4293 void
4294 acard_setup_channel(chp)
4295 struct channel_softc *chp;
4296 {
4297 struct ata_drive_datas *drvp;
4298 struct pciide_channel *cp = (struct pciide_channel*)chp;
4299 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4300 int channel = chp->channel;
4301 int drive;
4302 u_int32_t idetime, udma_mode;
4303 u_int32_t idedma_ctl;
4304
4305 /* setup DMA if needed */
4306 pciide_channel_dma_setup(cp);
4307
4308 if (ACARD_IS_850(sc)) {
4309 idetime = 0;
4310 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4311 udma_mode &= ~ATP850_UDMA_MASK(channel);
4312 } else {
4313 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4314 idetime &= ~ATP860_SETTIME_MASK(channel);
4315 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4316 udma_mode &= ~ATP860_UDMA_MASK(channel);
4317
4318 		/* check for an 80-pin cable */
4319 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4320 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4321 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4322 & ATP860_CTRL_80P(chp->channel)) {
4323 if (chp->ch_drive[0].UDMA_mode > 2)
4324 chp->ch_drive[0].UDMA_mode = 2;
4325 if (chp->ch_drive[1].UDMA_mode > 2)
4326 chp->ch_drive[1].UDMA_mode = 2;
4327 }
4328 }
4329 }
4330
4331 idedma_ctl = 0;
4332
4333 /* Per drive settings */
4334 for (drive = 0; drive < 2; drive++) {
4335 drvp = &chp->ch_drive[drive];
4336 /* If no drive, skip */
4337 if ((drvp->drive_flags & DRIVE) == 0)
4338 continue;
4339 /* add timing values, setup DMA if needed */
4340 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4341 (drvp->drive_flags & DRIVE_UDMA)) {
4342 /* use Ultra/DMA */
4343 if (ACARD_IS_850(sc)) {
4344 idetime |= ATP850_SETTIME(drive,
4345 acard_act_udma[drvp->UDMA_mode],
4346 acard_rec_udma[drvp->UDMA_mode]);
4347 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4348 acard_udma_conf[drvp->UDMA_mode]);
4349 } else {
4350 idetime |= ATP860_SETTIME(channel, drive,
4351 acard_act_udma[drvp->UDMA_mode],
4352 acard_rec_udma[drvp->UDMA_mode]);
4353 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4354 acard_udma_conf[drvp->UDMA_mode]);
4355 }
4356 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4357 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4358 (drvp->drive_flags & DRIVE_DMA)) {
4359 /* use Multiword DMA */
4360 drvp->drive_flags &= ~DRIVE_UDMA;
4361 if (ACARD_IS_850(sc)) {
4362 idetime |= ATP850_SETTIME(drive,
4363 acard_act_dma[drvp->DMA_mode],
4364 acard_rec_dma[drvp->DMA_mode]);
4365 } else {
4366 idetime |= ATP860_SETTIME(channel, drive,
4367 acard_act_dma[drvp->DMA_mode],
4368 acard_rec_dma[drvp->DMA_mode]);
4369 }
4370 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4371 } else {
4372 /* PIO only */
4373 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4374 if (ACARD_IS_850(sc)) {
4375 idetime |= ATP850_SETTIME(drive,
4376 acard_act_pio[drvp->PIO_mode],
4377 acard_rec_pio[drvp->PIO_mode]);
4378 } else {
4379 idetime |= ATP860_SETTIME(channel, drive,
4380 acard_act_pio[drvp->PIO_mode],
4381 acard_rec_pio[drvp->PIO_mode]);
4382 }
4383 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4384 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4385 | ATP8x0_CTRL_EN(channel));
4386 }
4387 }
4388
4389 if (idedma_ctl != 0) {
4390 /* Add software bits in status register */
4391 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4392 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4393 }
4394 pciide_print_modes(cp);
4395
4396 if (ACARD_IS_850(sc)) {
4397 pci_conf_write(sc->sc_pc, sc->sc_tag,
4398 ATP850_IDETIME(channel), idetime);
4399 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4400 } else {
4401 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4402 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4403 }
4404 }
4405
4406 int
4407 acard_pci_intr(arg)
4408 void *arg;
4409 {
4410 struct pciide_softc *sc = arg;
4411 struct pciide_channel *cp;
4412 struct channel_softc *wdc_cp;
4413 int rv = 0;
4414 int dmastat, i, crv;
4415
4416 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4417 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4418 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4419 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4420 continue;
4421 cp = &sc->pciide_channels[i];
4422 wdc_cp = &cp->wdc_channel;
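		/*
		 * No command pending on this channel: treat the interrupt
		 * as stray, ack the drive and clear the DMA status bit.
		 */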
4423 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4424 (void)wdcintr(wdc_cp);
4425 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4426 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4427 continue;
4428 }
4429 crv = wdcintr(wdc_cp);
4430 if (crv == 0)
4431 printf("%s:%d: bogus intr\n",
4432 sc->sc_wdcdev.sc_dev.dv_xname, i);
4433 else if (crv == 1)
4434 rv = 1;
4435 else if (rv == 0)
4436 rv = crv;
4437 }
4438 return rv;
4439 }
4440
4441 static int
4442 sl82c105_bugchk(struct pci_attach_args *pa)
4443 {
4444
4445 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4446 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4447 return (0);
4448
4449 if (PCI_REVISION(pa->pa_class) <= 0x05)
4450 return (1);
4451
4452 return (0);
4453 }
4454
4455 void
4456 sl82c105_chip_map(sc, pa)
4457 struct pciide_softc *sc;
4458 struct pci_attach_args *pa;
4459 {
4460 struct pciide_channel *cp;
4461 bus_size_t cmdsize, ctlsize;
4462 pcireg_t interface, idecr;
4463 int channel;
4464
4465 if (pciide_chipen(sc, pa) == 0)
4466 return;
4467
4468 printf("%s: bus-master DMA support present",
4469 sc->sc_wdcdev.sc_dev.dv_xname);
4470
4471 /*
4472 * Check to see if we're part of the Winbond 83c553 Southbridge.
4473 * If so, we need to disable DMA on rev. <= 5 of that chip.
4474 */
4475 if (pci_find_device(pa, sl82c105_bugchk)) {
4476 printf(" but disabled due to 83c553 rev. <= 0x05");
4477 sc->sc_dma_ok = 0;
4478 } else
4479 pciide_mapreg_dma(sc, pa);
4480 printf("\n");
4481
4482 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4483 WDC_CAPABILITY_MODE;
4484 sc->sc_wdcdev.PIO_cap = 4;
4485 if (sc->sc_dma_ok) {
4486 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4487 sc->sc_wdcdev.irqack = pciide_irqack;
4488 sc->sc_wdcdev.DMA_cap = 2;
4489 }
4490 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4491
4492 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4493 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4494
4495 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4496
4497 interface = PCI_INTERFACE(pa->pa_class);
4498
4499 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4500 cp = &sc->pciide_channels[channel];
4501 if (pciide_chansetup(sc, channel, interface) == 0)
4502 continue;
4503 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4504 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4505 printf("%s: %s channel ignored (disabled)\n",
4506 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4507 continue;
4508 }
4509 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4510 pciide_pci_intr);
4511 if (cp->hw_ok == 0)
4512 continue;
4513 pciide_map_compat_intr(pa, cp, channel, interface);
4514 if (cp->hw_ok == 0)
4515 continue;
4516 sl82c105_setup_channel(&cp->wdc_channel);
4517 }
4518 }
4519
4520 void
4521 sl82c105_setup_channel(chp)
4522 struct channel_softc *chp;
4523 {
4524 struct ata_drive_datas *drvp;
4525 struct pciide_channel *cp = (struct pciide_channel*)chp;
4526 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4527 int pxdx_reg, drive;
4528 pcireg_t pxdx;
4529
4530 /* Set up DMA if needed. */
4531 pciide_channel_dma_setup(cp);
4532
4533 for (drive = 0; drive < 2; drive++) {
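		/*
		 * Each drive has its own timing-control register, laid out
		 * 4 bytes apart starting at SYMPH_P0D0CR (primary) or
		 * SYMPH_P1D0CR (secondary).
		 */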
4534 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4535 : SYMPH_P1D0CR) + (drive * 4);
4536
4537 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4538
4539 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4540 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4541
4542 drvp = &chp->ch_drive[drive];
4543 /* If no drive, skip. */
4544 if ((drvp->drive_flags & DRIVE) == 0) {
4545 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4546 continue;
4547 }
4548
4549 if (drvp->drive_flags & DRIVE_DMA) {
4550 /*
4551 * Timings will be used for both PIO and DMA,
4552 * so adjust DMA mode if needed.
4553 */
4554 if (drvp->PIO_mode >= 3) {
4555 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4556 drvp->DMA_mode = drvp->PIO_mode - 2;
4557 if (drvp->DMA_mode < 1) {
4558 /*
4559 * Can't mix both PIO and DMA.
4560 * Disable DMA.
4561 */
4562 drvp->drive_flags &= ~DRIVE_DMA;
4563 }
4564 } else {
4565 /*
4566 * Can't mix both PIO and DMA. Disable
4567 * DMA.
4568 */
4569 drvp->drive_flags &= ~DRIVE_DMA;
4570 }
4571 }
4572
4573 if (drvp->drive_flags & DRIVE_DMA) {
4574 /* Use multi-word DMA. */
4575 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4576 PxDx_CMD_ON_SHIFT;
4577 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4578 } else {
4579 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4580 PxDx_CMD_ON_SHIFT;
4581 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4582 }
4583
4584 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4585
4586 /* ...and set the mode for this drive. */
4587 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4588 }
4589
4590 pciide_print_modes(cp);
4591 }
4592
4593 void
4594 serverworks_chip_map(sc, pa)
4595 struct pciide_softc *sc;
4596 struct pci_attach_args *pa;
4597 {
4598 struct pciide_channel *cp;
4599 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4600 pcitag_t pcib_tag;
4601 int channel;
4602 bus_size_t cmdsize, ctlsize;
4603
4604 if (pciide_chipen(sc, pa) == 0)
4605 return;
4606
4607 printf("%s: bus-master DMA support present",
4608 sc->sc_wdcdev.sc_dev.dv_xname);
4609 pciide_mapreg_dma(sc, pa);
4610 printf("\n");
4611 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4612 WDC_CAPABILITY_MODE;
4613
4614 if (sc->sc_dma_ok) {
4615 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4616 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4617 sc->sc_wdcdev.irqack = pciide_irqack;
4618 }
4619 sc->sc_wdcdev.PIO_cap = 4;
4620 sc->sc_wdcdev.DMA_cap = 2;
4621 switch (sc->sc_pp->ide_product) {
4622 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4623 sc->sc_wdcdev.UDMA_cap = 2;
4624 break;
4625 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4626 if (PCI_REVISION(pa->pa_class) < 0x92)
4627 sc->sc_wdcdev.UDMA_cap = 4;
4628 else
4629 sc->sc_wdcdev.UDMA_cap = 5;
4630 break;
4631 }
4632
4633 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4634 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4635 sc->sc_wdcdev.nchannels = 2;
4636
4637 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4638 cp = &sc->pciide_channels[channel];
4639 if (pciide_chansetup(sc, channel, interface) == 0)
4640 continue;
4641 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4642 serverworks_pci_intr);
4643 if (cp->hw_ok == 0)
4644 return;
4645 pciide_map_compat_intr(pa, cp, channel, interface);
4646 if (cp->hw_ok == 0)
4647 return;
4648 serverworks_setup_channel(&cp->wdc_channel);
4649 }
4650
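	/*
	 * Tweak config register 0x64 of function 0 of the same device
	 * (undocumented here): clear bit 13 and set bit 14.
	 */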
4651 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4652 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4653 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4654 }
4655
4656 void
4657 serverworks_setup_channel(chp)
4658 struct channel_softc *chp;
4659 {
4660 struct ata_drive_datas *drvp;
4661 struct pciide_channel *cp = (struct pciide_channel*)chp;
4662 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4663 int channel = chp->channel;
4664 int drive, unit;
4665 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4666 u_int32_t idedma_ctl;
4667 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4668 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4669
4670 /* setup DMA if needed */
4671 pciide_channel_dma_setup(cp);
4672
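	/*
	 * As used below, registers 0x40 (PIO) and 0x44 (DMA) hold one
	 * timing byte per drive, with drive 0 in the upper byte of each
	 * channel's 16-bit half (hence the (unit ^ 1) index).  Registers
	 * 0x48 and 0x54 hold 4-bit mode fields per drive starting at bit
	 * 16, and 0x54 also carries per-drive UDMA enable bits in its low
	 * nibble.
	 */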
4673 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4674 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4675 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4676 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4677
4678 pio_time &= ~(0xffff << (16 * channel));
4679 dma_time &= ~(0xffff << (16 * channel));
4680 pio_mode &= ~(0xff << (8 * channel + 16));
4681 udma_mode &= ~(0xff << (8 * channel + 16));
4682 udma_mode &= ~(3 << (2 * channel));
4683
4684 idedma_ctl = 0;
4685
4686 /* Per drive settings */
4687 for (drive = 0; drive < 2; drive++) {
4688 drvp = &chp->ch_drive[drive];
4689 /* If no drive, skip */
4690 if ((drvp->drive_flags & DRIVE) == 0)
4691 continue;
4692 unit = drive + 2 * channel;
4693 /* add timing values, setup DMA if needed */
4694 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4695 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4696 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4697 (drvp->drive_flags & DRIVE_UDMA)) {
4698 /* use Ultra/DMA, check for 80-pin cable */
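			/*
			 * The cable type is apparently reported through bits
			 * 14/15 of the subsystem ID register, one bit per
			 * channel.
			 */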
4699 if (drvp->UDMA_mode > 2 &&
4700 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4701 drvp->UDMA_mode = 2;
4702 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4703 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4704 udma_mode |= 1 << unit;
4705 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4706 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4707 (drvp->drive_flags & DRIVE_DMA)) {
4708 /* use Multiword DMA */
4709 drvp->drive_flags &= ~DRIVE_UDMA;
4710 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4711 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4712 } else {
4713 /* PIO only */
4714 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4715 }
4716 }
4717
4718 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4719 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4720 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4721 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4722 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4723
4724 if (idedma_ctl != 0) {
4725 /* Add software bits in status register */
4726 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4727 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4728 }
4729 pciide_print_modes(cp);
4730 }
4731
4732 int
4733 serverworks_pci_intr(arg)
4734 void *arg;
4735 {
4736 struct pciide_softc *sc = arg;
4737 struct pciide_channel *cp;
4738 struct channel_softc *wdc_cp;
4739 int rv = 0;
4740 int dmastat, i, crv;
4741
4742 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4743 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4744 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4745 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4746 IDEDMA_CTL_INTR)
4747 continue;
4748 cp = &sc->pciide_channels[i];
4749 wdc_cp = &cp->wdc_channel;
4750 crv = wdcintr(wdc_cp);
4751 if (crv == 0) {
4752 printf("%s:%d: bogus intr\n",
4753 sc->sc_wdcdev.sc_dev.dv_xname, i);
4754 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4755 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4756 } else
4757 rv = 1;
4758 }
4759 return rv;
4760 }
4761