1 /*	$NetBSD: pciide.c,v 1.153.2.7 2002/11/01 16:20:50 tron Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.153.2.7 2002/11/01 16:20:50 tron Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
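/*
 * Debug output is gated by wdcdebug_pciide_mask: set the DEBUG_* bits
 * below (e.g. by patching the variable or from the kernel debugger) to
 * enable the corresponding WDCDEBUG_PRINT() messages.
 */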
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
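	/*
	 * PCI configuration space is only dword-addressable, so read the
	 * containing 32-bit register, replace the selected byte and write
	 * the dword back.
	 */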
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 static int sis_hostbr_match __P(( struct pci_attach_args *));
187
188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void acer_setup_channel __P((struct channel_softc*));
190 int acer_pci_intr __P((void *));
191
192 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void pdc202xx_setup_channel __P((struct channel_softc*));
194 void pdc20268_setup_channel __P((struct channel_softc*));
195 int pdc202xx_pci_intr __P((void *));
196 int pdc20265_pci_intr __P((void *));
197
198 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void opti_setup_channel __P((struct channel_softc*));
200
201 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
202 void hpt_setup_channel __P((struct channel_softc*));
203 int hpt_pci_intr __P((void *));
204
205 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void acard_setup_channel __P((struct channel_softc*));
207 int acard_pci_intr __P((void *));
208
209 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void serverworks_setup_channel __P((struct channel_softc*));
211 int serverworks_pci_intr __P((void *));
212
213 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void sl82c105_setup_channel __P((struct channel_softc*));
215
216 void pciide_channel_dma_setup __P((struct pciide_channel *));
217 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
218 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
219 void pciide_dma_start __P((void*, int, int));
220 int pciide_dma_finish __P((void*, int, int, int));
221 void pciide_irqack __P((struct channel_softc *));
222 void pciide_print_modes __P((struct pciide_channel *));
223
224 struct pciide_product_desc {
225 u_int32_t ide_product;
226 int ide_flags;
227 const char *ide_name;
228 /* map and setup chip, probe drives */
229 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
230 };
231
232 /* Flags for ide_flags */
233 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
234 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
235
236 /* Default product description for devices not known to this driver */
237 const struct pciide_product_desc default_product_desc = {
238 0,
239 0,
240 "Generic PCI IDE controller",
241 default_chip_map,
242 };
243
244 const struct pciide_product_desc pciide_intel_products[] = {
245 { PCI_PRODUCT_INTEL_82092AA,
246 0,
247 "Intel 82092AA IDE controller",
248 default_chip_map,
249 },
250 { PCI_PRODUCT_INTEL_82371FB_IDE,
251 0,
252 "Intel 82371FB IDE controller (PIIX)",
253 piix_chip_map,
254 },
255 { PCI_PRODUCT_INTEL_82371SB_IDE,
256 0,
257 "Intel 82371SB IDE Interface (PIIX3)",
258 piix_chip_map,
259 },
260 { PCI_PRODUCT_INTEL_82371AB_IDE,
261 0,
262 "Intel 82371AB IDE controller (PIIX4)",
263 piix_chip_map,
264 },
265 { PCI_PRODUCT_INTEL_82440MX_IDE,
266 0,
267 "Intel 82440MX IDE controller",
268 piix_chip_map
269 },
270 { PCI_PRODUCT_INTEL_82801AA_IDE,
271 0,
272 "Intel 82801AA IDE Controller (ICH)",
273 piix_chip_map,
274 },
275 { PCI_PRODUCT_INTEL_82801AB_IDE,
276 0,
277 "Intel 82801AB IDE Controller (ICH0)",
278 piix_chip_map,
279 },
280 { PCI_PRODUCT_INTEL_82801BA_IDE,
281 0,
282 "Intel 82801BA IDE Controller (ICH2)",
283 piix_chip_map,
284 },
285 { PCI_PRODUCT_INTEL_82801BAM_IDE,
286 0,
287 "Intel 82801BAM IDE Controller (ICH2)",
288 piix_chip_map,
289 },
290 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
291 0,
292 "Intel 82801CA IDE Controller",
293 piix_chip_map,
294 },
295 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
296 0,
297 "Intel 82801CA IDE Controller",
298 piix_chip_map,
299 },
300 { PCI_PRODUCT_INTEL_82801DB_IDE,
301 0,
302 "Intel 82801DB IDE Controller (ICH4)",
303 piix_chip_map,
304 },
305 { 0,
306 0,
307 NULL,
308 NULL
309 }
310 };
311
312 const struct pciide_product_desc pciide_amd_products[] = {
313 { PCI_PRODUCT_AMD_PBC756_IDE,
314 0,
315 "Advanced Micro Devices AMD756 IDE Controller",
316 amd7x6_chip_map
317 },
318 { PCI_PRODUCT_AMD_PBC766_IDE,
319 0,
320 "Advanced Micro Devices AMD766 IDE Controller",
321 amd7x6_chip_map
322 },
323 { PCI_PRODUCT_AMD_PBC768_IDE,
324 0,
325 "Advanced Micro Devices AMD768 IDE Controller",
326 amd7x6_chip_map
327 },
328 { 0,
329 0,
330 NULL,
331 NULL
332 }
333 };
334
335 const struct pciide_product_desc pciide_cmd_products[] = {
336 { PCI_PRODUCT_CMDTECH_640,
337 0,
338 "CMD Technology PCI0640",
339 cmd_chip_map
340 },
341 { PCI_PRODUCT_CMDTECH_643,
342 0,
343 "CMD Technology PCI0643",
344 cmd0643_9_chip_map,
345 },
346 { PCI_PRODUCT_CMDTECH_646,
347 0,
348 "CMD Technology PCI0646",
349 cmd0643_9_chip_map,
350 },
351 { PCI_PRODUCT_CMDTECH_648,
352 IDE_PCI_CLASS_OVERRIDE,
353 "CMD Technology PCI0648",
354 cmd0643_9_chip_map,
355 },
356 { PCI_PRODUCT_CMDTECH_649,
357 IDE_PCI_CLASS_OVERRIDE,
358 "CMD Technology PCI0649",
359 cmd0643_9_chip_map,
360 },
361 { 0,
362 0,
363 NULL,
364 NULL
365 }
366 };
367
368 const struct pciide_product_desc pciide_via_products[] = {
369 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
370 0,
371 NULL,
372 apollo_chip_map,
373 },
374 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
375 0,
376 NULL,
377 apollo_chip_map,
378 },
379 { 0,
380 0,
381 NULL,
382 NULL
383 }
384 };
385
386 const struct pciide_product_desc pciide_cypress_products[] = {
387 { PCI_PRODUCT_CONTAQ_82C693,
388 IDE_16BIT_IOSPACE,
389 "Cypress 82C693 IDE Controller",
390 cy693_chip_map,
391 },
392 { 0,
393 0,
394 NULL,
395 NULL
396 }
397 };
398
399 const struct pciide_product_desc pciide_sis_products[] = {
400 { PCI_PRODUCT_SIS_5597_IDE,
401 0,
402 "Silicon Integrated System 5597/5598 IDE controller",
403 sis_chip_map,
404 },
405 { 0,
406 0,
407 NULL,
408 NULL
409 }
410 };
411
412 const struct pciide_product_desc pciide_acer_products[] = {
413 { PCI_PRODUCT_ALI_M5229,
414 0,
415 "Acer Labs M5229 UDMA IDE Controller",
416 acer_chip_map,
417 },
418 { 0,
419 0,
420 NULL,
421 NULL
422 }
423 };
424
425 const struct pciide_product_desc pciide_promise_products[] = {
426 { PCI_PRODUCT_PROMISE_ULTRA33,
427 IDE_PCI_CLASS_OVERRIDE,
428 "Promise Ultra33/ATA Bus Master IDE Accelerator",
429 pdc202xx_chip_map,
430 },
431 { PCI_PRODUCT_PROMISE_ULTRA66,
432 IDE_PCI_CLASS_OVERRIDE,
433 "Promise Ultra66/ATA Bus Master IDE Accelerator",
434 pdc202xx_chip_map,
435 },
436 { PCI_PRODUCT_PROMISE_ULTRA100,
437 IDE_PCI_CLASS_OVERRIDE,
438 "Promise Ultra100/ATA Bus Master IDE Accelerator",
439 pdc202xx_chip_map,
440 },
441 { PCI_PRODUCT_PROMISE_ULTRA100X,
442 IDE_PCI_CLASS_OVERRIDE,
443 "Promise Ultra100/ATA Bus Master IDE Accelerator",
444 pdc202xx_chip_map,
445 },
446 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
447 IDE_PCI_CLASS_OVERRIDE,
448 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
449 pdc202xx_chip_map,
450 },
451 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
452 IDE_PCI_CLASS_OVERRIDE,
453 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
454 pdc202xx_chip_map,
455 },
456 { PCI_PRODUCT_PROMISE_ULTRA133,
457 IDE_PCI_CLASS_OVERRIDE,
458 "Promise Ultra133/ATA Bus Master IDE Accelerator",
459 pdc202xx_chip_map,
460 },
461 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
462 IDE_PCI_CLASS_OVERRIDE,
463 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
464 pdc202xx_chip_map,
465 },
466 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
467 IDE_PCI_CLASS_OVERRIDE,
468 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
469 pdc202xx_chip_map,
470 },
471 { 0,
472 0,
473 NULL,
474 NULL
475 }
476 };
477
478 const struct pciide_product_desc pciide_opti_products[] = {
479 { PCI_PRODUCT_OPTI_82C621,
480 0,
481 "OPTi 82c621 PCI IDE controller",
482 opti_chip_map,
483 },
484 { PCI_PRODUCT_OPTI_82C568,
485 0,
486 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
487 opti_chip_map,
488 },
489 { PCI_PRODUCT_OPTI_82D568,
490 0,
491 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
492 opti_chip_map,
493 },
494 { 0,
495 0,
496 NULL,
497 NULL
498 }
499 };
500
501 const struct pciide_product_desc pciide_triones_products[] = {
502 { PCI_PRODUCT_TRIONES_HPT366,
503 IDE_PCI_CLASS_OVERRIDE,
504 NULL,
505 hpt_chip_map,
506 },
507 { PCI_PRODUCT_TRIONES_HPT372,
508 IDE_PCI_CLASS_OVERRIDE,
509 NULL,
510 hpt_chip_map
511 },
512 { PCI_PRODUCT_TRIONES_HPT374,
513 IDE_PCI_CLASS_OVERRIDE,
514 NULL,
515 hpt_chip_map
516 },
517 { 0,
518 0,
519 NULL,
520 NULL
521 }
522 };
523
524 const struct pciide_product_desc pciide_acard_products[] = {
525 { PCI_PRODUCT_ACARD_ATP850U,
526 IDE_PCI_CLASS_OVERRIDE,
527 "Acard ATP850U Ultra33 IDE Controller",
528 acard_chip_map,
529 },
530 { PCI_PRODUCT_ACARD_ATP860,
531 IDE_PCI_CLASS_OVERRIDE,
532 "Acard ATP860 Ultra66 IDE Controller",
533 acard_chip_map,
534 },
535 { PCI_PRODUCT_ACARD_ATP860A,
536 IDE_PCI_CLASS_OVERRIDE,
537 "Acard ATP860-A Ultra66 IDE Controller",
538 acard_chip_map,
539 },
540 { 0,
541 0,
542 NULL,
543 NULL
544 }
545 };
546
547 const struct pciide_product_desc pciide_serverworks_products[] = {
548 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
549 0,
550 "ServerWorks OSB4 IDE Controller",
551 serverworks_chip_map,
552 },
553 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
554 0,
555 "ServerWorks CSB5 IDE Controller",
556 serverworks_chip_map,
557 },
558 { 0,
559 0,
560 NULL,
561 }
562 };
563
564 const struct pciide_product_desc pciide_symphony_products[] = {
565 { PCI_PRODUCT_SYMPHONY_82C105,
566 0,
567 "Symphony Labs 82C105 IDE controller",
568 sl82c105_chip_map,
569 },
570 { 0,
571 0,
572 NULL,
573 }
574 };
575
576 const struct pciide_product_desc pciide_winbond_products[] = {
577 { PCI_PRODUCT_WINBOND_W83C553F_1,
578 0,
579 "Winbond W83C553F IDE controller",
580 sl82c105_chip_map,
581 },
582 { 0,
583 0,
584 NULL,
585 }
586 };
587
588 struct pciide_vendor_desc {
589 u_int32_t ide_vendor;
590 const struct pciide_product_desc *ide_products;
591 };
592
593 const struct pciide_vendor_desc pciide_vendors[] = {
594 { PCI_VENDOR_INTEL, pciide_intel_products },
595 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
596 { PCI_VENDOR_VIATECH, pciide_via_products },
597 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
598 { PCI_VENDOR_SIS, pciide_sis_products },
599 { PCI_VENDOR_ALI, pciide_acer_products },
600 { PCI_VENDOR_PROMISE, pciide_promise_products },
601 { PCI_VENDOR_AMD, pciide_amd_products },
602 { PCI_VENDOR_OPTI, pciide_opti_products },
603 { PCI_VENDOR_TRIONES, pciide_triones_products },
604 { PCI_VENDOR_ACARD, pciide_acard_products },
605 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
606 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
607 { PCI_VENDOR_WINBOND, pciide_winbond_products },
608 { 0, NULL }
609 };
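/*
 * The tables above are scanned by pciide_lookup_product() below; the
 * vendor list is terminated by a NULL ide_products pointer and each
 * product list by a NULL chip_map pointer.
 */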
610
611 /* options passed via the 'flags' config keyword */
612 #define PCIIDE_OPTIONS_DMA 0x01
613 #define PCIIDE_OPTIONS_NODMA 0x02
614
615 int pciide_match __P((struct device *, struct cfdata *, void *));
616 void pciide_attach __P((struct device *, struct device *, void *));
617
618 struct cfattach pciide_ca = {
619 sizeof(struct pciide_softc), pciide_match, pciide_attach
620 };
621 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
622 int pciide_mapregs_compat __P(( struct pci_attach_args *,
623 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
624 int pciide_mapregs_native __P((struct pci_attach_args *,
625 struct pciide_channel *, bus_size_t *, bus_size_t *,
626 int (*pci_intr) __P((void *))));
627 void pciide_mapreg_dma __P((struct pciide_softc *,
628 struct pci_attach_args *));
629 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
630 void pciide_mapchan __P((struct pci_attach_args *,
631 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
632 int (*pci_intr) __P((void *))));
633 int pciide_chan_candisable __P((struct pciide_channel *));
634 void pciide_map_compat_intr __P(( struct pci_attach_args *,
635 struct pciide_channel *, int, int));
636 int pciide_compat_intr __P((void *));
637 int pciide_pci_intr __P((void *));
638 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
639
640 const struct pciide_product_desc *
641 pciide_lookup_product(id)
642 u_int32_t id;
643 {
644 const struct pciide_product_desc *pp;
645 const struct pciide_vendor_desc *vp;
646
647 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
648 if (PCI_VENDOR(id) == vp->ide_vendor)
649 break;
650
651 if ((pp = vp->ide_products) == NULL)
652 return NULL;
653
654 for (; pp->chip_map != NULL; pp++)
655 if (PCI_PRODUCT(id) == pp->ide_product)
656 break;
657
658 if (pp->chip_map == NULL)
659 return NULL;
660 return pp;
661 }
662
663 int
664 pciide_match(parent, match, aux)
665 struct device *parent;
666 struct cfdata *match;
667 void *aux;
668 {
669 struct pci_attach_args *pa = aux;
670 const struct pciide_product_desc *pp;
671
672 /*
673 * Check the ID register to see that it's a PCI IDE controller.
674 * If it is, we assume that we can deal with it; it _should_
675 * work in a standardized way...
676 */
677 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
678 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
679 return (1);
680 }
681
682 /*
683 	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
684 	 * controllers. Let's see if we can deal with them anyway.
685 */
686 pp = pciide_lookup_product(pa->pa_id);
687 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
688 return (1);
689 }
690
691 return (0);
692 }
693
694 void
695 pciide_attach(parent, self, aux)
696 struct device *parent, *self;
697 void *aux;
698 {
699 struct pci_attach_args *pa = aux;
700 pci_chipset_tag_t pc = pa->pa_pc;
701 pcitag_t tag = pa->pa_tag;
702 struct pciide_softc *sc = (struct pciide_softc *)self;
703 pcireg_t csr;
704 char devinfo[256];
705 const char *displaydev;
706
707 sc->sc_pp = pciide_lookup_product(pa->pa_id);
708 if (sc->sc_pp == NULL) {
709 sc->sc_pp = &default_product_desc;
710 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
711 displaydev = devinfo;
712 } else
713 displaydev = sc->sc_pp->ide_name;
714
715 /* if displaydev == NULL, printf is done in chip-specific map */
716 if (displaydev)
717 printf(": %s (rev. 0x%02x)\n", displaydev,
718 PCI_REVISION(pa->pa_class));
719
720 sc->sc_pc = pa->pa_pc;
721 sc->sc_tag = pa->pa_tag;
722 #ifdef WDCDEBUG
723 if (wdcdebug_pciide_mask & DEBUG_PROBE)
724 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
725 #endif
726 sc->sc_pp->chip_map(sc, pa);
727
728 if (sc->sc_dma_ok) {
729 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
730 csr |= PCI_COMMAND_MASTER_ENABLE;
731 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
732 }
733 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
734 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
735 }
736
737 /* tell whether the chip is enabled or not */
738 int
739 pciide_chipen(sc, pa)
740 struct pciide_softc *sc;
741 struct pci_attach_args *pa;
742 {
743 pcireg_t csr;
744 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
745 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
746 PCI_COMMAND_STATUS_REG);
747 printf("%s: device disabled (at %s)\n",
748 sc->sc_wdcdev.sc_dev.dv_xname,
749 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
750 "device" : "bridge");
751 return 0;
752 }
753 return 1;
754 }
755
756 int
757 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
758 struct pci_attach_args *pa;
759 struct pciide_channel *cp;
760 int compatchan;
761 bus_size_t *cmdsizep, *ctlsizep;
762 {
763 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
764 struct channel_softc *wdc_cp = &cp->wdc_channel;
765
766 cp->compat = 1;
767 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
768 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
769
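	/*
	 * In compatibility mode the channels decode the legacy ISA addresses
	 * (0x1f0/0x3f6 for the primary, 0x170/0x376 for the secondary
	 * channel), which the PCIIDE_COMPAT_*_BASE() macros are expected
	 * to return.
	 */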
770 wdc_cp->cmd_iot = pa->pa_iot;
771 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
772 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
773 printf("%s: couldn't map %s channel cmd regs\n",
774 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
775 return (0);
776 }
777
778 wdc_cp->ctl_iot = pa->pa_iot;
779 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
780 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
781 printf("%s: couldn't map %s channel ctl regs\n",
782 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
783 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
784 PCIIDE_COMPAT_CMD_SIZE);
785 return (0);
786 }
787
788 return (1);
789 }
790
791 int
792 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
793 struct pci_attach_args * pa;
794 struct pciide_channel *cp;
795 bus_size_t *cmdsizep, *ctlsizep;
796 int (*pci_intr) __P((void *));
797 {
798 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
799 struct channel_softc *wdc_cp = &cp->wdc_channel;
800 const char *intrstr;
801 pci_intr_handle_t intrhandle;
802
803 cp->compat = 0;
804
805 if (sc->sc_pci_ih == NULL) {
806 if (pci_intr_map(pa, &intrhandle) != 0) {
807 printf("%s: couldn't map native-PCI interrupt\n",
808 sc->sc_wdcdev.sc_dev.dv_xname);
809 return 0;
810 }
811 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
812 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
813 intrhandle, IPL_BIO, pci_intr, sc);
814 if (sc->sc_pci_ih != NULL) {
815 printf("%s: using %s for native-PCI interrupt\n",
816 sc->sc_wdcdev.sc_dev.dv_xname,
817 intrstr ? intrstr : "unknown interrupt");
818 } else {
819 printf("%s: couldn't establish native-PCI interrupt",
820 sc->sc_wdcdev.sc_dev.dv_xname);
821 if (intrstr != NULL)
822 printf(" at %s", intrstr);
823 printf("\n");
824 return 0;
825 }
826 }
827 cp->ih = sc->sc_pci_ih;
828 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
829 PCI_MAPREG_TYPE_IO, 0,
830 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
831 printf("%s: couldn't map %s channel cmd regs\n",
832 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
833 return 0;
834 }
835
836 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
837 PCI_MAPREG_TYPE_IO, 0,
838 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
839 printf("%s: couldn't map %s channel ctl regs\n",
840 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
841 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
842 return 0;
843 }
844 /*
845 	 * In native mode, 4 bytes of I/O space are mapped for the control
846 	 * register; the control register itself is at offset 2. Pass the generic
847 	 * code a handle for only one byte at the right offset.
848 */
849 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
850 &wdc_cp->ctl_ioh) != 0) {
851 printf("%s: unable to subregion %s channel ctl regs\n",
852 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
853 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
854 		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
855 return 0;
856 }
857 return (1);
858 }
859
860 void
861 pciide_mapreg_dma(sc, pa)
862 struct pciide_softc *sc;
863 struct pci_attach_args *pa;
864 {
865 pcireg_t maptype;
866 bus_addr_t addr;
867
868 /*
869 * Map DMA registers
870 *
871 * Note that sc_dma_ok is the right variable to test to see if
872 * DMA can be done. If the interface doesn't support DMA,
873 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
874 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
875 * non-zero if the interface supports DMA and the registers
876 * could be mapped.
877 *
878 * XXX Note that despite the fact that the Bus Master IDE specs
879 * XXX say that "The bus master IDE function uses 16 bytes of IO
880 * XXX space," some controllers (at least the United
881 * XXX Microelectronics UM8886BF) place it in memory space.
882 */
883 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
884 PCIIDE_REG_BUS_MASTER_DMA);
885
886 switch (maptype) {
887 case PCI_MAPREG_TYPE_IO:
888 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
889 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
890 &addr, NULL, NULL) == 0);
891 if (sc->sc_dma_ok == 0) {
892 printf(", but unused (couldn't query registers)");
893 break;
894 }
895 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
896 && addr >= 0x10000) {
897 sc->sc_dma_ok = 0;
898 printf(", but unused (registers at unsafe address "
899 "%#lx)", (unsigned long)addr);
900 break;
901 }
902 /* FALLTHROUGH */
903
904 case PCI_MAPREG_MEM_TYPE_32BIT:
905 sc->sc_dma_ok = (pci_mapreg_map(pa,
906 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
907 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
908 sc->sc_dmat = pa->pa_dmat;
909 if (sc->sc_dma_ok == 0) {
910 printf(", but unused (couldn't map registers)");
911 } else {
912 sc->sc_wdcdev.dma_arg = sc;
913 sc->sc_wdcdev.dma_init = pciide_dma_init;
914 sc->sc_wdcdev.dma_start = pciide_dma_start;
915 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
916 }
917
918 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
919 PCIIDE_OPTIONS_NODMA) {
920 printf(", but unused (forced off by config file)");
921 sc->sc_dma_ok = 0;
922 }
923 break;
924
925 default:
926 sc->sc_dma_ok = 0;
927 printf(", but unsupported register maptype (0x%x)", maptype);
928 }
929 }
930
931 int
932 pciide_compat_intr(arg)
933 void *arg;
934 {
935 struct pciide_channel *cp = arg;
936
937 #ifdef DIAGNOSTIC
938 /* should only be called for a compat channel */
939 if (cp->compat == 0)
940 panic("pciide compat intr called for non-compat chan %p\n", cp);
941 #endif
942 return (wdcintr(&cp->wdc_channel));
943 }
944
945 int
946 pciide_pci_intr(arg)
947 void *arg;
948 {
949 struct pciide_softc *sc = arg;
950 struct pciide_channel *cp;
951 struct channel_softc *wdc_cp;
952 int i, rv, crv;
953
954 rv = 0;
955 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
956 cp = &sc->pciide_channels[i];
957 wdc_cp = &cp->wdc_channel;
958
959 		/* If a compat channel, skip. */
960 if (cp->compat)
961 continue;
962 		/* if this channel is not waiting for an intr, skip */
963 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
964 continue;
965
966 crv = wdcintr(wdc_cp);
967 if (crv == 0)
968 ; /* leave rv alone */
969 else if (crv == 1)
970 rv = 1; /* claim the intr */
971 else if (rv == 0) /* crv should be -1 in this case */
972 rv = crv; /* if we've done no better, take it */
973 }
974 return (rv);
975 }
976
977 void
978 pciide_channel_dma_setup(cp)
979 struct pciide_channel *cp;
980 {
981 int drive;
982 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
983 struct ata_drive_datas *drvp;
984
985 for (drive = 0; drive < 2; drive++) {
986 drvp = &cp->wdc_channel.ch_drive[drive];
987 /* If no drive, skip */
988 if ((drvp->drive_flags & DRIVE) == 0)
989 continue;
990 /* setup DMA if needed */
991 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
992 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
993 sc->sc_dma_ok == 0) {
994 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
995 continue;
996 }
997 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
998 != 0) {
999 /* Abort DMA setup */
1000 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1001 continue;
1002 }
1003 }
1004 }
1005
1006 int
1007 pciide_dma_table_setup(sc, channel, drive)
1008 struct pciide_softc *sc;
1009 int channel, drive;
1010 {
1011 bus_dma_segment_t seg;
1012 int error, rseg;
1013 const bus_size_t dma_table_size =
1014 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1015 struct pciide_dma_maps *dma_maps =
1016 &sc->pciide_channels[channel].dma_maps[drive];
1017
1018 /* If table was already allocated, just return */
1019 if (dma_maps->dma_table)
1020 return 0;
1021
1022 /* Allocate memory for the DMA tables and map it */
1023 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1024 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1025 BUS_DMA_NOWAIT)) != 0) {
1026 printf("%s:%d: unable to allocate table DMA for "
1027 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1028 channel, drive, error);
1029 return error;
1030 }
1031 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1032 dma_table_size,
1033 (caddr_t *)&dma_maps->dma_table,
1034 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1035 		printf("%s:%d: unable to map table DMA for "
1036 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1037 channel, drive, error);
1038 return error;
1039 }
1040 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1041 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1042 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1043
1044 /* Create and load table DMA map for this disk */
1045 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1046 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1047 &dma_maps->dmamap_table)) != 0) {
1048 printf("%s:%d: unable to create table DMA map for "
1049 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1050 channel, drive, error);
1051 return error;
1052 }
1053 if ((error = bus_dmamap_load(sc->sc_dmat,
1054 dma_maps->dmamap_table,
1055 dma_maps->dma_table,
1056 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1057 printf("%s:%d: unable to load table DMA map for "
1058 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1059 channel, drive, error);
1060 return error;
1061 }
1062 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1063 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1064 DEBUG_PROBE);
1065 	/* Create an xfer DMA map for this drive */
1066 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1067 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1068 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1069 &dma_maps->dmamap_xfer)) != 0) {
1070 printf("%s:%d: unable to create xfer DMA map for "
1071 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1072 channel, drive, error);
1073 return error;
1074 }
1075 return 0;
1076 }
1077
1078 int
1079 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1080 void *v;
1081 int channel, drive;
1082 void *databuf;
1083 size_t datalen;
1084 int flags;
1085 {
1086 struct pciide_softc *sc = v;
1087 int error, seg;
1088 struct pciide_dma_maps *dma_maps =
1089 &sc->pciide_channels[channel].dma_maps[drive];
1090
1091 error = bus_dmamap_load(sc->sc_dmat,
1092 dma_maps->dmamap_xfer,
1093 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1094 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1095 if (error) {
1096 		printf("%s:%d: unable to load xfer DMA map for "
1097 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1098 channel, drive, error);
1099 return error;
1100 }
1101
1102 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1103 dma_maps->dmamap_xfer->dm_mapsize,
1104 (flags & WDC_DMA_READ) ?
1105 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1106
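	/*
	 * Build the bus-master IDE physical region descriptor (PRD) table:
	 * one entry per DMA segment, each holding a 32-bit physical base
	 * address and a byte count; the end-of-table bit is set on the
	 * last entry below.
	 */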
1107 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1108 #ifdef DIAGNOSTIC
1109 /* A segment must not cross a 64k boundary */
1110 {
1111 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1112 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1113 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1114 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1115 printf("pciide_dma: segment %d physical addr 0x%lx"
1116 " len 0x%lx not properly aligned\n",
1117 seg, phys, len);
1118 panic("pciide_dma: buf align");
1119 }
1120 }
1121 #endif
1122 dma_maps->dma_table[seg].base_addr =
1123 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1124 dma_maps->dma_table[seg].byte_count =
1125 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1126 IDEDMA_BYTE_COUNT_MASK);
1127 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1128 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1129 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1130
1131 }
1132 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1133 htole32(IDEDMA_BYTE_COUNT_EOT);
1134
1135 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1136 dma_maps->dmamap_table->dm_mapsize,
1137 BUS_DMASYNC_PREWRITE);
1138
1139 	/* Maps are ready. Program the DMA engine registers. */
1140 #ifdef DIAGNOSTIC
1141 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1142 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1143 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1144 panic("pciide_dma_init: table align");
1145 }
1146 #endif
1147
1148 /* Clear status bits */
1149 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1150 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1151 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1152 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1153 /* Write table addr */
1154 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1155 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1156 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1157 /* set read/write */
1158 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1159 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1160 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1161 /* remember flags */
1162 dma_maps->dma_flags = flags;
1163 return 0;
1164 }
1165
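/*
 * pciide_dma_init(), pciide_dma_start() and pciide_dma_finish() are
 * hooked into sc_wdcdev in pciide_mapreg_dma() and are called by the
 * wdc layer around each bus-master transfer.
 */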
1166 void
1167 pciide_dma_start(v, channel, drive)
1168 void *v;
1169 int channel, drive;
1170 {
1171 struct pciide_softc *sc = v;
1172
1173 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1174 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1175 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1176 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1177 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1178 }
1179
1180 int
1181 pciide_dma_finish(v, channel, drive, force)
1182 void *v;
1183 int channel, drive;
1184 int force;
1185 {
1186 struct pciide_softc *sc = v;
1187 u_int8_t status;
1188 int error = 0;
1189 struct pciide_dma_maps *dma_maps =
1190 &sc->pciide_channels[channel].dma_maps[drive];
1191
1192 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1193 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1194 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1195 DEBUG_XFERS);
1196
1197 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1198 return WDC_DMAST_NOIRQ;
1199
1200 /* stop DMA channel */
1201 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1202 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1203 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1204 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1205
1206 /* Unload the map of the data buffer */
1207 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1208 dma_maps->dmamap_xfer->dm_mapsize,
1209 (dma_maps->dma_flags & WDC_DMA_READ) ?
1210 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1211 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1212
1213 if ((status & IDEDMA_CTL_ERR) != 0) {
1214 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1215 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1216 error |= WDC_DMAST_ERR;
1217 }
1218
1219 if ((status & IDEDMA_CTL_INTR) == 0) {
1220 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1221 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1222 drive, status);
1223 error |= WDC_DMAST_NOIRQ;
1224 }
1225
1226 if ((status & IDEDMA_CTL_ACT) != 0) {
1227 /* data underrun, may be a valid condition for ATAPI */
1228 error |= WDC_DMAST_UNDER;
1229 }
1230 return error;
1231 }
1232
1233 void
1234 pciide_irqack(chp)
1235 struct channel_softc *chp;
1236 {
1237 struct pciide_channel *cp = (struct pciide_channel*)chp;
1238 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1239
1240 /* clear status bits in IDE DMA registers */
1241 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1242 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1243 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1244 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1245 }
1246
1247 /* some common code used by several chip_map */
1248 int
1249 pciide_chansetup(sc, channel, interface)
1250 struct pciide_softc *sc;
1251 int channel;
1252 pcireg_t interface;
1253 {
1254 struct pciide_channel *cp = &sc->pciide_channels[channel];
1255 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1256 cp->name = PCIIDE_CHANNEL_NAME(channel);
1257 cp->wdc_channel.channel = channel;
1258 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1259 cp->wdc_channel.ch_queue =
1260 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1261 if (cp->wdc_channel.ch_queue == NULL) {
1262 printf("%s %s channel: "
1263 		    "can't allocate memory for command queue\n",
1264 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1265 return 0;
1266 }
1267 printf("%s: %s channel %s to %s mode\n",
1268 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1269 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1270 "configured" : "wired",
1271 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1272 "native-PCI" : "compatibility");
1273 return 1;
1274 }
1275
1276 /* some common code used by several chip channel_map */
1277 void
1278 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1279 struct pci_attach_args *pa;
1280 struct pciide_channel *cp;
1281 pcireg_t interface;
1282 bus_size_t *cmdsizep, *ctlsizep;
1283 int (*pci_intr) __P((void *));
1284 {
1285 struct channel_softc *wdc_cp = &cp->wdc_channel;
1286
1287 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1288 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1289 pci_intr);
1290 else
1291 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1292 wdc_cp->channel, cmdsizep, ctlsizep);
1293
1294 if (cp->hw_ok == 0)
1295 return;
1296 wdc_cp->data32iot = wdc_cp->cmd_iot;
1297 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1298 wdcattach(wdc_cp);
1299 }
1300
1301 /*
1302  * Generic code to determine whether a channel can be disabled. Returns 1
1303  * if the channel can be disabled, 0 if not.
1304 */
1305 int
1306 pciide_chan_candisable(cp)
1307 struct pciide_channel *cp;
1308 {
1309 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1310 struct channel_softc *wdc_cp = &cp->wdc_channel;
1311
1312 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1313 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1314 printf("%s: disabling %s channel (no drives)\n",
1315 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1316 cp->hw_ok = 0;
1317 return 1;
1318 }
1319 return 0;
1320 }
1321
1322 /*
1323 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1324 * Set hw_ok=0 on failure
1325 */
1326 void
1327 pciide_map_compat_intr(pa, cp, compatchan, interface)
1328 struct pci_attach_args *pa;
1329 struct pciide_channel *cp;
1330 int compatchan, interface;
1331 {
1332 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1333 struct channel_softc *wdc_cp = &cp->wdc_channel;
1334
1335 if (cp->hw_ok == 0)
1336 return;
1337 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1338 return;
1339
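	/*
	 * If there is no machine-dependent way to establish a compatibility
	 * interrupt, the channel cannot be used and hw_ok is cleared below.
	 */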
1340 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1341 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1342 pa, compatchan, pciide_compat_intr, cp);
1343 if (cp->ih == NULL) {
1344 #endif
1345 printf("%s: no compatibility interrupt for use by %s "
1346 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1347 cp->hw_ok = 0;
1348 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1349 }
1350 #endif
1351 }
1352
1353 void
1354 pciide_print_modes(cp)
1355 struct pciide_channel *cp;
1356 {
1357 wdc_print_modes(&cp->wdc_channel);
1358 }
1359
1360 void
1361 default_chip_map(sc, pa)
1362 struct pciide_softc *sc;
1363 struct pci_attach_args *pa;
1364 {
1365 struct pciide_channel *cp;
1366 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1367 pcireg_t csr;
1368 int channel, drive;
1369 struct ata_drive_datas *drvp;
1370 u_int8_t idedma_ctl;
1371 bus_size_t cmdsize, ctlsize;
1372 char *failreason;
1373
1374 if (pciide_chipen(sc, pa) == 0)
1375 return;
1376
1377 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1378 printf("%s: bus-master DMA support present",
1379 sc->sc_wdcdev.sc_dev.dv_xname);
1380 if (sc->sc_pp == &default_product_desc &&
1381 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1382 PCIIDE_OPTIONS_DMA) == 0) {
1383 printf(", but unused (no driver support)");
1384 sc->sc_dma_ok = 0;
1385 } else {
1386 pciide_mapreg_dma(sc, pa);
1387 if (sc->sc_dma_ok != 0)
1388 printf(", used without full driver "
1389 "support");
1390 }
1391 } else {
1392 printf("%s: hardware does not support DMA",
1393 sc->sc_wdcdev.sc_dev.dv_xname);
1394 sc->sc_dma_ok = 0;
1395 }
1396 printf("\n");
1397 if (sc->sc_dma_ok) {
1398 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1399 sc->sc_wdcdev.irqack = pciide_irqack;
1400 }
1401 sc->sc_wdcdev.PIO_cap = 0;
1402 sc->sc_wdcdev.DMA_cap = 0;
1403
1404 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1405 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1406 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1407
1408 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1409 cp = &sc->pciide_channels[channel];
1410 if (pciide_chansetup(sc, channel, interface) == 0)
1411 continue;
1412 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1413 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1414 &ctlsize, pciide_pci_intr);
1415 } else {
1416 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1417 channel, &cmdsize, &ctlsize);
1418 }
1419 if (cp->hw_ok == 0)
1420 continue;
1421 /*
1422 * Check to see if something appears to be there.
1423 */
1424 failreason = NULL;
1425 if (!wdcprobe(&cp->wdc_channel)) {
1426 failreason = "not responding; disabled or no drives?";
1427 goto next;
1428 }
1429 /*
1430 * Now, make sure it's actually attributable to this PCI IDE
1431 * channel by trying to access the channel again while the
1432 * PCI IDE controller's I/O space is disabled. (If the
1433 * channel no longer appears to be there, it belongs to
1434 * this controller.) YUCK!
1435 */
1436 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1437 PCI_COMMAND_STATUS_REG);
1438 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1439 csr & ~PCI_COMMAND_IO_ENABLE);
1440 if (wdcprobe(&cp->wdc_channel))
1441 failreason = "other hardware responding at addresses";
1442 pci_conf_write(sc->sc_pc, sc->sc_tag,
1443 PCI_COMMAND_STATUS_REG, csr);
1444 next:
1445 if (failreason) {
1446 printf("%s: %s channel ignored (%s)\n",
1447 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1448 failreason);
1449 cp->hw_ok = 0;
1450 bus_space_unmap(cp->wdc_channel.cmd_iot,
1451 cp->wdc_channel.cmd_ioh, cmdsize);
1452 if (interface & PCIIDE_INTERFACE_PCI(channel))
1453 bus_space_unmap(cp->wdc_channel.ctl_iot,
1454 cp->ctl_baseioh, ctlsize);
1455 else
1456 bus_space_unmap(cp->wdc_channel.ctl_iot,
1457 cp->wdc_channel.ctl_ioh, ctlsize);
1458 } else {
1459 pciide_map_compat_intr(pa, cp, channel, interface);
1460 }
1461 if (cp->hw_ok) {
1462 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1463 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1464 wdcattach(&cp->wdc_channel);
1465 }
1466 }
1467
1468 if (sc->sc_dma_ok == 0)
1469 return;
1470
1471 /* Allocate DMA maps */
1472 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1473 idedma_ctl = 0;
1474 cp = &sc->pciide_channels[channel];
1475 for (drive = 0; drive < 2; drive++) {
1476 drvp = &cp->wdc_channel.ch_drive[drive];
1477 /* If no drive, skip */
1478 if ((drvp->drive_flags & DRIVE) == 0)
1479 continue;
1480 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1481 continue;
1482 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1483 /* Abort DMA setup */
1484 printf("%s:%d:%d: can't allocate DMA maps, "
1485 "using PIO transfers\n",
1486 sc->sc_wdcdev.sc_dev.dv_xname,
1487 channel, drive);
1488 				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
1489 }
1490 printf("%s:%d:%d: using DMA data transfers\n",
1491 sc->sc_wdcdev.sc_dev.dv_xname,
1492 channel, drive);
1493 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1494 }
1495 if (idedma_ctl != 0) {
1496 /* Add software bits in status register */
1497 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1498 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1499 idedma_ctl);
1500 }
1501 }
1502 }
1503
1504 void
1505 piix_chip_map(sc, pa)
1506 struct pciide_softc *sc;
1507 struct pci_attach_args *pa;
1508 {
1509 struct pciide_channel *cp;
1510 int channel;
1511 u_int32_t idetim;
1512 bus_size_t cmdsize, ctlsize;
1513
1514 if (pciide_chipen(sc, pa) == 0)
1515 return;
1516
1517 printf("%s: bus-master DMA support present",
1518 sc->sc_wdcdev.sc_dev.dv_xname);
1519 pciide_mapreg_dma(sc, pa);
1520 printf("\n");
1521 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1522 WDC_CAPABILITY_MODE;
1523 if (sc->sc_dma_ok) {
1524 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1525 sc->sc_wdcdev.irqack = pciide_irqack;
1526 switch(sc->sc_pp->ide_product) {
1527 case PCI_PRODUCT_INTEL_82371AB_IDE:
1528 case PCI_PRODUCT_INTEL_82440MX_IDE:
1529 case PCI_PRODUCT_INTEL_82801AA_IDE:
1530 case PCI_PRODUCT_INTEL_82801AB_IDE:
1531 case PCI_PRODUCT_INTEL_82801BA_IDE:
1532 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1533 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1534 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1535 case PCI_PRODUCT_INTEL_82801DB_IDE:
1536 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1537 }
1538 }
1539 sc->sc_wdcdev.PIO_cap = 4;
1540 sc->sc_wdcdev.DMA_cap = 2;
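	/*
	 * UDMA_cap is the highest Ultra-DMA mode supported:
	 * 2 = Ultra/33, 4 = Ultra/66, 5 = Ultra/100.
	 */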
1541 switch(sc->sc_pp->ide_product) {
1542 case PCI_PRODUCT_INTEL_82801AA_IDE:
1543 sc->sc_wdcdev.UDMA_cap = 4;
1544 break;
1545 case PCI_PRODUCT_INTEL_82801BA_IDE:
1546 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1547 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1548 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1549 case PCI_PRODUCT_INTEL_82801DB_IDE:
1550 sc->sc_wdcdev.UDMA_cap = 5;
1551 break;
1552 default:
1553 sc->sc_wdcdev.UDMA_cap = 2;
1554 }
1555 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1556 sc->sc_wdcdev.set_modes = piix_setup_channel;
1557 else
1558 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1559 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1560 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1561
1562 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1563 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1564 DEBUG_PROBE);
1565 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1566 WDCDEBUG_PRINT((", sidetim=0x%x",
1567 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1568 DEBUG_PROBE);
1569 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1570 			WDCDEBUG_PRINT((", udmareg 0x%x",
1571 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1572 DEBUG_PROBE);
1573 }
1574 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1575 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1576 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1577 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1578 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1579 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1580 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1581 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1582 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1583 DEBUG_PROBE);
1584 }
1585
1586 }
1587 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1588
1589 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1590 cp = &sc->pciide_channels[channel];
1591 /* PIIX is compat-only */
1592 if (pciide_chansetup(sc, channel, 0) == 0)
1593 continue;
1594 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1595 if ((PIIX_IDETIM_READ(idetim, channel) &
1596 PIIX_IDETIM_IDE) == 0) {
1597 printf("%s: %s channel ignored (disabled)\n",
1598 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1599 continue;
1600 }
1601 /* PIIX are compat-only pciide devices */
1602 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1603 if (cp->hw_ok == 0)
1604 continue;
1605 if (pciide_chan_candisable(cp)) {
1606 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1607 channel);
1608 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1609 idetim);
1610 }
1611 pciide_map_compat_intr(pa, cp, channel, 0);
1612 if (cp->hw_ok == 0)
1613 continue;
1614 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1615 }
1616
1617 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1618 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1619 DEBUG_PROBE);
1620 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1621 WDCDEBUG_PRINT((", sidetim=0x%x",
1622 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1623 DEBUG_PROBE);
1624 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1625 			WDCDEBUG_PRINT((", udmareg 0x%x",
1626 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1627 DEBUG_PROBE);
1628 }
1629 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1630 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1631 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1632 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1633 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1634 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1635 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1636 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1637 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1638 DEBUG_PROBE);
1639 }
1640 }
1641 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1642 }
1643
1644 void
1645 piix_setup_channel(chp)
1646 struct channel_softc *chp;
1647 {
1648 u_int8_t mode[2], drive;
1649 u_int32_t oidetim, idetim, idedma_ctl;
1650 struct pciide_channel *cp = (struct pciide_channel*)chp;
1651 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1652 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1653
1654 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1655 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1656 idedma_ctl = 0;
1657
1658 /* set up new idetim: Enable IDE registers decode */
1659 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1660 chp->channel);
1661
1662 /* setup DMA */
1663 pciide_channel_dma_setup(cp);
1664
1665 /*
1666 	 * Here we have to mess with the drives' modes: the PIIX can't have
1667 	 * different timings for master and slave drives.
1668 	 * We need to find the best combination.
1669 */
1670
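	/*
	 * The PIIX has a single set of ISP (IORDY sample point) / RTC
	 * (recovery time) timings per channel, shared by both drives; the
	 * piix_isp_* and piix_rtc_* tables are used below to check whether
	 * two modes can share them.
	 */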
1671 	/* If both drives support DMA, take the lower mode */
1672 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1673 (drvp[1].drive_flags & DRIVE_DMA)) {
1674 mode[0] = mode[1] =
1675 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1676 drvp[0].DMA_mode = mode[0];
1677 drvp[1].DMA_mode = mode[1];
1678 goto ok;
1679 }
1680 /*
1681 * If only one drive supports DMA, use its mode, and
1682 	 * put the other one in PIO mode 0 if its mode is not compatible
1683 */
1684 if (drvp[0].drive_flags & DRIVE_DMA) {
1685 mode[0] = drvp[0].DMA_mode;
1686 mode[1] = drvp[1].PIO_mode;
1687 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1688 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1689 mode[1] = drvp[1].PIO_mode = 0;
1690 goto ok;
1691 }
1692 if (drvp[1].drive_flags & DRIVE_DMA) {
1693 mode[1] = drvp[1].DMA_mode;
1694 mode[0] = drvp[0].PIO_mode;
1695 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1696 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1697 mode[0] = drvp[0].PIO_mode = 0;
1698 goto ok;
1699 }
1700 /*
1701 	 * If neither drive uses DMA, take the lower mode, unless
1702 	 * one of them is below PIO mode 2
1703 */
1704 if (drvp[0].PIO_mode < 2) {
1705 mode[0] = drvp[0].PIO_mode = 0;
1706 mode[1] = drvp[1].PIO_mode;
1707 } else if (drvp[1].PIO_mode < 2) {
1708 mode[1] = drvp[1].PIO_mode = 0;
1709 mode[0] = drvp[0].PIO_mode;
1710 } else {
1711 mode[0] = mode[1] =
1712 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1713 drvp[0].PIO_mode = mode[0];
1714 drvp[1].PIO_mode = mode[1];
1715 }
1716 ok: /* The modes are setup */
1717 for (drive = 0; drive < 2; drive++) {
1718 if (drvp[drive].drive_flags & DRIVE_DMA) {
1719 idetim |= piix_setup_idetim_timings(
1720 mode[drive], 1, chp->channel);
1721 goto end;
1722 }
1723 }
1724 	/* If we get here, none of the drives are using DMA */
1725 if (mode[0] >= 2)
1726 idetim |= piix_setup_idetim_timings(
1727 mode[0], 0, chp->channel);
1728 else
1729 idetim |= piix_setup_idetim_timings(
1730 mode[1], 0, chp->channel);
1731 end: /*
1732 * timing mode is now set up in the controller. Enable
1733 * it per-drive
1734 */
1735 for (drive = 0; drive < 2; drive++) {
1736 /* If no drive, skip */
1737 if ((drvp[drive].drive_flags & DRIVE) == 0)
1738 continue;
1739 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1740 if (drvp[drive].drive_flags & DRIVE_DMA)
1741 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1742 }
1743 if (idedma_ctl != 0) {
1744 /* Add software bits in status register */
1745 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1746 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1747 idedma_ctl);
1748 }
1749 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1750 pciide_print_modes(cp);
1751 }
1752
1753 void
1754 piix3_4_setup_channel(chp)
1755 struct channel_softc *chp;
1756 {
1757 struct ata_drive_datas *drvp;
1758 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1759 struct pciide_channel *cp = (struct pciide_channel*)chp;
1760 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1761 int drive;
1762 int channel = chp->channel;
1763
1764 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1765 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1766 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1767 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1768 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1769 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1770 PIIX_SIDETIM_RTC_MASK(channel));
1771
1772 idedma_ctl = 0;
1773 /* If channel disabled, no need to go further */
1774 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1775 return;
1776 /* set up new idetim: Enable IDE registers decode */
1777 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1778
1779 /* setup DMA if needed */
1780 pciide_channel_dma_setup(cp);
1781
1782 for (drive = 0; drive < 2; drive++) {
1783 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1784 PIIX_UDMATIM_SET(0x3, channel, drive));
1785 drvp = &chp->ch_drive[drive];
1786 /* If no drive, skip */
1787 if ((drvp->drive_flags & DRIVE) == 0)
1788 continue;
1789 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1790 (drvp->drive_flags & DRIVE_UDMA) == 0))
1791 goto pio;
1792
1793 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1794 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1795 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1796 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1797 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1798 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1799 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1800 ideconf |= PIIX_CONFIG_PINGPONG;
1801 }
1802 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1803 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1804 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1805 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1806 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1807 /* setup Ultra/100 */
1808 if (drvp->UDMA_mode > 2 &&
1809 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1810 drvp->UDMA_mode = 2;
1811 if (drvp->UDMA_mode > 4) {
1812 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1813 } else {
1814 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1815 if (drvp->UDMA_mode > 2) {
1816 ideconf |= PIIX_CONFIG_UDMA66(channel,
1817 drive);
1818 } else {
1819 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1820 drive);
1821 }
1822 }
1823 }
1824 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1825 /* setup Ultra/66 */
1826 if (drvp->UDMA_mode > 2 &&
1827 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1828 drvp->UDMA_mode = 2;
1829 if (drvp->UDMA_mode > 2)
1830 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1831 else
1832 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1833 }
1834 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1835 (drvp->drive_flags & DRIVE_UDMA)) {
1836 /* use Ultra/DMA */
1837 drvp->drive_flags &= ~DRIVE_DMA;
1838 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1839 udmareg |= PIIX_UDMATIM_SET(
1840 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1841 } else {
1842 /* use Multiword DMA */
1843 drvp->drive_flags &= ~DRIVE_UDMA;
1844 if (drive == 0) {
1845 idetim |= piix_setup_idetim_timings(
1846 drvp->DMA_mode, 1, channel);
1847 } else {
1848 sidetim |= piix_setup_sidetim_timings(
1849 drvp->DMA_mode, 1, channel);
1850 				idetim = PIIX_IDETIM_SET(idetim,
1851 PIIX_IDETIM_SITRE, channel);
1852 }
1853 }
1854 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1855
1856 pio: /* use PIO mode */
1857 idetim |= piix_setup_idetim_drvs(drvp);
1858 if (drive == 0) {
1859 idetim |= piix_setup_idetim_timings(
1860 drvp->PIO_mode, 0, channel);
1861 } else {
1862 sidetim |= piix_setup_sidetim_timings(
1863 drvp->PIO_mode, 0, channel);
1864 			idetim = PIIX_IDETIM_SET(idetim,
1865 PIIX_IDETIM_SITRE, channel);
1866 }
1867 }
1868 if (idedma_ctl != 0) {
1869 /* Add software bits in status register */
1870 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1871 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1872 idedma_ctl);
1873 }
1874 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1875 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1876 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1877 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1878 pciide_print_modes(cp);
1879 }
1880
1881
1882 /* setup ISP and RTC fields, based on mode */
1883 static u_int32_t
1884 piix_setup_idetim_timings(mode, dma, channel)
1885 u_int8_t mode;
1886 u_int8_t dma;
1887 u_int8_t channel;
1888 {
1889
1890 if (dma)
1891 return PIIX_IDETIM_SET(0,
1892 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1893 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1894 channel);
1895 else
1896 return PIIX_IDETIM_SET(0,
1897 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1898 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1899 channel);
1900 }
1901
1902 /* setup the DTE, PPE, IE and TIME fields based on the PIO mode */
1903 static u_int32_t
1904 piix_setup_idetim_drvs(drvp)
1905 struct ata_drive_datas *drvp;
1906 {
1907 u_int32_t ret = 0;
1908 struct channel_softc *chp = drvp->chnl_softc;
1909 u_int8_t channel = chp->channel;
1910 u_int8_t drive = drvp->drive;
1911
1912 /*
1913 	 * If the drive is using UDMA, timing setups are independent,
1914 	 * so just check DMA and PIO here.
1915 */
1916 if (drvp->drive_flags & DRIVE_DMA) {
1917 /* if mode = DMA mode 0, use compatible timings */
1918 if ((drvp->drive_flags & DRIVE_DMA) &&
1919 drvp->DMA_mode == 0) {
1920 drvp->PIO_mode = 0;
1921 return ret;
1922 }
1923 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1924 /*
1925 		 * If PIO and DMA timings are the same, use fast timings for
1926 		 * PIO too; otherwise fall back to compatible timings.
1927 */
1928 if ((piix_isp_pio[drvp->PIO_mode] !=
1929 piix_isp_dma[drvp->DMA_mode]) ||
1930 (piix_rtc_pio[drvp->PIO_mode] !=
1931 piix_rtc_dma[drvp->DMA_mode]))
1932 drvp->PIO_mode = 0;
1933 /* if PIO mode <= 2, use compat timings for PIO */
1934 if (drvp->PIO_mode <= 2) {
1935 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1936 channel);
1937 return ret;
1938 }
1939 }
1940
1941 /*
1942 	 * Now set up PIO modes. If mode < 2, use compat timings.
1943 * Else enable fast timings. Enable IORDY and prefetch/post
1944 * if PIO mode >= 3.
1945 */
1946
1947 if (drvp->PIO_mode < 2)
1948 return ret;
1949
1950 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1951 if (drvp->PIO_mode >= 3) {
1952 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1953 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1954 }
1955 return ret;
1956 }
1957
1958 /* setup values in SIDETIM registers, based on mode */
1959 static u_int32_t
1960 piix_setup_sidetim_timings(mode, dma, channel)
1961 u_int8_t mode;
1962 u_int8_t dma;
1963 u_int8_t channel;
1964 {
1965 if (dma)
1966 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1967 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1968 else
1969 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1970 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1971 }
1972
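/*
 * Attach-time setup for AMD 756/766/768 IDE controllers: map the
 * bus-master DMA registers, advertise PIO 4 / MW DMA 2 and UDMA 4 or 5
 * depending on the product, then map and program each channel that the
 * AMD7X6_CHANSTATUS_EN register reports as enabled.
 */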
1973 void
1974 amd7x6_chip_map(sc, pa)
1975 struct pciide_softc *sc;
1976 struct pci_attach_args *pa;
1977 {
1978 struct pciide_channel *cp;
1979 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1980 int channel;
1981 pcireg_t chanenable;
1982 bus_size_t cmdsize, ctlsize;
1983
1984 if (pciide_chipen(sc, pa) == 0)
1985 return;
1986 printf("%s: bus-master DMA support present",
1987 sc->sc_wdcdev.sc_dev.dv_xname);
1988 pciide_mapreg_dma(sc, pa);
1989 printf("\n");
1990 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1991 WDC_CAPABILITY_MODE;
1992 if (sc->sc_dma_ok) {
1993 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1994 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1995 sc->sc_wdcdev.irqack = pciide_irqack;
1996 }
1997 sc->sc_wdcdev.PIO_cap = 4;
1998 sc->sc_wdcdev.DMA_cap = 2;
1999
2000 switch (sc->sc_pp->ide_product) {
2001 case PCI_PRODUCT_AMD_PBC766_IDE:
2002 case PCI_PRODUCT_AMD_PBC768_IDE:
2003 sc->sc_wdcdev.UDMA_cap = 5;
2004 break;
2005 default:
2006 sc->sc_wdcdev.UDMA_cap = 4;
2007 }
2008 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2009 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2010 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2011 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
2012
2013 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2014 DEBUG_PROBE);
2015 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2016 cp = &sc->pciide_channels[channel];
2017 if (pciide_chansetup(sc, channel, interface) == 0)
2018 continue;
2019
2020 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2021 printf("%s: %s channel ignored (disabled)\n",
2022 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2023 continue;
2024 }
2025 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2026 pciide_pci_intr);
2027
2028 if (pciide_chan_candisable(cp))
2029 chanenable &= ~AMD7X6_CHAN_EN(channel);
2030 pciide_map_compat_intr(pa, cp, channel, interface);
2031 if (cp->hw_ok == 0)
2032 continue;
2033
2034 amd7x6_setup_channel(&cp->wdc_channel);
2035 }
2036 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2037 chanenable);
2038 return;
2039 }
2040
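/*
 * Per-channel timing setup for the AMD 7x6: clear this channel's
 * fields in the DATATIM and UDMA registers, select UDMA or multiword
 * DMA per drive (with a workaround that disables MW DMA on early 756
 * revisions), then program the PIO pulse/recovery timings.
 */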
2041 void
2042 amd7x6_setup_channel(chp)
2043 struct channel_softc *chp;
2044 {
2045 u_int32_t udmatim_reg, datatim_reg;
2046 u_int8_t idedma_ctl;
2047 int mode, drive;
2048 struct ata_drive_datas *drvp;
2049 struct pciide_channel *cp = (struct pciide_channel*)chp;
2050 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2051 #ifndef PCIIDE_AMD756_ENABLEDMA
2052 int rev = PCI_REVISION(
2053 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2054 #endif
2055
2056 idedma_ctl = 0;
2057 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2058 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2059 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2060 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2061
2062 /* setup DMA if needed */
2063 pciide_channel_dma_setup(cp);
2064
2065 for (drive = 0; drive < 2; drive++) {
2066 drvp = &chp->ch_drive[drive];
2067 /* If no drive, skip */
2068 if ((drvp->drive_flags & DRIVE) == 0)
2069 continue;
2070 /* add timing values, setup DMA if needed */
2071 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2072 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2073 mode = drvp->PIO_mode;
2074 goto pio;
2075 }
2076 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2077 (drvp->drive_flags & DRIVE_UDMA)) {
2078 /* use Ultra/DMA */
2079 drvp->drive_flags &= ~DRIVE_DMA;
2080 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2081 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2082 AMD7X6_UDMA_TIME(chp->channel, drive,
2083 amd7x6_udma_tim[drvp->UDMA_mode]);
2084 /* can use PIO timings, MW DMA unused */
2085 mode = drvp->PIO_mode;
2086 } else {
2087 /* use Multiword DMA, but only if revision is OK */
2088 drvp->drive_flags &= ~DRIVE_UDMA;
2089 #ifndef PCIIDE_AMD756_ENABLEDMA
2090 /*
2091 			 * The DMA bug doesn't seem to affect all drives, so the
2092 			 * workaround can be disabled with PCIIDE_AMD756_ENABLEDMA.
2093 			 * The bug causes a hard hang if triggered.
2095 */
2096 if (sc->sc_pp->ide_product ==
2097 PCI_PRODUCT_AMD_PBC756_IDE &&
2098 AMD756_CHIPREV_DISABLEDMA(rev)) {
2099 printf("%s:%d:%d: multi-word DMA disabled due "
2100 "to chip revision\n",
2101 sc->sc_wdcdev.sc_dev.dv_xname,
2102 chp->channel, drive);
2103 mode = drvp->PIO_mode;
2104 drvp->drive_flags &= ~DRIVE_DMA;
2105 goto pio;
2106 }
2107 #endif
2108 /* mode = min(pio, dma+2) */
2109 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2110 mode = drvp->PIO_mode;
2111 else
2112 mode = drvp->DMA_mode + 2;
2113 }
2114 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2115
2116 pio: /* setup PIO mode */
2117 if (mode <= 2) {
2118 drvp->DMA_mode = 0;
2119 drvp->PIO_mode = 0;
2120 mode = 0;
2121 } else {
2122 drvp->PIO_mode = mode;
2123 drvp->DMA_mode = mode - 2;
2124 }
2125 datatim_reg |=
2126 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2127 amd7x6_pio_set[mode]) |
2128 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2129 amd7x6_pio_rec[mode]);
2130 }
2131 if (idedma_ctl != 0) {
2132 /* Add software bits in status register */
2133 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2134 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2135 idedma_ctl);
2136 }
2137 pciide_print_modes(cp);
2138 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2139 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2140 }
2141
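/*
 * Attach-time setup for the VIA Apollo family: identify the companion
 * ISA bridge (function 0 of the same device) to determine the
 * supported UDMA level, map the bus-master DMA registers and configure
 * each enabled channel.
 */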
2142 void
2143 apollo_chip_map(sc, pa)
2144 struct pciide_softc *sc;
2145 struct pci_attach_args *pa;
2146 {
2147 struct pciide_channel *cp;
2148 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2149 int channel;
2150 u_int32_t ideconf;
2151 bus_size_t cmdsize, ctlsize;
2152 pcitag_t pcib_tag;
2153 pcireg_t pcib_id, pcib_class;
2154
2155 if (pciide_chipen(sc, pa) == 0)
2156 return;
2157 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2158 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2159 /* and read ID and rev of the ISA bridge */
2160 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2161 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2162 printf(": VIA Technologies ");
2163 switch (PCI_PRODUCT(pcib_id)) {
2164 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2165 printf("VT82C586 (Apollo VP) ");
2166 		if (PCI_REVISION(pcib_class) >= 0x02) {
2167 printf("ATA33 controller\n");
2168 sc->sc_wdcdev.UDMA_cap = 2;
2169 } else {
2170 printf("controller\n");
2171 sc->sc_wdcdev.UDMA_cap = 0;
2172 }
2173 break;
2174 case PCI_PRODUCT_VIATECH_VT82C596A:
2175 printf("VT82C596A (Apollo Pro) ");
2176 if (PCI_REVISION(pcib_class) >= 0x12) {
2177 printf("ATA66 controller\n");
2178 sc->sc_wdcdev.UDMA_cap = 4;
2179 } else {
2180 printf("ATA33 controller\n");
2181 sc->sc_wdcdev.UDMA_cap = 2;
2182 }
2183 break;
2184 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2185 printf("VT82C686A (Apollo KX133) ");
2186 if (PCI_REVISION(pcib_class) >= 0x40) {
2187 printf("ATA100 controller\n");
2188 sc->sc_wdcdev.UDMA_cap = 5;
2189 } else {
2190 printf("ATA66 controller\n");
2191 sc->sc_wdcdev.UDMA_cap = 4;
2192 }
2193 break;
2194 case PCI_PRODUCT_VIATECH_VT8231:
2195 printf("VT8231 ATA100 controller\n");
2196 sc->sc_wdcdev.UDMA_cap = 5;
2197 break;
2198 case PCI_PRODUCT_VIATECH_VT8233:
2199 printf("VT8233 ATA100 controller\n");
2200 sc->sc_wdcdev.UDMA_cap = 5;
2201 break;
2202 case PCI_PRODUCT_VIATECH_VT8233A:
2203 printf("VT8233A ATA133 controller\n");
2204 		/* XXX use ATA100 until ATA133 is supported */
2205 sc->sc_wdcdev.UDMA_cap = 5;
2206 break;
2207 default:
2208 printf("unknown ATA controller\n");
2209 sc->sc_wdcdev.UDMA_cap = 0;
2210 }
2211
2212 printf("%s: bus-master DMA support present",
2213 sc->sc_wdcdev.sc_dev.dv_xname);
2214 pciide_mapreg_dma(sc, pa);
2215 printf("\n");
2216 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2217 WDC_CAPABILITY_MODE;
2218 if (sc->sc_dma_ok) {
2219 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2220 sc->sc_wdcdev.irqack = pciide_irqack;
2221 if (sc->sc_wdcdev.UDMA_cap > 0)
2222 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2223 }
2224 sc->sc_wdcdev.PIO_cap = 4;
2225 sc->sc_wdcdev.DMA_cap = 2;
2226 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2227 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2228 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2229
2230 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2231 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2232 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2233 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2234 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2235 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2236 DEBUG_PROBE);
2237
2238 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2239 cp = &sc->pciide_channels[channel];
2240 if (pciide_chansetup(sc, channel, interface) == 0)
2241 continue;
2242
2243 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2244 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2245 printf("%s: %s channel ignored (disabled)\n",
2246 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2247 continue;
2248 }
2249 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2250 pciide_pci_intr);
2251 if (cp->hw_ok == 0)
2252 continue;
2253 if (pciide_chan_candisable(cp)) {
2254 ideconf &= ~APO_IDECONF_EN(channel);
2255 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2256 ideconf);
2257 }
2258 pciide_map_compat_intr(pa, cp, channel, interface);
2259
2260 if (cp->hw_ok == 0)
2261 continue;
2262 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2263 }
2264 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2265 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2266 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2267 }
2268
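/*
 * Per-channel timing setup for the VIA Apollo: program UDMA timings
 * (UDMA/33, /66 or /100 depending on the detected chip) or fall back
 * to multiword DMA, then set the PIO pulse/recovery values in the
 * DATATIM register.
 */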
2269 void
2270 apollo_setup_channel(chp)
2271 struct channel_softc *chp;
2272 {
2273 u_int32_t udmatim_reg, datatim_reg;
2274 u_int8_t idedma_ctl;
2275 int mode, drive;
2276 struct ata_drive_datas *drvp;
2277 struct pciide_channel *cp = (struct pciide_channel*)chp;
2278 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2279
2280 idedma_ctl = 0;
2281 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2282 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2283 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2284 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2285
2286 /* setup DMA if needed */
2287 pciide_channel_dma_setup(cp);
2288
2289 for (drive = 0; drive < 2; drive++) {
2290 drvp = &chp->ch_drive[drive];
2291 /* If no drive, skip */
2292 if ((drvp->drive_flags & DRIVE) == 0)
2293 continue;
2294 /* add timing values, setup DMA if needed */
2295 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2296 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2297 mode = drvp->PIO_mode;
2298 goto pio;
2299 }
2300 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2301 (drvp->drive_flags & DRIVE_UDMA)) {
2302 /* use Ultra/DMA */
2303 drvp->drive_flags &= ~DRIVE_DMA;
2304 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2305 APO_UDMA_EN_MTH(chp->channel, drive);
2306 if (sc->sc_wdcdev.UDMA_cap == 5) {
2307 /* 686b */
2308 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2309 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2310 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2311 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2312 /* 596b or 686a */
2313 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2314 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2315 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2316 } else {
2317 /* 596a or 586b */
2318 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2319 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2320 }
2321 /* can use PIO timings, MW DMA unused */
2322 mode = drvp->PIO_mode;
2323 } else {
2324 /* use Multiword DMA */
2325 drvp->drive_flags &= ~DRIVE_UDMA;
2326 /* mode = min(pio, dma+2) */
2327 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2328 mode = drvp->PIO_mode;
2329 else
2330 mode = drvp->DMA_mode + 2;
2331 }
2332 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2333
2334 pio: /* setup PIO mode */
2335 if (mode <= 2) {
2336 drvp->DMA_mode = 0;
2337 drvp->PIO_mode = 0;
2338 mode = 0;
2339 } else {
2340 drvp->PIO_mode = mode;
2341 drvp->DMA_mode = mode - 2;
2342 }
2343 datatim_reg |=
2344 APO_DATATIM_PULSE(chp->channel, drive,
2345 apollo_pio_set[mode]) |
2346 APO_DATATIM_RECOV(chp->channel, drive,
2347 apollo_pio_rec[mode]);
2348 }
2349 if (idedma_ctl != 0) {
2350 /* Add software bits in status register */
2351 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2352 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2353 idedma_ctl);
2354 }
2355 pciide_print_modes(cp);
2356 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2357 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2358 }
2359
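/*
 * Per-channel attach helper for CMD 064x controllers: derive the
 * interface register (faking it when the 0648/0649 identifies itself
 * as a RAID controller), share the command queue on chips without
 * independent channels, and map the channel if it is enabled.
 */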
2360 void
2361 cmd_channel_map(pa, sc, channel)
2362 struct pci_attach_args *pa;
2363 struct pciide_softc *sc;
2364 int channel;
2365 {
2366 struct pciide_channel *cp = &sc->pciide_channels[channel];
2367 bus_size_t cmdsize, ctlsize;
2368 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2369 int interface, one_channel;
2370
2371 /*
2372 * The 0648/0649 can be told to identify as a RAID controller.
2373 	 * In this case, we have to fake the interface.
2374 */
2375 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2376 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2377 PCIIDE_INTERFACE_SETTABLE(1);
2378 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2379 CMD_CONF_DSA1)
2380 interface |= PCIIDE_INTERFACE_PCI(0) |
2381 PCIIDE_INTERFACE_PCI(1);
2382 } else {
2383 interface = PCI_INTERFACE(pa->pa_class);
2384 }
2385
2386 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2387 cp->name = PCIIDE_CHANNEL_NAME(channel);
2388 cp->wdc_channel.channel = channel;
2389 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2390
2391 /*
2392 	 * Older CMD64X chips don't have independent channels.
2393 */
2394 switch (sc->sc_pp->ide_product) {
2395 case PCI_PRODUCT_CMDTECH_649:
2396 one_channel = 0;
2397 break;
2398 default:
2399 one_channel = 1;
2400 break;
2401 }
2402
2403 if (channel > 0 && one_channel) {
2404 cp->wdc_channel.ch_queue =
2405 sc->pciide_channels[0].wdc_channel.ch_queue;
2406 } else {
2407 cp->wdc_channel.ch_queue =
2408 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2409 }
2410 if (cp->wdc_channel.ch_queue == NULL) {
2411 printf("%s %s channel: "
2412 		    "can't allocate memory for command queue\n",
2413 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2414 return;
2415 }
2416
2417 printf("%s: %s channel %s to %s mode\n",
2418 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2419 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2420 "configured" : "wired",
2421 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2422 "native-PCI" : "compatibility");
2423
2424 /*
2425 * with a CMD PCI64x, if we get here, the first channel is enabled:
2426 * there's no way to disable the first channel without disabling
2427 * the whole device
2428 */
2429 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2430 printf("%s: %s channel ignored (disabled)\n",
2431 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2432 return;
2433 }
2434
2435 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2436 if (cp->hw_ok == 0)
2437 return;
2438 if (channel == 1) {
2439 if (pciide_chan_candisable(cp)) {
2440 ctrl &= ~CMD_CTRL_2PORT;
2441 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2442 CMD_CTRL, ctrl);
2443 }
2444 }
2445 pciide_map_compat_intr(pa, cp, channel, interface);
2446 }
2447
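/*
 * Interrupt handler for CMD controllers in native-PCI mode: check the
 * per-channel interrupt bits in CMD_CONF and CMD_ARTTIM23 and call
 * wdcintr() for each channel that has one pending.
 */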
2448 int
2449 cmd_pci_intr(arg)
2450 void *arg;
2451 {
2452 struct pciide_softc *sc = arg;
2453 struct pciide_channel *cp;
2454 struct channel_softc *wdc_cp;
2455 int i, rv, crv;
2456 u_int32_t priirq, secirq;
2457
2458 rv = 0;
2459 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2460 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2461 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2462 cp = &sc->pciide_channels[i];
2463 wdc_cp = &cp->wdc_channel;
2464 		/* If a compat channel, skip it. */
2465 if (cp->compat)
2466 continue;
2467 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2468 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2469 crv = wdcintr(wdc_cp);
2470 if (crv == 0)
2471 printf("%s:%d: bogus intr\n",
2472 sc->sc_wdcdev.sc_dev.dv_xname, i);
2473 else
2474 rv = 1;
2475 }
2476 }
2477 return rv;
2478 }
2479
2480 void
2481 cmd_chip_map(sc, pa)
2482 struct pciide_softc *sc;
2483 struct pci_attach_args *pa;
2484 {
2485 int channel;
2486
2487 /*
2488 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2489 	 * and the base address registers can be disabled at the
2490 	 * hardware level. In this case, the device is wired
2491 * in compat mode and its first channel is always enabled,
2492 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2493 * In fact, it seems that the first channel of the CMD PCI0640
2494 * can't be disabled.
2495 */
2496
2497 #ifdef PCIIDE_CMD064x_DISABLE
2498 if (pciide_chipen(sc, pa) == 0)
2499 return;
2500 #endif
2501
2502 printf("%s: hardware does not support DMA\n",
2503 sc->sc_wdcdev.sc_dev.dv_xname);
2504 sc->sc_dma_ok = 0;
2505
2506 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2507 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2508 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2509
2510 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2511 cmd_channel_map(pa, sc, channel);
2512 }
2513 }
2514
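/*
 * Attach-time setup for CMD 0643/0646/0648/0649: enable DMA and,
 * depending on the product and revision, UDMA (mode 2, 4 or 5), map
 * each channel, program its timings and select multiword DMA
 * transfers.
 */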
2515 void
2516 cmd0643_9_chip_map(sc, pa)
2517 struct pciide_softc *sc;
2518 struct pci_attach_args *pa;
2519 {
2520 struct pciide_channel *cp;
2521 int channel;
2522 pcireg_t rev = PCI_REVISION(pa->pa_class);
2523
2524 /*
2525 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2526 	 * and the base address registers can be disabled at the
2527 	 * hardware level. In this case, the device is wired
2528 * in compat mode and its first channel is always enabled,
2529 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2530 * In fact, it seems that the first channel of the CMD PCI0640
2531 * can't be disabled.
2532 */
2533
2534 #ifdef PCIIDE_CMD064x_DISABLE
2535 if (pciide_chipen(sc, pa) == 0)
2536 return;
2537 #endif
2538 printf("%s: bus-master DMA support present",
2539 sc->sc_wdcdev.sc_dev.dv_xname);
2540 pciide_mapreg_dma(sc, pa);
2541 printf("\n");
2542 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2543 WDC_CAPABILITY_MODE;
2544 if (sc->sc_dma_ok) {
2545 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2546 switch (sc->sc_pp->ide_product) {
2547 case PCI_PRODUCT_CMDTECH_649:
2548 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2549 sc->sc_wdcdev.UDMA_cap = 5;
2550 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2551 break;
2552 case PCI_PRODUCT_CMDTECH_648:
2553 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2554 sc->sc_wdcdev.UDMA_cap = 4;
2555 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2556 break;
2557 case PCI_PRODUCT_CMDTECH_646:
2558 if (rev >= CMD0646U2_REV) {
2559 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2560 sc->sc_wdcdev.UDMA_cap = 2;
2561 } else if (rev >= CMD0646U_REV) {
2562 /*
2563 * Linux's driver claims that the 646U is broken
2564 * with UDMA. Only enable it if we know what we're
2565 * doing
2566 */
2567 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2568 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2569 sc->sc_wdcdev.UDMA_cap = 2;
2570 #endif
2571 /* explicitly disable UDMA */
2572 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2573 CMD_UDMATIM(0), 0);
2574 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2575 CMD_UDMATIM(1), 0);
2576 }
2577 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2578 break;
2579 default:
2580 sc->sc_wdcdev.irqack = pciide_irqack;
2581 }
2582 }
2583
2584 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2585 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2586 sc->sc_wdcdev.PIO_cap = 4;
2587 sc->sc_wdcdev.DMA_cap = 2;
2588 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2589
2590 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2591 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2592 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2593 DEBUG_PROBE);
2594
2595 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2596 cp = &sc->pciide_channels[channel];
2597 cmd_channel_map(pa, sc, channel);
2598 if (cp->hw_ok == 0)
2599 continue;
2600 cmd0643_9_setup_channel(&cp->wdc_channel);
2601 }
2602 /*
2603 * note - this also makes sure we clear the irq disable and reset
2604 * bits
2605 */
2606 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2607 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2608 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2609 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2610 DEBUG_PROBE);
2611 }
2612
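/*
 * Per-drive timing setup for CMD 064x: for UDMA-capable chips program
 * CMD_UDMATIM (dropping to UDMA2 when CMD_BICSR_80 is clear for the
 * channel), otherwise use the multiword DMA or PIO data timing tables,
 * and write the result to the per-drive timing register.
 */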
2613 void
2614 cmd0643_9_setup_channel(chp)
2615 struct channel_softc *chp;
2616 {
2617 struct ata_drive_datas *drvp;
2618 u_int8_t tim;
2619 u_int32_t idedma_ctl, udma_reg;
2620 int drive;
2621 struct pciide_channel *cp = (struct pciide_channel*)chp;
2622 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2623
2624 idedma_ctl = 0;
2625 /* setup DMA if needed */
2626 pciide_channel_dma_setup(cp);
2627
2628 for (drive = 0; drive < 2; drive++) {
2629 drvp = &chp->ch_drive[drive];
2630 /* If no drive, skip */
2631 if ((drvp->drive_flags & DRIVE) == 0)
2632 continue;
2633 /* add timing values, setup DMA if needed */
2634 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2635 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2636 if (drvp->drive_flags & DRIVE_UDMA) {
2637 /* UltraDMA on a 646U2, 0648 or 0649 */
2638 drvp->drive_flags &= ~DRIVE_DMA;
2639 udma_reg = pciide_pci_read(sc->sc_pc,
2640 sc->sc_tag, CMD_UDMATIM(chp->channel));
2641 if (drvp->UDMA_mode > 2 &&
2642 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2643 CMD_BICSR) &
2644 CMD_BICSR_80(chp->channel)) == 0)
2645 drvp->UDMA_mode = 2;
2646 if (drvp->UDMA_mode > 2)
2647 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2648 else if (sc->sc_wdcdev.UDMA_cap > 2)
2649 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2650 udma_reg |= CMD_UDMATIM_UDMA(drive);
2651 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2652 CMD_UDMATIM_TIM_OFF(drive));
2653 udma_reg |=
2654 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2655 CMD_UDMATIM_TIM_OFF(drive));
2656 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2657 CMD_UDMATIM(chp->channel), udma_reg);
2658 } else {
2659 /*
2660 * use Multiword DMA.
2661 * Timings will be used for both PIO and DMA,
2662 * so adjust DMA mode if needed
2663 * if we have a 0646U2/8/9, turn off UDMA
2664 */
2665 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2666 udma_reg = pciide_pci_read(sc->sc_pc,
2667 sc->sc_tag,
2668 CMD_UDMATIM(chp->channel));
2669 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2670 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2671 CMD_UDMATIM(chp->channel),
2672 udma_reg);
2673 }
2674 if (drvp->PIO_mode >= 3 &&
2675 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2676 drvp->DMA_mode = drvp->PIO_mode - 2;
2677 }
2678 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2679 }
2680 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2681 }
2682 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2683 CMD_DATA_TIM(chp->channel, drive), tim);
2684 }
2685 if (idedma_ctl != 0) {
2686 /* Add software bits in status register */
2687 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2688 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2689 idedma_ctl);
2690 }
2691 pciide_print_modes(cp);
2692 }
2693
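/*
 * Interrupt acknowledge for the CMD 0646/0648/0649: the per-channel
 * interrupt bit is acknowledged by writing back the register that
 * contains it (CMD_CONF or CMD_ARTTIM23), then the generic bus-master
 * status is acked via pciide_irqack().
 */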
2694 void
2695 cmd646_9_irqack(chp)
2696 struct channel_softc *chp;
2697 {
2698 u_int32_t priirq, secirq;
2699 struct pciide_channel *cp = (struct pciide_channel*)chp;
2700 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2701
2702 if (chp->channel == 0) {
2703 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2704 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2705 } else {
2706 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2707 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2708 }
2709 pciide_irqack(chp);
2710 }
2711
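/*
 * Attach-time setup for the Cypress 82C693: this chip exposes one IDE
 * channel per PCI function, so the compatibility channel number is
 * derived from the function number; the hyperCache control registers
 * are mapped as well, since they hold the DMA mode setting.
 */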
2712 void
2713 cy693_chip_map(sc, pa)
2714 struct pciide_softc *sc;
2715 struct pci_attach_args *pa;
2716 {
2717 struct pciide_channel *cp;
2718 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2719 bus_size_t cmdsize, ctlsize;
2720
2721 if (pciide_chipen(sc, pa) == 0)
2722 return;
2723 /*
2724 	 * This chip has 2 PCI IDE functions, one for the primary and one
2725 	 * for the secondary channel, so we need to call
2726 	 * pciide_mapregs_compat() with the real channel.
2727 */
2728 if (pa->pa_function == 1) {
2729 sc->sc_cy_compatchan = 0;
2730 } else if (pa->pa_function == 2) {
2731 sc->sc_cy_compatchan = 1;
2732 } else {
2733 printf("%s: unexpected PCI function %d\n",
2734 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2735 return;
2736 }
2737 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2738 printf("%s: bus-master DMA support present",
2739 sc->sc_wdcdev.sc_dev.dv_xname);
2740 pciide_mapreg_dma(sc, pa);
2741 } else {
2742 printf("%s: hardware does not support DMA",
2743 sc->sc_wdcdev.sc_dev.dv_xname);
2744 sc->sc_dma_ok = 0;
2745 }
2746 printf("\n");
2747
2748 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2749 if (sc->sc_cy_handle == NULL) {
2750 printf("%s: unable to map hyperCache control registers\n",
2751 sc->sc_wdcdev.sc_dev.dv_xname);
2752 sc->sc_dma_ok = 0;
2753 }
2754
2755 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2756 WDC_CAPABILITY_MODE;
2757 if (sc->sc_dma_ok) {
2758 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2759 sc->sc_wdcdev.irqack = pciide_irqack;
2760 }
2761 sc->sc_wdcdev.PIO_cap = 4;
2762 sc->sc_wdcdev.DMA_cap = 2;
2763 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2764
2765 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2766 sc->sc_wdcdev.nchannels = 1;
2767
2768 /* Only one channel for this chip; if we are here it's enabled */
2769 cp = &sc->pciide_channels[0];
2770 sc->wdc_chanarray[0] = &cp->wdc_channel;
2771 cp->name = PCIIDE_CHANNEL_NAME(0);
2772 cp->wdc_channel.channel = 0;
2773 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2774 cp->wdc_channel.ch_queue =
2775 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2776 if (cp->wdc_channel.ch_queue == NULL) {
2777 printf("%s primary channel: "
2778 	    "can't allocate memory for command queue\n",
2779 sc->sc_wdcdev.sc_dev.dv_xname);
2780 return;
2781 }
2782 printf("%s: primary channel %s to ",
2783 sc->sc_wdcdev.sc_dev.dv_xname,
2784 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2785 "configured" : "wired");
2786 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2787 printf("native-PCI");
2788 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2789 pciide_pci_intr);
2790 } else {
2791 printf("compatibility");
2792 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2793 &cmdsize, &ctlsize);
2794 }
2795 printf(" mode\n");
2796 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2797 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2798 wdcattach(&cp->wdc_channel);
2799 if (pciide_chan_candisable(cp)) {
2800 pci_conf_write(sc->sc_pc, sc->sc_tag,
2801 PCI_COMMAND_STATUS_REG, 0);
2802 }
2803 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2804 if (cp->hw_ok == 0)
2805 return;
2806 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2807 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2808 cy693_setup_channel(&cp->wdc_channel);
2809 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2810 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2811 }
2812
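/*
 * Per-channel setup for the CY82C693: accumulate PIO pulse/recovery
 * values into CY_CMD_CTRL and program a single multiword DMA mode (the
 * lowest of the two drives) through the hyperCache registers.
 */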
2813 void
2814 cy693_setup_channel(chp)
2815 struct channel_softc *chp;
2816 {
2817 struct ata_drive_datas *drvp;
2818 int drive;
2819 u_int32_t cy_cmd_ctrl;
2820 u_int32_t idedma_ctl;
2821 struct pciide_channel *cp = (struct pciide_channel*)chp;
2822 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2823 int dma_mode = -1;
2824
2825 cy_cmd_ctrl = idedma_ctl = 0;
2826
2827 /* setup DMA if needed */
2828 pciide_channel_dma_setup(cp);
2829
2830 for (drive = 0; drive < 2; drive++) {
2831 drvp = &chp->ch_drive[drive];
2832 /* If no drive, skip */
2833 if ((drvp->drive_flags & DRIVE) == 0)
2834 continue;
2835 /* add timing values, setup DMA if needed */
2836 if (drvp->drive_flags & DRIVE_DMA) {
2837 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2838 /* use Multiword DMA */
2839 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2840 dma_mode = drvp->DMA_mode;
2841 }
2842 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2843 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2844 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2845 CY_CMD_CTRL_IOW_REC_OFF(drive));
2846 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2847 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2848 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2849 CY_CMD_CTRL_IOR_REC_OFF(drive));
2850 }
2851 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2852 chp->ch_drive[0].DMA_mode = dma_mode;
2853 chp->ch_drive[1].DMA_mode = dma_mode;
2854
2855 if (dma_mode == -1)
2856 dma_mode = 0;
2857
2858 if (sc->sc_cy_handle != NULL) {
2859 /* Note: `multiple' is implied. */
2860 cy82c693_write(sc->sc_cy_handle,
2861 (sc->sc_cy_compatchan == 0) ?
2862 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2863 }
2864
2865 pciide_print_modes(cp);
2866
2867 if (idedma_ctl != 0) {
2868 /* Add software bits in status register */
2869 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2870 IDEDMA_CTL, idedma_ctl);
2871 }
2872 }
2873
2874 static int
2875 sis_hostbr_match(pa)
2876 struct pci_attach_args *pa;
2877 {
2878 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2879 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2880 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2881 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2882 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2883 }
2884
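/*
 * Attach-time setup for SiS IDE controllers: UDMA is only enabled for
 * rev >= 0xd0 parts that are not paired with a buggy (rev < 3) SiS 530
 * host bridge; UDMA/100 is used when a 645/650/730/735 host bridge is
 * found, UDMA/33 otherwise.
 */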
2885 void
2886 sis_chip_map(sc, pa)
2887 struct pciide_softc *sc;
2888 struct pci_attach_args *pa;
2889 {
2890 struct pciide_channel *cp;
2891 int channel;
2892 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2893 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2894 pcireg_t rev = PCI_REVISION(pa->pa_class);
2895 bus_size_t cmdsize, ctlsize;
2896 pcitag_t pchb_tag;
2897 pcireg_t pchb_id, pchb_class;
2898
2899 if (pciide_chipen(sc, pa) == 0)
2900 return;
2901 printf("%s: bus-master DMA support present",
2902 sc->sc_wdcdev.sc_dev.dv_xname);
2903 pciide_mapreg_dma(sc, pa);
2904 printf("\n");
2905
2906 /* get a PCI tag for the host bridge (function 0 of the same device) */
2907 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2908 /* and read ID and rev of the ISA bridge */
2909 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2910 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2911
2912 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2913 WDC_CAPABILITY_MODE;
2914 if (sc->sc_dma_ok) {
2915 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2916 sc->sc_wdcdev.irqack = pciide_irqack;
2917 /*
2918 		 * Controllers associated with a rev 0x2 530 Host-to-PCI
2919 		 * bridge have problems with UDMA (info provided by Christos)
2920 */
2921 if (rev >= 0xd0 &&
2922 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2923 PCI_REVISION(pchb_class) >= 0x03))
2924 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2925 }
2926
2927 sc->sc_wdcdev.PIO_cap = 4;
2928 sc->sc_wdcdev.DMA_cap = 2;
2929 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2930 /*
2931 * Use UDMA/100 on SiS 735 chipset and UDMA/33 on other
2932 * chipsets.
2933 */
2934 sc->sc_wdcdev.UDMA_cap =
2935 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2936 sc->sc_wdcdev.set_modes = sis_setup_channel;
2937
2938 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2939 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2940
2941 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2942 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2943 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2944
2945 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2946 cp = &sc->pciide_channels[channel];
2947 if (pciide_chansetup(sc, channel, interface) == 0)
2948 continue;
2949 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2950 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2951 printf("%s: %s channel ignored (disabled)\n",
2952 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2953 continue;
2954 }
2955 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2956 pciide_pci_intr);
2957 if (cp->hw_ok == 0)
2958 continue;
2959 if (pciide_chan_candisable(cp)) {
2960 if (channel == 0)
2961 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2962 else
2963 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2964 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2965 sis_ctr0);
2966 }
2967 pciide_map_compat_intr(pa, cp, channel, interface);
2968 if (cp->hw_ok == 0)
2969 continue;
2970 sis_setup_channel(&cp->wdc_channel);
2971 }
2972 }
2973
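/*
 * Per-channel setup for SiS: build the SIS_TIM word with the UDMA
 * enable/timing bits or, for multiword DMA, with the PIO and DMA modes
 * adjusted to share the same timings, then add the PIO active/recovery
 * values and write the register back.
 */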
2974 void
2975 sis_setup_channel(chp)
2976 struct channel_softc *chp;
2977 {
2978 struct ata_drive_datas *drvp;
2979 int drive;
2980 u_int32_t sis_tim;
2981 u_int32_t idedma_ctl;
2982 struct pciide_channel *cp = (struct pciide_channel*)chp;
2983 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2984
2985 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2986 "channel %d 0x%x\n", chp->channel,
2987 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2988 DEBUG_PROBE);
2989 sis_tim = 0;
2990 idedma_ctl = 0;
2991 /* setup DMA if needed */
2992 pciide_channel_dma_setup(cp);
2993
2994 for (drive = 0; drive < 2; drive++) {
2995 drvp = &chp->ch_drive[drive];
2996 /* If no drive, skip */
2997 if ((drvp->drive_flags & DRIVE) == 0)
2998 continue;
2999 /* add timing values, setup DMA if needed */
3000 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3001 (drvp->drive_flags & DRIVE_UDMA) == 0)
3002 goto pio;
3003
3004 if (drvp->drive_flags & DRIVE_UDMA) {
3005 /* use Ultra/DMA */
3006 drvp->drive_flags &= ~DRIVE_DMA;
3007 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3008 SIS_TIM_UDMA_TIME_OFF(drive);
3009 sis_tim |= SIS_TIM_UDMA_EN(drive);
3010 } else {
3011 /*
3012 * use Multiword DMA
3013 * Timings will be used for both PIO and DMA,
3014 * so adjust DMA mode if needed
3015 */
3016 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3017 drvp->PIO_mode = drvp->DMA_mode + 2;
3018 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3019 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3020 drvp->PIO_mode - 2 : 0;
3021 if (drvp->DMA_mode == 0)
3022 drvp->PIO_mode = 0;
3023 }
3024 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3025 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3026 SIS_TIM_ACT_OFF(drive);
3027 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3028 SIS_TIM_REC_OFF(drive);
3029 }
3030 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3031 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3032 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3033 if (idedma_ctl != 0) {
3034 /* Add software bits in status register */
3035 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3036 IDEDMA_CTL, idedma_ctl);
3037 }
3038 pciide_print_modes(cp);
3039 }
3040
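/*
 * Attach-time setup for the Acer Labs (ALi) IDE controller: the UDMA
 * level depends on the chip revision (0x20, 0xC2, 0xC4), several
 * configuration registers are unlocked and initialized, and cable
 * detection is enabled on rev >= 0xC2 parts.
 */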
3041 void
3042 acer_chip_map(sc, pa)
3043 struct pciide_softc *sc;
3044 struct pci_attach_args *pa;
3045 {
3046 struct pciide_channel *cp;
3047 int channel;
3048 pcireg_t cr, interface;
3049 bus_size_t cmdsize, ctlsize;
3050 pcireg_t rev = PCI_REVISION(pa->pa_class);
3051
3052 if (pciide_chipen(sc, pa) == 0)
3053 return;
3054 printf("%s: bus-master DMA support present",
3055 sc->sc_wdcdev.sc_dev.dv_xname);
3056 pciide_mapreg_dma(sc, pa);
3057 printf("\n");
3058 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3059 WDC_CAPABILITY_MODE;
3060 if (sc->sc_dma_ok) {
3061 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3062 if (rev >= 0x20) {
3063 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3064 if (rev >= 0xC4)
3065 sc->sc_wdcdev.UDMA_cap = 5;
3066 else if (rev >= 0xC2)
3067 sc->sc_wdcdev.UDMA_cap = 4;
3068 else
3069 sc->sc_wdcdev.UDMA_cap = 2;
3070 }
3071 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3072 sc->sc_wdcdev.irqack = pciide_irqack;
3073 }
3074
3075 sc->sc_wdcdev.PIO_cap = 4;
3076 sc->sc_wdcdev.DMA_cap = 2;
3077 sc->sc_wdcdev.set_modes = acer_setup_channel;
3078 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3079 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3080
3081 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3082 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3083 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3084
3085 /* Enable "microsoft register bits" R/W. */
3086 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3087 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3088 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3089 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3090 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3091 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3092 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3093 ~ACER_CHANSTATUSREGS_RO);
3094 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3095 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3096 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3097 /* Don't use cr, re-read the real register content instead */
3098 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3099 PCI_CLASS_REG));
3100
3101 /* From linux: enable "Cable Detection" */
3102 if (rev >= 0xC2) {
3103 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3104 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3105 | ACER_0x4B_CDETECT);
3106 }
3107
3108 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3109 cp = &sc->pciide_channels[channel];
3110 if (pciide_chansetup(sc, channel, interface) == 0)
3111 continue;
3112 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3113 printf("%s: %s channel ignored (disabled)\n",
3114 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3115 continue;
3116 }
3117 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3118 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3119 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3120 if (cp->hw_ok == 0)
3121 continue;
3122 if (pciide_chan_candisable(cp)) {
3123 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3124 pci_conf_write(sc->sc_pc, sc->sc_tag,
3125 PCI_CLASS_REG, cr);
3126 }
3127 pciide_map_compat_intr(pa, cp, channel, interface);
3128 acer_setup_channel(&cp->wdc_channel);
3129 }
3130 }
3131
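/*
 * Per-channel setup for the ALi controller: cap UDMA at mode 2 based
 * on the cable-detection bit in ACER_0x4A, then program the FIFO/UDMA
 * register and the per-drive PIO timing.
 */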
3132 void
3133 acer_setup_channel(chp)
3134 struct channel_softc *chp;
3135 {
3136 struct ata_drive_datas *drvp;
3137 int drive;
3138 u_int32_t acer_fifo_udma;
3139 u_int32_t idedma_ctl;
3140 struct pciide_channel *cp = (struct pciide_channel*)chp;
3141 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3142
3143 idedma_ctl = 0;
3144 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3145 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3146 acer_fifo_udma), DEBUG_PROBE);
3147 /* setup DMA if needed */
3148 pciide_channel_dma_setup(cp);
3149
3150 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3151 	    DRIVE_UDMA) {	/* check for 80-pin cable */
3152 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3153 ACER_0x4A_80PIN(chp->channel)) {
3154 if (chp->ch_drive[0].UDMA_mode > 2)
3155 chp->ch_drive[0].UDMA_mode = 2;
3156 if (chp->ch_drive[1].UDMA_mode > 2)
3157 chp->ch_drive[1].UDMA_mode = 2;
3158 }
3159 }
3160
3161 for (drive = 0; drive < 2; drive++) {
3162 drvp = &chp->ch_drive[drive];
3163 /* If no drive, skip */
3164 if ((drvp->drive_flags & DRIVE) == 0)
3165 continue;
3166 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3167 "channel %d drive %d 0x%x\n", chp->channel, drive,
3168 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3169 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3170 /* clear FIFO/DMA mode */
3171 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3172 ACER_UDMA_EN(chp->channel, drive) |
3173 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3174
3175 /* add timing values, setup DMA if needed */
3176 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3177 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3178 acer_fifo_udma |=
3179 ACER_FTH_OPL(chp->channel, drive, 0x1);
3180 goto pio;
3181 }
3182
3183 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3184 if (drvp->drive_flags & DRIVE_UDMA) {
3185 /* use Ultra/DMA */
3186 drvp->drive_flags &= ~DRIVE_DMA;
3187 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3188 acer_fifo_udma |=
3189 ACER_UDMA_TIM(chp->channel, drive,
3190 acer_udma[drvp->UDMA_mode]);
3191 /* XXX disable if one drive < UDMA3 ? */
3192 if (drvp->UDMA_mode >= 3) {
3193 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3194 ACER_0x4B,
3195 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3196 ACER_0x4B) | ACER_0x4B_UDMA66);
3197 }
3198 } else {
3199 /*
3200 * use Multiword DMA
3201 * Timings will be used for both PIO and DMA,
3202 * so adjust DMA mode if needed
3203 */
3204 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3205 drvp->PIO_mode = drvp->DMA_mode + 2;
3206 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3207 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3208 drvp->PIO_mode - 2 : 0;
3209 if (drvp->DMA_mode == 0)
3210 drvp->PIO_mode = 0;
3211 }
3212 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3213 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3214 ACER_IDETIM(chp->channel, drive),
3215 acer_pio[drvp->PIO_mode]);
3216 }
3217 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3218 acer_fifo_udma), DEBUG_PROBE);
3219 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3220 if (idedma_ctl != 0) {
3221 /* Add software bits in status register */
3222 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3223 IDEDMA_CTL, idedma_ctl);
3224 }
3225 pciide_print_modes(cp);
3226 }
3227
3228 int
3229 acer_pci_intr(arg)
3230 void *arg;
3231 {
3232 struct pciide_softc *sc = arg;
3233 struct pciide_channel *cp;
3234 struct channel_softc *wdc_cp;
3235 int i, rv, crv;
3236 u_int32_t chids;
3237
3238 rv = 0;
3239 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3240 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3241 cp = &sc->pciide_channels[i];
3242 wdc_cp = &cp->wdc_channel;
3243 		/* If a compat channel, skip it. */
3244 if (cp->compat)
3245 continue;
3246 if (chids & ACER_CHIDS_INT(i)) {
3247 crv = wdcintr(wdc_cp);
3248 if (crv == 0)
3249 printf("%s:%d: bogus intr\n",
3250 sc->sc_wdcdev.sc_dev.dv_xname, i);
3251 else
3252 rv = 1;
3253 }
3254 }
3255 return rv;
3256 }
3257
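/*
 * Attach-time setup for HighPoint HPT36x/37x controllers: the exact
 * chip is identified from the PCI product and revision, the interface
 * is faked when the chip is in native mode, one channel (HPT366) or
 * two are mapped, and the interrupt-disable bit and clock setup are
 * handled on the newer parts.
 */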
3258 void
3259 hpt_chip_map(sc, pa)
3260 struct pciide_softc *sc;
3261 struct pci_attach_args *pa;
3262 {
3263 struct pciide_channel *cp;
3264 int i, compatchan, revision;
3265 pcireg_t interface;
3266 bus_size_t cmdsize, ctlsize;
3267
3268 if (pciide_chipen(sc, pa) == 0)
3269 return;
3270 revision = PCI_REVISION(pa->pa_class);
3271 printf(": Triones/Highpoint ");
3272 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3273 printf("HPT374 IDE Controller\n");
3274 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3275 printf("HPT372 IDE Controller\n");
3276 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3277 if (revision == HPT372_REV)
3278 printf("HPT372 IDE Controller\n");
3279 else if (revision == HPT370_REV)
3280 printf("HPT370 IDE Controller\n");
3281 else if (revision == HPT370A_REV)
3282 printf("HPT370A IDE Controller\n");
3283 else if (revision == HPT366_REV)
3284 printf("HPT366 IDE Controller\n");
3285 else
3286 printf("unknown HPT IDE controller rev %d\n", revision);
3287 } else
3288 printf("unknown HPT IDE controller 0x%x\n",
3289 sc->sc_pp->ide_product);
3290
3291 /*
3292 	 * When the chip is in native mode it identifies itself as a
3293 	 * 'misc mass storage' device. Fake the interface in this case.
3294 */
3295 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3296 interface = PCI_INTERFACE(pa->pa_class);
3297 } else {
3298 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3299 PCIIDE_INTERFACE_PCI(0);
3300 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3301 (revision == HPT370_REV || revision == HPT370A_REV ||
3302 revision == HPT372_REV)) ||
3303 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3304 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3305 interface |= PCIIDE_INTERFACE_PCI(1);
3306 }
3307
3308 printf("%s: bus-master DMA support present",
3309 sc->sc_wdcdev.sc_dev.dv_xname);
3310 pciide_mapreg_dma(sc, pa);
3311 printf("\n");
3312 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3313 WDC_CAPABILITY_MODE;
3314 if (sc->sc_dma_ok) {
3315 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3316 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3317 sc->sc_wdcdev.irqack = pciide_irqack;
3318 }
3319 sc->sc_wdcdev.PIO_cap = 4;
3320 sc->sc_wdcdev.DMA_cap = 2;
3321
3322 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3323 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3324 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3325 revision == HPT366_REV) {
3326 sc->sc_wdcdev.UDMA_cap = 4;
3327 /*
3328 		 * The 366 has 2 PCI IDE functions, one for the primary and
3329 		 * one for the secondary channel, so we need to call
3330 		 * pciide_mapregs_compat() with the real channel.
3331 */
3332 if (pa->pa_function == 0) {
3333 compatchan = 0;
3334 } else if (pa->pa_function == 1) {
3335 compatchan = 1;
3336 } else {
3337 printf("%s: unexpected PCI function %d\n",
3338 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3339 return;
3340 }
3341 sc->sc_wdcdev.nchannels = 1;
3342 } else {
3343 sc->sc_wdcdev.nchannels = 2;
3344 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3345 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3346 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3347 revision == HPT372_REV))
3348 sc->sc_wdcdev.UDMA_cap = 6;
3349 else
3350 sc->sc_wdcdev.UDMA_cap = 5;
3351 }
3352 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3353 cp = &sc->pciide_channels[i];
3354 if (sc->sc_wdcdev.nchannels > 1) {
3355 compatchan = i;
3356 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3357 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3358 printf("%s: %s channel ignored (disabled)\n",
3359 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3360 continue;
3361 }
3362 }
3363 if (pciide_chansetup(sc, i, interface) == 0)
3364 continue;
3365 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3366 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3367 &ctlsize, hpt_pci_intr);
3368 } else {
3369 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3370 &cmdsize, &ctlsize);
3371 }
3372 if (cp->hw_ok == 0)
3373 return;
3374 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3375 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3376 wdcattach(&cp->wdc_channel);
3377 hpt_setup_channel(&cp->wdc_channel);
3378 }
3379 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3380 (revision == HPT370_REV || revision == HPT370A_REV ||
3381 revision == HPT372_REV)) ||
3382 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3383 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3384 /*
3385 		 * HPT370_REV and higher have a bit to disable interrupts;
3386 		 * make sure to clear it.
3387 */
3388 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3389 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3390 ~HPT_CSEL_IRQDIS);
3391 }
3392 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
3393 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3394 revision == HPT372_REV ) ||
3395 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3396 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3397 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3398 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3399 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3400 return;
3401 }
3402
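/*
 * Per-drive setup for HighPoint controllers: pick the UDMA, multiword
 * DMA or PIO timing word from the table matching the chip and revision
 * (limiting UDMA to mode 2 when the HPT_CSEL cable bit is set for the
 * channel) and write it to the drive's HPT_IDETIM register.
 */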
3403 void
3404 hpt_setup_channel(chp)
3405 struct channel_softc *chp;
3406 {
3407 struct ata_drive_datas *drvp;
3408 int drive;
3409 int cable;
3410 u_int32_t before, after;
3411 u_int32_t idedma_ctl;
3412 struct pciide_channel *cp = (struct pciide_channel*)chp;
3413 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3414 int revision =
3415 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3416
3417 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3418
3419 /* setup DMA if needed */
3420 pciide_channel_dma_setup(cp);
3421
3422 idedma_ctl = 0;
3423
3424 /* Per drive settings */
3425 for (drive = 0; drive < 2; drive++) {
3426 drvp = &chp->ch_drive[drive];
3427 /* If no drive, skip */
3428 if ((drvp->drive_flags & DRIVE) == 0)
3429 continue;
3430 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3431 HPT_IDETIM(chp->channel, drive));
3432
3433 /* add timing values, setup DMA if needed */
3434 if (drvp->drive_flags & DRIVE_UDMA) {
3435 /* use Ultra/DMA */
3436 drvp->drive_flags &= ~DRIVE_DMA;
3437 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3438 drvp->UDMA_mode > 2)
3439 drvp->UDMA_mode = 2;
3440 switch (sc->sc_pp->ide_product) {
3441 case PCI_PRODUCT_TRIONES_HPT374:
3442 after = hpt374_udma[drvp->UDMA_mode];
3443 break;
3444 case PCI_PRODUCT_TRIONES_HPT372:
3445 after = hpt372_udma[drvp->UDMA_mode];
3446 break;
3447 case PCI_PRODUCT_TRIONES_HPT366:
3448 default:
3449 				switch (revision) {
3450 case HPT372_REV:
3451 after = hpt372_udma[drvp->UDMA_mode];
3452 break;
3453 case HPT370_REV:
3454 case HPT370A_REV:
3455 after = hpt370_udma[drvp->UDMA_mode];
3456 break;
3457 case HPT366_REV:
3458 default:
3459 after = hpt366_udma[drvp->UDMA_mode];
3460 break;
3461 }
3462 }
3463 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3464 } else if (drvp->drive_flags & DRIVE_DMA) {
3465 /*
3466 * use Multiword DMA.
3467 * Timings will be used for both PIO and DMA, so adjust
3468 * DMA mode if needed
3469 */
3470 if (drvp->PIO_mode >= 3 &&
3471 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3472 drvp->DMA_mode = drvp->PIO_mode - 2;
3473 }
3474 switch (sc->sc_pp->ide_product) {
3475 case PCI_PRODUCT_TRIONES_HPT374:
3476 after = hpt374_dma[drvp->DMA_mode];
3477 break;
3478 case PCI_PRODUCT_TRIONES_HPT372:
3479 after = hpt372_dma[drvp->DMA_mode];
3480 break;
3481 case PCI_PRODUCT_TRIONES_HPT366:
3482 default:
3483 				switch (revision) {
3484 case HPT372_REV:
3485 after = hpt372_dma[drvp->DMA_mode];
3486 break;
3487 case HPT370_REV:
3488 case HPT370A_REV:
3489 after = hpt370_dma[drvp->DMA_mode];
3490 break;
3491 case HPT366_REV:
3492 default:
3493 after = hpt366_dma[drvp->DMA_mode];
3494 break;
3495 }
3496 }
3497 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3498 } else {
3499 /* PIO only */
3500 switch (sc->sc_pp->ide_product) {
3501 case PCI_PRODUCT_TRIONES_HPT374:
3502 after = hpt374_pio[drvp->PIO_mode];
3503 break;
3504 case PCI_PRODUCT_TRIONES_HPT372:
3505 after = hpt372_pio[drvp->PIO_mode];
3506 break;
3507 case PCI_PRODUCT_TRIONES_HPT366:
3508 default:
3509 				switch (revision) {
3510 case HPT372_REV:
3511 after = hpt372_pio[drvp->PIO_mode];
3512 break;
3513 case HPT370_REV:
3514 case HPT370A_REV:
3515 after = hpt370_pio[drvp->PIO_mode];
3516 break;
3517 case HPT366_REV:
3518 default:
3519 after = hpt366_pio[drvp->PIO_mode];
3520 break;
3521 }
3522 }
3523 }
3524 pci_conf_write(sc->sc_pc, sc->sc_tag,
3525 HPT_IDETIM(chp->channel, drive), after);
3526 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3527 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3528 after, before), DEBUG_PROBE);
3529 }
3530 if (idedma_ctl != 0) {
3531 /* Add software bits in status register */
3532 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3533 IDEDMA_CTL, idedma_ctl);
3534 }
3535 pciide_print_modes(cp);
3536 }
3537
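/*
 * Interrupt handler for the HighPoint controllers: poll each channel's
 * IDEDMA_CTL status and call wdcintr() only when the interrupt bit is set
 * while the DMA engine is idle; interrupts that wdcintr() does not claim
 * are cleared by writing the status back.
 */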
3538 int
3539 hpt_pci_intr(arg)
3540 void *arg;
3541 {
3542 struct pciide_softc *sc = arg;
3543 struct pciide_channel *cp;
3544 struct channel_softc *wdc_cp;
3545 int rv = 0;
3546 int dmastat, i, crv;
3547
3548 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3549 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3550 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
		    IDEDMA_CTL_INTR)
3553 continue;
3554 cp = &sc->pciide_channels[i];
3555 wdc_cp = &cp->wdc_channel;
3556 crv = wdcintr(wdc_cp);
3557 if (crv == 0) {
3558 printf("%s:%d: bogus intr\n",
3559 sc->sc_wdcdev.sc_dev.dv_xname, i);
3560 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3561 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3562 } else
3563 rv = 1;
3564 }
3565 return rv;
3566 }
3567
3568
/*
 * Macros to test the product: these group the Promise controllers by
 * generation.  PDC_IS_262 matches the Ultra/66 and newer parts,
 * PDC_IS_265 the Ultra/100 and newer parts, and PDC_IS_268 the
 * TX2/Ultra/133 parts, which use a separate mode-setup routine and skip
 * the PDC2xx_STATE handling below.
 */
3570 #define PDC_IS_262(sc) \
3571 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3572 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3573 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3574 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3575 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3576 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3577 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3578 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3579 #define PDC_IS_265(sc) \
3580 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3581 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3582 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3583 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3584 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3585 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3586 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3587 #define PDC_IS_268(sc) \
3588 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3589 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3590 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3591 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3592 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3593
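/*
 * Chip mapping for the Promise PDC202xx family.  On everything but the
 * 268-class parts the PDC2xx_STATE register is read up front so that RAID
 * mode can be turned off, failsafe timings can be programmed, and the
 * per-channel enable bits can be checked before the channels are attached;
 * the (possibly modified) state is written back at the end.
 */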
3594 void
3595 pdc202xx_chip_map(sc, pa)
3596 struct pciide_softc *sc;
3597 struct pci_attach_args *pa;
3598 {
3599 struct pciide_channel *cp;
3600 int channel;
3601 pcireg_t interface, st, mode;
3602 bus_size_t cmdsize, ctlsize;
3603
3604 if (!PDC_IS_268(sc)) {
3605 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3606 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3607 st), DEBUG_PROBE);
3608 }
3609 if (pciide_chipen(sc, pa) == 0)
3610 return;
3611
3612 /* turn off RAID mode */
3613 if (!PDC_IS_268(sc))
3614 st &= ~PDC2xx_STATE_IDERAID;
3615
3616 /*
	 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
	 * mode; we have to fake the interface.
3619 */
3620 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3621 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3622 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3623
3624 printf("%s: bus-master DMA support present",
3625 sc->sc_wdcdev.sc_dev.dv_xname);
3626 pciide_mapreg_dma(sc, pa);
3627 printf("\n");
3628 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3629 WDC_CAPABILITY_MODE;
3630 if (sc->sc_dma_ok) {
3631 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3632 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3633 sc->sc_wdcdev.irqack = pciide_irqack;
3634 }
3635 sc->sc_wdcdev.PIO_cap = 4;
3636 sc->sc_wdcdev.DMA_cap = 2;
3637 if (PDC_IS_265(sc))
3638 sc->sc_wdcdev.UDMA_cap = 5;
3639 else if (PDC_IS_262(sc))
3640 sc->sc_wdcdev.UDMA_cap = 4;
3641 else
3642 sc->sc_wdcdev.UDMA_cap = 2;
3643 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3644 pdc20268_setup_channel : pdc202xx_setup_channel;
3645 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3646 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3647
3648 if (!PDC_IS_268(sc)) {
3649 /* setup failsafe defaults */
3650 mode = 0;
3651 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3652 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3653 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3654 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3655 for (channel = 0;
3656 channel < sc->sc_wdcdev.nchannels;
3657 channel++) {
3658 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3659 "drive 0 initial timings 0x%x, now 0x%x\n",
3660 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3661 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3662 DEBUG_PROBE);
3663 pci_conf_write(sc->sc_pc, sc->sc_tag,
3664 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3665 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3666 "drive 1 initial timings 0x%x, now 0x%x\n",
3667 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3668 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3669 pci_conf_write(sc->sc_pc, sc->sc_tag,
3670 PDC2xx_TIM(channel, 1), mode);
3671 }
3672
3673 mode = PDC2xx_SCR_DMA;
3674 if (PDC_IS_262(sc)) {
3675 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3676 } else {
3677 /* the BIOS set it up this way */
3678 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3679 }
3680 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3681 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3682 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3683 "now 0x%x\n",
3684 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3685 PDC2xx_SCR),
3686 mode), DEBUG_PROBE);
3687 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3688 PDC2xx_SCR, mode);
3689
3690 /* controller initial state register is OK even without BIOS */
3691 /* Set DMA mode to IDE DMA compatibility */
3692 mode =
3693 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3694 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3695 DEBUG_PROBE);
3696 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3697 mode | 0x1);
3698 mode =
3699 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3700 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3701 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3702 mode | 0x1);
3703 }
3704
3705 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3706 cp = &sc->pciide_channels[channel];
3707 if (pciide_chansetup(sc, channel, interface) == 0)
3708 continue;
3709 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3710 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3711 printf("%s: %s channel ignored (disabled)\n",
3712 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3713 continue;
3714 }
3715 if (PDC_IS_265(sc))
3716 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3717 pdc20265_pci_intr);
3718 else
3719 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3720 pdc202xx_pci_intr);
3721 if (cp->hw_ok == 0)
3722 continue;
3723 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3724 st &= ~(PDC_IS_262(sc) ?
3725 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3726 pciide_map_compat_intr(pa, cp, channel, interface);
3727 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3728 }
3729 if (!PDC_IS_268(sc)) {
3730 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3731 "0x%x\n", st), DEBUG_PROBE);
3732 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3733 }
3734 return;
3735 }
3736
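/*
 * Per-channel mode setup for the pre-268 Promise parts.  On the 262-class
 * chips, UDMA is first clamped to mode 2 when the PDC262_STATE_80P bit for
 * the channel is set or when the other drive on the channel is itself
 * limited to UDMA 2 or below; the 66MHz UDMA clock (PDC262_U66) is then
 * enabled only if a drive still uses a mode above 2, and the PDC262_ATAPI
 * register is adjusted when an ATAPI device is present.  Finally the
 * per-drive PDC2xx_TIM word is assembled from the pdc2xx timing tables and
 * written to PCI configuration space.
 */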
3737 void
3738 pdc202xx_setup_channel(chp)
3739 struct channel_softc *chp;
3740 {
3741 struct ata_drive_datas *drvp;
3742 int drive;
3743 pcireg_t mode, st;
3744 u_int32_t idedma_ctl, scr, atapi;
3745 struct pciide_channel *cp = (struct pciide_channel*)chp;
3746 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3747 int channel = chp->channel;
3748
3749 /* setup DMA if needed */
3750 pciide_channel_dma_setup(cp);
3751
3752 idedma_ctl = 0;
3753 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3754 sc->sc_wdcdev.sc_dev.dv_xname,
3755 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3756 DEBUG_PROBE);
3757
3758 /* Per channel settings */
3759 if (PDC_IS_262(sc)) {
3760 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3761 PDC262_U66);
3762 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3763 /* Trim UDMA mode */
3764 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3765 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3766 chp->ch_drive[0].UDMA_mode <= 2) ||
3767 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3768 chp->ch_drive[1].UDMA_mode <= 2)) {
3769 if (chp->ch_drive[0].UDMA_mode > 2)
3770 chp->ch_drive[0].UDMA_mode = 2;
3771 if (chp->ch_drive[1].UDMA_mode > 2)
3772 chp->ch_drive[1].UDMA_mode = 2;
3773 }
3774 /* Set U66 if needed */
3775 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3776 chp->ch_drive[0].UDMA_mode > 2) ||
3777 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3778 chp->ch_drive[1].UDMA_mode > 2))
3779 scr |= PDC262_U66_EN(channel);
3780 else
3781 scr &= ~PDC262_U66_EN(channel);
3782 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3783 PDC262_U66, scr);
3784 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3785 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3786 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3787 PDC262_ATAPI(channel))), DEBUG_PROBE);
3788 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3789 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3790 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3791 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3792 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3793 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3794 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3795 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3796 atapi = 0;
3797 else
3798 atapi = PDC262_ATAPI_UDMA;
3799 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3800 PDC262_ATAPI(channel), atapi);
3801 }
3802 }
3803 for (drive = 0; drive < 2; drive++) {
3804 drvp = &chp->ch_drive[drive];
3805 /* If no drive, skip */
3806 if ((drvp->drive_flags & DRIVE) == 0)
3807 continue;
3808 mode = 0;
3809 if (drvp->drive_flags & DRIVE_UDMA) {
3810 /* use Ultra/DMA */
3811 drvp->drive_flags &= ~DRIVE_DMA;
3812 mode = PDC2xx_TIM_SET_MB(mode,
3813 pdc2xx_udma_mb[drvp->UDMA_mode]);
3814 mode = PDC2xx_TIM_SET_MC(mode,
3815 pdc2xx_udma_mc[drvp->UDMA_mode]);
3816 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3817 } else if (drvp->drive_flags & DRIVE_DMA) {
3818 mode = PDC2xx_TIM_SET_MB(mode,
3819 pdc2xx_dma_mb[drvp->DMA_mode]);
3820 mode = PDC2xx_TIM_SET_MC(mode,
3821 pdc2xx_dma_mc[drvp->DMA_mode]);
3822 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3823 } else {
3824 mode = PDC2xx_TIM_SET_MB(mode,
3825 pdc2xx_dma_mb[0]);
3826 mode = PDC2xx_TIM_SET_MC(mode,
3827 pdc2xx_dma_mc[0]);
3828 }
3829 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3830 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3831 if (drvp->drive_flags & DRIVE_ATA)
3832 mode |= PDC2xx_TIM_PRE;
3833 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3834 if (drvp->PIO_mode >= 3) {
3835 mode |= PDC2xx_TIM_IORDY;
3836 if (drive == 0)
3837 mode |= PDC2xx_TIM_IORDYp;
3838 }
3839 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3840 "timings 0x%x\n",
3841 sc->sc_wdcdev.sc_dev.dv_xname,
3842 chp->channel, drive, mode), DEBUG_PROBE);
3843 pci_conf_write(sc->sc_pc, sc->sc_tag,
3844 PDC2xx_TIM(chp->channel, drive), mode);
3845 }
3846 if (idedma_ctl != 0) {
3847 /* Add software bits in status register */
3848 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3849 IDEDMA_CTL, idedma_ctl);
3850 }
3851 pciide_print_modes(cp);
3852 }
3853
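/*
 * Per-channel setup for the 268-class (TX2/Ultra/133) parts.  These
 * controllers snoop the SET_FEATURES command, so no timing registers are
 * programmed here; we only mirror the FreeBSD checks (the accesses at
 * IDEDMA_CMD + 0x1 and + 0x3 below), clamp UDMA to mode 2 when the u100
 * bit is not set, and latch the software DMA bits in IDEDMA_CTL.
 */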
3854 void
3855 pdc20268_setup_channel(chp)
3856 struct channel_softc *chp;
3857 {
3858 struct ata_drive_datas *drvp;
3859 int drive;
3860 u_int32_t idedma_ctl;
3861 struct pciide_channel *cp = (struct pciide_channel*)chp;
3862 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3863 int u100;
3864
3865 /* setup DMA if needed */
3866 pciide_channel_dma_setup(cp);
3867
3868 idedma_ctl = 0;
3869
	/* I don't know what this is for; FreeBSD does it ... */
3871 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3872 IDEDMA_CMD + 0x1, 0x0b);
3873
3874 /*
	 * I don't know what this is for; FreeBSD checks it ... this is not
	 * cable type detection.
3877 */
3878 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3879 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
3880
3881 for (drive = 0; drive < 2; drive++) {
3882 drvp = &chp->ch_drive[drive];
3883 /* If no drive, skip */
3884 if ((drvp->drive_flags & DRIVE) == 0)
3885 continue;
3886 if (drvp->drive_flags & DRIVE_UDMA) {
3887 /* use Ultra/DMA */
3888 drvp->drive_flags &= ~DRIVE_DMA;
3889 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3890 if (drvp->UDMA_mode > 2 && u100 == 0)
3891 drvp->UDMA_mode = 2;
3892 } else if (drvp->drive_flags & DRIVE_DMA) {
3893 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3894 }
3895 }
	/*
	 * Nothing to do to set up modes; the controller snoops the
	 * SET_FEATURES command.
	 */
3897 if (idedma_ctl != 0) {
3898 /* Add software bits in status register */
3899 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3900 IDEDMA_CTL, idedma_ctl);
3901 }
3902 pciide_print_modes(cp);
3903 }
3904
3905 int
3906 pdc202xx_pci_intr(arg)
3907 void *arg;
3908 {
3909 struct pciide_softc *sc = arg;
3910 struct pciide_channel *cp;
3911 struct channel_softc *wdc_cp;
3912 int i, rv, crv;
3913 u_int32_t scr;
3914
3915 rv = 0;
3916 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3917 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3918 cp = &sc->pciide_channels[i];
3919 wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
3921 if (cp->compat)
3922 continue;
3923 if (scr & PDC2xx_SCR_INT(i)) {
3924 crv = wdcintr(wdc_cp);
3925 if (crv == 0)
3926 printf("%s:%d: bogus intr (reg 0x%x)\n",
3927 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3928 else
3929 rv = 1;
3930 }
3931 }
3932 return rv;
3933 }
3934
3935 int
3936 pdc20265_pci_intr(arg)
3937 void *arg;
3938 {
3939 struct pciide_softc *sc = arg;
3940 struct pciide_channel *cp;
3941 struct channel_softc *wdc_cp;
3942 int i, rv, crv;
3943 u_int32_t dmastat;
3944
3945 rv = 0;
3946 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3947 cp = &sc->pciide_channels[i];
3948 wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
3950 if (cp->compat)
3951 continue;
3952 /*
		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
		 * so use that instead (this requires 2 register reads instead
		 * of 1, but we can't do it any other way).
3957 */
3958 dmastat = bus_space_read_1(sc->sc_dma_iot,
3959 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3961 continue;
3962 crv = wdcintr(wdc_cp);
3963 if (crv == 0)
3964 printf("%s:%d: bogus intr\n",
3965 sc->sc_wdcdev.sc_dev.dv_xname, i);
3966 else
3967 rv = 1;
3968 }
3969 return rv;
3970 }
3971
3972 void
3973 opti_chip_map(sc, pa)
3974 struct pciide_softc *sc;
3975 struct pci_attach_args *pa;
3976 {
3977 struct pciide_channel *cp;
3978 bus_size_t cmdsize, ctlsize;
3979 pcireg_t interface;
3980 u_int8_t init_ctrl;
3981 int channel;
3982
3983 if (pciide_chipen(sc, pa) == 0)
3984 return;
3985 printf("%s: bus-master DMA support present",
3986 sc->sc_wdcdev.sc_dev.dv_xname);
3987
3988 /*
3989 * XXXSCW:
3990 * There seem to be a couple of buggy revisions/implementations
3991 * of the OPTi pciide chipset. This kludge seems to fix one of
3992 * the reported problems (PR/11644) but still fails for the
3993 * other (PR/13151), although the latter may be due to other
3994 * issues too...
3995 */
3996 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3997 printf(" but disabled due to chip rev. <= 0x12");
3998 sc->sc_dma_ok = 0;
3999 } else
4000 pciide_mapreg_dma(sc, pa);
4001
4002 printf("\n");
4003
4004 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4005 WDC_CAPABILITY_MODE;
4006 sc->sc_wdcdev.PIO_cap = 4;
4007 if (sc->sc_dma_ok) {
4008 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4009 sc->sc_wdcdev.irqack = pciide_irqack;
4010 sc->sc_wdcdev.DMA_cap = 2;
4011 }
4012 sc->sc_wdcdev.set_modes = opti_setup_channel;
4013
4014 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4015 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4016
4017 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4018 OPTI_REG_INIT_CONTROL);
4019
4020 interface = PCI_INTERFACE(pa->pa_class);
4021
4022 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4023 cp = &sc->pciide_channels[channel];
4024 if (pciide_chansetup(sc, channel, interface) == 0)
4025 continue;
4026 if (channel == 1 &&
4027 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4028 printf("%s: %s channel ignored (disabled)\n",
4029 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4030 continue;
4031 }
4032 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4033 pciide_pci_intr);
4034 if (cp->hw_ok == 0)
4035 continue;
4036 pciide_map_compat_intr(pa, cp, channel, interface);
4037 if (cp->hw_ok == 0)
4038 continue;
4039 opti_setup_channel(&cp->wdc_channel);
4040 }
4041 }
4042
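/*
 * Per-channel setup for the OPTi controller.  The opti_tim_* tables are
 * indexed 0-4 for PIO modes and from 5 up for multiword DMA modes (hence
 * mode = DMA_mode + 5); both drives on a channel must share the same
 * `Address Setup Time', so one drive is adjusted to match the other before
 * the timing registers are written.
 */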
4043 void
4044 opti_setup_channel(chp)
4045 struct channel_softc *chp;
4046 {
4047 struct ata_drive_datas *drvp;
4048 struct pciide_channel *cp = (struct pciide_channel*)chp;
4049 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4050 int drive, spd;
4051 int mode[2];
4052 u_int8_t rv, mr;
4053
4054 /*
4055 * The `Delay' and `Address Setup Time' fields of the
4056 * Miscellaneous Register are always zero initially.
4057 */
4058 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4059 mr &= ~(OPTI_MISC_DELAY_MASK |
4060 OPTI_MISC_ADDR_SETUP_MASK |
4061 OPTI_MISC_INDEX_MASK);
4062
4063 /* Prime the control register before setting timing values */
4064 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4065
4066 /* Determine the clockrate of the PCIbus the chip is attached to */
4067 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4068 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4069
4070 /* setup DMA if needed */
4071 pciide_channel_dma_setup(cp);
4072
4073 for (drive = 0; drive < 2; drive++) {
4074 drvp = &chp->ch_drive[drive];
4075 /* If no drive, skip */
4076 if ((drvp->drive_flags & DRIVE) == 0) {
4077 mode[drive] = -1;
4078 continue;
4079 }
4080
4081 if ((drvp->drive_flags & DRIVE_DMA)) {
4082 /*
4083 * Timings will be used for both PIO and DMA,
4084 * so adjust DMA mode if needed
4085 */
4086 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4087 drvp->PIO_mode = drvp->DMA_mode + 2;
4088 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4089 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4090 drvp->PIO_mode - 2 : 0;
4091 if (drvp->DMA_mode == 0)
4092 drvp->PIO_mode = 0;
4093
4094 mode[drive] = drvp->DMA_mode + 5;
4095 } else
4096 mode[drive] = drvp->PIO_mode;
4097
4098 if (drive && mode[0] >= 0 &&
4099 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4100 /*
4101 * Can't have two drives using different values
4102 * for `Address Setup Time'.
4103 * Slow down the faster drive to compensate.
4104 */
4105 int d = (opti_tim_as[spd][mode[0]] >
4106 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4107
4108 mode[d] = mode[1-d];
4109 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4110 chp->ch_drive[d].DMA_mode = 0;
4111 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4112 }
4113 }
4114
4115 for (drive = 0; drive < 2; drive++) {
4116 int m;
4117 if ((m = mode[drive]) < 0)
4118 continue;
4119
4120 /* Set the Address Setup Time and select appropriate index */
4121 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4122 rv |= OPTI_MISC_INDEX(drive);
4123 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4124
4125 /* Set the pulse width and recovery timing parameters */
4126 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4127 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4128 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4129 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4130
4131 /* Set the Enhanced Mode register appropriately */
4132 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4133 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4134 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4135 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4136 }
4137
4138 /* Finally, enable the timings */
4139 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4140
4141 pciide_print_modes(cp);
4142 }
4143
4144 #define ACARD_IS_850(sc) \
4145 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4146
4147 void
4148 acard_chip_map(sc, pa)
4149 struct pciide_softc *sc;
4150 struct pci_attach_args *pa;
4151 {
4152 struct pciide_channel *cp;
4153 int i;
4154 pcireg_t interface;
4155 bus_size_t cmdsize, ctlsize;
4156
4157 if (pciide_chipen(sc, pa) == 0)
4158 return;
4159
4160 /*
	 * When the chip is in native mode it identifies itself as
	 * 'misc mass storage'; fake the interface in this case.
4163 */
4164 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4165 interface = PCI_INTERFACE(pa->pa_class);
4166 } else {
4167 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4168 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4169 }
4170
4171 printf("%s: bus-master DMA support present",
4172 sc->sc_wdcdev.sc_dev.dv_xname);
4173 pciide_mapreg_dma(sc, pa);
4174 printf("\n");
4175 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4176 WDC_CAPABILITY_MODE;
4177
4178 if (sc->sc_dma_ok) {
4179 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4180 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4181 sc->sc_wdcdev.irqack = pciide_irqack;
4182 }
4183 sc->sc_wdcdev.PIO_cap = 4;
4184 sc->sc_wdcdev.DMA_cap = 2;
4185 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4186
4187 sc->sc_wdcdev.set_modes = acard_setup_channel;
4188 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4189 sc->sc_wdcdev.nchannels = 2;
4190
4191 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4192 cp = &sc->pciide_channels[i];
4193 if (pciide_chansetup(sc, i, interface) == 0)
4194 continue;
4195 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4196 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4197 &ctlsize, pciide_pci_intr);
4198 } else {
4199 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4200 &cmdsize, &ctlsize);
4201 }
4202 if (cp->hw_ok == 0)
4203 return;
4204 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4205 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4206 wdcattach(&cp->wdc_channel);
4207 acard_setup_channel(&cp->wdc_channel);
4208 }
4209 if (!ACARD_IS_850(sc)) {
4210 u_int32_t reg;
4211 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4212 reg &= ~ATP860_CTRL_INT;
4213 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4214 }
4215 }
4216
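/*
 * Per-channel setup for the Acard controllers.  The ATP850 has a
 * per-channel IDETIME register and packs both channels' UDMA settings
 * into the single ATP850_UDMA register, while the ATP860-class parts use
 * shared ATP860_IDETIME/ATP860_UDMA registers with per-channel fields;
 * the acard_act_*/acard_rec_* tables supply the per-mode timing values.
 */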
4217 void
4218 acard_setup_channel(chp)
4219 struct channel_softc *chp;
4220 {
4221 struct ata_drive_datas *drvp;
4222 struct pciide_channel *cp = (struct pciide_channel*)chp;
4223 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4224 int channel = chp->channel;
4225 int drive;
4226 u_int32_t idetime, udma_mode;
4227 u_int32_t idedma_ctl;
4228
4229 /* setup DMA if needed */
4230 pciide_channel_dma_setup(cp);
4231
4232 if (ACARD_IS_850(sc)) {
4233 idetime = 0;
4234 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4235 udma_mode &= ~ATP850_UDMA_MASK(channel);
4236 } else {
4237 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4238 idetime &= ~ATP860_SETTIME_MASK(channel);
4239 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4240 udma_mode &= ~ATP860_UDMA_MASK(channel);
4241
		/* check for an 80-pin cable */
4243 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4244 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4245 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4246 & ATP860_CTRL_80P(chp->channel)) {
4247 if (chp->ch_drive[0].UDMA_mode > 2)
4248 chp->ch_drive[0].UDMA_mode = 2;
4249 if (chp->ch_drive[1].UDMA_mode > 2)
4250 chp->ch_drive[1].UDMA_mode = 2;
4251 }
4252 }
4253 }
4254
4255 idedma_ctl = 0;
4256
4257 /* Per drive settings */
4258 for (drive = 0; drive < 2; drive++) {
4259 drvp = &chp->ch_drive[drive];
4260 /* If no drive, skip */
4261 if ((drvp->drive_flags & DRIVE) == 0)
4262 continue;
4263 /* add timing values, setup DMA if needed */
4264 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4265 (drvp->drive_flags & DRIVE_UDMA)) {
4266 /* use Ultra/DMA */
4267 if (ACARD_IS_850(sc)) {
4268 idetime |= ATP850_SETTIME(drive,
4269 acard_act_udma[drvp->UDMA_mode],
4270 acard_rec_udma[drvp->UDMA_mode]);
4271 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4272 acard_udma_conf[drvp->UDMA_mode]);
4273 } else {
4274 idetime |= ATP860_SETTIME(channel, drive,
4275 acard_act_udma[drvp->UDMA_mode],
4276 acard_rec_udma[drvp->UDMA_mode]);
4277 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4278 acard_udma_conf[drvp->UDMA_mode]);
4279 }
4280 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4281 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4282 (drvp->drive_flags & DRIVE_DMA)) {
4283 /* use Multiword DMA */
4284 drvp->drive_flags &= ~DRIVE_UDMA;
4285 if (ACARD_IS_850(sc)) {
4286 idetime |= ATP850_SETTIME(drive,
4287 acard_act_dma[drvp->DMA_mode],
4288 acard_rec_dma[drvp->DMA_mode]);
4289 } else {
4290 idetime |= ATP860_SETTIME(channel, drive,
4291 acard_act_dma[drvp->DMA_mode],
4292 acard_rec_dma[drvp->DMA_mode]);
4293 }
4294 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4295 } else {
4296 /* PIO only */
4297 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4298 if (ACARD_IS_850(sc)) {
4299 idetime |= ATP850_SETTIME(drive,
4300 acard_act_pio[drvp->PIO_mode],
4301 acard_rec_pio[drvp->PIO_mode]);
4302 } else {
4303 idetime |= ATP860_SETTIME(channel, drive,
4304 acard_act_pio[drvp->PIO_mode],
4305 acard_rec_pio[drvp->PIO_mode]);
4306 }
4307 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4308 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4309 | ATP8x0_CTRL_EN(channel));
4310 }
4311 }
4312
4313 if (idedma_ctl != 0) {
4314 /* Add software bits in status register */
4315 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4316 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4317 }
4318 pciide_print_modes(cp);
4319
4320 if (ACARD_IS_850(sc)) {
4321 pci_conf_write(sc->sc_pc, sc->sc_tag,
4322 ATP850_IDETIME(channel), idetime);
4323 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4324 } else {
4325 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4326 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4327 }
4328 }
4329
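/*
 * Interrupt handler for the Acard controllers.  If a channel's DMA status
 * shows an interrupt while no command is waiting for one (WDCF_IRQ_WAIT
 * clear), wdcintr() is still called but the interrupt is acknowledged here
 * by writing the status back; otherwise the return value of wdcintr()
 * decides whether the interrupt was ours.
 */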
4330 int
4331 acard_pci_intr(arg)
4332 void *arg;
4333 {
4334 struct pciide_softc *sc = arg;
4335 struct pciide_channel *cp;
4336 struct channel_softc *wdc_cp;
4337 int rv = 0;
4338 int dmastat, i, crv;
4339
4340 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4341 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4342 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4343 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4344 continue;
4345 cp = &sc->pciide_channels[i];
4346 wdc_cp = &cp->wdc_channel;
4347 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4348 (void)wdcintr(wdc_cp);
4349 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4350 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4351 continue;
4352 }
4353 crv = wdcintr(wdc_cp);
4354 if (crv == 0)
4355 printf("%s:%d: bogus intr\n",
4356 sc->sc_wdcdev.sc_dev.dv_xname, i);
4357 else if (crv == 1)
4358 rv = 1;
4359 else if (rv == 0)
4360 rv = crv;
4361 }
4362 return rv;
4363 }
4364
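/*
 * pci_find_device() callback: match the Winbond W83C553F southbridge at
 * revision 0x05 or below, on which DMA must be disabled (see below).
 */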
4365 static int
4366 sl82c105_bugchk(struct pci_attach_args *pa)
4367 {
4368
4369 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4370 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4371 return (0);
4372
4373 if (PCI_REVISION(pa->pa_class) <= 0x05)
4374 return (1);
4375
4376 return (0);
4377 }
4378
4379 void
4380 sl82c105_chip_map(sc, pa)
4381 struct pciide_softc *sc;
4382 struct pci_attach_args *pa;
4383 {
4384 struct pciide_channel *cp;
4385 bus_size_t cmdsize, ctlsize;
4386 pcireg_t interface, idecr;
4387 int channel;
4388
4389 if (pciide_chipen(sc, pa) == 0)
4390 return;
4391
4392 printf("%s: bus-master DMA support present",
4393 sc->sc_wdcdev.sc_dev.dv_xname);
4394
4395 /*
4396 * Check to see if we're part of the Winbond 83c553 Southbridge.
4397 * If so, we need to disable DMA on rev. <= 5 of that chip.
4398 */
4399 if (pci_find_device(pa, sl82c105_bugchk)) {
4400 printf(" but disabled due to 83c553 rev. <= 0x05");
4401 sc->sc_dma_ok = 0;
4402 } else
4403 pciide_mapreg_dma(sc, pa);
4404 printf("\n");
4405
4406 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4407 WDC_CAPABILITY_MODE;
4408 sc->sc_wdcdev.PIO_cap = 4;
4409 if (sc->sc_dma_ok) {
4410 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4411 sc->sc_wdcdev.irqack = pciide_irqack;
4412 sc->sc_wdcdev.DMA_cap = 2;
4413 }
4414 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4415
4416 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4417 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4418
4419 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4420
4421 interface = PCI_INTERFACE(pa->pa_class);
4422
4423 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4424 cp = &sc->pciide_channels[channel];
4425 if (pciide_chansetup(sc, channel, interface) == 0)
4426 continue;
4427 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4428 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4429 printf("%s: %s channel ignored (disabled)\n",
4430 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4431 continue;
4432 }
4433 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4434 pciide_pci_intr);
4435 if (cp->hw_ok == 0)
4436 continue;
4437 pciide_map_compat_intr(pa, cp, channel, interface);
4438 if (cp->hw_ok == 0)
4439 continue;
4440 sl82c105_setup_channel(&cp->wdc_channel);
4441 }
4442 }
4443
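/*
 * Per-channel setup for the Symphony/Winbond SL82C105: each drive has its
 * own PxDx control register holding the command on/off timing fields,
 * filled from symph_mw_dma_times[] or symph_pio_times[]; since the same
 * timings serve both PIO and DMA, multiword DMA is disabled whenever it
 * cannot be matched to the drive's PIO mode.
 */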
4444 void
4445 sl82c105_setup_channel(chp)
4446 struct channel_softc *chp;
4447 {
4448 struct ata_drive_datas *drvp;
4449 struct pciide_channel *cp = (struct pciide_channel*)chp;
4450 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4451 int pxdx_reg, drive;
4452 pcireg_t pxdx;
4453
4454 /* Set up DMA if needed. */
4455 pciide_channel_dma_setup(cp);
4456
4457 for (drive = 0; drive < 2; drive++) {
4458 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4459 : SYMPH_P1D0CR) + (drive * 4);
4460
4461 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4462
4463 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4464 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4465
4466 drvp = &chp->ch_drive[drive];
4467 /* If no drive, skip. */
4468 if ((drvp->drive_flags & DRIVE) == 0) {
4469 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4470 continue;
4471 }
4472
4473 if (drvp->drive_flags & DRIVE_DMA) {
4474 /*
4475 * Timings will be used for both PIO and DMA,
4476 * so adjust DMA mode if needed.
4477 */
4478 if (drvp->PIO_mode >= 3) {
4479 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4480 drvp->DMA_mode = drvp->PIO_mode - 2;
4481 if (drvp->DMA_mode < 1) {
4482 /*
4483 * Can't mix both PIO and DMA.
4484 * Disable DMA.
4485 */
4486 drvp->drive_flags &= ~DRIVE_DMA;
4487 }
4488 } else {
4489 /*
4490 * Can't mix both PIO and DMA. Disable
4491 * DMA.
4492 */
4493 drvp->drive_flags &= ~DRIVE_DMA;
4494 }
4495 }
4496
4497 if (drvp->drive_flags & DRIVE_DMA) {
4498 /* Use multi-word DMA. */
4499 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4500 PxDx_CMD_ON_SHIFT;
4501 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4502 } else {
4503 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4504 PxDx_CMD_ON_SHIFT;
4505 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4506 }
4507
4508 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4509
4510 /* ...and set the mode for this drive. */
4511 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4512 }
4513
4514 pciide_print_modes(cp);
4515 }
4516
4517 void
4518 serverworks_chip_map(sc, pa)
4519 struct pciide_softc *sc;
4520 struct pci_attach_args *pa;
4521 {
4522 struct pciide_channel *cp;
4523 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4524 pcitag_t pcib_tag;
4525 int channel;
4526 bus_size_t cmdsize, ctlsize;
4527
4528 if (pciide_chipen(sc, pa) == 0)
4529 return;
4530
4531 printf("%s: bus-master DMA support present",
4532 sc->sc_wdcdev.sc_dev.dv_xname);
4533 pciide_mapreg_dma(sc, pa);
4534 printf("\n");
4535 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4536 WDC_CAPABILITY_MODE;
4537
4538 if (sc->sc_dma_ok) {
4539 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4540 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4541 sc->sc_wdcdev.irqack = pciide_irqack;
4542 }
4543 sc->sc_wdcdev.PIO_cap = 4;
4544 sc->sc_wdcdev.DMA_cap = 2;
4545 switch (sc->sc_pp->ide_product) {
4546 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4547 sc->sc_wdcdev.UDMA_cap = 2;
4548 break;
4549 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4550 if (PCI_REVISION(pa->pa_class) < 0x92)
4551 sc->sc_wdcdev.UDMA_cap = 4;
4552 else
4553 sc->sc_wdcdev.UDMA_cap = 5;
4554 break;
4555 }
4556
4557 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4558 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4559 sc->sc_wdcdev.nchannels = 2;
4560
4561 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4562 cp = &sc->pciide_channels[channel];
4563 if (pciide_chansetup(sc, channel, interface) == 0)
4564 continue;
4565 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4566 serverworks_pci_intr);
4567 if (cp->hw_ok == 0)
4568 return;
4569 pciide_map_compat_intr(pa, cp, channel, interface);
4570 if (cp->hw_ok == 0)
4571 return;
4572 serverworks_setup_channel(&cp->wdc_channel);
4573 }
4574
4575 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4576 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4577 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4578 }
4579
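/*
 * Per-channel setup for the ServerWorks controllers.  PIO timings live in
 * config register 0x40, multiword DMA timings in 0x44, PIO modes in 0x48
 * and UDMA modes/enables in 0x54, with one byte or nibble per drive (the
 * `unit^1' indexing swaps the byte order within a channel); UDMA above
 * mode 2 is kept only when the subsystem-ID bit checked below reports an
 * 80-pin cable.
 */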
4580 void
4581 serverworks_setup_channel(chp)
4582 struct channel_softc *chp;
4583 {
4584 struct ata_drive_datas *drvp;
4585 struct pciide_channel *cp = (struct pciide_channel*)chp;
4586 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4587 int channel = chp->channel;
4588 int drive, unit;
4589 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4590 u_int32_t idedma_ctl;
4591 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4592 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4593
4594 /* setup DMA if needed */
4595 pciide_channel_dma_setup(cp);
4596
4597 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4598 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4599 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4600 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4601
4602 pio_time &= ~(0xffff << (16 * channel));
4603 dma_time &= ~(0xffff << (16 * channel));
4604 pio_mode &= ~(0xff << (8 * channel + 16));
4605 udma_mode &= ~(0xff << (8 * channel + 16));
4606 udma_mode &= ~(3 << (2 * channel));
4607
4608 idedma_ctl = 0;
4609
4610 /* Per drive settings */
4611 for (drive = 0; drive < 2; drive++) {
4612 drvp = &chp->ch_drive[drive];
4613 /* If no drive, skip */
4614 if ((drvp->drive_flags & DRIVE) == 0)
4615 continue;
4616 unit = drive + 2 * channel;
4617 /* add timing values, setup DMA if needed */
4618 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4619 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4620 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4621 (drvp->drive_flags & DRIVE_UDMA)) {
4622 /* use Ultra/DMA, check for 80-pin cable */
4623 if (drvp->UDMA_mode > 2 &&
			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4625 drvp->UDMA_mode = 2;
4626 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4627 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4628 udma_mode |= 1 << unit;
4629 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4630 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4631 (drvp->drive_flags & DRIVE_DMA)) {
4632 /* use Multiword DMA */
4633 drvp->drive_flags &= ~DRIVE_UDMA;
4634 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4635 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4636 } else {
4637 /* PIO only */
4638 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4639 }
4640 }
4641
4642 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4643 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4644 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4645 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4646 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4647
4648 if (idedma_ctl != 0) {
4649 /* Add software bits in status register */
4650 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4651 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4652 }
4653 pciide_print_modes(cp);
4654 }
4655
4656 int
4657 serverworks_pci_intr(arg)
4658 void *arg;
4659 {
4660 struct pciide_softc *sc = arg;
4661 struct pciide_channel *cp;
4662 struct channel_softc *wdc_cp;
4663 int rv = 0;
4664 int dmastat, i, crv;
4665
4666 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4667 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4668 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4669 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4670 IDEDMA_CTL_INTR)
4671 continue;
4672 cp = &sc->pciide_channels[i];
4673 wdc_cp = &cp->wdc_channel;
4674 crv = wdcintr(wdc_cp);
4675 if (crv == 0) {
4676 printf("%s:%d: bogus intr\n",
4677 sc->sc_wdcdev.sc_dev.dv_xname, i);
4678 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4679 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4680 } else
4681 rv = 1;
4682 }
4683 return rv;
4684 }
4685