/*	$NetBSD: pciide.c,v 1.153.2.10 2002/11/28 13:31:31 tron Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.153.2.10 2002/11/28 13:31:31 tron Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
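/*
 * pci_conf_read()/pci_conf_write() only operate on aligned 32-bit words,
 * so these helpers emulate byte-wide register accesses with a
 * read-modify-write of the configuration dword containing the register.
 */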
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 static int sis_hostbr_match __P(( struct pci_attach_args *));
187
188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void acer_setup_channel __P((struct channel_softc*));
190 int acer_pci_intr __P((void *));
191
192 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void pdc202xx_setup_channel __P((struct channel_softc*));
194 void pdc20268_setup_channel __P((struct channel_softc*));
195 int pdc202xx_pci_intr __P((void *));
196 int pdc20265_pci_intr __P((void *));
197
198 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void opti_setup_channel __P((struct channel_softc*));
200
201 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
202 void hpt_setup_channel __P((struct channel_softc*));
203 int hpt_pci_intr __P((void *));
204
205 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void acard_setup_channel __P((struct channel_softc*));
207 int acard_pci_intr __P((void *));
208
209 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void serverworks_setup_channel __P((struct channel_softc*));
211 int serverworks_pci_intr __P((void *));
212
213 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void sl82c105_setup_channel __P((struct channel_softc*));
215
216 void pciide_channel_dma_setup __P((struct pciide_channel *));
217 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
218 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
219 void pciide_dma_start __P((void*, int, int));
220 int pciide_dma_finish __P((void*, int, int, int));
221 void pciide_irqack __P((struct channel_softc *));
222 void pciide_print_modes __P((struct pciide_channel *));
223
224 struct pciide_product_desc {
225 u_int32_t ide_product;
226 int ide_flags;
227 const char *ide_name;
228 /* map and setup chip, probe drives */
229 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
230 };
231
232 /* Flags for ide_flags */
233 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
234 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
235
/* Default product description for devices not known to this driver */
237 const struct pciide_product_desc default_product_desc = {
238 0,
239 0,
240 "Generic PCI IDE controller",
241 default_chip_map,
242 };
243
244 const struct pciide_product_desc pciide_intel_products[] = {
245 { PCI_PRODUCT_INTEL_82092AA,
246 0,
247 "Intel 82092AA IDE controller",
248 default_chip_map,
249 },
250 { PCI_PRODUCT_INTEL_82371FB_IDE,
251 0,
252 "Intel 82371FB IDE controller (PIIX)",
253 piix_chip_map,
254 },
255 { PCI_PRODUCT_INTEL_82371SB_IDE,
256 0,
257 "Intel 82371SB IDE Interface (PIIX3)",
258 piix_chip_map,
259 },
260 { PCI_PRODUCT_INTEL_82371AB_IDE,
261 0,
262 "Intel 82371AB IDE controller (PIIX4)",
263 piix_chip_map,
264 },
265 { PCI_PRODUCT_INTEL_82440MX_IDE,
266 0,
267 "Intel 82440MX IDE controller",
268 piix_chip_map
269 },
270 { PCI_PRODUCT_INTEL_82801AA_IDE,
271 0,
272 "Intel 82801AA IDE Controller (ICH)",
273 piix_chip_map,
274 },
275 { PCI_PRODUCT_INTEL_82801AB_IDE,
276 0,
277 "Intel 82801AB IDE Controller (ICH0)",
278 piix_chip_map,
279 },
280 { PCI_PRODUCT_INTEL_82801BA_IDE,
281 0,
282 "Intel 82801BA IDE Controller (ICH2)",
283 piix_chip_map,
284 },
285 { PCI_PRODUCT_INTEL_82801BAM_IDE,
286 0,
287 "Intel 82801BAM IDE Controller (ICH2)",
288 piix_chip_map,
289 },
290 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
291 0,
292 "Intel 82801CA IDE Controller",
293 piix_chip_map,
294 },
295 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
296 0,
297 "Intel 82801CA IDE Controller",
298 piix_chip_map,
299 },
300 { PCI_PRODUCT_INTEL_82801DB_IDE,
301 0,
302 "Intel 82801DB IDE Controller (ICH4)",
303 piix_chip_map,
304 },
305 { 0,
306 0,
307 NULL,
308 NULL
309 }
310 };
311
312 const struct pciide_product_desc pciide_amd_products[] = {
313 { PCI_PRODUCT_AMD_PBC756_IDE,
314 0,
315 "Advanced Micro Devices AMD756 IDE Controller",
316 amd7x6_chip_map
317 },
318 { PCI_PRODUCT_AMD_PBC766_IDE,
319 0,
320 "Advanced Micro Devices AMD766 IDE Controller",
321 amd7x6_chip_map
322 },
323 { PCI_PRODUCT_AMD_PBC768_IDE,
324 0,
325 "Advanced Micro Devices AMD768 IDE Controller",
326 amd7x6_chip_map
327 },
328 { 0,
329 0,
330 NULL,
331 NULL
332 }
333 };
334
335 const struct pciide_product_desc pciide_cmd_products[] = {
336 { PCI_PRODUCT_CMDTECH_640,
337 0,
338 "CMD Technology PCI0640",
339 cmd_chip_map
340 },
341 { PCI_PRODUCT_CMDTECH_643,
342 0,
343 "CMD Technology PCI0643",
344 cmd0643_9_chip_map,
345 },
346 { PCI_PRODUCT_CMDTECH_646,
347 0,
348 "CMD Technology PCI0646",
349 cmd0643_9_chip_map,
350 },
351 { PCI_PRODUCT_CMDTECH_648,
352 IDE_PCI_CLASS_OVERRIDE,
353 "CMD Technology PCI0648",
354 cmd0643_9_chip_map,
355 },
356 { PCI_PRODUCT_CMDTECH_649,
357 IDE_PCI_CLASS_OVERRIDE,
358 "CMD Technology PCI0649",
359 cmd0643_9_chip_map,
360 },
361 { 0,
362 0,
363 NULL,
364 NULL
365 }
366 };
367
368 const struct pciide_product_desc pciide_via_products[] = {
369 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
370 0,
371 NULL,
372 apollo_chip_map,
373 },
374 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
375 0,
376 NULL,
377 apollo_chip_map,
378 },
379 { 0,
380 0,
381 NULL,
382 NULL
383 }
384 };
385
386 const struct pciide_product_desc pciide_cypress_products[] = {
387 { PCI_PRODUCT_CONTAQ_82C693,
388 IDE_16BIT_IOSPACE,
389 "Cypress 82C693 IDE Controller",
390 cy693_chip_map,
391 },
392 { 0,
393 0,
394 NULL,
395 NULL
396 }
397 };
398
399 const struct pciide_product_desc pciide_sis_products[] = {
400 { PCI_PRODUCT_SIS_5597_IDE,
401 0,
402 "Silicon Integrated System 5597/5598 IDE controller",
403 sis_chip_map,
404 },
405 { 0,
406 0,
407 NULL,
408 NULL
409 }
410 };
411
412 const struct pciide_product_desc pciide_acer_products[] = {
413 { PCI_PRODUCT_ALI_M5229,
414 0,
415 "Acer Labs M5229 UDMA IDE Controller",
416 acer_chip_map,
417 },
418 { 0,
419 0,
420 NULL,
421 NULL
422 }
423 };
424
425 const struct pciide_product_desc pciide_promise_products[] = {
426 { PCI_PRODUCT_PROMISE_ULTRA33,
427 IDE_PCI_CLASS_OVERRIDE,
428 "Promise Ultra33/ATA Bus Master IDE Accelerator",
429 pdc202xx_chip_map,
430 },
431 { PCI_PRODUCT_PROMISE_ULTRA66,
432 IDE_PCI_CLASS_OVERRIDE,
433 "Promise Ultra66/ATA Bus Master IDE Accelerator",
434 pdc202xx_chip_map,
435 },
436 { PCI_PRODUCT_PROMISE_ULTRA100,
437 IDE_PCI_CLASS_OVERRIDE,
438 "Promise Ultra100/ATA Bus Master IDE Accelerator",
439 pdc202xx_chip_map,
440 },
441 { PCI_PRODUCT_PROMISE_ULTRA100X,
442 IDE_PCI_CLASS_OVERRIDE,
443 "Promise Ultra100/ATA Bus Master IDE Accelerator",
444 pdc202xx_chip_map,
445 },
446 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
447 IDE_PCI_CLASS_OVERRIDE,
448 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
449 pdc202xx_chip_map,
450 },
451 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
452 IDE_PCI_CLASS_OVERRIDE,
453 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
454 pdc202xx_chip_map,
455 },
456 { PCI_PRODUCT_PROMISE_ULTRA133,
457 IDE_PCI_CLASS_OVERRIDE,
458 "Promise Ultra133/ATA Bus Master IDE Accelerator",
459 pdc202xx_chip_map,
460 },
461 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
462 IDE_PCI_CLASS_OVERRIDE,
463 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
464 pdc202xx_chip_map,
465 },
466 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
467 IDE_PCI_CLASS_OVERRIDE,
468 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
469 pdc202xx_chip_map,
470 },
471 { 0,
472 0,
473 NULL,
474 NULL
475 }
476 };
477
478 const struct pciide_product_desc pciide_opti_products[] = {
479 { PCI_PRODUCT_OPTI_82C621,
480 0,
481 "OPTi 82c621 PCI IDE controller",
482 opti_chip_map,
483 },
484 { PCI_PRODUCT_OPTI_82C568,
485 0,
486 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
487 opti_chip_map,
488 },
489 { PCI_PRODUCT_OPTI_82D568,
490 0,
491 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
492 opti_chip_map,
493 },
494 { 0,
495 0,
496 NULL,
497 NULL
498 }
499 };
500
501 const struct pciide_product_desc pciide_triones_products[] = {
502 { PCI_PRODUCT_TRIONES_HPT366,
503 IDE_PCI_CLASS_OVERRIDE,
504 NULL,
505 hpt_chip_map,
506 },
507 { PCI_PRODUCT_TRIONES_HPT372,
508 IDE_PCI_CLASS_OVERRIDE,
509 NULL,
510 hpt_chip_map
511 },
512 { PCI_PRODUCT_TRIONES_HPT374,
513 IDE_PCI_CLASS_OVERRIDE,
514 NULL,
515 hpt_chip_map
516 },
517 { 0,
518 0,
519 NULL,
520 NULL
521 }
522 };
523
524 const struct pciide_product_desc pciide_acard_products[] = {
525 { PCI_PRODUCT_ACARD_ATP850U,
526 IDE_PCI_CLASS_OVERRIDE,
527 "Acard ATP850U Ultra33 IDE Controller",
528 acard_chip_map,
529 },
530 { PCI_PRODUCT_ACARD_ATP860,
531 IDE_PCI_CLASS_OVERRIDE,
532 "Acard ATP860 Ultra66 IDE Controller",
533 acard_chip_map,
534 },
535 { PCI_PRODUCT_ACARD_ATP860A,
536 IDE_PCI_CLASS_OVERRIDE,
537 "Acard ATP860-A Ultra66 IDE Controller",
538 acard_chip_map,
539 },
540 { 0,
541 0,
542 NULL,
543 NULL
544 }
545 };
546
547 const struct pciide_product_desc pciide_serverworks_products[] = {
548 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
549 0,
550 "ServerWorks OSB4 IDE Controller",
551 serverworks_chip_map,
552 },
553 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
554 0,
555 "ServerWorks CSB5 IDE Controller",
556 serverworks_chip_map,
557 },
558 { 0,
559 0,
560 NULL,
561 }
562 };
563
564 const struct pciide_product_desc pciide_symphony_products[] = {
565 { PCI_PRODUCT_SYMPHONY_82C105,
566 0,
567 "Symphony Labs 82C105 IDE controller",
568 sl82c105_chip_map,
569 },
570 { 0,
571 0,
572 NULL,
573 }
574 };
575
576 const struct pciide_product_desc pciide_winbond_products[] = {
577 { PCI_PRODUCT_WINBOND_W83C553F_1,
578 0,
579 "Winbond W83C553F IDE controller",
580 sl82c105_chip_map,
581 },
582 { 0,
583 0,
584 NULL,
585 }
586 };
587
588 struct pciide_vendor_desc {
589 u_int32_t ide_vendor;
590 const struct pciide_product_desc *ide_products;
591 };
592
593 const struct pciide_vendor_desc pciide_vendors[] = {
594 { PCI_VENDOR_INTEL, pciide_intel_products },
595 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
596 { PCI_VENDOR_VIATECH, pciide_via_products },
597 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
598 { PCI_VENDOR_SIS, pciide_sis_products },
599 { PCI_VENDOR_ALI, pciide_acer_products },
600 { PCI_VENDOR_PROMISE, pciide_promise_products },
601 { PCI_VENDOR_AMD, pciide_amd_products },
602 { PCI_VENDOR_OPTI, pciide_opti_products },
603 { PCI_VENDOR_TRIONES, pciide_triones_products },
604 { PCI_VENDOR_ACARD, pciide_acard_products },
605 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
606 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
607 { PCI_VENDOR_WINBOND, pciide_winbond_products },
608 { 0, NULL }
609 };
610
611 /* options passed via the 'flags' config keyword */
612 #define PCIIDE_OPTIONS_DMA 0x01
613 #define PCIIDE_OPTIONS_NODMA 0x02
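/*
 * For example (kernel config file syntax, hypothetical locator values):
 *	pciide* at pci? dev ? function ? flags 0x0001	# allow DMA on unknown chips
 *	pciide* at pci? dev ? function ? flags 0x0002	# force DMA off
 */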
614
615 int pciide_match __P((struct device *, struct cfdata *, void *));
616 void pciide_attach __P((struct device *, struct device *, void *));
617
618 struct cfattach pciide_ca = {
619 sizeof(struct pciide_softc), pciide_match, pciide_attach
620 };
621 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
622 int pciide_mapregs_compat __P(( struct pci_attach_args *,
623 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
624 int pciide_mapregs_native __P((struct pci_attach_args *,
625 struct pciide_channel *, bus_size_t *, bus_size_t *,
626 int (*pci_intr) __P((void *))));
627 void pciide_mapreg_dma __P((struct pciide_softc *,
628 struct pci_attach_args *));
629 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
630 void pciide_mapchan __P((struct pci_attach_args *,
631 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
632 int (*pci_intr) __P((void *))));
633 int pciide_chan_candisable __P((struct pciide_channel *));
634 void pciide_map_compat_intr __P(( struct pci_attach_args *,
635 struct pciide_channel *, int, int));
636 int pciide_compat_intr __P((void *));
637 int pciide_pci_intr __P((void *));
638 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
639
640 const struct pciide_product_desc *
641 pciide_lookup_product(id)
642 u_int32_t id;
643 {
644 const struct pciide_product_desc *pp;
645 const struct pciide_vendor_desc *vp;
646
647 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
648 if (PCI_VENDOR(id) == vp->ide_vendor)
649 break;
650
651 if ((pp = vp->ide_products) == NULL)
652 return NULL;
653
654 for (; pp->chip_map != NULL; pp++)
655 if (PCI_PRODUCT(id) == pp->ide_product)
656 break;
657
658 if (pp->chip_map == NULL)
659 return NULL;
660 return pp;
661 }
662
663 int
664 pciide_match(parent, match, aux)
665 struct device *parent;
666 struct cfdata *match;
667 void *aux;
668 {
669 struct pci_attach_args *pa = aux;
670 const struct pciide_product_desc *pp;
671
672 /*
673 * Check the ID register to see that it's a PCI IDE controller.
674 * If it is, we assume that we can deal with it; it _should_
675 * work in a standardized way...
676 */
677 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
678 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
679 return (1);
680 }
681
	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI
	 * IDE controllers.  Let's see if we can deal with them anyway.
	 */
686 pp = pciide_lookup_product(pa->pa_id);
687 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
688 return (1);
689 }
690
691 return (0);
692 }
693
694 void
695 pciide_attach(parent, self, aux)
696 struct device *parent, *self;
697 void *aux;
698 {
699 struct pci_attach_args *pa = aux;
700 pci_chipset_tag_t pc = pa->pa_pc;
701 pcitag_t tag = pa->pa_tag;
702 struct pciide_softc *sc = (struct pciide_softc *)self;
703 pcireg_t csr;
704 char devinfo[256];
705 const char *displaydev;
706
707 sc->sc_pp = pciide_lookup_product(pa->pa_id);
708 if (sc->sc_pp == NULL) {
709 sc->sc_pp = &default_product_desc;
710 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
711 displaydev = devinfo;
712 } else
713 displaydev = sc->sc_pp->ide_name;
714
715 /* if displaydev == NULL, printf is done in chip-specific map */
716 if (displaydev)
717 printf(": %s (rev. 0x%02x)\n", displaydev,
718 PCI_REVISION(pa->pa_class));
719
720 sc->sc_pc = pa->pa_pc;
721 sc->sc_tag = pa->pa_tag;
722 #ifdef WDCDEBUG
723 if (wdcdebug_pciide_mask & DEBUG_PROBE)
724 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
725 #endif
726 sc->sc_pp->chip_map(sc, pa);
727
728 if (sc->sc_dma_ok) {
729 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
730 csr |= PCI_COMMAND_MASTER_ENABLE;
731 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
732 }
733 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
734 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
735 }
736
/* tell whether the chip is enabled or not */
738 int
739 pciide_chipen(sc, pa)
740 struct pciide_softc *sc;
741 struct pci_attach_args *pa;
742 {
743 pcireg_t csr;
744 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
745 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
746 PCI_COMMAND_STATUS_REG);
747 printf("%s: device disabled (at %s)\n",
748 sc->sc_wdcdev.sc_dev.dv_xname,
749 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
750 "device" : "bridge");
751 return 0;
752 }
753 return 1;
754 }
755
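/*
 * Map the command and control register blocks of a channel running in
 * compatibility mode, at the fixed legacy addresses for that channel.
 * Returns 1 on success, 0 on failure.
 */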
756 int
757 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
758 struct pci_attach_args *pa;
759 struct pciide_channel *cp;
760 int compatchan;
761 bus_size_t *cmdsizep, *ctlsizep;
762 {
763 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
764 struct channel_softc *wdc_cp = &cp->wdc_channel;
765
766 cp->compat = 1;
767 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
768 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
769
770 wdc_cp->cmd_iot = pa->pa_iot;
771 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
772 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
773 printf("%s: couldn't map %s channel cmd regs\n",
774 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
775 return (0);
776 }
777
778 wdc_cp->ctl_iot = pa->pa_iot;
779 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
780 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
781 printf("%s: couldn't map %s channel ctl regs\n",
782 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
783 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
784 PCIIDE_COMPAT_CMD_SIZE);
785 return (0);
786 }
787
788 return (1);
789 }
790
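/*
 * Map the command and control register blocks of a channel running in
 * native-PCI mode from the device's BARs, establishing the controller's
 * shared PCI interrupt the first time through.
 * Returns 1 on success, 0 on failure.
 */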
791 int
792 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
793 struct pci_attach_args * pa;
794 struct pciide_channel *cp;
795 bus_size_t *cmdsizep, *ctlsizep;
796 int (*pci_intr) __P((void *));
797 {
798 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
799 struct channel_softc *wdc_cp = &cp->wdc_channel;
800 const char *intrstr;
801 pci_intr_handle_t intrhandle;
802
803 cp->compat = 0;
804
805 if (sc->sc_pci_ih == NULL) {
806 if (pci_intr_map(pa, &intrhandle) != 0) {
807 printf("%s: couldn't map native-PCI interrupt\n",
808 sc->sc_wdcdev.sc_dev.dv_xname);
809 return 0;
810 }
811 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
812 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
813 intrhandle, IPL_BIO, pci_intr, sc);
814 if (sc->sc_pci_ih != NULL) {
815 printf("%s: using %s for native-PCI interrupt\n",
816 sc->sc_wdcdev.sc_dev.dv_xname,
817 intrstr ? intrstr : "unknown interrupt");
818 } else {
819 printf("%s: couldn't establish native-PCI interrupt",
820 sc->sc_wdcdev.sc_dev.dv_xname);
821 if (intrstr != NULL)
822 printf(" at %s", intrstr);
823 printf("\n");
824 return 0;
825 }
826 }
827 cp->ih = sc->sc_pci_ih;
828 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
829 PCI_MAPREG_TYPE_IO, 0,
830 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
831 printf("%s: couldn't map %s channel cmd regs\n",
832 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
833 return 0;
834 }
835
836 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
837 PCI_MAPREG_TYPE_IO, 0,
838 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
839 printf("%s: couldn't map %s channel ctl regs\n",
840 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
841 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
842 return 0;
843 }
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register; the control register itself is at offset 2.  Pass the
	 * generic code a handle for only one byte at the right offset.
	 */
849 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
850 &wdc_cp->ctl_ioh) != 0) {
851 printf("%s: unable to subregion %s channel ctl regs\n",
852 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
853 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
855 return 0;
856 }
857 return (1);
858 }
859
860 void
861 pciide_mapreg_dma(sc, pa)
862 struct pciide_softc *sc;
863 struct pci_attach_args *pa;
864 {
865 pcireg_t maptype;
866 bus_addr_t addr;
867
868 /*
869 * Map DMA registers
870 *
871 * Note that sc_dma_ok is the right variable to test to see if
872 * DMA can be done. If the interface doesn't support DMA,
873 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
874 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
875 * non-zero if the interface supports DMA and the registers
876 * could be mapped.
877 *
878 * XXX Note that despite the fact that the Bus Master IDE specs
879 * XXX say that "The bus master IDE function uses 16 bytes of IO
880 * XXX space," some controllers (at least the United
881 * XXX Microelectronics UM8886BF) place it in memory space.
882 */
883 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
884 PCIIDE_REG_BUS_MASTER_DMA);
885
886 switch (maptype) {
887 case PCI_MAPREG_TYPE_IO:
888 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
889 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
890 &addr, NULL, NULL) == 0);
891 if (sc->sc_dma_ok == 0) {
892 printf(", but unused (couldn't query registers)");
893 break;
894 }
895 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
896 && addr >= 0x10000) {
897 sc->sc_dma_ok = 0;
898 printf(", but unused (registers at unsafe address "
899 "%#lx)", (unsigned long)addr);
900 break;
901 }
902 /* FALLTHROUGH */
903
904 case PCI_MAPREG_MEM_TYPE_32BIT:
905 sc->sc_dma_ok = (pci_mapreg_map(pa,
906 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
907 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
908 sc->sc_dmat = pa->pa_dmat;
909 if (sc->sc_dma_ok == 0) {
910 printf(", but unused (couldn't map registers)");
911 } else {
912 sc->sc_wdcdev.dma_arg = sc;
913 sc->sc_wdcdev.dma_init = pciide_dma_init;
914 sc->sc_wdcdev.dma_start = pciide_dma_start;
915 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
916 }
917
918 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
919 PCIIDE_OPTIONS_NODMA) {
920 printf(", but unused (forced off by config file)");
921 sc->sc_dma_ok = 0;
922 }
923 break;
924
925 default:
926 sc->sc_dma_ok = 0;
927 printf(", but unsupported register maptype (0x%x)", maptype);
928 }
929 }
930
931 int
932 pciide_compat_intr(arg)
933 void *arg;
934 {
935 struct pciide_channel *cp = arg;
936
937 #ifdef DIAGNOSTIC
938 /* should only be called for a compat channel */
939 if (cp->compat == 0)
940 panic("pciide compat intr called for non-compat chan %p\n", cp);
941 #endif
942 return (wdcintr(&cp->wdc_channel));
943 }
944
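/*
 * Interrupt handler shared by all native-PCI channels of a controller:
 * poll each non-compat channel that is waiting for an interrupt and let
 * wdcintr() decide whether the interrupt is ours.
 */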
945 int
946 pciide_pci_intr(arg)
947 void *arg;
948 {
949 struct pciide_softc *sc = arg;
950 struct pciide_channel *cp;
951 struct channel_softc *wdc_cp;
952 int i, rv, crv;
953
954 rv = 0;
955 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
956 cp = &sc->pciide_channels[i];
957 wdc_cp = &cp->wdc_channel;
958
		/* If this is a compat channel, skip it. */
960 if (cp->compat)
961 continue;
		/* if this channel is not waiting for an intr, skip it */
963 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
964 continue;
965
966 crv = wdcintr(wdc_cp);
967 if (crv == 0)
968 ; /* leave rv alone */
969 else if (crv == 1)
970 rv = 1; /* claim the intr */
971 else if (rv == 0) /* crv should be -1 in this case */
972 rv = crv; /* if we've done no better, take it */
973 }
974 return (rv);
975 }
976
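/*
 * Set up per-drive DMA resources for a channel; drives for which this
 * fails (or for which DMA is not usable) get their DMA/UDMA flags
 * cleared so that they fall back to PIO.
 */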
977 void
978 pciide_channel_dma_setup(cp)
979 struct pciide_channel *cp;
980 {
981 int drive;
982 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
983 struct ata_drive_datas *drvp;
984
985 for (drive = 0; drive < 2; drive++) {
986 drvp = &cp->wdc_channel.ch_drive[drive];
987 /* If no drive, skip */
988 if ((drvp->drive_flags & DRIVE) == 0)
989 continue;
990 /* setup DMA if needed */
991 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
992 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
993 sc->sc_dma_ok == 0) {
994 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
995 continue;
996 }
997 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
998 != 0) {
999 /* Abort DMA setup */
1000 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1001 continue;
1002 }
1003 }
1004 }
1005
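/*
 * Allocate and map the bus-master descriptor table for one drive, and
 * create the DMA maps used to load the table and the data transfers.
 * Safe to call more than once; returns 0 on success or an error number.
 */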
1006 int
1007 pciide_dma_table_setup(sc, channel, drive)
1008 struct pciide_softc *sc;
1009 int channel, drive;
1010 {
1011 bus_dma_segment_t seg;
1012 int error, rseg;
1013 const bus_size_t dma_table_size =
1014 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1015 struct pciide_dma_maps *dma_maps =
1016 &sc->pciide_channels[channel].dma_maps[drive];
1017
1018 /* If table was already allocated, just return */
1019 if (dma_maps->dma_table)
1020 return 0;
1021
1022 /* Allocate memory for the DMA tables and map it */
1023 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1024 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1025 BUS_DMA_NOWAIT)) != 0) {
1026 printf("%s:%d: unable to allocate table DMA for "
1027 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1028 channel, drive, error);
1029 return error;
1030 }
1031 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1032 dma_table_size,
1033 (caddr_t *)&dma_maps->dma_table,
1034 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1035 printf("%s:%d: unable to map table DMA for"
1036 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1037 channel, drive, error);
1038 return error;
1039 }
1040 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1041 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1042 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1043
1044 /* Create and load table DMA map for this disk */
1045 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1046 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1047 &dma_maps->dmamap_table)) != 0) {
1048 printf("%s:%d: unable to create table DMA map for "
1049 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1050 channel, drive, error);
1051 return error;
1052 }
1053 if ((error = bus_dmamap_load(sc->sc_dmat,
1054 dma_maps->dmamap_table,
1055 dma_maps->dma_table,
1056 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1057 printf("%s:%d: unable to load table DMA map for "
1058 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1059 channel, drive, error);
1060 return error;
1061 }
1062 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1063 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1064 DEBUG_PROBE);
1065 /* Create a xfer DMA map for this drive */
1066 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1067 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1068 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1069 &dma_maps->dmamap_xfer)) != 0) {
1070 printf("%s:%d: unable to create xfer DMA map for "
1071 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1072 channel, drive, error);
1073 return error;
1074 }
1075 return 0;
1076 }
1077
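/*
 * Prepare a DMA transfer: load the data buffer into the xfer DMA map,
 * fill the descriptor table (one base address/byte count pair per
 * segment, EOT flag on the last one) and program the bus-master
 * registers of the channel.
 */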
1078 int
1079 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1080 void *v;
1081 int channel, drive;
1082 void *databuf;
1083 size_t datalen;
1084 int flags;
1085 {
1086 struct pciide_softc *sc = v;
1087 int error, seg;
1088 struct pciide_dma_maps *dma_maps =
1089 &sc->pciide_channels[channel].dma_maps[drive];
1090
1091 error = bus_dmamap_load(sc->sc_dmat,
1092 dma_maps->dmamap_xfer,
1093 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1094 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1095 if (error) {
1096 printf("%s:%d: unable to load xfer DMA map for"
1097 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1098 channel, drive, error);
1099 return error;
1100 }
1101
1102 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1103 dma_maps->dmamap_xfer->dm_mapsize,
1104 (flags & WDC_DMA_READ) ?
1105 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1106
1107 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1108 #ifdef DIAGNOSTIC
1109 /* A segment must not cross a 64k boundary */
1110 {
1111 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1112 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1113 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1114 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1115 printf("pciide_dma: segment %d physical addr 0x%lx"
1116 " len 0x%lx not properly aligned\n",
1117 seg, phys, len);
1118 panic("pciide_dma: buf align");
1119 }
1120 }
1121 #endif
1122 dma_maps->dma_table[seg].base_addr =
1123 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1124 dma_maps->dma_table[seg].byte_count =
1125 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1126 IDEDMA_BYTE_COUNT_MASK);
1127 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1128 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1129 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1130
1131 }
1132 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1133 htole32(IDEDMA_BYTE_COUNT_EOT);
1134
1135 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1136 dma_maps->dmamap_table->dm_mapsize,
1137 BUS_DMASYNC_PREWRITE);
1138
1139 /* Maps are ready. Start DMA function */
1140 #ifdef DIAGNOSTIC
1141 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1142 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1143 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1144 panic("pciide_dma_init: table align");
1145 }
1146 #endif
1147
1148 /* Clear status bits */
1149 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1150 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1151 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1152 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1153 /* Write table addr */
1154 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1155 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1156 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1157 /* set read/write */
1158 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1159 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1160 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1161 /* remember flags */
1162 dma_maps->dma_flags = flags;
1163 return 0;
1164 }
1165
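/* Kick off a previously prepared DMA transfer on the given channel. */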
1166 void
1167 pciide_dma_start(v, channel, drive)
1168 void *v;
1169 int channel, drive;
1170 {
1171 struct pciide_softc *sc = v;
1172
1173 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1174 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1175 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1176 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1177 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1178 }
1179
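/*
 * Complete a DMA transfer: stop the channel, unload the data buffer map
 * and turn the bus-master status bits into WDC_DMAST_* error flags.
 */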
1180 int
1181 pciide_dma_finish(v, channel, drive, force)
1182 void *v;
1183 int channel, drive;
1184 int force;
1185 {
1186 struct pciide_softc *sc = v;
1187 u_int8_t status;
1188 int error = 0;
1189 struct pciide_dma_maps *dma_maps =
1190 &sc->pciide_channels[channel].dma_maps[drive];
1191
1192 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1193 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1194 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1195 DEBUG_XFERS);
1196
1197 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1198 return WDC_DMAST_NOIRQ;
1199
1200 /* stop DMA channel */
1201 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1202 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1203 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1204 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1205
1206 /* Unload the map of the data buffer */
1207 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1208 dma_maps->dmamap_xfer->dm_mapsize,
1209 (dma_maps->dma_flags & WDC_DMA_READ) ?
1210 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1211 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1212
1213 if ((status & IDEDMA_CTL_ERR) != 0) {
1214 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1215 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1216 error |= WDC_DMAST_ERR;
1217 }
1218
1219 if ((status & IDEDMA_CTL_INTR) == 0) {
1220 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1221 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1222 drive, status);
1223 error |= WDC_DMAST_NOIRQ;
1224 }
1225
1226 if ((status & IDEDMA_CTL_ACT) != 0) {
1227 /* data underrun, may be a valid condition for ATAPI */
1228 error |= WDC_DMAST_UNDER;
1229 }
1230 return error;
1231 }
1232
1233 void
1234 pciide_irqack(chp)
1235 struct channel_softc *chp;
1236 {
1237 struct pciide_channel *cp = (struct pciide_channel*)chp;
1238 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1239
1240 /* clear status bits in IDE DMA registers */
1241 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1242 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1243 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1244 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1245 }
1246
1247 /* some common code used by several chip_map */
1248 int
1249 pciide_chansetup(sc, channel, interface)
1250 struct pciide_softc *sc;
1251 int channel;
1252 pcireg_t interface;
1253 {
1254 struct pciide_channel *cp = &sc->pciide_channels[channel];
1255 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1256 cp->name = PCIIDE_CHANNEL_NAME(channel);
1257 cp->wdc_channel.channel = channel;
1258 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1259 cp->wdc_channel.ch_queue =
1260 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1261 if (cp->wdc_channel.ch_queue == NULL) {
1262 printf("%s %s channel: "
1263 "can't allocate memory for command queue",
1264 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1265 return 0;
1266 }
1267 printf("%s: %s channel %s to %s mode\n",
1268 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1269 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1270 "configured" : "wired",
1271 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1272 "native-PCI" : "compatibility");
1273 return 1;
1274 }
1275
1276 /* some common code used by several chip channel_map */
1277 void
1278 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1279 struct pci_attach_args *pa;
1280 struct pciide_channel *cp;
1281 pcireg_t interface;
1282 bus_size_t *cmdsizep, *ctlsizep;
1283 int (*pci_intr) __P((void *));
1284 {
1285 struct channel_softc *wdc_cp = &cp->wdc_channel;
1286
1287 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1288 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1289 pci_intr);
1290 else
1291 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1292 wdc_cp->channel, cmdsizep, ctlsizep);
1293
1294 if (cp->hw_ok == 0)
1295 return;
1296 wdc_cp->data32iot = wdc_cp->cmd_iot;
1297 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1298 wdcattach(wdc_cp);
1299 }
1300
1301 /*
 * Generic code to check whether a channel can be disabled.  Returns 1
 * if the channel can be disabled, 0 if not.
1304 */
1305 int
1306 pciide_chan_candisable(cp)
1307 struct pciide_channel *cp;
1308 {
1309 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1310 struct channel_softc *wdc_cp = &cp->wdc_channel;
1311
1312 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1313 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1314 printf("%s: disabling %s channel (no drives)\n",
1315 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1316 cp->hw_ok = 0;
1317 return 1;
1318 }
1319 return 0;
1320 }
1321
1322 /*
1323 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1324 * Set hw_ok=0 on failure
1325 */
1326 void
1327 pciide_map_compat_intr(pa, cp, compatchan, interface)
1328 struct pci_attach_args *pa;
1329 struct pciide_channel *cp;
1330 int compatchan, interface;
1331 {
1332 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1333 struct channel_softc *wdc_cp = &cp->wdc_channel;
1334
1335 if (cp->hw_ok == 0)
1336 return;
1337 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1338 return;
1339
1340 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1341 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1342 pa, compatchan, pciide_compat_intr, cp);
1343 if (cp->ih == NULL) {
1344 #endif
1345 printf("%s: no compatibility interrupt for use by %s "
1346 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1347 cp->hw_ok = 0;
1348 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1349 }
1350 #endif
1351 }
1352
1353 void
1354 pciide_print_modes(cp)
1355 struct pciide_channel *cp;
1356 {
1357 wdc_print_modes(&cp->wdc_channel);
1358 }
1359
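/*
 * Generic chip map: map and probe the channels; for controllers without
 * chip-specific support, bus-master DMA is used only when explicitly
 * enabled via the config file flags.
 */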
1360 void
1361 default_chip_map(sc, pa)
1362 struct pciide_softc *sc;
1363 struct pci_attach_args *pa;
1364 {
1365 struct pciide_channel *cp;
1366 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1367 pcireg_t csr;
1368 int channel, drive;
1369 struct ata_drive_datas *drvp;
1370 u_int8_t idedma_ctl;
1371 bus_size_t cmdsize, ctlsize;
1372 char *failreason;
1373
1374 if (pciide_chipen(sc, pa) == 0)
1375 return;
1376
1377 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1378 printf("%s: bus-master DMA support present",
1379 sc->sc_wdcdev.sc_dev.dv_xname);
1380 if (sc->sc_pp == &default_product_desc &&
1381 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1382 PCIIDE_OPTIONS_DMA) == 0) {
1383 printf(", but unused (no driver support)");
1384 sc->sc_dma_ok = 0;
1385 } else {
1386 pciide_mapreg_dma(sc, pa);
1387 if (sc->sc_dma_ok != 0)
1388 printf(", used without full driver "
1389 "support");
1390 }
1391 } else {
1392 printf("%s: hardware does not support DMA",
1393 sc->sc_wdcdev.sc_dev.dv_xname);
1394 sc->sc_dma_ok = 0;
1395 }
1396 printf("\n");
1397 if (sc->sc_dma_ok) {
1398 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1399 sc->sc_wdcdev.irqack = pciide_irqack;
1400 }
1401 sc->sc_wdcdev.PIO_cap = 0;
1402 sc->sc_wdcdev.DMA_cap = 0;
1403
1404 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1405 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1406 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1407
1408 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1409 cp = &sc->pciide_channels[channel];
1410 if (pciide_chansetup(sc, channel, interface) == 0)
1411 continue;
1412 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1413 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1414 &ctlsize, pciide_pci_intr);
1415 } else {
1416 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1417 channel, &cmdsize, &ctlsize);
1418 }
1419 if (cp->hw_ok == 0)
1420 continue;
1421 /*
1422 * Check to see if something appears to be there.
1423 */
1424 failreason = NULL;
1425 if (!wdcprobe(&cp->wdc_channel)) {
1426 failreason = "not responding; disabled or no drives?";
1427 goto next;
1428 }
1429 /*
1430 * Now, make sure it's actually attributable to this PCI IDE
1431 * channel by trying to access the channel again while the
1432 * PCI IDE controller's I/O space is disabled. (If the
1433 * channel no longer appears to be there, it belongs to
1434 * this controller.) YUCK!
1435 */
1436 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1437 PCI_COMMAND_STATUS_REG);
1438 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1439 csr & ~PCI_COMMAND_IO_ENABLE);
1440 if (wdcprobe(&cp->wdc_channel))
1441 failreason = "other hardware responding at addresses";
1442 pci_conf_write(sc->sc_pc, sc->sc_tag,
1443 PCI_COMMAND_STATUS_REG, csr);
1444 next:
1445 if (failreason) {
1446 printf("%s: %s channel ignored (%s)\n",
1447 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1448 failreason);
1449 cp->hw_ok = 0;
1450 bus_space_unmap(cp->wdc_channel.cmd_iot,
1451 cp->wdc_channel.cmd_ioh, cmdsize);
1452 if (interface & PCIIDE_INTERFACE_PCI(channel))
1453 bus_space_unmap(cp->wdc_channel.ctl_iot,
1454 cp->ctl_baseioh, ctlsize);
1455 else
1456 bus_space_unmap(cp->wdc_channel.ctl_iot,
1457 cp->wdc_channel.ctl_ioh, ctlsize);
1458 } else {
1459 pciide_map_compat_intr(pa, cp, channel, interface);
1460 }
1461 if (cp->hw_ok) {
1462 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1463 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1464 wdcattach(&cp->wdc_channel);
1465 }
1466 }
1467
1468 if (sc->sc_dma_ok == 0)
1469 return;
1470
1471 /* Allocate DMA maps */
1472 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1473 idedma_ctl = 0;
1474 cp = &sc->pciide_channels[channel];
1475 for (drive = 0; drive < 2; drive++) {
1476 drvp = &cp->wdc_channel.ch_drive[drive];
1477 /* If no drive, skip */
1478 if ((drvp->drive_flags & DRIVE) == 0)
1479 continue;
1480 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1481 continue;
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup */
				printf("%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive);
				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
			}
1490 printf("%s:%d:%d: using DMA data transfers\n",
1491 sc->sc_wdcdev.sc_dev.dv_xname,
1492 channel, drive);
1493 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1494 }
1495 if (idedma_ctl != 0) {
1496 /* Add software bits in status register */
1497 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1498 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1499 idedma_ctl);
1500 }
1501 }
1502 }
1503
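/*
 * Chip map for the Intel PIIX/PIIX3/PIIX4 and ICH families.  The channels
 * are always in compatibility mode; the supported UDMA level depends on
 * the exact product.
 */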
1504 void
1505 piix_chip_map(sc, pa)
1506 struct pciide_softc *sc;
1507 struct pci_attach_args *pa;
1508 {
1509 struct pciide_channel *cp;
1510 int channel;
1511 u_int32_t idetim;
1512 bus_size_t cmdsize, ctlsize;
1513
1514 if (pciide_chipen(sc, pa) == 0)
1515 return;
1516
1517 printf("%s: bus-master DMA support present",
1518 sc->sc_wdcdev.sc_dev.dv_xname);
1519 pciide_mapreg_dma(sc, pa);
1520 printf("\n");
1521 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1522 WDC_CAPABILITY_MODE;
1523 if (sc->sc_dma_ok) {
1524 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1525 sc->sc_wdcdev.irqack = pciide_irqack;
1526 switch(sc->sc_pp->ide_product) {
1527 case PCI_PRODUCT_INTEL_82371AB_IDE:
1528 case PCI_PRODUCT_INTEL_82440MX_IDE:
1529 case PCI_PRODUCT_INTEL_82801AA_IDE:
1530 case PCI_PRODUCT_INTEL_82801AB_IDE:
1531 case PCI_PRODUCT_INTEL_82801BA_IDE:
1532 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1533 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1534 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1535 case PCI_PRODUCT_INTEL_82801DB_IDE:
1536 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1537 }
1538 }
1539 sc->sc_wdcdev.PIO_cap = 4;
1540 sc->sc_wdcdev.DMA_cap = 2;
1541 switch(sc->sc_pp->ide_product) {
1542 case PCI_PRODUCT_INTEL_82801AA_IDE:
1543 sc->sc_wdcdev.UDMA_cap = 4;
1544 break;
1545 case PCI_PRODUCT_INTEL_82801BA_IDE:
1546 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1547 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1548 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1549 case PCI_PRODUCT_INTEL_82801DB_IDE:
1550 sc->sc_wdcdev.UDMA_cap = 5;
1551 break;
1552 default:
1553 sc->sc_wdcdev.UDMA_cap = 2;
1554 }
1555 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1556 sc->sc_wdcdev.set_modes = piix_setup_channel;
1557 else
1558 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1559 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1560 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1561
1562 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1563 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1564 DEBUG_PROBE);
1565 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1566 WDCDEBUG_PRINT((", sidetim=0x%x",
1567 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1568 DEBUG_PROBE);
1569 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1570 WDCDEBUG_PRINT((", udamreg 0x%x",
1571 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1572 DEBUG_PROBE);
1573 }
1574 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1575 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1576 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1577 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1578 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1579 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1580 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1581 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1582 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1583 DEBUG_PROBE);
1584 }
1585
1586 }
1587 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1588
1589 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1590 cp = &sc->pciide_channels[channel];
1591 /* PIIX is compat-only */
1592 if (pciide_chansetup(sc, channel, 0) == 0)
1593 continue;
1594 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1595 if ((PIIX_IDETIM_READ(idetim, channel) &
1596 PIIX_IDETIM_IDE) == 0) {
1597 printf("%s: %s channel ignored (disabled)\n",
1598 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1599 continue;
1600 }
		/* PIIX controllers are compat-only pciide devices */
1602 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1603 if (cp->hw_ok == 0)
1604 continue;
1605 if (pciide_chan_candisable(cp)) {
1606 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1607 channel);
1608 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1609 idetim);
1610 }
1611 pciide_map_compat_intr(pa, cp, channel, 0);
1612 if (cp->hw_ok == 0)
1613 continue;
1614 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1615 }
1616
1617 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1618 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1619 DEBUG_PROBE);
1620 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1621 WDCDEBUG_PRINT((", sidetim=0x%x",
1622 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1623 DEBUG_PROBE);
1624 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1625 WDCDEBUG_PRINT((", udamreg 0x%x",
1626 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1627 DEBUG_PROBE);
1628 }
1629 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1630 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1631 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1632 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1633 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1634 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1635 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1636 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1637 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1638 DEBUG_PROBE);
1639 }
1640 }
1641 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1642 }
1643
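/*
 * Per-channel timing setup for the original PIIX, which shares a single
 * set of timings between the master and slave drives; see the mode
 * negotiation below.
 */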
1644 void
1645 piix_setup_channel(chp)
1646 struct channel_softc *chp;
1647 {
1648 u_int8_t mode[2], drive;
1649 u_int32_t oidetim, idetim, idedma_ctl;
1650 struct pciide_channel *cp = (struct pciide_channel*)chp;
1651 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1652 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1653
1654 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1655 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1656 idedma_ctl = 0;
1657
1658 /* set up new idetim: Enable IDE registers decode */
1659 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1660 chp->channel);
1661
1662 /* setup DMA */
1663 pciide_channel_dma_setup(cp);
1664
	/*
	 * Here we have to mess with the drives' modes: the PIIX can't have
	 * different timings for the master and slave drives.
	 * We need to find the best combination.
	 */
1670
	/* If both drives support DMA, take the lower mode */
1672 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1673 (drvp[1].drive_flags & DRIVE_DMA)) {
1674 mode[0] = mode[1] =
1675 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1676 drvp[0].DMA_mode = mode[0];
1677 drvp[1].DMA_mode = mode[1];
1678 goto ok;
1679 }
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if its mode is not compatible.
	 */
1684 if (drvp[0].drive_flags & DRIVE_DMA) {
1685 mode[0] = drvp[0].DMA_mode;
1686 mode[1] = drvp[1].PIO_mode;
1687 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1688 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1689 mode[1] = drvp[1].PIO_mode = 0;
1690 goto ok;
1691 }
1692 if (drvp[1].drive_flags & DRIVE_DMA) {
1693 mode[1] = drvp[1].DMA_mode;
1694 mode[0] = drvp[0].PIO_mode;
1695 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1696 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1697 mode[0] = drvp[0].PIO_mode = 0;
1698 goto ok;
1699 }
	/*
	 * If neither drive is using DMA, take the lower mode, unless
	 * one of them is in PIO mode < 2.
	 */
1704 if (drvp[0].PIO_mode < 2) {
1705 mode[0] = drvp[0].PIO_mode = 0;
1706 mode[1] = drvp[1].PIO_mode;
1707 } else if (drvp[1].PIO_mode < 2) {
1708 mode[1] = drvp[1].PIO_mode = 0;
1709 mode[0] = drvp[0].PIO_mode;
1710 } else {
1711 mode[0] = mode[1] =
1712 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1713 drvp[0].PIO_mode = mode[0];
1714 drvp[1].PIO_mode = mode[1];
1715 }
1716 ok: /* The modes are setup */
1717 for (drive = 0; drive < 2; drive++) {
1718 if (drvp[drive].drive_flags & DRIVE_DMA) {
1719 idetim |= piix_setup_idetim_timings(
1720 mode[drive], 1, chp->channel);
1721 goto end;
1722 }
1723 }
	/* If we get here, neither drive is using DMA */
1725 if (mode[0] >= 2)
1726 idetim |= piix_setup_idetim_timings(
1727 mode[0], 0, chp->channel);
1728 else
1729 idetim |= piix_setup_idetim_timings(
1730 mode[1], 0, chp->channel);
1731 end: /*
1732 * timing mode is now set up in the controller. Enable
1733 * it per-drive
1734 */
1735 for (drive = 0; drive < 2; drive++) {
1736 /* If no drive, skip */
1737 if ((drvp[drive].drive_flags & DRIVE) == 0)
1738 continue;
1739 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1740 if (drvp[drive].drive_flags & DRIVE_DMA)
1741 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1742 }
1743 if (idedma_ctl != 0) {
1744 /* Add software bits in status register */
1745 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1746 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1747 idedma_ctl);
1748 }
1749 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1750 pciide_print_modes(cp);
1751 }
1752
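/*
 * Per-channel timing setup for PIIX3/PIIX4/ICH-class chips: unlike the
 * original PIIX these have a slave IDE timing register (SIDETIM) and,
 * where supported, Ultra-DMA control/timing and IDE_CONFIG registers.
 */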
1753 void
1754 piix3_4_setup_channel(chp)
1755 struct channel_softc *chp;
1756 {
1757 struct ata_drive_datas *drvp;
1758 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1759 struct pciide_channel *cp = (struct pciide_channel*)chp;
1760 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1761 int drive;
1762 int channel = chp->channel;
1763
1764 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1765 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1766 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1767 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1768 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1769 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1770 PIIX_SIDETIM_RTC_MASK(channel));
1771
1772 idedma_ctl = 0;
1773 /* If channel disabled, no need to go further */
1774 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1775 return;
1776 /* set up new idetim: Enable IDE registers decode */
1777 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1778
1779 /* setup DMA if needed */
1780 pciide_channel_dma_setup(cp);
1781
1782 for (drive = 0; drive < 2; drive++) {
1783 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1784 PIIX_UDMATIM_SET(0x3, channel, drive));
1785 drvp = &chp->ch_drive[drive];
1786 /* If no drive, skip */
1787 if ((drvp->drive_flags & DRIVE) == 0)
1788 continue;
1789 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1790 (drvp->drive_flags & DRIVE_UDMA) == 0))
1791 goto pio;
1792
1793 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1794 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1795 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1796 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1797 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1798 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1799 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
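			/* ICH-family controllers: enable the ping-pong data buffer */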
1800 ideconf |= PIIX_CONFIG_PINGPONG;
1801 }
1802 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1803 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1804 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1805 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1806 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1807 /* setup Ultra/100 */
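			/* modes above UDMA2 need an 80-conductor cable; fall back if none is reported */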
1808 if (drvp->UDMA_mode > 2 &&
1809 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1810 drvp->UDMA_mode = 2;
1811 if (drvp->UDMA_mode > 4) {
1812 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1813 } else {
1814 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1815 if (drvp->UDMA_mode > 2) {
1816 ideconf |= PIIX_CONFIG_UDMA66(channel,
1817 drive);
1818 } else {
1819 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1820 drive);
1821 }
1822 }
1823 }
1824 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1825 /* setup Ultra/66 */
1826 if (drvp->UDMA_mode > 2 &&
1827 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1828 drvp->UDMA_mode = 2;
1829 if (drvp->UDMA_mode > 2)
1830 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1831 else
1832 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1833 }
1834 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1835 (drvp->drive_flags & DRIVE_UDMA)) {
1836 /* use Ultra/DMA */
1837 drvp->drive_flags &= ~DRIVE_DMA;
1838 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1839 udmareg |= PIIX_UDMATIM_SET(
1840 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1841 } else {
1842 /* use Multiword DMA */
1843 drvp->drive_flags &= ~DRIVE_UDMA;
1844 if (drive == 0) {
1845 idetim |= piix_setup_idetim_timings(
1846 drvp->DMA_mode, 1, channel);
1847 } else {
1848 sidetim |= piix_setup_sidetim_timings(
1849 drvp->DMA_mode, 1, channel);
1850 				idetim = PIIX_IDETIM_SET(idetim,
1851 PIIX_IDETIM_SITRE, channel);
1852 }
1853 }
1854 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1855
1856 pio: /* use PIO mode */
1857 idetim |= piix_setup_idetim_drvs(drvp);
1858 if (drive == 0) {
1859 idetim |= piix_setup_idetim_timings(
1860 drvp->PIO_mode, 0, channel);
1861 } else {
1862 sidetim |= piix_setup_sidetim_timings(
1863 drvp->PIO_mode, 0, channel);
1864 			idetim = PIIX_IDETIM_SET(idetim,
1865 PIIX_IDETIM_SITRE, channel);
1866 }
1867 }
1868 if (idedma_ctl != 0) {
1869 /* Add software bits in status register */
1870 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1871 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1872 idedma_ctl);
1873 }
1874 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1875 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1876 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1877 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1878 pciide_print_modes(cp);
1879 }
1880
1881
1882 /* setup ISP and RTC fields, based on mode */
1883 static u_int32_t
1884 piix_setup_idetim_timings(mode, dma, channel)
1885 u_int8_t mode;
1886 u_int8_t dma;
1887 u_int8_t channel;
1888 {
1889
1890 if (dma)
1891 return PIIX_IDETIM_SET(0,
1892 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1893 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1894 channel);
1895 else
1896 return PIIX_IDETIM_SET(0,
1897 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1898 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1899 channel);
1900 }
1901
1902 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1903 static u_int32_t
1904 piix_setup_idetim_drvs(drvp)
1905 struct ata_drive_datas *drvp;
1906 {
1907 u_int32_t ret = 0;
1908 struct channel_softc *chp = drvp->chnl_softc;
1909 u_int8_t channel = chp->channel;
1910 u_int8_t drive = drvp->drive;
1911
1912 	/*
1913 	 * If the drive is using UDMA, the timing setup is independent,
1914 	 * so just check DMA and PIO here.
1915 	 */
1916 if (drvp->drive_flags & DRIVE_DMA) {
1917 		/* if the drive is in DMA mode 0, use compatible timings */
1918 if ((drvp->drive_flags & DRIVE_DMA) &&
1919 drvp->DMA_mode == 0) {
1920 drvp->PIO_mode = 0;
1921 return ret;
1922 }
1923 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1924 		/*
1925 		 * If the PIO and DMA timings are the same, use fast timings
1926 		 * for PIO too; else use compat timings.
1927 		 */
1928 if ((piix_isp_pio[drvp->PIO_mode] !=
1929 piix_isp_dma[drvp->DMA_mode]) ||
1930 (piix_rtc_pio[drvp->PIO_mode] !=
1931 piix_rtc_dma[drvp->DMA_mode]))
1932 drvp->PIO_mode = 0;
1933 /* if PIO mode <= 2, use compat timings for PIO */
1934 if (drvp->PIO_mode <= 2) {
1935 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1936 channel);
1937 return ret;
1938 }
1939 }
1940
1941 /*
1942 * Now setup PIO modes. If mode < 2, use compat timings.
1943 * Else enable fast timings. Enable IORDY and prefetch/post
1944 * if PIO mode >= 3.
1945 */
1946
1947 if (drvp->PIO_mode < 2)
1948 return ret;
1949
1950 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1951 if (drvp->PIO_mode >= 3) {
1952 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1953 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1954 }
1955 return ret;
1956 }
1957
1958 /* setup values in SIDETIM registers, based on mode */
1959 static u_int32_t
1960 piix_setup_sidetim_timings(mode, dma, channel)
1961 u_int8_t mode;
1962 u_int8_t dma;
1963 u_int8_t channel;
1964 {
1965 if (dma)
1966 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1967 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1968 else
1969 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1970 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1971 }
1972
1973 void
1974 amd7x6_chip_map(sc, pa)
1975 struct pciide_softc *sc;
1976 struct pci_attach_args *pa;
1977 {
1978 struct pciide_channel *cp;
1979 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1980 int channel;
1981 pcireg_t chanenable;
1982 bus_size_t cmdsize, ctlsize;
1983
1984 if (pciide_chipen(sc, pa) == 0)
1985 return;
1986 printf("%s: bus-master DMA support present",
1987 sc->sc_wdcdev.sc_dev.dv_xname);
1988 pciide_mapreg_dma(sc, pa);
1989 printf("\n");
1990 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1991 WDC_CAPABILITY_MODE;
1992 if (sc->sc_dma_ok) {
1993 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1994 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1995 sc->sc_wdcdev.irqack = pciide_irqack;
1996 }
1997 sc->sc_wdcdev.PIO_cap = 4;
1998 sc->sc_wdcdev.DMA_cap = 2;
1999
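	/* the 766 and 768 do UDMA/100; older chips top out at UDMA/66 */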
2000 switch (sc->sc_pp->ide_product) {
2001 case PCI_PRODUCT_AMD_PBC766_IDE:
2002 case PCI_PRODUCT_AMD_PBC768_IDE:
2003 sc->sc_wdcdev.UDMA_cap = 5;
2004 break;
2005 default:
2006 sc->sc_wdcdev.UDMA_cap = 4;
2007 }
2008 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2009 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2010 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2011 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
2012
2013 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2014 DEBUG_PROBE);
2015 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2016 cp = &sc->pciide_channels[channel];
2017 if (pciide_chansetup(sc, channel, interface) == 0)
2018 continue;
2019
2020 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2021 printf("%s: %s channel ignored (disabled)\n",
2022 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2023 continue;
2024 }
2025 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2026 pciide_pci_intr);
2027
2028 if (pciide_chan_candisable(cp))
2029 chanenable &= ~AMD7X6_CHAN_EN(channel);
2030 pciide_map_compat_intr(pa, cp, channel, interface);
2031 if (cp->hw_ok == 0)
2032 continue;
2033
2034 amd7x6_setup_channel(&cp->wdc_channel);
2035 }
2036 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
2037 chanenable);
2038 return;
2039 }
2040
2041 void
2042 amd7x6_setup_channel(chp)
2043 struct channel_softc *chp;
2044 {
2045 u_int32_t udmatim_reg, datatim_reg;
2046 u_int8_t idedma_ctl;
2047 int mode, drive;
2048 struct ata_drive_datas *drvp;
2049 struct pciide_channel *cp = (struct pciide_channel*)chp;
2050 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2051 #ifndef PCIIDE_AMD756_ENABLEDMA
2052 int rev = PCI_REVISION(
2053 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2054 #endif
2055
2056 idedma_ctl = 0;
2057 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
2058 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
2059 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2060 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2061
2062 /* setup DMA if needed */
2063 pciide_channel_dma_setup(cp);
2064
2065 for (drive = 0; drive < 2; drive++) {
2066 drvp = &chp->ch_drive[drive];
2067 /* If no drive, skip */
2068 if ((drvp->drive_flags & DRIVE) == 0)
2069 continue;
2070 /* add timing values, setup DMA if needed */
2071 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2072 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2073 mode = drvp->PIO_mode;
2074 goto pio;
2075 }
2076 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2077 (drvp->drive_flags & DRIVE_UDMA)) {
2078 /* use Ultra/DMA */
2079 drvp->drive_flags &= ~DRIVE_DMA;
2080 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2081 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2082 AMD7X6_UDMA_TIME(chp->channel, drive,
2083 amd7x6_udma_tim[drvp->UDMA_mode]);
2084 /* can use PIO timings, MW DMA unused */
2085 mode = drvp->PIO_mode;
2086 } else {
2087 /* use Multiword DMA, but only if revision is OK */
2088 drvp->drive_flags &= ~DRIVE_UDMA;
2089 #ifndef PCIIDE_AMD756_ENABLEDMA
2090 			/*
2091 			 * The workaround (disabling multi-word DMA on affected
2092 			 * AMD756 revisions) doesn't seem to be necessary with
2093 			 * all drives, so it can be turned off by defining
2094 			 * PCIIDE_AMD756_ENABLEDMA; the bug causes a hard hang if triggered.
2095 			 */
2096 if (sc->sc_pp->ide_product ==
2097 PCI_PRODUCT_AMD_PBC756_IDE &&
2098 AMD756_CHIPREV_DISABLEDMA(rev)) {
2099 printf("%s:%d:%d: multi-word DMA disabled due "
2100 "to chip revision\n",
2101 sc->sc_wdcdev.sc_dev.dv_xname,
2102 chp->channel, drive);
2103 mode = drvp->PIO_mode;
2104 drvp->drive_flags &= ~DRIVE_DMA;
2105 goto pio;
2106 }
2107 #endif
2108 /* mode = min(pio, dma+2) */
2109 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2110 mode = drvp->PIO_mode;
2111 else
2112 mode = drvp->DMA_mode + 2;
2113 }
2114 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2115
2116 pio: /* setup PIO mode */
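		/* keep MW DMA two modes below PIO so both share the same timing entry */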
2117 if (mode <= 2) {
2118 drvp->DMA_mode = 0;
2119 drvp->PIO_mode = 0;
2120 mode = 0;
2121 } else {
2122 drvp->PIO_mode = mode;
2123 drvp->DMA_mode = mode - 2;
2124 }
2125 datatim_reg |=
2126 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2127 amd7x6_pio_set[mode]) |
2128 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2129 amd7x6_pio_rec[mode]);
2130 }
2131 if (idedma_ctl != 0) {
2132 /* Add software bits in status register */
2133 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2134 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2135 idedma_ctl);
2136 }
2137 pciide_print_modes(cp);
2138 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2139 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2140 }
2141
2142 void
2143 apollo_chip_map(sc, pa)
2144 struct pciide_softc *sc;
2145 struct pci_attach_args *pa;
2146 {
2147 struct pciide_channel *cp;
2148 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2149 int channel;
2150 u_int32_t ideconf;
2151 bus_size_t cmdsize, ctlsize;
2152 pcitag_t pcib_tag;
2153 pcireg_t pcib_id, pcib_class;
2154
2155 if (pciide_chipen(sc, pa) == 0)
2156 return;
2157 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2158 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2159 /* and read ID and rev of the ISA bridge */
2160 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2161 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2162 printf(": VIA Technologies ");
2163 switch (PCI_PRODUCT(pcib_id)) {
2164 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2165 printf("VT82C586 (Apollo VP) ");
2166 		if (PCI_REVISION(pcib_class) >= 0x02) {
2167 printf("ATA33 controller\n");
2168 sc->sc_wdcdev.UDMA_cap = 2;
2169 } else {
2170 printf("controller\n");
2171 sc->sc_wdcdev.UDMA_cap = 0;
2172 }
2173 break;
2174 case PCI_PRODUCT_VIATECH_VT82C596A:
2175 printf("VT82C596A (Apollo Pro) ");
2176 if (PCI_REVISION(pcib_class) >= 0x12) {
2177 printf("ATA66 controller\n");
2178 sc->sc_wdcdev.UDMA_cap = 4;
2179 } else {
2180 printf("ATA33 controller\n");
2181 sc->sc_wdcdev.UDMA_cap = 2;
2182 }
2183 break;
2184 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2185 printf("VT82C686A (Apollo KX133) ");
2186 if (PCI_REVISION(pcib_class) >= 0x40) {
2187 printf("ATA100 controller\n");
2188 sc->sc_wdcdev.UDMA_cap = 5;
2189 } else {
2190 printf("ATA66 controller\n");
2191 sc->sc_wdcdev.UDMA_cap = 4;
2192 }
2193 break;
2194 case PCI_PRODUCT_VIATECH_VT8231:
2195 printf("VT8231 ATA100 controller\n");
2196 sc->sc_wdcdev.UDMA_cap = 5;
2197 break;
2198 case PCI_PRODUCT_VIATECH_VT8233:
2199 printf("VT8233 ATA100 controller\n");
2200 sc->sc_wdcdev.UDMA_cap = 5;
2201 break;
2202 case PCI_PRODUCT_VIATECH_VT8233A:
2203 printf("VT8233A ATA133 controller\n");
2204 sc->sc_wdcdev.UDMA_cap = 6;
2205 break;
2206 case PCI_PRODUCT_VIATECH_VT8235:
2207 printf("VT8235 ATA133 controller\n");
2208 sc->sc_wdcdev.UDMA_cap = 6;
2209 break;
2210 default:
2211 printf("unknown ATA controller\n");
2212 sc->sc_wdcdev.UDMA_cap = 0;
2213 }
2214
2215 printf("%s: bus-master DMA support present",
2216 sc->sc_wdcdev.sc_dev.dv_xname);
2217 pciide_mapreg_dma(sc, pa);
2218 printf("\n");
2219 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2220 WDC_CAPABILITY_MODE;
2221 if (sc->sc_dma_ok) {
2222 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2223 sc->sc_wdcdev.irqack = pciide_irqack;
2224 if (sc->sc_wdcdev.UDMA_cap > 0)
2225 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2226 }
2227 sc->sc_wdcdev.PIO_cap = 4;
2228 sc->sc_wdcdev.DMA_cap = 2;
2229 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2230 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2231 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2232
2233 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2234 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2235 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2236 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2237 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2238 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2239 DEBUG_PROBE);
2240
2241 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2242 cp = &sc->pciide_channels[channel];
2243 if (pciide_chansetup(sc, channel, interface) == 0)
2244 continue;
2245
2246 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2247 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2248 printf("%s: %s channel ignored (disabled)\n",
2249 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2250 continue;
2251 }
2252 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2253 pciide_pci_intr);
2254 if (cp->hw_ok == 0)
2255 continue;
2256 if (pciide_chan_candisable(cp)) {
2257 ideconf &= ~APO_IDECONF_EN(channel);
2258 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2259 ideconf);
2260 }
2261 pciide_map_compat_intr(pa, cp, channel, interface);
2262
2263 if (cp->hw_ok == 0)
2264 continue;
2265 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2266 }
2267 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2268 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2269 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2270 }
2271
2272 void
2273 apollo_setup_channel(chp)
2274 struct channel_softc *chp;
2275 {
2276 u_int32_t udmatim_reg, datatim_reg;
2277 u_int8_t idedma_ctl;
2278 int mode, drive;
2279 struct ata_drive_datas *drvp;
2280 struct pciide_channel *cp = (struct pciide_channel*)chp;
2281 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2282
2283 idedma_ctl = 0;
2284 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2285 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2286 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2287 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2288
2289 /* setup DMA if needed */
2290 pciide_channel_dma_setup(cp);
2291
2292 for (drive = 0; drive < 2; drive++) {
2293 drvp = &chp->ch_drive[drive];
2294 /* If no drive, skip */
2295 if ((drvp->drive_flags & DRIVE) == 0)
2296 continue;
2297 /* add timing values, setup DMA if needed */
2298 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2299 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2300 mode = drvp->PIO_mode;
2301 goto pio;
2302 }
2303 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2304 (drvp->drive_flags & DRIVE_UDMA)) {
2305 /* use Ultra/DMA */
2306 drvp->drive_flags &= ~DRIVE_DMA;
2307 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2308 APO_UDMA_EN_MTH(chp->channel, drive);
2309 if (sc->sc_wdcdev.UDMA_cap == 6) {
2310 /* 8233a */
2311 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2312 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2313 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2314 /* 686b */
2315 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2316 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2317 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2318 /* 596b or 686a */
2319 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2320 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2321 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2322 } else {
2323 /* 596a or 586b */
2324 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2325 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2326 }
2327 /* can use PIO timings, MW DMA unused */
2328 mode = drvp->PIO_mode;
2329 } else {
2330 /* use Multiword DMA */
2331 drvp->drive_flags &= ~DRIVE_UDMA;
2332 /* mode = min(pio, dma+2) */
2333 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2334 mode = drvp->PIO_mode;
2335 else
2336 mode = drvp->DMA_mode + 2;
2337 }
2338 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2339
2340 pio: /* setup PIO mode */
2341 if (mode <= 2) {
2342 drvp->DMA_mode = 0;
2343 drvp->PIO_mode = 0;
2344 mode = 0;
2345 } else {
2346 drvp->PIO_mode = mode;
2347 drvp->DMA_mode = mode - 2;
2348 }
2349 datatim_reg |=
2350 APO_DATATIM_PULSE(chp->channel, drive,
2351 apollo_pio_set[mode]) |
2352 APO_DATATIM_RECOV(chp->channel, drive,
2353 apollo_pio_rec[mode]);
2354 }
2355 if (idedma_ctl != 0) {
2356 /* Add software bits in status register */
2357 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2358 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2359 idedma_ctl);
2360 }
2361 pciide_print_modes(cp);
2362 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2363 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2364 }
2365
2366 void
2367 cmd_channel_map(pa, sc, channel)
2368 struct pci_attach_args *pa;
2369 struct pciide_softc *sc;
2370 int channel;
2371 {
2372 struct pciide_channel *cp = &sc->pciide_channels[channel];
2373 bus_size_t cmdsize, ctlsize;
2374 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2375 int interface, one_channel;
2376
2377 	/*
2378 	 * The 0648/0649 can be told to identify as a RAID controller.
2379 	 * In this case, we have to fake the interface register.
2380 	 */
2381 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2382 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2383 PCIIDE_INTERFACE_SETTABLE(1);
2384 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2385 CMD_CONF_DSA1)
2386 interface |= PCIIDE_INTERFACE_PCI(0) |
2387 PCIIDE_INTERFACE_PCI(1);
2388 } else {
2389 interface = PCI_INTERFACE(pa->pa_class);
2390 }
2391
2392 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2393 cp->name = PCIIDE_CHANNEL_NAME(channel);
2394 cp->wdc_channel.channel = channel;
2395 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2396
2397 	/*
2398 	 * Older CMD64x controllers don't have independent channels
2399 	 */
2400 switch (sc->sc_pp->ide_product) {
2401 case PCI_PRODUCT_CMDTECH_649:
2402 one_channel = 0;
2403 break;
2404 default:
2405 one_channel = 1;
2406 break;
2407 }
2408
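	/* non-independent channels share the first channel's command queue */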
2409 if (channel > 0 && one_channel) {
2410 cp->wdc_channel.ch_queue =
2411 sc->pciide_channels[0].wdc_channel.ch_queue;
2412 } else {
2413 cp->wdc_channel.ch_queue =
2414 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2415 }
2416 if (cp->wdc_channel.ch_queue == NULL) {
2417 printf("%s %s channel: "
2418 "can't allocate memory for command queue",
2419 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2420 return;
2421 }
2422
2423 printf("%s: %s channel %s to %s mode\n",
2424 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2425 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2426 "configured" : "wired",
2427 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2428 "native-PCI" : "compatibility");
2429
2430 	/*
2431 	 * With a CMD PCI64x, if we get here, the first channel is enabled:
2432 	 * there's no way to disable the first channel without disabling
2433 	 * the whole device
2434 	 */
2435 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2436 printf("%s: %s channel ignored (disabled)\n",
2437 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2438 return;
2439 }
2440
2441 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2442 if (cp->hw_ok == 0)
2443 return;
2444 if (channel == 1) {
2445 if (pciide_chan_candisable(cp)) {
2446 ctrl &= ~CMD_CTRL_2PORT;
2447 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2448 CMD_CTRL, ctrl);
2449 }
2450 }
2451 pciide_map_compat_intr(pa, cp, channel, interface);
2452 }
2453
2454 int
2455 cmd_pci_intr(arg)
2456 void *arg;
2457 {
2458 struct pciide_softc *sc = arg;
2459 struct pciide_channel *cp;
2460 struct channel_softc *wdc_cp;
2461 int i, rv, crv;
2462 u_int32_t priirq, secirq;
2463
2464 rv = 0;
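	/* per-channel interrupt status lives in PCI config space (CMD_CONF / CMD_ARTTIM23) */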
2465 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2466 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2467 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2468 cp = &sc->pciide_channels[i];
2469 wdc_cp = &cp->wdc_channel;
2470 		/* If a compat channel, skip. */
2471 if (cp->compat)
2472 continue;
2473 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2474 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2475 crv = wdcintr(wdc_cp);
2476 if (crv == 0)
2477 printf("%s:%d: bogus intr\n",
2478 sc->sc_wdcdev.sc_dev.dv_xname, i);
2479 else
2480 rv = 1;
2481 }
2482 }
2483 return rv;
2484 }
2485
2486 void
2487 cmd_chip_map(sc, pa)
2488 struct pciide_softc *sc;
2489 struct pci_attach_args *pa;
2490 {
2491 int channel;
2492
2493 /*
2494 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2495 	 * and the base address registers can be disabled at the
2496 	 * hardware level. In this case, the device is wired
2497 * in compat mode and its first channel is always enabled,
2498 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2499 * In fact, it seems that the first channel of the CMD PCI0640
2500 * can't be disabled.
2501 */
2502
2503 #ifdef PCIIDE_CMD064x_DISABLE
2504 if (pciide_chipen(sc, pa) == 0)
2505 return;
2506 #endif
2507
2508 printf("%s: hardware does not support DMA\n",
2509 sc->sc_wdcdev.sc_dev.dv_xname);
2510 sc->sc_dma_ok = 0;
2511
2512 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2513 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2514 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2515
2516 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2517 cmd_channel_map(pa, sc, channel);
2518 }
2519 }
2520
2521 void
2522 cmd0643_9_chip_map(sc, pa)
2523 struct pciide_softc *sc;
2524 struct pci_attach_args *pa;
2525 {
2526 struct pciide_channel *cp;
2527 int channel;
2528 pcireg_t rev = PCI_REVISION(pa->pa_class);
2529
2530 /*
2531 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2532 	 * and the base address registers can be disabled at the
2533 	 * hardware level. In this case, the device is wired
2534 * in compat mode and its first channel is always enabled,
2535 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2536 * In fact, it seems that the first channel of the CMD PCI0640
2537 * can't be disabled.
2538 */
2539
2540 #ifdef PCIIDE_CMD064x_DISABLE
2541 if (pciide_chipen(sc, pa) == 0)
2542 return;
2543 #endif
2544 printf("%s: bus-master DMA support present",
2545 sc->sc_wdcdev.sc_dev.dv_xname);
2546 pciide_mapreg_dma(sc, pa);
2547 printf("\n");
2548 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2549 WDC_CAPABILITY_MODE;
2550 if (sc->sc_dma_ok) {
2551 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2552 switch (sc->sc_pp->ide_product) {
2553 case PCI_PRODUCT_CMDTECH_649:
2554 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2555 sc->sc_wdcdev.UDMA_cap = 5;
2556 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2557 break;
2558 case PCI_PRODUCT_CMDTECH_648:
2559 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2560 sc->sc_wdcdev.UDMA_cap = 4;
2561 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2562 break;
2563 case PCI_PRODUCT_CMDTECH_646:
2564 if (rev >= CMD0646U2_REV) {
2565 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2566 sc->sc_wdcdev.UDMA_cap = 2;
2567 } else if (rev >= CMD0646U_REV) {
2568 /*
2569 * Linux's driver claims that the 646U is broken
2570 * with UDMA. Only enable it if we know what we're
2571 * doing
2572 */
2573 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2574 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2575 sc->sc_wdcdev.UDMA_cap = 2;
2576 #endif
2577 /* explicitly disable UDMA */
2578 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2579 CMD_UDMATIM(0), 0);
2580 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2581 CMD_UDMATIM(1), 0);
2582 }
2583 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2584 break;
2585 default:
2586 sc->sc_wdcdev.irqack = pciide_irqack;
2587 }
2588 }
2589
2590 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2591 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2592 sc->sc_wdcdev.PIO_cap = 4;
2593 sc->sc_wdcdev.DMA_cap = 2;
2594 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2595
2596 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2597 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2598 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2599 DEBUG_PROBE);
2600
2601 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2602 cp = &sc->pciide_channels[channel];
2603 cmd_channel_map(pa, sc, channel);
2604 if (cp->hw_ok == 0)
2605 continue;
2606 cmd0643_9_setup_channel(&cp->wdc_channel);
2607 }
2608 /*
2609 * note - this also makes sure we clear the irq disable and reset
2610 * bits
2611 */
2612 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2613 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2614 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2615 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2616 DEBUG_PROBE);
2617 }
2618
2619 void
2620 cmd0643_9_setup_channel(chp)
2621 struct channel_softc *chp;
2622 {
2623 struct ata_drive_datas *drvp;
2624 u_int8_t tim;
2625 u_int32_t idedma_ctl, udma_reg;
2626 int drive;
2627 struct pciide_channel *cp = (struct pciide_channel*)chp;
2628 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2629
2630 idedma_ctl = 0;
2631 /* setup DMA if needed */
2632 pciide_channel_dma_setup(cp);
2633
2634 for (drive = 0; drive < 2; drive++) {
2635 drvp = &chp->ch_drive[drive];
2636 /* If no drive, skip */
2637 if ((drvp->drive_flags & DRIVE) == 0)
2638 continue;
2639 /* add timing values, setup DMA if needed */
2640 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2641 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2642 if (drvp->drive_flags & DRIVE_UDMA) {
2643 /* UltraDMA on a 646U2, 0648 or 0649 */
2644 drvp->drive_flags &= ~DRIVE_DMA;
2645 udma_reg = pciide_pci_read(sc->sc_pc,
2646 sc->sc_tag, CMD_UDMATIM(chp->channel));
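				/* cap at UDMA2 unless an 80-wire cable is reported for this channel */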
2647 if (drvp->UDMA_mode > 2 &&
2648 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2649 CMD_BICSR) &
2650 CMD_BICSR_80(chp->channel)) == 0)
2651 drvp->UDMA_mode = 2;
2652 if (drvp->UDMA_mode > 2)
2653 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2654 else if (sc->sc_wdcdev.UDMA_cap > 2)
2655 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2656 udma_reg |= CMD_UDMATIM_UDMA(drive);
2657 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2658 CMD_UDMATIM_TIM_OFF(drive));
2659 udma_reg |=
2660 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2661 CMD_UDMATIM_TIM_OFF(drive));
2662 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2663 CMD_UDMATIM(chp->channel), udma_reg);
2664 } else {
2665 				/*
2666 				 * Use multiword DMA.
2667 				 * Timings will be used for both PIO and DMA,
2668 				 * so adjust the DMA mode if needed.
2669 				 * If we have a 0646U2/8/9, turn off UDMA.
2670 				 */
2671 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2672 udma_reg = pciide_pci_read(sc->sc_pc,
2673 sc->sc_tag,
2674 CMD_UDMATIM(chp->channel));
2675 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2676 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2677 CMD_UDMATIM(chp->channel),
2678 udma_reg);
2679 }
2680 if (drvp->PIO_mode >= 3 &&
2681 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2682 drvp->DMA_mode = drvp->PIO_mode - 2;
2683 }
2684 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2685 }
2686 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2687 }
2688 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2689 CMD_DATA_TIM(chp->channel, drive), tim);
2690 }
2691 if (idedma_ctl != 0) {
2692 /* Add software bits in status register */
2693 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2694 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2695 idedma_ctl);
2696 }
2697 pciide_print_modes(cp);
2698 }
2699
2700 void
2701 cmd646_9_irqack(chp)
2702 struct channel_softc *chp;
2703 {
2704 u_int32_t priirq, secirq;
2705 struct pciide_channel *cp = (struct pciide_channel*)chp;
2706 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2707
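	/* writing the latched status back acknowledges (clears) the channel's interrupt bit */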
2708 if (chp->channel == 0) {
2709 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2710 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2711 } else {
2712 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2713 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2714 }
2715 pciide_irqack(chp);
2716 }
2717
2718 void
2719 cy693_chip_map(sc, pa)
2720 struct pciide_softc *sc;
2721 struct pci_attach_args *pa;
2722 {
2723 struct pciide_channel *cp;
2724 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2725 bus_size_t cmdsize, ctlsize;
2726
2727 if (pciide_chipen(sc, pa) == 0)
2728 return;
2729 /*
2730 * this chip has 2 PCI IDE functions, one for primary and one for
2731 * secondary. So we need to call pciide_mapregs_compat() with
2732 * the real channel
2733 */
2734 if (pa->pa_function == 1) {
2735 sc->sc_cy_compatchan = 0;
2736 } else if (pa->pa_function == 2) {
2737 sc->sc_cy_compatchan = 1;
2738 } else {
2739 printf("%s: unexpected PCI function %d\n",
2740 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2741 return;
2742 }
2743 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2744 printf("%s: bus-master DMA support present",
2745 sc->sc_wdcdev.sc_dev.dv_xname);
2746 pciide_mapreg_dma(sc, pa);
2747 } else {
2748 printf("%s: hardware does not support DMA",
2749 sc->sc_wdcdev.sc_dev.dv_xname);
2750 sc->sc_dma_ok = 0;
2751 }
2752 printf("\n");
2753
2754 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2755 if (sc->sc_cy_handle == NULL) {
2756 printf("%s: unable to map hyperCache control registers\n",
2757 sc->sc_wdcdev.sc_dev.dv_xname);
2758 sc->sc_dma_ok = 0;
2759 }
2760
2761 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2762 WDC_CAPABILITY_MODE;
2763 if (sc->sc_dma_ok) {
2764 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2765 sc->sc_wdcdev.irqack = pciide_irqack;
2766 }
2767 sc->sc_wdcdev.PIO_cap = 4;
2768 sc->sc_wdcdev.DMA_cap = 2;
2769 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2770
2771 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2772 sc->sc_wdcdev.nchannels = 1;
2773
2774 /* Only one channel for this chip; if we are here it's enabled */
2775 cp = &sc->pciide_channels[0];
2776 sc->wdc_chanarray[0] = &cp->wdc_channel;
2777 cp->name = PCIIDE_CHANNEL_NAME(0);
2778 cp->wdc_channel.channel = 0;
2779 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2780 cp->wdc_channel.ch_queue =
2781 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2782 if (cp->wdc_channel.ch_queue == NULL) {
2783 printf("%s primary channel: "
2784 "can't allocate memory for command queue",
2785 sc->sc_wdcdev.sc_dev.dv_xname);
2786 return;
2787 }
2788 printf("%s: primary channel %s to ",
2789 sc->sc_wdcdev.sc_dev.dv_xname,
2790 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2791 "configured" : "wired");
2792 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2793 printf("native-PCI");
2794 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2795 pciide_pci_intr);
2796 } else {
2797 printf("compatibility");
2798 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2799 &cmdsize, &ctlsize);
2800 }
2801 printf(" mode\n");
2802 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2803 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2804 wdcattach(&cp->wdc_channel);
2805 if (pciide_chan_candisable(cp)) {
2806 pci_conf_write(sc->sc_pc, sc->sc_tag,
2807 PCI_COMMAND_STATUS_REG, 0);
2808 }
2809 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2810 if (cp->hw_ok == 0)
2811 return;
2812 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2813 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2814 cy693_setup_channel(&cp->wdc_channel);
2815 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2816 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2817 }
2818
2819 void
2820 cy693_setup_channel(chp)
2821 struct channel_softc *chp;
2822 {
2823 struct ata_drive_datas *drvp;
2824 int drive;
2825 u_int32_t cy_cmd_ctrl;
2826 u_int32_t idedma_ctl;
2827 struct pciide_channel *cp = (struct pciide_channel*)chp;
2828 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2829 int dma_mode = -1;
2830
2831 cy_cmd_ctrl = idedma_ctl = 0;
2832
2833 /* setup DMA if needed */
2834 pciide_channel_dma_setup(cp);
2835
2836 for (drive = 0; drive < 2; drive++) {
2837 drvp = &chp->ch_drive[drive];
2838 /* If no drive, skip */
2839 if ((drvp->drive_flags & DRIVE) == 0)
2840 continue;
2841 /* add timing values, setup DMA if needed */
2842 if (drvp->drive_flags & DRIVE_DMA) {
2843 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2844 /* use Multiword DMA */
2845 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2846 dma_mode = drvp->DMA_mode;
2847 }
2848 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2849 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2850 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2851 CY_CMD_CTRL_IOW_REC_OFF(drive));
2852 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2853 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2854 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2855 CY_CMD_CTRL_IOR_REC_OFF(drive));
2856 }
2857 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
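	/* there is a single DMA timing per channel, so both drives get the lowest common mode */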
2858 chp->ch_drive[0].DMA_mode = dma_mode;
2859 chp->ch_drive[1].DMA_mode = dma_mode;
2860
2861 if (dma_mode == -1)
2862 dma_mode = 0;
2863
2864 if (sc->sc_cy_handle != NULL) {
2865 /* Note: `multiple' is implied. */
2866 cy82c693_write(sc->sc_cy_handle,
2867 (sc->sc_cy_compatchan == 0) ?
2868 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2869 }
2870
2871 pciide_print_modes(cp);
2872
2873 if (idedma_ctl != 0) {
2874 /* Add software bits in status register */
2875 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2876 IDEDMA_CTL, idedma_ctl);
2877 }
2878 }
2879
2880 static int
2881 sis_hostbr_match(pa)
2882 struct pci_attach_args *pa;
2883 {
2884 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2885 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2886 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2887 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2888 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2889 }
2890
2891 void
2892 sis_chip_map(sc, pa)
2893 struct pciide_softc *sc;
2894 struct pci_attach_args *pa;
2895 {
2896 struct pciide_channel *cp;
2897 int channel;
2898 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2899 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2900 pcireg_t rev = PCI_REVISION(pa->pa_class);
2901 bus_size_t cmdsize, ctlsize;
2902 pcitag_t pchb_tag;
2903 pcireg_t pchb_id, pchb_class;
2904
2905 if (pciide_chipen(sc, pa) == 0)
2906 return;
2907 printf("%s: bus-master DMA support present",
2908 sc->sc_wdcdev.sc_dev.dv_xname);
2909 pciide_mapreg_dma(sc, pa);
2910 printf("\n");
2911
2912 /* get a PCI tag for the host bridge (function 0 of the same device) */
2913 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2914 /* and read ID and rev of the ISA bridge */
2915 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2916 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2917
2918 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2919 WDC_CAPABILITY_MODE;
2920 if (sc->sc_dma_ok) {
2921 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2922 sc->sc_wdcdev.irqack = pciide_irqack;
2923 		/*
2924 		 * Controllers associated with a rev 0x2 530 host-to-PCI
2925 		 * bridge have problems with UDMA (info provided by Christos)
2926 		 */
2927 if (rev >= 0xd0 &&
2928 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2929 PCI_REVISION(pchb_class) >= 0x03))
2930 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2931 }
2932
2933 sc->sc_wdcdev.PIO_cap = 4;
2934 sc->sc_wdcdev.DMA_cap = 2;
2935 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2936 		/*
2937 		 * Use UDMA/100 if one of the newer SiS host bridges
2938 		 * (645/650/730/735) is present, UDMA/33 otherwise.
2939 		 */
2940 sc->sc_wdcdev.UDMA_cap =
2941 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2942 sc->sc_wdcdev.set_modes = sis_setup_channel;
2943
2944 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2945 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2946
2947 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2948 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2949 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2950
2951 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2952 cp = &sc->pciide_channels[channel];
2953 if (pciide_chansetup(sc, channel, interface) == 0)
2954 continue;
2955 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2956 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2957 printf("%s: %s channel ignored (disabled)\n",
2958 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2959 continue;
2960 }
2961 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2962 pciide_pci_intr);
2963 if (cp->hw_ok == 0)
2964 continue;
2965 if (pciide_chan_candisable(cp)) {
2966 if (channel == 0)
2967 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2968 else
2969 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2970 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2971 sis_ctr0);
2972 }
2973 pciide_map_compat_intr(pa, cp, channel, interface);
2974 if (cp->hw_ok == 0)
2975 continue;
2976 sis_setup_channel(&cp->wdc_channel);
2977 }
2978 }
2979
2980 void
2981 sis_setup_channel(chp)
2982 struct channel_softc *chp;
2983 {
2984 struct ata_drive_datas *drvp;
2985 int drive;
2986 u_int32_t sis_tim;
2987 u_int32_t idedma_ctl;
2988 struct pciide_channel *cp = (struct pciide_channel*)chp;
2989 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2990
2991 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2992 "channel %d 0x%x\n", chp->channel,
2993 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2994 DEBUG_PROBE);
2995 sis_tim = 0;
2996 idedma_ctl = 0;
2997 /* setup DMA if needed */
2998 pciide_channel_dma_setup(cp);
2999
3000 for (drive = 0; drive < 2; drive++) {
3001 drvp = &chp->ch_drive[drive];
3002 /* If no drive, skip */
3003 if ((drvp->drive_flags & DRIVE) == 0)
3004 continue;
3005 /* add timing values, setup DMA if needed */
3006 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3007 (drvp->drive_flags & DRIVE_UDMA) == 0)
3008 goto pio;
3009
3010 if (drvp->drive_flags & DRIVE_UDMA) {
3011 /* use Ultra/DMA */
3012 drvp->drive_flags &= ~DRIVE_DMA;
3013 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3014 SIS_TIM_UDMA_TIME_OFF(drive);
3015 sis_tim |= SIS_TIM_UDMA_EN(drive);
3016 } else {
3017 /*
3018 * use Multiword DMA
3019 * Timings will be used for both PIO and DMA,
3020 * so adjust DMA mode if needed
3021 */
3022 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3023 drvp->PIO_mode = drvp->DMA_mode + 2;
3024 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3025 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3026 drvp->PIO_mode - 2 : 0;
3027 if (drvp->DMA_mode == 0)
3028 drvp->PIO_mode = 0;
3029 }
3030 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3031 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3032 SIS_TIM_ACT_OFF(drive);
3033 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3034 SIS_TIM_REC_OFF(drive);
3035 }
3036 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3037 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3038 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3039 if (idedma_ctl != 0) {
3040 /* Add software bits in status register */
3041 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3042 IDEDMA_CTL, idedma_ctl);
3043 }
3044 pciide_print_modes(cp);
3045 }
3046
3047 void
3048 acer_chip_map(sc, pa)
3049 struct pciide_softc *sc;
3050 struct pci_attach_args *pa;
3051 {
3052 struct pciide_channel *cp;
3053 int channel;
3054 pcireg_t cr, interface;
3055 bus_size_t cmdsize, ctlsize;
3056 pcireg_t rev = PCI_REVISION(pa->pa_class);
3057
3058 if (pciide_chipen(sc, pa) == 0)
3059 return;
3060 printf("%s: bus-master DMA support present",
3061 sc->sc_wdcdev.sc_dev.dv_xname);
3062 pciide_mapreg_dma(sc, pa);
3063 printf("\n");
3064 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3065 WDC_CAPABILITY_MODE;
3066 if (sc->sc_dma_ok) {
3067 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3068 if (rev >= 0x20) {
3069 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3070 if (rev >= 0xC4)
3071 sc->sc_wdcdev.UDMA_cap = 5;
3072 else if (rev >= 0xC2)
3073 sc->sc_wdcdev.UDMA_cap = 4;
3074 else
3075 sc->sc_wdcdev.UDMA_cap = 2;
3076 }
3077 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3078 sc->sc_wdcdev.irqack = pciide_irqack;
3079 }
3080
3081 sc->sc_wdcdev.PIO_cap = 4;
3082 sc->sc_wdcdev.DMA_cap = 2;
3083 sc->sc_wdcdev.set_modes = acer_setup_channel;
3084 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3085 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3086
3087 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3088 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3089 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3090
3091 /* Enable "microsoft register bits" R/W. */
3092 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3093 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3094 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3095 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3096 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3097 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3098 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3099 ~ACER_CHANSTATUSREGS_RO);
3100 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3101 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3102 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3103 /* Don't use cr, re-read the real register content instead */
3104 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3105 PCI_CLASS_REG));
3106
3107 /* From linux: enable "Cable Detection" */
3108 if (rev >= 0xC2) {
3109 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3110 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3111 | ACER_0x4B_CDETECT);
3112 }
3113
3114 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3115 cp = &sc->pciide_channels[channel];
3116 if (pciide_chansetup(sc, channel, interface) == 0)
3117 continue;
3118 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3119 printf("%s: %s channel ignored (disabled)\n",
3120 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3121 continue;
3122 }
3123 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3124 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3125 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3126 if (cp->hw_ok == 0)
3127 continue;
3128 if (pciide_chan_candisable(cp)) {
3129 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3130 pci_conf_write(sc->sc_pc, sc->sc_tag,
3131 PCI_CLASS_REG, cr);
3132 }
3133 pciide_map_compat_intr(pa, cp, channel, interface);
3134 acer_setup_channel(&cp->wdc_channel);
3135 }
3136 }
3137
3138 void
3139 acer_setup_channel(chp)
3140 struct channel_softc *chp;
3141 {
3142 struct ata_drive_datas *drvp;
3143 int drive;
3144 u_int32_t acer_fifo_udma;
3145 u_int32_t idedma_ctl;
3146 struct pciide_channel *cp = (struct pciide_channel*)chp;
3147 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3148
3149 idedma_ctl = 0;
3150 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3151 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3152 acer_fifo_udma), DEBUG_PROBE);
3153 /* setup DMA if needed */
3154 pciide_channel_dma_setup(cp);
3155
3156 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3157 	    DRIVE_UDMA) {	/* check for 80-pin cable */
3158 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3159 ACER_0x4A_80PIN(chp->channel)) {
3160 if (chp->ch_drive[0].UDMA_mode > 2)
3161 chp->ch_drive[0].UDMA_mode = 2;
3162 if (chp->ch_drive[1].UDMA_mode > 2)
3163 chp->ch_drive[1].UDMA_mode = 2;
3164 }
3165 }
3166
3167 for (drive = 0; drive < 2; drive++) {
3168 drvp = &chp->ch_drive[drive];
3169 /* If no drive, skip */
3170 if ((drvp->drive_flags & DRIVE) == 0)
3171 continue;
3172 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3173 "channel %d drive %d 0x%x\n", chp->channel, drive,
3174 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3175 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3176 /* clear FIFO/DMA mode */
3177 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3178 ACER_UDMA_EN(chp->channel, drive) |
3179 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3180
3181 /* add timing values, setup DMA if needed */
3182 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3183 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3184 acer_fifo_udma |=
3185 ACER_FTH_OPL(chp->channel, drive, 0x1);
3186 goto pio;
3187 }
3188
3189 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3190 if (drvp->drive_flags & DRIVE_UDMA) {
3191 /* use Ultra/DMA */
3192 drvp->drive_flags &= ~DRIVE_DMA;
3193 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3194 acer_fifo_udma |=
3195 ACER_UDMA_TIM(chp->channel, drive,
3196 acer_udma[drvp->UDMA_mode]);
3197 /* XXX disable if one drive < UDMA3 ? */
3198 if (drvp->UDMA_mode >= 3) {
3199 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3200 ACER_0x4B,
3201 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3202 ACER_0x4B) | ACER_0x4B_UDMA66);
3203 }
3204 } else {
3205 /*
3206 * use Multiword DMA
3207 * Timings will be used for both PIO and DMA,
3208 * so adjust DMA mode if needed
3209 */
3210 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3211 drvp->PIO_mode = drvp->DMA_mode + 2;
3212 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3213 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3214 drvp->PIO_mode - 2 : 0;
3215 if (drvp->DMA_mode == 0)
3216 drvp->PIO_mode = 0;
3217 }
3218 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3219 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3220 ACER_IDETIM(chp->channel, drive),
3221 acer_pio[drvp->PIO_mode]);
3222 }
3223 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3224 acer_fifo_udma), DEBUG_PROBE);
3225 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3226 if (idedma_ctl != 0) {
3227 /* Add software bits in status register */
3228 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3229 IDEDMA_CTL, idedma_ctl);
3230 }
3231 pciide_print_modes(cp);
3232 }
3233
3234 int
3235 acer_pci_intr(arg)
3236 void *arg;
3237 {
3238 struct pciide_softc *sc = arg;
3239 struct pciide_channel *cp;
3240 struct channel_softc *wdc_cp;
3241 int i, rv, crv;
3242 u_int32_t chids;
3243
3244 rv = 0;
3245 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3246 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3247 cp = &sc->pciide_channels[i];
3248 wdc_cp = &cp->wdc_channel;
3249 		/* If a compat channel, skip. */
3250 if (cp->compat)
3251 continue;
3252 if (chids & ACER_CHIDS_INT(i)) {
3253 crv = wdcintr(wdc_cp);
3254 if (crv == 0)
3255 printf("%s:%d: bogus intr\n",
3256 sc->sc_wdcdev.sc_dev.dv_xname, i);
3257 else
3258 rv = 1;
3259 }
3260 }
3261 return rv;
3262 }
3263
3264 void
3265 hpt_chip_map(sc, pa)
3266 struct pciide_softc *sc;
3267 struct pci_attach_args *pa;
3268 {
3269 struct pciide_channel *cp;
3270 int i, compatchan, revision;
3271 pcireg_t interface;
3272 bus_size_t cmdsize, ctlsize;
3273
3274 if (pciide_chipen(sc, pa) == 0)
3275 return;
3276 revision = PCI_REVISION(pa->pa_class);
3277 printf(": Triones/Highpoint ");
3278 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3279 printf("HPT374 IDE Controller\n");
3280 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3281 printf("HPT372 IDE Controller\n");
3282 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3283 if (revision == HPT372_REV)
3284 printf("HPT372 IDE Controller\n");
3285 else if (revision == HPT370_REV)
3286 printf("HPT370 IDE Controller\n");
3287 else if (revision == HPT370A_REV)
3288 printf("HPT370A IDE Controller\n");
3289 else if (revision == HPT366_REV)
3290 printf("HPT366 IDE Controller\n");
3291 else
3292 printf("unknown HPT IDE controller rev %d\n", revision);
3293 } else
3294 printf("unknown HPT IDE controller 0x%x\n",
3295 sc->sc_pp->ide_product);
3296
3297 	/*
3298 	 * When the chip is in native mode it identifies itself as a
3299 	 * 'misc mass storage' device. Fake the interface in this case.
3300 	 */
3301 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3302 interface = PCI_INTERFACE(pa->pa_class);
3303 } else {
3304 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3305 PCIIDE_INTERFACE_PCI(0);
3306 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3307 (revision == HPT370_REV || revision == HPT370A_REV ||
3308 revision == HPT372_REV)) ||
3309 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3310 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3311 interface |= PCIIDE_INTERFACE_PCI(1);
3312 }
3313
3314 printf("%s: bus-master DMA support present",
3315 sc->sc_wdcdev.sc_dev.dv_xname);
3316 pciide_mapreg_dma(sc, pa);
3317 printf("\n");
3318 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3319 WDC_CAPABILITY_MODE;
3320 if (sc->sc_dma_ok) {
3321 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3322 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3323 sc->sc_wdcdev.irqack = pciide_irqack;
3324 }
3325 sc->sc_wdcdev.PIO_cap = 4;
3326 sc->sc_wdcdev.DMA_cap = 2;
3327
3328 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3329 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3330 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3331 revision == HPT366_REV) {
3332 sc->sc_wdcdev.UDMA_cap = 4;
3333 /*
3334 * The 366 has 2 PCI IDE functions, one for primary and one
3335 * for secondary. So we need to call pciide_mapregs_compat()
3336 * with the real channel
3337 */
3338 if (pa->pa_function == 0) {
3339 compatchan = 0;
3340 } else if (pa->pa_function == 1) {
3341 compatchan = 1;
3342 } else {
3343 printf("%s: unexpected PCI function %d\n",
3344 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3345 return;
3346 }
3347 sc->sc_wdcdev.nchannels = 1;
3348 } else {
3349 sc->sc_wdcdev.nchannels = 2;
3350 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3351 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3352 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3353 revision == HPT372_REV))
3354 sc->sc_wdcdev.UDMA_cap = 6;
3355 else
3356 sc->sc_wdcdev.UDMA_cap = 5;
3357 }
3358 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3359 cp = &sc->pciide_channels[i];
3360 if (sc->sc_wdcdev.nchannels > 1) {
3361 compatchan = i;
3362 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3363 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3364 printf("%s: %s channel ignored (disabled)\n",
3365 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3366 continue;
3367 }
3368 }
3369 if (pciide_chansetup(sc, i, interface) == 0)
3370 continue;
3371 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3372 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3373 &ctlsize, hpt_pci_intr);
3374 } else {
3375 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3376 &cmdsize, &ctlsize);
3377 }
3378 if (cp->hw_ok == 0)
3379 return;
3380 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3381 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3382 wdcattach(&cp->wdc_channel);
3383 hpt_setup_channel(&cp->wdc_channel);
3384 }
3385 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3386 (revision == HPT370_REV || revision == HPT370A_REV ||
3387 revision == HPT372_REV)) ||
3388 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3389 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3390 		/*
3391 		 * HPT370_REV and higher have a bit to disable interrupts;
3392 		 * make sure to clear it
3393 		 */
3394 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3395 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3396 ~HPT_CSEL_IRQDIS);
3397 }
3398 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
3399 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3400 	    revision == HPT372_REV) ||
3401 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3402 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3403 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3404 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3405 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3406 return;
3407 }
3408
3409 void
3410 hpt_setup_channel(chp)
3411 struct channel_softc *chp;
3412 {
3413 struct ata_drive_datas *drvp;
3414 int drive;
3415 int cable;
3416 u_int32_t before, after;
3417 u_int32_t idedma_ctl;
3418 struct pciide_channel *cp = (struct pciide_channel*)chp;
3419 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3420 int revision =
3421 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3422
3423 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3424
3425 /* setup DMA if needed */
3426 pciide_channel_dma_setup(cp);
3427
3428 idedma_ctl = 0;
3429
3430 /* Per drive settings */
3431 for (drive = 0; drive < 2; drive++) {
3432 drvp = &chp->ch_drive[drive];
3433 /* If no drive, skip */
3434 if ((drvp->drive_flags & DRIVE) == 0)
3435 continue;
3436 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3437 HPT_IDETIM(chp->channel, drive));
3438
3439 /* add timing values, setup DMA if needed */
3440 if (drvp->drive_flags & DRIVE_UDMA) {
3441 /* use Ultra/DMA */
3442 drvp->drive_flags &= ~DRIVE_DMA;
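			/* the cable-ID bit set means no 80-wire cable was seen; cap UDMA at mode 2 */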
3443 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3444 drvp->UDMA_mode > 2)
3445 drvp->UDMA_mode = 2;
3446 switch (sc->sc_pp->ide_product) {
3447 case PCI_PRODUCT_TRIONES_HPT374:
3448 after = hpt374_udma[drvp->UDMA_mode];
3449 break;
3450 case PCI_PRODUCT_TRIONES_HPT372:
3451 after = hpt372_udma[drvp->UDMA_mode];
3452 break;
3453 case PCI_PRODUCT_TRIONES_HPT366:
3454 default:
3455 				switch (revision) {
3456 case HPT372_REV:
3457 after = hpt372_udma[drvp->UDMA_mode];
3458 break;
3459 case HPT370_REV:
3460 case HPT370A_REV:
3461 after = hpt370_udma[drvp->UDMA_mode];
3462 break;
3463 case HPT366_REV:
3464 default:
3465 after = hpt366_udma[drvp->UDMA_mode];
3466 break;
3467 }
3468 }
3469 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3470 } else if (drvp->drive_flags & DRIVE_DMA) {
3471 /*
3472 * use Multiword DMA.
3473 * Timings will be used for both PIO and DMA, so adjust
3474 * DMA mode if needed
3475 */
3476 if (drvp->PIO_mode >= 3 &&
3477 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3478 drvp->DMA_mode = drvp->PIO_mode - 2;
3479 }
3480 switch (sc->sc_pp->ide_product) {
3481 case PCI_PRODUCT_TRIONES_HPT374:
3482 after = hpt374_dma[drvp->DMA_mode];
3483 break;
3484 case PCI_PRODUCT_TRIONES_HPT372:
3485 after = hpt372_dma[drvp->DMA_mode];
3486 break;
3487 case PCI_PRODUCT_TRIONES_HPT366:
3488 default:
3489 				switch (revision) {
3490 case HPT372_REV:
3491 after = hpt372_dma[drvp->DMA_mode];
3492 break;
3493 case HPT370_REV:
3494 case HPT370A_REV:
3495 after = hpt370_dma[drvp->DMA_mode];
3496 break;
3497 case HPT366_REV:
3498 default:
3499 after = hpt366_dma[drvp->DMA_mode];
3500 break;
3501 }
3502 }
3503 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3504 } else {
3505 /* PIO only */
3506 switch (sc->sc_pp->ide_product) {
3507 case PCI_PRODUCT_TRIONES_HPT374:
3508 after = hpt374_pio[drvp->PIO_mode];
3509 break;
3510 case PCI_PRODUCT_TRIONES_HPT372:
3511 after = hpt372_pio[drvp->PIO_mode];
3512 break;
3513 case PCI_PRODUCT_TRIONES_HPT366:
3514 default:
3515 switch(revision) {
3516 case HPT372_REV:
3517 after = hpt372_pio[drvp->PIO_mode];
3518 break;
3519 case HPT370_REV:
3520 case HPT370A_REV:
3521 after = hpt370_pio[drvp->PIO_mode];
3522 break;
3523 case HPT366_REV:
3524 default:
3525 after = hpt366_pio[drvp->PIO_mode];
3526 break;
3527 }
3528 }
3529 }
3530 pci_conf_write(sc->sc_pc, sc->sc_tag,
3531 HPT_IDETIM(chp->channel, drive), after);
3532 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3533 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3534 after, before), DEBUG_PROBE);
3535 }
3536 if (idedma_ctl != 0) {
3537 /* Add software bits in status register */
3538 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3539 IDEDMA_CTL, idedma_ctl);
3540 }
3541 pciide_print_modes(cp);
3542 }
3543
3544 int
3545 hpt_pci_intr(arg)
3546 void *arg;
3547 {
3548 struct pciide_softc *sc = arg;
3549 struct pciide_channel *cp;
3550 struct channel_softc *wdc_cp;
3551 int rv = 0;
3552 int dmastat, i, crv;
3553
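	/*
	 * Poll each channel's bus-master status: only service a channel
	 * whose DMA engine shows an interrupt pending (INTR set) and no
	 * transfer still active (ACT clear).
	 */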
3554 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3555 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3556 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3557 		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3558 		    IDEDMA_CTL_INTR)
3559 continue;
3560 cp = &sc->pciide_channels[i];
3561 wdc_cp = &cp->wdc_channel;
3562 crv = wdcintr(wdc_cp);
3563 if (crv == 0) {
3564 printf("%s:%d: bogus intr\n",
3565 sc->sc_wdcdev.sc_dev.dv_xname, i);
3566 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3567 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3568 } else
3569 rv = 1;
3570 }
3571 return rv;
3572 }
3573
3574
3575 /* Macros to test product */
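/*
 * Note that the sets matched by these macros nest: every product matched by
 * PDC_IS_276 is also matched by PDC_IS_268, every PDC_IS_268 product by
 * PDC_IS_265, and every PDC_IS_265 product by PDC_IS_262.
 * pdc202xx_chip_map() uses them to pick the UDMA capability and, for the
 * 268 and later, the alternate channel setup routine.
 */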
3576 #define PDC_IS_262(sc) \
3577 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3578 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3579 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3580 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3581 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3582 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3583 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3584 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3585 #define PDC_IS_265(sc) \
3586 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3587 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3588 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3589 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3590 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3591 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3592 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3593 #define PDC_IS_268(sc) \
3594 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3595 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3596 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3597 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3598 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3599 #define PDC_IS_276(sc) \
3600 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3601 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3602 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3603
3604 void
3605 pdc202xx_chip_map(sc, pa)
3606 struct pciide_softc *sc;
3607 struct pci_attach_args *pa;
3608 {
3609 struct pciide_channel *cp;
3610 int channel;
3611 pcireg_t interface, st, mode;
3612 bus_size_t cmdsize, ctlsize;
3613
3614 if (!PDC_IS_268(sc)) {
3615 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3616 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3617 st), DEBUG_PROBE);
3618 }
3619 if (pciide_chipen(sc, pa) == 0)
3620 return;
3621
3622 /* turn off RAID mode */
3623 if (!PDC_IS_268(sc))
3624 st &= ~PDC2xx_STATE_IDERAID;
3625
3626 	/*
3627 	 * We can't rely on the PCI_CLASS_REG contents if the chip was in
3628 	 * RAID mode; we have to fake the interface.
3629 	 */
3630 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3631 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3632 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3633
3634 printf("%s: bus-master DMA support present",
3635 sc->sc_wdcdev.sc_dev.dv_xname);
3636 pciide_mapreg_dma(sc, pa);
3637 printf("\n");
3638 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3639 WDC_CAPABILITY_MODE;
3640 if (sc->sc_dma_ok) {
3641 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3642 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3643 sc->sc_wdcdev.irqack = pciide_irqack;
3644 }
3645 sc->sc_wdcdev.PIO_cap = 4;
3646 sc->sc_wdcdev.DMA_cap = 2;
3647 if (PDC_IS_276(sc))
3648 sc->sc_wdcdev.UDMA_cap = 6;
3649 else if (PDC_IS_265(sc))
3650 sc->sc_wdcdev.UDMA_cap = 5;
3651 else if (PDC_IS_262(sc))
3652 sc->sc_wdcdev.UDMA_cap = 4;
3653 else
3654 sc->sc_wdcdev.UDMA_cap = 2;
3655 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3656 pdc20268_setup_channel : pdc202xx_setup_channel;
3657 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3658 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3659
3660 if (!PDC_IS_268(sc)) {
3661 /* setup failsafe defaults */
3662 mode = 0;
3663 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3664 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3665 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3666 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3667 for (channel = 0;
3668 channel < sc->sc_wdcdev.nchannels;
3669 channel++) {
3670 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3671 "drive 0 initial timings 0x%x, now 0x%x\n",
3672 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3673 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3674 DEBUG_PROBE);
3675 pci_conf_write(sc->sc_pc, sc->sc_tag,
3676 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3677 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3678 "drive 1 initial timings 0x%x, now 0x%x\n",
3679 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3680 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3681 pci_conf_write(sc->sc_pc, sc->sc_tag,
3682 PDC2xx_TIM(channel, 1), mode);
3683 }
3684
3685 mode = PDC2xx_SCR_DMA;
3686 if (PDC_IS_262(sc)) {
3687 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3688 } else {
3689 /* the BIOS set it up this way */
3690 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3691 }
3692 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3693 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3694 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3695 "now 0x%x\n",
3696 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3697 PDC2xx_SCR),
3698 mode), DEBUG_PROBE);
3699 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3700 PDC2xx_SCR, mode);
3701
3702 /* controller initial state register is OK even without BIOS */
3703 /* Set DMA mode to IDE DMA compatibility */
3704 mode =
3705 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3706 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3707 DEBUG_PROBE);
3708 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3709 mode | 0x1);
3710 mode =
3711 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3712 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3713 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3714 mode | 0x1);
3715 }
3716
3717 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3718 cp = &sc->pciide_channels[channel];
3719 if (pciide_chansetup(sc, channel, interface) == 0)
3720 continue;
3721 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3722 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3723 printf("%s: %s channel ignored (disabled)\n",
3724 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3725 continue;
3726 }
3727 if (PDC_IS_265(sc))
3728 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3729 pdc20265_pci_intr);
3730 else
3731 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3732 pdc202xx_pci_intr);
3733 if (cp->hw_ok == 0)
3734 continue;
3735 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3736 st &= ~(PDC_IS_262(sc) ?
3737 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3738 pciide_map_compat_intr(pa, cp, channel, interface);
3739 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3740 }
3741 if (!PDC_IS_268(sc)) {
3742 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3743 "0x%x\n", st), DEBUG_PROBE);
3744 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3745 }
3746 return;
3747 }
3748
3749 void
3750 pdc202xx_setup_channel(chp)
3751 struct channel_softc *chp;
3752 {
3753 struct ata_drive_datas *drvp;
3754 int drive;
3755 pcireg_t mode, st;
3756 u_int32_t idedma_ctl, scr, atapi;
3757 struct pciide_channel *cp = (struct pciide_channel*)chp;
3758 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3759 int channel = chp->channel;
3760
3761 /* setup DMA if needed */
3762 pciide_channel_dma_setup(cp);
3763
3764 idedma_ctl = 0;
3765 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3766 sc->sc_wdcdev.sc_dev.dv_xname,
3767 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3768 DEBUG_PROBE);
3769
3770 /* Per channel settings */
3771 if (PDC_IS_262(sc)) {
3772 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3773 PDC262_U66);
3774 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3775 /* Trim UDMA mode */
3776 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3777 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3778 chp->ch_drive[0].UDMA_mode <= 2) ||
3779 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3780 chp->ch_drive[1].UDMA_mode <= 2)) {
3781 if (chp->ch_drive[0].UDMA_mode > 2)
3782 chp->ch_drive[0].UDMA_mode = 2;
3783 if (chp->ch_drive[1].UDMA_mode > 2)
3784 chp->ch_drive[1].UDMA_mode = 2;
3785 }
3786 /* Set U66 if needed */
3787 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3788 chp->ch_drive[0].UDMA_mode > 2) ||
3789 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3790 chp->ch_drive[1].UDMA_mode > 2))
3791 scr |= PDC262_U66_EN(channel);
3792 else
3793 scr &= ~PDC262_U66_EN(channel);
3794 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3795 PDC262_U66, scr);
3796 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3797 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3798 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3799 PDC262_ATAPI(channel))), DEBUG_PROBE);
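		/*
		 * For channels with an ATAPI device: if one drive runs
		 * Ultra/DMA while the other uses plain multiword DMA, clear
		 * the channel's ATAPI UDMA setting; otherwise enable it.
		 */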
3800 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3801 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3802 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3803 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3804 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3805 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3806 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3807 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3808 atapi = 0;
3809 else
3810 atapi = PDC262_ATAPI_UDMA;
3811 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3812 PDC262_ATAPI(channel), atapi);
3813 }
3814 }
3815 for (drive = 0; drive < 2; drive++) {
3816 drvp = &chp->ch_drive[drive];
3817 /* If no drive, skip */
3818 if ((drvp->drive_flags & DRIVE) == 0)
3819 continue;
3820 mode = 0;
3821 if (drvp->drive_flags & DRIVE_UDMA) {
3822 /* use Ultra/DMA */
3823 drvp->drive_flags &= ~DRIVE_DMA;
3824 mode = PDC2xx_TIM_SET_MB(mode,
3825 pdc2xx_udma_mb[drvp->UDMA_mode]);
3826 mode = PDC2xx_TIM_SET_MC(mode,
3827 pdc2xx_udma_mc[drvp->UDMA_mode]);
3828 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3829 } else if (drvp->drive_flags & DRIVE_DMA) {
3830 mode = PDC2xx_TIM_SET_MB(mode,
3831 pdc2xx_dma_mb[drvp->DMA_mode]);
3832 mode = PDC2xx_TIM_SET_MC(mode,
3833 pdc2xx_dma_mc[drvp->DMA_mode]);
3834 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3835 } else {
3836 mode = PDC2xx_TIM_SET_MB(mode,
3837 pdc2xx_dma_mb[0]);
3838 mode = PDC2xx_TIM_SET_MC(mode,
3839 pdc2xx_dma_mc[0]);
3840 }
3841 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3842 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3843 if (drvp->drive_flags & DRIVE_ATA)
3844 mode |= PDC2xx_TIM_PRE;
3845 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3846 if (drvp->PIO_mode >= 3) {
3847 mode |= PDC2xx_TIM_IORDY;
3848 if (drive == 0)
3849 mode |= PDC2xx_TIM_IORDYp;
3850 }
3851 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3852 "timings 0x%x\n",
3853 sc->sc_wdcdev.sc_dev.dv_xname,
3854 chp->channel, drive, mode), DEBUG_PROBE);
3855 pci_conf_write(sc->sc_pc, sc->sc_tag,
3856 PDC2xx_TIM(chp->channel, drive), mode);
3857 }
3858 if (idedma_ctl != 0) {
3859 /* Add software bits in status register */
3860 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3861 IDEDMA_CTL, idedma_ctl);
3862 }
3863 pciide_print_modes(cp);
3864 }
3865
3866 void
3867 pdc20268_setup_channel(chp)
3868 struct channel_softc *chp;
3869 {
3870 struct ata_drive_datas *drvp;
3871 int drive;
3872 u_int32_t idedma_ctl;
3873 struct pciide_channel *cp = (struct pciide_channel*)chp;
3874 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3875 int u100;
3876
3877 /* setup DMA if needed */
3878 pciide_channel_dma_setup(cp);
3879
3880 idedma_ctl = 0;
3881
3882 	/* I don't know what this is for; FreeBSD does it ... */
3883 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3884 IDEDMA_CMD + 0x1, 0x0b);
3885
3886 	/*
3887 	 * I don't know what this is for; FreeBSD checks it ... it is not
3888 	 * cable type detection.
3889 	 */
3890 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3891 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
3892
3893 for (drive = 0; drive < 2; drive++) {
3894 drvp = &chp->ch_drive[drive];
3895 /* If no drive, skip */
3896 if ((drvp->drive_flags & DRIVE) == 0)
3897 continue;
3898 if (drvp->drive_flags & DRIVE_UDMA) {
3899 /* use Ultra/DMA */
3900 drvp->drive_flags &= ~DRIVE_DMA;
3901 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3902 if (drvp->UDMA_mode > 2 && u100 == 0)
3903 drvp->UDMA_mode = 2;
3904 } else if (drvp->drive_flags & DRIVE_DMA) {
3905 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3906 }
3907 }
3908 	/* nothing to do to set up modes; the controller snoops the SET_FEATURES cmd */
3909 if (idedma_ctl != 0) {
3910 /* Add software bits in status register */
3911 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3912 IDEDMA_CTL, idedma_ctl);
3913 }
3914 pciide_print_modes(cp);
3915 }
3916
3917 int
3918 pdc202xx_pci_intr(arg)
3919 void *arg;
3920 {
3921 struct pciide_softc *sc = arg;
3922 struct pciide_channel *cp;
3923 struct channel_softc *wdc_cp;
3924 int i, rv, crv;
3925 u_int32_t scr;
3926
3927 rv = 0;
3928 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3929 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3930 cp = &sc->pciide_channels[i];
3931 wdc_cp = &cp->wdc_channel;
3932 		/* If it's a compat channel, skip. */
3933 if (cp->compat)
3934 continue;
3935 if (scr & PDC2xx_SCR_INT(i)) {
3936 crv = wdcintr(wdc_cp);
3937 if (crv == 0)
3938 printf("%s:%d: bogus intr (reg 0x%x)\n",
3939 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3940 else
3941 rv = 1;
3942 }
3943 }
3944 return rv;
3945 }
3946
3947 int
3948 pdc20265_pci_intr(arg)
3949 void *arg;
3950 {
3951 struct pciide_softc *sc = arg;
3952 struct pciide_channel *cp;
3953 struct channel_softc *wdc_cp;
3954 int i, rv, crv;
3955 u_int32_t dmastat;
3956
3957 rv = 0;
3958 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3959 cp = &sc->pciide_channels[i];
3960 wdc_cp = &cp->wdc_channel;
3961 		/* If it's a compat channel, skip. */
3962 if (cp->compat)
3963 continue;
3964 		/*
3965 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
3966 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
3967 		 * so use that instead (this requires 2 register reads instead
3968 		 * of 1, but there is no other way to do it).
3969 		 */
3970 dmastat = bus_space_read_1(sc->sc_dma_iot,
3971 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3972 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3973 continue;
3974 crv = wdcintr(wdc_cp);
3975 if (crv == 0)
3976 printf("%s:%d: bogus intr\n",
3977 sc->sc_wdcdev.sc_dev.dv_xname, i);
3978 else
3979 rv = 1;
3980 }
3981 return rv;
3982 }
3983
3984 void
3985 opti_chip_map(sc, pa)
3986 struct pciide_softc *sc;
3987 struct pci_attach_args *pa;
3988 {
3989 struct pciide_channel *cp;
3990 bus_size_t cmdsize, ctlsize;
3991 pcireg_t interface;
3992 u_int8_t init_ctrl;
3993 int channel;
3994
3995 if (pciide_chipen(sc, pa) == 0)
3996 return;
3997 printf("%s: bus-master DMA support present",
3998 sc->sc_wdcdev.sc_dev.dv_xname);
3999
4000 /*
4001 * XXXSCW:
4002 * There seem to be a couple of buggy revisions/implementations
4003 * of the OPTi pciide chipset. This kludge seems to fix one of
4004 * the reported problems (PR/11644) but still fails for the
4005 * other (PR/13151), although the latter may be due to other
4006 * issues too...
4007 */
4008 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4009 printf(" but disabled due to chip rev. <= 0x12");
4010 sc->sc_dma_ok = 0;
4011 } else
4012 pciide_mapreg_dma(sc, pa);
4013
4014 printf("\n");
4015
4016 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4017 WDC_CAPABILITY_MODE;
4018 sc->sc_wdcdev.PIO_cap = 4;
4019 if (sc->sc_dma_ok) {
4020 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4021 sc->sc_wdcdev.irqack = pciide_irqack;
4022 sc->sc_wdcdev.DMA_cap = 2;
4023 }
4024 sc->sc_wdcdev.set_modes = opti_setup_channel;
4025
4026 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4027 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4028
4029 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4030 OPTI_REG_INIT_CONTROL);
4031
4032 interface = PCI_INTERFACE(pa->pa_class);
4033
4034 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4035 cp = &sc->pciide_channels[channel];
4036 if (pciide_chansetup(sc, channel, interface) == 0)
4037 continue;
4038 if (channel == 1 &&
4039 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4040 printf("%s: %s channel ignored (disabled)\n",
4041 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4042 continue;
4043 }
4044 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4045 pciide_pci_intr);
4046 if (cp->hw_ok == 0)
4047 continue;
4048 pciide_map_compat_intr(pa, cp, channel, interface);
4049 if (cp->hw_ok == 0)
4050 continue;
4051 opti_setup_channel(&cp->wdc_channel);
4052 }
4053 }
4054
4055 void
4056 opti_setup_channel(chp)
4057 struct channel_softc *chp;
4058 {
4059 struct ata_drive_datas *drvp;
4060 struct pciide_channel *cp = (struct pciide_channel*)chp;
4061 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4062 int drive, spd;
4063 int mode[2];
4064 u_int8_t rv, mr;
4065
4066 /*
4067 * The `Delay' and `Address Setup Time' fields of the
4068 * Miscellaneous Register are always zero initially.
4069 */
4070 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4071 mr &= ~(OPTI_MISC_DELAY_MASK |
4072 OPTI_MISC_ADDR_SETUP_MASK |
4073 OPTI_MISC_INDEX_MASK);
4074
4075 /* Prime the control register before setting timing values */
4076 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4077
4078 	/* Determine the clock rate of the PCI bus the chip is attached to */
4079 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4080 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4081
4082 /* setup DMA if needed */
4083 pciide_channel_dma_setup(cp);
4084
4085 for (drive = 0; drive < 2; drive++) {
4086 drvp = &chp->ch_drive[drive];
4087 /* If no drive, skip */
4088 if ((drvp->drive_flags & DRIVE) == 0) {
4089 mode[drive] = -1;
4090 continue;
4091 }
4092
4093 if ((drvp->drive_flags & DRIVE_DMA)) {
4094 /*
4095 * Timings will be used for both PIO and DMA,
4096 * so adjust DMA mode if needed
4097 */
4098 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4099 drvp->PIO_mode = drvp->DMA_mode + 2;
4100 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4101 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4102 drvp->PIO_mode - 2 : 0;
4103 if (drvp->DMA_mode == 0)
4104 drvp->PIO_mode = 0;
4105
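			/*
			 * The opti_tim_* tables appear to list the five PIO
			 * modes first and the multiword DMA modes after them,
			 * hence the +5 offset into the tables.
			 */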
4106 mode[drive] = drvp->DMA_mode + 5;
4107 } else
4108 mode[drive] = drvp->PIO_mode;
4109
4110 if (drive && mode[0] >= 0 &&
4111 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4112 /*
4113 * Can't have two drives using different values
4114 * for `Address Setup Time'.
4115 * Slow down the faster drive to compensate.
4116 */
4117 int d = (opti_tim_as[spd][mode[0]] >
4118 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4119
4120 mode[d] = mode[1-d];
4121 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4122 chp->ch_drive[d].DMA_mode = 0;
4123 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4124 }
4125 }
4126
4127 for (drive = 0; drive < 2; drive++) {
4128 int m;
4129 if ((m = mode[drive]) < 0)
4130 continue;
4131
4132 /* Set the Address Setup Time and select appropriate index */
4133 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4134 rv |= OPTI_MISC_INDEX(drive);
4135 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4136
4137 /* Set the pulse width and recovery timing parameters */
4138 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4139 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4140 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4141 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4142
4143 /* Set the Enhanced Mode register appropriately */
4144 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4145 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4146 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4147 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4148 }
4149
4150 /* Finally, enable the timings */
4151 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4152
4153 pciide_print_modes(cp);
4154 }
4155
4156 #define ACARD_IS_850(sc) \
4157 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4158
4159 void
4160 acard_chip_map(sc, pa)
4161 struct pciide_softc *sc;
4162 struct pci_attach_args *pa;
4163 {
4164 struct pciide_channel *cp;
4165 int i;
4166 pcireg_t interface;
4167 bus_size_t cmdsize, ctlsize;
4168
4169 if (pciide_chipen(sc, pa) == 0)
4170 return;
4171
4172 	/*
4173 	 * When the chip is in native mode it identifies itself as
4174 	 * 'misc mass storage' rather than IDE; fake the interface in that case.
4175 	 */
4176 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4177 interface = PCI_INTERFACE(pa->pa_class);
4178 } else {
4179 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4180 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4181 }
4182
4183 printf("%s: bus-master DMA support present",
4184 sc->sc_wdcdev.sc_dev.dv_xname);
4185 pciide_mapreg_dma(sc, pa);
4186 printf("\n");
4187 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4188 WDC_CAPABILITY_MODE;
4189
4190 if (sc->sc_dma_ok) {
4191 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4192 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4193 sc->sc_wdcdev.irqack = pciide_irqack;
4194 }
4195 sc->sc_wdcdev.PIO_cap = 4;
4196 sc->sc_wdcdev.DMA_cap = 2;
4197 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4198
4199 sc->sc_wdcdev.set_modes = acard_setup_channel;
4200 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4201 sc->sc_wdcdev.nchannels = 2;
4202
4203 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4204 cp = &sc->pciide_channels[i];
4205 if (pciide_chansetup(sc, i, interface) == 0)
4206 continue;
4207 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4208 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4209 &ctlsize, pciide_pci_intr);
4210 } else {
4211 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4212 &cmdsize, &ctlsize);
4213 }
4214 if (cp->hw_ok == 0)
4215 return;
4216 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4217 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4218 wdcattach(&cp->wdc_channel);
4219 acard_setup_channel(&cp->wdc_channel);
4220 }
4221 if (!ACARD_IS_850(sc)) {
4222 u_int32_t reg;
4223 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4224 reg &= ~ATP860_CTRL_INT;
4225 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4226 }
4227 }
4228
4229 void
4230 acard_setup_channel(chp)
4231 struct channel_softc *chp;
4232 {
4233 struct ata_drive_datas *drvp;
4234 struct pciide_channel *cp = (struct pciide_channel*)chp;
4235 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4236 int channel = chp->channel;
4237 int drive;
4238 u_int32_t idetime, udma_mode;
4239 u_int32_t idedma_ctl;
4240
4241 /* setup DMA if needed */
4242 pciide_channel_dma_setup(cp);
4243
4244 if (ACARD_IS_850(sc)) {
4245 idetime = 0;
4246 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4247 udma_mode &= ~ATP850_UDMA_MASK(channel);
4248 } else {
4249 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4250 idetime &= ~ATP860_SETTIME_MASK(channel);
4251 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4252 udma_mode &= ~ATP860_UDMA_MASK(channel);
4253
4254 		/* check for an 80-pin cable */
4255 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4256 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4257 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4258 & ATP860_CTRL_80P(chp->channel)) {
4259 if (chp->ch_drive[0].UDMA_mode > 2)
4260 chp->ch_drive[0].UDMA_mode = 2;
4261 if (chp->ch_drive[1].UDMA_mode > 2)
4262 chp->ch_drive[1].UDMA_mode = 2;
4263 }
4264 }
4265 }
4266
4267 idedma_ctl = 0;
4268
4269 /* Per drive settings */
4270 for (drive = 0; drive < 2; drive++) {
4271 drvp = &chp->ch_drive[drive];
4272 /* If no drive, skip */
4273 if ((drvp->drive_flags & DRIVE) == 0)
4274 continue;
4275 /* add timing values, setup DMA if needed */
4276 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4277 (drvp->drive_flags & DRIVE_UDMA)) {
4278 /* use Ultra/DMA */
4279 if (ACARD_IS_850(sc)) {
4280 idetime |= ATP850_SETTIME(drive,
4281 acard_act_udma[drvp->UDMA_mode],
4282 acard_rec_udma[drvp->UDMA_mode]);
4283 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4284 acard_udma_conf[drvp->UDMA_mode]);
4285 } else {
4286 idetime |= ATP860_SETTIME(channel, drive,
4287 acard_act_udma[drvp->UDMA_mode],
4288 acard_rec_udma[drvp->UDMA_mode]);
4289 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4290 acard_udma_conf[drvp->UDMA_mode]);
4291 }
4292 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4293 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4294 (drvp->drive_flags & DRIVE_DMA)) {
4295 /* use Multiword DMA */
4296 drvp->drive_flags &= ~DRIVE_UDMA;
4297 if (ACARD_IS_850(sc)) {
4298 idetime |= ATP850_SETTIME(drive,
4299 acard_act_dma[drvp->DMA_mode],
4300 acard_rec_dma[drvp->DMA_mode]);
4301 } else {
4302 idetime |= ATP860_SETTIME(channel, drive,
4303 acard_act_dma[drvp->DMA_mode],
4304 acard_rec_dma[drvp->DMA_mode]);
4305 }
4306 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4307 } else {
4308 /* PIO only */
4309 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4310 if (ACARD_IS_850(sc)) {
4311 idetime |= ATP850_SETTIME(drive,
4312 acard_act_pio[drvp->PIO_mode],
4313 acard_rec_pio[drvp->PIO_mode]);
4314 } else {
4315 idetime |= ATP860_SETTIME(channel, drive,
4316 acard_act_pio[drvp->PIO_mode],
4317 acard_rec_pio[drvp->PIO_mode]);
4318 }
4319 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4320 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4321 | ATP8x0_CTRL_EN(channel));
4322 }
4323 }
4324
4325 if (idedma_ctl != 0) {
4326 /* Add software bits in status register */
4327 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4328 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4329 }
4330 pciide_print_modes(cp);
4331
4332 if (ACARD_IS_850(sc)) {
4333 pci_conf_write(sc->sc_pc, sc->sc_tag,
4334 ATP850_IDETIME(channel), idetime);
4335 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4336 } else {
4337 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4338 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4339 }
4340 }
4341
4342 int
4343 acard_pci_intr(arg)
4344 void *arg;
4345 {
4346 struct pciide_softc *sc = arg;
4347 struct pciide_channel *cp;
4348 struct channel_softc *wdc_cp;
4349 int rv = 0;
4350 int dmastat, i, crv;
4351
4352 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4353 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4354 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4355 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4356 continue;
4357 cp = &sc->pciide_channels[i];
4358 wdc_cp = &cp->wdc_channel;
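		/*
		 * If no command is pending on this channel, the interrupt is
		 * presumably stray; run wdcintr() anyway and write the status
		 * back to clear the DMA interrupt bit so it doesn't stay
		 * asserted.
		 */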
4359 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4360 (void)wdcintr(wdc_cp);
4361 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4362 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4363 continue;
4364 }
4365 crv = wdcintr(wdc_cp);
4366 if (crv == 0)
4367 printf("%s:%d: bogus intr\n",
4368 sc->sc_wdcdev.sc_dev.dv_xname, i);
4369 else if (crv == 1)
4370 rv = 1;
4371 else if (rv == 0)
4372 rv = crv;
4373 }
4374 return rv;
4375 }
4376
4377 static int
4378 sl82c105_bugchk(struct pci_attach_args *pa)
4379 {
4380
4381 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4382 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4383 return (0);
4384
4385 if (PCI_REVISION(pa->pa_class) <= 0x05)
4386 return (1);
4387
4388 return (0);
4389 }
4390
4391 void
4392 sl82c105_chip_map(sc, pa)
4393 struct pciide_softc *sc;
4394 struct pci_attach_args *pa;
4395 {
4396 struct pciide_channel *cp;
4397 bus_size_t cmdsize, ctlsize;
4398 pcireg_t interface, idecr;
4399 int channel;
4400
4401 if (pciide_chipen(sc, pa) == 0)
4402 return;
4403
4404 printf("%s: bus-master DMA support present",
4405 sc->sc_wdcdev.sc_dev.dv_xname);
4406
4407 /*
4408 * Check to see if we're part of the Winbond 83c553 Southbridge.
4409 * If so, we need to disable DMA on rev. <= 5 of that chip.
4410 */
4411 if (pci_find_device(pa, sl82c105_bugchk)) {
4412 printf(" but disabled due to 83c553 rev. <= 0x05");
4413 sc->sc_dma_ok = 0;
4414 } else
4415 pciide_mapreg_dma(sc, pa);
4416 printf("\n");
4417
4418 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4419 WDC_CAPABILITY_MODE;
4420 sc->sc_wdcdev.PIO_cap = 4;
4421 if (sc->sc_dma_ok) {
4422 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4423 sc->sc_wdcdev.irqack = pciide_irqack;
4424 sc->sc_wdcdev.DMA_cap = 2;
4425 }
4426 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4427
4428 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4429 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4430
4431 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4432
4433 interface = PCI_INTERFACE(pa->pa_class);
4434
4435 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4436 cp = &sc->pciide_channels[channel];
4437 if (pciide_chansetup(sc, channel, interface) == 0)
4438 continue;
4439 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4440 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4441 printf("%s: %s channel ignored (disabled)\n",
4442 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4443 continue;
4444 }
4445 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4446 pciide_pci_intr);
4447 if (cp->hw_ok == 0)
4448 continue;
4449 pciide_map_compat_intr(pa, cp, channel, interface);
4450 if (cp->hw_ok == 0)
4451 continue;
4452 sl82c105_setup_channel(&cp->wdc_channel);
4453 }
4454 }
4455
4456 void
4457 sl82c105_setup_channel(chp)
4458 struct channel_softc *chp;
4459 {
4460 struct ata_drive_datas *drvp;
4461 struct pciide_channel *cp = (struct pciide_channel*)chp;
4462 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4463 int pxdx_reg, drive;
4464 pcireg_t pxdx;
4465
4466 /* Set up DMA if needed. */
4467 pciide_channel_dma_setup(cp);
4468
4469 for (drive = 0; drive < 2; drive++) {
4470 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4471 : SYMPH_P1D0CR) + (drive * 4);
4472
4473 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4474
4475 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4476 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4477
4478 drvp = &chp->ch_drive[drive];
4479 /* If no drive, skip. */
4480 if ((drvp->drive_flags & DRIVE) == 0) {
4481 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4482 continue;
4483 }
4484
4485 if (drvp->drive_flags & DRIVE_DMA) {
4486 /*
4487 * Timings will be used for both PIO and DMA,
4488 * so adjust DMA mode if needed.
4489 */
4490 if (drvp->PIO_mode >= 3) {
4491 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4492 drvp->DMA_mode = drvp->PIO_mode - 2;
4493 if (drvp->DMA_mode < 1) {
4494 /*
4495 * Can't mix both PIO and DMA.
4496 * Disable DMA.
4497 */
4498 drvp->drive_flags &= ~DRIVE_DMA;
4499 }
4500 } else {
4501 /*
4502 * Can't mix both PIO and DMA. Disable
4503 * DMA.
4504 */
4505 drvp->drive_flags &= ~DRIVE_DMA;
4506 }
4507 }
4508
4509 if (drvp->drive_flags & DRIVE_DMA) {
4510 /* Use multi-word DMA. */
4511 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4512 PxDx_CMD_ON_SHIFT;
4513 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4514 } else {
4515 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4516 PxDx_CMD_ON_SHIFT;
4517 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4518 }
4519
4520 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4521
4522 /* ...and set the mode for this drive. */
4523 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4524 }
4525
4526 pciide_print_modes(cp);
4527 }
4528
4529 void
4530 serverworks_chip_map(sc, pa)
4531 struct pciide_softc *sc;
4532 struct pci_attach_args *pa;
4533 {
4534 struct pciide_channel *cp;
4535 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4536 pcitag_t pcib_tag;
4537 int channel;
4538 bus_size_t cmdsize, ctlsize;
4539
4540 if (pciide_chipen(sc, pa) == 0)
4541 return;
4542
4543 printf("%s: bus-master DMA support present",
4544 sc->sc_wdcdev.sc_dev.dv_xname);
4545 pciide_mapreg_dma(sc, pa);
4546 printf("\n");
4547 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4548 WDC_CAPABILITY_MODE;
4549
4550 if (sc->sc_dma_ok) {
4551 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4552 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4553 sc->sc_wdcdev.irqack = pciide_irqack;
4554 }
4555 sc->sc_wdcdev.PIO_cap = 4;
4556 sc->sc_wdcdev.DMA_cap = 2;
4557 switch (sc->sc_pp->ide_product) {
4558 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4559 sc->sc_wdcdev.UDMA_cap = 2;
4560 break;
4561 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4562 if (PCI_REVISION(pa->pa_class) < 0x92)
4563 sc->sc_wdcdev.UDMA_cap = 4;
4564 else
4565 sc->sc_wdcdev.UDMA_cap = 5;
4566 break;
4567 }
4568
4569 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4570 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4571 sc->sc_wdcdev.nchannels = 2;
4572
4573 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4574 cp = &sc->pciide_channels[channel];
4575 if (pciide_chansetup(sc, channel, interface) == 0)
4576 continue;
4577 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4578 serverworks_pci_intr);
4579 if (cp->hw_ok == 0)
4580 return;
4581 pciide_map_compat_intr(pa, cp, channel, interface);
4582 if (cp->hw_ok == 0)
4583 return;
4584 serverworks_setup_channel(&cp->wdc_channel);
4585 }
4586
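	/*
	 * Tweak config register 0x64 of the companion bridge at function 0
	 * of this device: clear bit 0x2000 and set bit 0x4000.  The meaning
	 * of these bits is not documented here.
	 */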
4587 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4588 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4589 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4590 }
4591
4592 void
4593 serverworks_setup_channel(chp)
4594 struct channel_softc *chp;
4595 {
4596 struct ata_drive_datas *drvp;
4597 struct pciide_channel *cp = (struct pciide_channel*)chp;
4598 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4599 int channel = chp->channel;
4600 int drive, unit;
4601 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4602 u_int32_t idedma_ctl;
4603 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4604 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4605
4606 /* setup DMA if needed */
4607 pciide_channel_dma_setup(cp);
4608
4609 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4610 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4611 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4612 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4613
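	/*
	 * Clear this channel's fields in each register before ORing in the
	 * per-drive values below (0x40: PIO timing, 0x44: MWDMA timing,
	 * 0x48: PIO mode, 0x54: UDMA mode, per the reads above).
	 */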
4614 pio_time &= ~(0xffff << (16 * channel));
4615 dma_time &= ~(0xffff << (16 * channel));
4616 pio_mode &= ~(0xff << (8 * channel + 16));
4617 udma_mode &= ~(0xff << (8 * channel + 16));
4618 udma_mode &= ~(3 << (2 * channel));
4619
4620 idedma_ctl = 0;
4621
4622 /* Per drive settings */
4623 for (drive = 0; drive < 2; drive++) {
4624 drvp = &chp->ch_drive[drive];
4625 /* If no drive, skip */
4626 if ((drvp->drive_flags & DRIVE) == 0)
4627 continue;
4628 unit = drive + 2 * channel;
4629 /* add timing values, setup DMA if needed */
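		/*
		 * Note the (unit ^ 1) byte index: within each channel's
		 * 16-bit half of the timing registers, drive 0's byte sits
		 * above drive 1's.
		 */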
4630 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4631 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4632 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4633 (drvp->drive_flags & DRIVE_UDMA)) {
4634 /* use Ultra/DMA, check for 80-pin cable */
4635 if (drvp->UDMA_mode > 2 &&
4636 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4637 drvp->UDMA_mode = 2;
4638 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4639 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4640 udma_mode |= 1 << unit;
4641 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4642 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4643 (drvp->drive_flags & DRIVE_DMA)) {
4644 /* use Multiword DMA */
4645 drvp->drive_flags &= ~DRIVE_UDMA;
4646 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4647 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4648 } else {
4649 /* PIO only */
4650 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4651 }
4652 }
4653
4654 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4655 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4656 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4657 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4658 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4659
4660 if (idedma_ctl != 0) {
4661 /* Add software bits in status register */
4662 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4663 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4664 }
4665 pciide_print_modes(cp);
4666 }
4667
4668 int
4669 serverworks_pci_intr(arg)
4670 void *arg;
4671 {
4672 struct pciide_softc *sc = arg;
4673 struct pciide_channel *cp;
4674 struct channel_softc *wdc_cp;
4675 int rv = 0;
4676 int dmastat, i, crv;
4677
4678 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4679 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4680 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4681 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4682 IDEDMA_CTL_INTR)
4683 continue;
4684 cp = &sc->pciide_channels[i];
4685 wdc_cp = &cp->wdc_channel;
4686 crv = wdcintr(wdc_cp);
4687 if (crv == 0) {
4688 printf("%s:%d: bogus intr\n",
4689 sc->sc_wdcdev.sc_dev.dv_xname, i);
4690 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4691 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4692 } else
4693 rv = 1;
4694 }
4695 return rv;
4696 }
4697