/*	$NetBSD: aceride.c,v 1.36 2013/10/07 19:51:55 jakllsch Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aceride.c,v 1.36 2013/10/07 19:51:55 jakllsch Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_acer_reg.h>

static int acer_pcib_match(const struct pci_attach_args *);
static void acer_do_reset(struct ata_channel *, int);
static void acer_chip_map(struct pciide_softc*, const struct pci_attach_args*);
static void acer_setup_channel(struct ata_channel*);
static int  acer_pci_intr(void *);
static int  acer_dma_init(void *, int, int, void *, size_t, int);

static int  aceride_match(device_t, cfdata_t, void *);
static void aceride_attach(device_t, device_t, void *);

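/*
 * Driver-private softc: wraps the generic pciide softc and remembers
 * the attach args of the ALi M1533 PCI-ISA bridge, which the
 * revision 0xC3 reset workaround needs (see acer_do_reset()).
 */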
struct aceride_softc {
	struct pciide_softc pciide_sc;
	struct pci_attach_args pcib_pa;
};

CFATTACH_DECL_NEW(aceride, sizeof(struct aceride_softc),
    aceride_match, aceride_attach, pciide_detach, NULL);

static const struct pciide_product_desc pciide_acer_products[] =  {
	{ PCI_PRODUCT_ALI_M5229,
	  0,
	  "Acer Labs M5229 UDMA IDE Controller",
	  acer_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

static int
aceride_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		if (pciide_lookup_product(pa->pa_id, pciide_acer_products))
			return (2);
	}
	return (0);
}

static void
aceride_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct pciide_softc *sc = device_private(self);

	sc->sc_wdcdev.sc_atac.atac_dev = self;

	pciide_common_attach(sc, pa,
	    pciide_lookup_product(pa->pa_id, pciide_acer_products));
}

static int
acer_pcib_match(const struct pci_attach_args *pa)
{
	/*
	 * we need to access the PCI config space of the pcib, see
	 * acer_do_reset()
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_ISA &&
	    PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALI_M1533)
		return 1;
	return 0;
}

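/*
 * Chip-specific setup: derive DMA/UDMA capabilities from the chip
 * revision, enable DMA and the FIFO via the ACER_CDRC register, make
 * the channel status/timing registers writable, optionally force
 * compat mode, install workarounds for known silicon bugs and map
 * both channels.
 */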
static void
acer_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t cr, interface;
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	struct aceride_softc *acer_sc = (struct aceride_softc *)sc;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
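		/*
		 * The chip revision determines the fastest Ultra-DMA mode:
		 * rev >= 0xC7 supports UDMA/133 (mode 6), >= 0xC4 UDMA/100
		 * (mode 5), >= 0xC2 UDMA/66 (mode 4); older UDMA-capable
		 * revisions top out at UDMA/33 (mode 2).
		 */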
		if (rev >= 0x20) {
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			if (rev >= 0xC7)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			else if (rev >= 0xC4)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			else if (rev >= 0xC2)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			else
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
		}
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (rev <= 0xc4) {
			sc->sc_wdcdev.dma_init = acer_dma_init;
			aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			 "using PIO transfers above 137GB as workaround for "
			 "48bit DMA access bug, expect reduced performance\n");
		}
	}

	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = acer_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

	/* Enable "microsoft register bits" R/W. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
	    ~ACER_CHANSTATUSREGS_RO);
	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);

	{
		/*
		 * Some BIOSes (e.g. port-cats ABLE) enable native mode but
		 * don't set everything up correctly, so allow forcing
		 * compat mode.
		 */
		bool force_compat_mode;
		bool property_is_set;
		property_is_set = prop_dictionary_get_bool(
				device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
				"ali1543-ide-force-compat-mode",
				&force_compat_mode);
		if (property_is_set && force_compat_mode) {
			cr &= ~((PCIIDE_INTERFACE_PCI(0)
				| PCIIDE_INTERFACE_PCI(1))
				<< PCI_INTERFACE_SHIFT);
		}
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
	/* Don't use cr, re-read the real register content instead */
	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_CLASS_REG));

	/* From linux: enable "Cable Detection" */
	if (rev >= 0xC2) {
		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
		    | ACER_0x4B_CDETECT);
	}

	wdc_allocate_regs(&sc->sc_wdcdev);
	if (rev == 0xC3) {
		/* install reset bug workaround */
		if (pci_find_device(&acer_sc->pcib_pa, acer_pcib_match) == 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "WARNING: can't find pci-isa bridge\n");
		} else
			sc->sc_wdcdev.reset = acer_do_reset;
	}

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
		pciide_mapchan(pa, cp, interface,
		     (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
	}
}

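/*
 * Channel reset hook, installed only for revision 0xC3 controllers
 * (see acer_chip_map()).
 */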
static void
acer_do_reset(struct ata_channel *chp, int poll)
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
	struct aceride_softc *acer_sc = (struct aceride_softc *)sc;
	u_int8_t reg;

	/*
	 * From OpenSolaris: after a reset we need to disable/enable the
	 * corresponding channel, or data corruption will occur in
	 * UltraDMA modes
	 */

	wdc_do_reset(chp, poll);
	reg = pciide_pci_read(acer_sc->pcib_pa.pa_pc, acer_sc->pcib_pa.pa_tag,
	    ACER_PCIB_CTRL);
	pciide_pci_write(acer_sc->pcib_pa.pa_pc, acer_sc->pcib_pa.pa_tag,
	    ACER_PCIB_CTRL, reg & ~ACER_PCIB_CTRL_ENCHAN(chp->ch_channel));
	delay(1000);
	pciide_pci_write(acer_sc->pcib_pa.pa_pc, acer_sc->pcib_pa.pa_tag,
	    ACER_PCIB_CTRL, reg);
}

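/*
 * Per-channel mode setup: program the FIFO thresholds, the Ultra-DMA
 * enable/timing bits and the PIO/DMA timing registers for each drive
 * on the channel.
 */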
static void
acer_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	int drive, s;
	u_int32_t acer_fifo_udma;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	idedma_ctl = 0;
	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
	ATADEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
	    ATA_DRIVE_UDMA) { /* check for an 80-pin cable */
		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
		    ACER_0x4A_80PIN(chp->ch_channel)) {
			if (chp->ch_drive[0].UDMA_mode > 2)
				chp->ch_drive[0].UDMA_mode = 2;
			if (chp->ch_drive[1].UDMA_mode > 2)
				chp->ch_drive[1].UDMA_mode = 2;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		ATADEBUG_PRINT(("acer_setup_channel: old timings reg for "
		    "channel %d drive %d 0x%x\n", chp->ch_channel, drive,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->ch_channel, drive))), DEBUG_PROBE);
		/* clear FIFO/DMA mode */
		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->ch_channel, drive, 0x3) |
		    ACER_UDMA_EN(chp->ch_channel, drive) |
		    ACER_UDMA_TIM(chp->ch_channel, drive, 0x7));

		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & ATA_DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & ATA_DRIVE_UDMA) == 0) {
			acer_fifo_udma |=
			    ACER_FTH_OPL(chp->ch_channel, drive, 0x1);
			goto pio;
		}

		acer_fifo_udma |= ACER_FTH_OPL(chp->ch_channel, drive, 0x2);
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
			acer_fifo_udma |= ACER_UDMA_EN(chp->ch_channel, drive);
			acer_fifo_udma |=
			    ACER_UDMA_TIM(chp->ch_channel, drive,
				acer_udma[drvp->UDMA_mode]);
			/* XXX disable if one drive < UDMA3 ? */
			if (drvp->UDMA_mode >= 3) {
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    ACER_0x4B,
				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
					ACER_0x4B) | ACER_0x4B_UDMA66);
			}
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->ch_channel, drive),
		    acer_pio[drvp->PIO_mode]);
	}
	ATADEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}

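/*
 * Interrupt handler used on controllers older than revision 0xC2:
 * the ACER_CHIDS register indicates which channel(s) raised the
 * interrupt.
 */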
static int
acer_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	int i, rv, crv;
	u_int32_t chids;

	rv = 0;
	chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
	for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->ata_channel;
		/* If a compat channel, skip. */
		if (cp->compat)
			continue;
		if (chids & ACER_CHIDS_INT(i)) {
			crv = wdcintr(wdc_cp);
			if (crv == 0) {
				aprint_error("%s:%d: bogus intr\n",
				    device_xname(
				      sc->sc_wdcdev.sc_atac.atac_dev), i);
				pciide_irqack(wdc_cp);
			} else
				rv = 1;
		}
	}
	return rv;
}

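/*
 * dma_init hook installed on revisions <= 0xC4: reject LBA48
 * transfers so they fall back to PIO, working around the 48-bit DMA
 * bug noted in acer_chip_map().
 */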
static int
acer_dma_init(void *v, int channel, int drive, void *databuf,
    size_t datalen, int flags)
{

	/* use PIO for LBA48 transfers */
	if (flags & WDC_DMA_LBA48)
		return EINVAL;

	return pciide_dma_init(v, channel, drive, databuf, datalen, flags);
}