/*	$NetBSD: if_vr.c,v 1.138 2024/06/29 12:11:12 riastradh Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism also has the same flaw.  This
 * means that on systems with struct alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.
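 *
 * (The shift works because the Ethernet header is 14 bytes long: with the
 * frame starting 2 bytes into a longword-aligned buffer, the payload that
 * follows the header lands back on a 4-byte boundary.  See the copy done
 * in vr_rxeof() below.)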
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vr.c,v 1.138 2024/06/29 12:11:12 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

#define VR_USEIOSPACE

/*
 * Various supported device vendors/types and their names.
 */
static const struct device_compatible_entry compat_data[] = {
        { .id = PCI_ID_CODE(PCI_VENDOR_VIATECH,
                    PCI_PRODUCT_VIATECH_VT3043) },

        { .id = PCI_ID_CODE(PCI_VENDOR_VIATECH,
                    PCI_PRODUCT_VIATECH_VT6102) },

        { .id = PCI_ID_CODE(PCI_VENDOR_VIATECH,
                    PCI_PRODUCT_VIATECH_VT6105) },

        { .id = PCI_ID_CODE(PCI_VENDOR_VIATECH,
                    PCI_PRODUCT_VIATECH_VT6105M) },

        { .id = PCI_ID_CODE(PCI_VENDOR_VIATECH,
                    PCI_PRODUCT_VIATECH_VT86C100A) },

        PCI_COMPAT_EOL
};

/*
 * Transmit descriptor list size.
 */
#define VR_NTXDESC              64
#define VR_NTXDESC_MASK         (VR_NTXDESC - 1)
#define VR_NEXTTX(x)            (((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define VR_NRXDESC              64
#define VR_NRXDESC_MASK         (VR_NRXDESC - 1)
#define VR_NEXTRX(x)            (((x) + 1) & VR_NRXDESC_MASK)
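
/*
 * Example: with VR_NTXDESC == 64, VR_NEXTTX(63) == (64 & 63) == 0, so the
 * index arithmetic wraps around the ring without a modulo operation.  This
 * only works because the ring sizes above are powers of two.
 */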

/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
        struct vr_desc          vr_txdescs[VR_NTXDESC];
        struct vr_desc          vr_rxdescs[VR_NRXDESC];
};

#define VR_CDOFF(x)     offsetof(struct vr_control_data, x)
#define VR_CDTXOFF(x)   VR_CDOFF(vr_txdescs[(x)])
#define VR_CDRXOFF(x)   VR_CDOFF(vr_rxdescs[(x)])

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
        struct mbuf *ds_mbuf;           /* head of mbuf chain */
        bus_dmamap_t ds_dmamap;         /* our DMA map */
};

struct vr_softc {
        device_t vr_dev;
        void *vr_ih;                    /* interrupt cookie */
        bus_space_tag_t vr_bst;         /* bus space tag */
        bus_space_handle_t vr_bsh;      /* bus space handle */
        bus_dma_tag_t vr_dmat;          /* bus DMA tag */
        pci_chipset_tag_t vr_pc;        /* PCI chipset info */
        pcitag_t vr_tag;                /* PCI tag */
        struct ethercom vr_ec;          /* Ethernet common info */
        uint8_t vr_enaddr[ETHER_ADDR_LEN];
        struct mii_data vr_mii;         /* MII/media info */

        pcireg_t vr_id;                 /* vendor/product ID */
        uint8_t vr_revid;               /* Rhine chip revision */

        callout_t vr_tick_ch;           /* tick callout */

        bus_dmamap_t vr_cddmamap;       /* control data DMA map */
#define vr_cddma        vr_cddmamap->dm_segs[0].ds_addr

        /*
         * Software state for transmit and receive descriptors.
         */
        struct vr_descsoft vr_txsoft[VR_NTXDESC];
        struct vr_descsoft vr_rxsoft[VR_NRXDESC];

        /*
         * Control data structures.
         */
        struct vr_control_data *vr_control_data;

        int     vr_txpending;           /* number of TX requests pending */
        int     vr_txdirty;             /* first dirty TX descriptor */
        int     vr_txlast;              /* last used TX descriptor */

        int     vr_rxptr;               /* next ready RX descriptor */

        uint32_t vr_save_iobase;
        uint32_t vr_save_membase;
        uint32_t vr_save_irq;

        bool    vr_link;
        int     vr_flags;
#define VR_F_RESTART    0x1             /* restart on next tick */
        u_short vr_if_flags;

        krndsource_t rnd_source;        /* random source */
};

#define VR_CDTXADDR(sc, x)      ((sc)->vr_cddma + VR_CDTXOFF((x)))
#define VR_CDRXADDR(sc, x)      ((sc)->vr_cddma + VR_CDRXOFF((x)))

#define VR_CDTX(sc, x)          (&(sc)->vr_control_data->vr_txdescs[(x)])
#define VR_CDRX(sc, x)          (&(sc)->vr_control_data->vr_rxdescs[(x)])

#define VR_DSTX(sc, x)          (&(sc)->vr_txsoft[(x)])
#define VR_DSRX(sc, x)          (&(sc)->vr_rxsoft[(x)])

#define VR_CDTXSYNC(sc, x, ops)                                         \
        bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,               \
            VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define VR_CDRXSYNC(sc, x, ops)                                         \
        bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,               \
            VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))

/*
 * Note we rely on MCLBYTES being a power of two below.
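 *
 * With MCLBYTES at its usual value of 2048, MCLBYTES - 1 == 2047 is the
 * largest value the 11-bit VR_RXCTL_BUFLEN field can hold (see the comment
 * above vr_add_rxbuf()), which is why the masking in VR_INIT_RXDESC() is
 * safe.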
 */
#define VR_INIT_RXDESC(sc, i)                                           \
do {                                                                    \
        struct vr_desc *__d = VR_CDRX((sc), (i));                       \
        struct vr_descsoft *__ds = VR_DSRX((sc), (i));                  \
                                                                        \
        __d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));      \
        __d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);    \
        __d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |       \
            ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));                        \
        __d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |                  \
            VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);                        \
        VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/* CONSTCOND */ 0)

/*
 * register space access macros
 */
#define CSR_WRITE_4(sc, reg, val)       \
        bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define CSR_WRITE_2(sc, reg, val)       \
        bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define CSR_WRITE_1(sc, reg, val)       \
        bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define CSR_READ_4(sc, reg)             \
        bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define CSR_READ_2(sc, reg)             \
        bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define CSR_READ_1(sc, reg)             \
        bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

#define VR_TIMEOUT              1000

static int      vr_add_rxbuf(struct vr_softc *, int);

static void     vr_rxeof(struct vr_softc *);
static void     vr_rxeoc(struct vr_softc *);
static void     vr_txeof(struct vr_softc *);
static int      vr_intr(void *);
static void     vr_start(struct ifnet *);
static int      vr_ioctl(struct ifnet *, u_long, void *);
static int      vr_init(struct ifnet *);
static void     vr_stop(struct ifnet *, int);
static void     vr_rxdrain(struct vr_softc *);
static void     vr_watchdog(struct ifnet *);
static void     vr_tick(void *);

static int      vr_mii_readreg(device_t, int, int, uint16_t *);
static int      vr_mii_writereg(device_t, int, int, uint16_t);
static void     vr_mii_statchg(struct ifnet *);

static void     vr_setmulti(struct vr_softc *);
static void     vr_reset(struct vr_softc *);
static int      vr_restore_state(pci_chipset_tag_t, pcitag_t, device_t,
    pcireg_t);
static bool     vr_resume(device_t, const pmf_qual_t *);

int     vr_copy_small = 0;

#define VR_SETBIT(sc, reg, x)                   \
        CSR_WRITE_1(sc, reg,                    \
            CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)                   \
        CSR_WRITE_1(sc, reg,                    \
            CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)                 \
        CSR_WRITE_2(sc, reg,                    \
            CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)                 \
        CSR_WRITE_2(sc, reg,                    \
            CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)                 \
        CSR_WRITE_4(sc, reg,                    \
            CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)                 \
        CSR_WRITE_4(sc, reg,                    \
            CSR_READ_4(sc, reg) & ~(x))

/*
 * MII bit-bang glue.
 */
static uint32_t vr_mii_bitbang_read(device_t);
static void vr_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops vr_mii_bitbang_ops = {
        vr_mii_bitbang_read,
        vr_mii_bitbang_write,
        {
                VR_MIICMD_DATAOUT,      /* MII_BIT_MDO */
                VR_MIICMD_DATAIN,       /* MII_BIT_MDI */
                VR_MIICMD_CLK,          /* MII_BIT_MDC */
                VR_MIICMD_DIR,          /* MII_BIT_DIR_HOST_PHY */
                0,                      /* MII_BIT_DIR_PHY_HOST */
        }
};

static uint32_t
vr_mii_bitbang_read(device_t self)
{
        struct vr_softc *sc = device_private(self);

        return (CSR_READ_1(sc, VR_MIICMD));
}

static void
vr_mii_bitbang_write(device_t self, uint32_t val)
{
        struct vr_softc *sc = device_private(self);

        CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
}

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
        struct vr_softc *sc = device_private(self);

        CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
        return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg, val));
}

/*
 * Write to a PHY register through the MII.
 */
static int
vr_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
        struct vr_softc *sc = device_private(self);

        CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
        return mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
}

static void
vr_mii_statchg(struct ifnet *ifp)
{
        struct vr_softc *sc = ifp->if_softc;
        int i;

        /*
         * In order to fiddle with the 'full-duplex' bit in the netconfig
         * register, we first have to put the transmit and/or receive logic
         * in the idle state.
         */
        if ((sc->vr_mii.mii_media_status & IFM_ACTIVE) &&
            IFM_SUBTYPE(sc->vr_mii.mii_media_active) != IFM_NONE) {
                sc->vr_link = true;

                if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON | VR_CMD_RX_ON))
                        VR_CLRBIT16(sc, VR_COMMAND,
                            (VR_CMD_TX_ON | VR_CMD_RX_ON));

                if (sc->vr_mii.mii_media_active & IFM_FDX)
                        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
                else
                        VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON | VR_CMD_RX_ON);
        } else {
                sc->vr_link = false;
                VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_TX_ON | VR_CMD_RX_ON);
                for (i = VR_TIMEOUT; i > 0; i--) {
                        delay(10);
                        if (!(CSR_READ_2(sc, VR_COMMAND) &
                            (VR_CMD_TX_ON | VR_CMD_RX_ON)))
                                break;
                }
                if (i == 0) {
#ifdef VR_DEBUG
                        aprint_error_dev(sc->vr_dev, "rx shutdown error!\n");
#endif
                        sc->vr_flags |= VR_F_RESTART;
                }
        }
}

#define vr_calchash(addr) \
        (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
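
/*
 * vr_calchash() keeps the top 6 bits of the big-endian CRC32 of the
 * station address, giving a value in the range 0-63: values 0-31 select
 * a bit in VR_MAR0 and values 32-63 a bit in VR_MAR1 (see vr_setmulti()
 * below).
 */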

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(struct vr_softc *sc)
{
        struct ethercom *ec = &sc->vr_ec;
        struct ifnet *ifp = &ec->ec_if;
        int h = 0;
        uint32_t hashes[2] = { 0, 0 };
        struct ether_multistep step;
        struct ether_multi *enm;
        int mcnt = 0;
        uint8_t rxfilt;

        rxfilt = CSR_READ_1(sc, VR_RXCFG);

        if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
                ifp->if_flags |= IFF_ALLMULTI;
                rxfilt |= VR_RXCFG_RX_MULTI;
                CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
                CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
                CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
                return;
        }

        /* first, zot all the existing hash bits */
        CSR_WRITE_4(sc, VR_MAR0, 0);
        CSR_WRITE_4(sc, VR_MAR1, 0);

        /* now program new ones */
        ETHER_LOCK(ec);
        ETHER_FIRST_MULTI(step, ec, enm);
        while (enm != NULL) {
                if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
                    ETHER_ADDR_LEN) != 0) {
                        ETHER_UNLOCK(ec);
                        goto allmulti;
                }

                h = vr_calchash(enm->enm_addrlo);

                if (h < 32)
                        hashes[0] |= (1 << h);
                else
                        hashes[1] |= (1 << (h - 32));
                ETHER_NEXT_MULTI(step, enm);
                mcnt++;
        }
        ETHER_UNLOCK(ec);

        ifp->if_flags &= ~IFF_ALLMULTI;

        if (mcnt)
                rxfilt |= VR_RXCFG_RX_MULTI;
        else
                rxfilt &= ~VR_RXCFG_RX_MULTI;

        CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
        CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
        CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(struct vr_softc *sc)
{
        int i;

        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

        for (i = 0; i < VR_TIMEOUT; i++) {
                DELAY(10);
                if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
                        break;
        }
        if (i == VR_TIMEOUT) {
                if (sc->vr_revid < REV_ID_VT3065_A) {
                        aprint_error_dev(sc->vr_dev,
                            "reset never completed!\n");
                } else {
                        /* Use newer force reset command */
                        aprint_normal_dev(sc->vr_dev,
                            "using force reset command.\n");
                        VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
                }
        }

        /* Wait a little while for the chip to get its brains in order. */
        DELAY(1000);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_add_rxbuf(struct vr_softc *sc, int i)
{
        struct vr_descsoft *ds = VR_DSRX(sc, i);
        struct mbuf *m_new;
        int error;

        MGETHDR(m_new, M_DONTWAIT, MT_DATA);
        if (m_new == NULL)
                return (ENOBUFS);

        MCLGET(m_new, M_DONTWAIT);
        if ((m_new->m_flags & M_EXT) == 0) {
                m_freem(m_new);
                return (ENOBUFS);
        }

        if (ds->ds_mbuf != NULL)
                bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

        ds->ds_mbuf = m_new;

        error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
            m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL,
            BUS_DMA_READ | BUS_DMA_NOWAIT);
        if (error) {
                aprint_error_dev(sc->vr_dev,
                    "unable to load rx DMA map %d, error = %d\n", i, error);
                panic("vr_add_rxbuf");  /* XXX */
        }

        bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
            ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

        VR_INIT_RXDESC(sc, i);

        return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
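 *
 * Descriptors still owned by the chip (VR_RXSTAT_OWN set) mark the end of
 * the completed work; we stop there and pick up again at sc->vr_rxptr on
 * the next call.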
 */
static void
vr_rxeof(struct vr_softc *sc)
{
        struct mbuf *m;
        struct ifnet *ifp;
        struct vr_desc *d;
        struct vr_descsoft *ds;
        int i, total_len;
        uint32_t rxstat;

        ifp = &sc->vr_ec.ec_if;

        for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
                d = VR_CDRX(sc, i);
                ds = VR_DSRX(sc, i);

                VR_CDRXSYNC(sc, i,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

                rxstat = le32toh(d->vr_status);

                if (rxstat & VR_RXSTAT_OWN) {
                        /*
                         * We have processed all of the receive buffers.
                         */
                        break;
                }

                /*
                 * If an error occurs, update stats, clear the
                 * status word and leave the mbuf cluster in place:
                 * it should simply get re-used next time this descriptor
                 * comes up in the ring.
                 */
                if (rxstat & VR_RXSTAT_RXERR) {
                        const char *errstr;

                        if_statinc(ifp, if_ierrors);
                        switch (rxstat & 0x000000FF) {
                        case VR_RXSTAT_CRCERR:
                                errstr = "crc error";
                                break;
                        case VR_RXSTAT_FRAMEALIGNERR:
                                errstr = "frame alignment error";
                                break;
                        case VR_RXSTAT_FIFOOFLOW:
                                errstr = "FIFO overflow";
                                break;
                        case VR_RXSTAT_GIANT:
                                errstr = "received giant packet";
                                break;
                        case VR_RXSTAT_RUNT:
                                errstr = "received runt packet";
                                break;
                        case VR_RXSTAT_BUSERR:
                                errstr = "system bus error";
                                break;
                        case VR_RXSTAT_BUFFERR:
                                errstr = "rx buffer error";
                                break;
                        default:
                                errstr = "unknown rx error";
                                break;
                        }
                        aprint_error_dev(sc->vr_dev, "receive error: %s\n",
                            errstr);

                        VR_INIT_RXDESC(sc, i);

                        continue;
                } else if (!(rxstat & VR_RXSTAT_FIRSTFRAG) ||
                    !(rxstat & VR_RXSTAT_LASTFRAG)) {
                        /*
                         * This driver expects to receive whole packets every
                         * time.  In case we receive a fragment that is not
                         * a complete packet, we discard it.
                         */
                        if_statinc(ifp, if_ierrors);

                        aprint_error_dev(sc->vr_dev,
                            "receive error: incomplete frame; "
                            "size = %d, status = 0x%x\n",
                            VR_RXBYTES(le32toh(d->vr_status)), rxstat);

                        VR_INIT_RXDESC(sc, i);

                        continue;
                }

                bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
                    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

                /* No errors; receive the packet. */
                total_len = VR_RXBYTES(le32toh(d->vr_status));
#ifdef DIAGNOSTIC
                if (total_len == 0) {
                        /*
                         * If we receive a zero-length packet, we probably
                         * failed to handle an error condition above.
                         * Discard it to avoid a later crash.
                         */
                        if_statinc(ifp, if_ierrors);

                        aprint_error_dev(sc->vr_dev,
                            "receive error: zero-length packet; "
                            "status = 0x%x\n", rxstat);

                        VR_INIT_RXDESC(sc, i);

                        continue;
                }
#endif

                /*
                 * The Rhine chip includes the CRC with every packet.
                 * Trim it off here.
                 */
                total_len -= ETHER_CRC_LEN;

#ifdef __NO_STRICT_ALIGNMENT
                /*
                 * If the packet is small enough to fit in a
                 * single header mbuf, allocate one and copy
                 * the data into it.  This greatly reduces
                 * memory consumption when we receive lots
                 * of small packets.
                 *
                 * Otherwise, we add a new buffer to the receive
                 * chain.  If this fails, we drop the packet and
                 * recycle the old buffer.
                 */
                if (vr_copy_small != 0 && total_len <= MHLEN) {
                        MGETHDR(m, M_DONTWAIT, MT_DATA);
                        if (m == NULL)
                                goto dropit;
                        memcpy(mtod(m, void *),
                            mtod(ds->ds_mbuf, void *), total_len);
                        VR_INIT_RXDESC(sc, i);
                        bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
                            ds->ds_dmamap->dm_mapsize,
                            BUS_DMASYNC_PREREAD);
                } else {
                        m = ds->ds_mbuf;
                        if (vr_add_rxbuf(sc, i) == ENOBUFS) {
 dropit:
                                if_statinc(ifp, if_ierrors);
                                VR_INIT_RXDESC(sc, i);
                                bus_dmamap_sync(sc->vr_dmat,
                                    ds->ds_dmamap, 0,
                                    ds->ds_dmamap->dm_mapsize,
                                    BUS_DMASYNC_PREREAD);
                                continue;
                        }
                }
#else
                /*
                 * The Rhine's packet buffers must be 4-byte aligned.
                 * But this means that the data after the Ethernet header
                 * is misaligned.  We must allocate a new buffer and
                 * copy the data, shifted forward 2 bytes.
                 */
                MGETHDR(m, M_DONTWAIT, MT_DATA);
                if (m == NULL) {
 dropit:
                        if_statinc(ifp, if_ierrors);
                        VR_INIT_RXDESC(sc, i);
                        bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
                            ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
                        continue;
                }
                if (total_len > (MHLEN - 2)) {
                        MCLGET(m, M_DONTWAIT);
                        if ((m->m_flags & M_EXT) == 0) {
                                m_freem(m);
                                goto dropit;
                        }
                }
                m->m_data += 2;

                /*
                 * Note that we use clusters for incoming frames, so the
                 * buffer is virtually contiguous.
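                 * (vr_add_rxbuf() loaded the whole cluster into the DMA map
                 * starting at its first byte, so one memcpy() of total_len
                 * bytes below copies the complete frame.)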
                 */
                memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
                    total_len);

                /* Allow the receive descriptor to continue using its mbuf. */
                VR_INIT_RXDESC(sc, i);
                bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
                    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

                m_set_rcvif(m, ifp);
                m->m_pkthdr.len = m->m_len = total_len;
                /* Pass it on. */
                if_percpuq_enqueue(ifp->if_percpuq, m);
        }

        /* Update the receive pointer. */
        sc->vr_rxptr = i;
}

void
vr_rxeoc(struct vr_softc *sc)
{
        struct ifnet *ifp;
        int i;

        ifp = &sc->vr_ec.ec_if;

        if_statinc(ifp, if_ierrors);

        VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
        for (i = 0; i < VR_TIMEOUT; i++) {
                DELAY(10);
                if ((CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON) == 0)
                        break;
        }
        if (i == VR_TIMEOUT) {
                /* XXX need reset? */
                aprint_error_dev(sc->vr_dev, "RX shutdown never completed\n");
        }

        vr_rxeof(sc);

        CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
        struct ifnet *ifp = &sc->vr_ec.ec_if;
        struct vr_desc *d;
        struct vr_descsoft *ds;
        uint32_t txstat;
        int i, j;

        /*
         * Go through our tx list and free mbufs for those
         * frames that have been transmitted.
         */
        for (i = sc->vr_txdirty; sc->vr_txpending != 0;
            i = VR_NEXTTX(i), sc->vr_txpending--) {
                d = VR_CDTX(sc, i);
                ds = VR_DSTX(sc, i);

                VR_CDTXSYNC(sc, i,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

                txstat = le32toh(d->vr_status);

                if (txstat & (VR_TXSTAT_ABRT | VR_TXSTAT_UDF)) {
                        VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
                        for (j = 0; j < VR_TIMEOUT; j++) {
                                DELAY(10);
                                if ((CSR_READ_2(sc, VR_COMMAND) &
                                    VR_CMD_TX_ON) == 0)
                                        break;
                        }
                        if (j == VR_TIMEOUT) {
                                /* XXX need reset? */
                                aprint_error_dev(sc->vr_dev,
                                    "TX shutdown never completed\n");
                        }
                        d->vr_status = htole32(VR_TXSTAT_OWN);
                        CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, i));
                        break;
                }

                if (txstat & VR_TXSTAT_OWN)
                        break;

                bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
                    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
                m_freem(ds->ds_mbuf);
                ds->ds_mbuf = NULL;

                net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
                if (txstat & VR_TXSTAT_ERRSUM) {
                        if_statinc_ref(ifp, nsr, if_oerrors);
                        if (txstat & VR_TXSTAT_DEFER)
                                if_statinc_ref(ifp, nsr, if_collisions);
                        if (txstat & VR_TXSTAT_LATECOLL)
                                if_statinc_ref(ifp, nsr, if_collisions);
                }

                if_statadd_ref(ifp, nsr, if_collisions,
                    (txstat & VR_TXSTAT_COLLCNT) >> 3);
                if_statinc_ref(ifp, nsr, if_opackets);
                IF_STAT_PUTREF(ifp);
        }

        /* Update the dirty transmit buffer pointer. */
        sc->vr_txdirty = i;

        /*
         * Cancel the watchdog timer if there are no pending
         * transmissions.
         */
        if (sc->vr_txpending == 0)
                ifp->if_timer = 0;
}

static int
vr_intr(void *arg)
{
        struct vr_softc *sc;
        struct ifnet *ifp;
        uint16_t status;
        int handled = 0, dotx = 0;

        sc = arg;
        ifp = &sc->vr_ec.ec_if;

        /* Suppress unwanted interrupts. */
        if ((ifp->if_flags & IFF_UP) == 0) {
                vr_stop(ifp, 1);
                return (0);
        }

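        /*
         * Service loop: read VR_ISR and write the value back to acknowledge
         * the bits about to be handled, repeating until no interrupt
         * sources in VR_INTRS remain asserted.
         */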
        /* Disable interrupts. */
        CSR_WRITE_2(sc, VR_IMR, 0x0000);

        for (;;) {
                status = CSR_READ_2(sc, VR_ISR);
                if (status)
                        CSR_WRITE_2(sc, VR_ISR, status);

                if ((status & VR_INTRS) == 0)
                        break;

                handled = 1;

                rnd_add_uint32(&sc->rnd_source, status);

                if (status & VR_ISR_RX_OK)
                        vr_rxeof(sc);

                if (status & VR_ISR_RX_DROPPED) {
                        aprint_error_dev(sc->vr_dev, "rx packet lost\n");
                        if_statinc(ifp, if_ierrors);
                }

                if (status &
                    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW))
                        vr_rxeoc(sc);

                if (status & (VR_ISR_BUSERR | VR_ISR_TX_UNDERRUN)) {
                        if (status & VR_ISR_BUSERR)
                                aprint_error_dev(sc->vr_dev, "PCI bus error\n");
                        if (status & VR_ISR_TX_UNDERRUN)
                                aprint_error_dev(sc->vr_dev,
                                    "transmit underrun\n");
                        /* vr_init() calls vr_start() */
                        dotx = 0;
                        (void)vr_init(ifp);
                }

                if (status & VR_ISR_TX_OK) {
                        dotx = 1;
                        vr_txeof(sc);
                }

                if (status &
                    (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2 | VR_ISR_TX_UDFI)) {
                        if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
                                aprint_error_dev(sc->vr_dev,
                                    "transmit aborted\n");
                        if (status & VR_ISR_TX_UDFI)
                                aprint_error_dev(sc->vr_dev,
                                    "transmit underflow\n");
                        if_statinc(ifp, if_oerrors);
                        dotx = 1;
                        vr_txeof(sc);
                        if (sc->vr_txpending) {
                                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
                                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
                        }
                }
        }

        /* Re-enable interrupts. */
        CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

        if (dotx)
                if_schedule_deferred_start(ifp);

        return (handled);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
vr_start(struct ifnet *ifp)
{
        struct vr_softc *sc = ifp->if_softc;
        struct mbuf *m0, *m;
        struct vr_desc *d;
        struct vr_descsoft *ds;
        int error, firsttx, nexttx, opending;

        if ((ifp->if_flags & IFF_RUNNING) == 0)
                return;
        if (sc->vr_link == false)
                return;

        /*
         * Remember the previous txpending and the first transmit
         * descriptor we use.
         */
        opending = sc->vr_txpending;
        firsttx = VR_NEXTTX(sc->vr_txlast);

        /*
         * Loop through the send queue, setting up transmit descriptors
         * until we drain the queue, or use up all available transmit
         * descriptors.
         */
        while (sc->vr_txpending < VR_NTXDESC) {
                /*
                 * Grab a packet off the queue.
                 */
                IFQ_POLL(&ifp->if_snd, m0);
                if (m0 == NULL)
                        break;
                m = NULL;

                /*
                 * Get the next available transmit descriptor.
                 */
                nexttx = VR_NEXTTX(sc->vr_txlast);
                d = VR_CDTX(sc, nexttx);
                ds = VR_DSTX(sc, nexttx);

                /*
                 * Load the DMA map.  If this fails, the packet didn't
                 * fit in one DMA segment, and we need to copy.  Note,
                 * the packet must also be aligned.  If the packet is
                 * too small, copy it too, so we're sure we have enough
                 * room for the pad buffer.
                 */
                if ((mtod(m0, uintptr_t) & 3) != 0 ||
                    m0->m_pkthdr.len < VR_MIN_FRAMELEN ||
                    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
                    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
                        MGETHDR(m, M_DONTWAIT, MT_DATA);
                        if (m == NULL) {
                                aprint_error_dev(sc->vr_dev,
                                    "unable to allocate Tx mbuf\n");
                                break;
                        }
                        if (m0->m_pkthdr.len > MHLEN) {
                                MCLGET(m, M_DONTWAIT);
                                if ((m->m_flags & M_EXT) == 0) {
                                        aprint_error_dev(sc->vr_dev,
                                            "unable to allocate Tx cluster\n");
                                        m_freem(m);
                                        break;
                                }
                        }
                        m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
                        m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
                        /*
                         * The Rhine doesn't auto-pad, so we have to do this
                         * ourselves.
                         */
                        if (m0->m_pkthdr.len < VR_MIN_FRAMELEN) {
                                memset(mtod(m, char *) + m0->m_pkthdr.len,
                                    0, VR_MIN_FRAMELEN - m0->m_pkthdr.len);
                                m->m_pkthdr.len = m->m_len = VR_MIN_FRAMELEN;
                        }
                        error = bus_dmamap_load_mbuf(sc->vr_dmat,
                            ds->ds_dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
                        if (error) {
                                m_freem(m);
                                aprint_error_dev(sc->vr_dev, "unable to load "
                                    "Tx buffer, error = %d\n", error);
                                break;
                        }
                }

                IFQ_DEQUEUE(&ifp->if_snd, m0);
                if (m != NULL) {
                        m_freem(m0);
                        m0 = m;
                }

                /* Sync the DMA map. */
                bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
                    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

                /*
                 * Store a pointer to the packet so we can free it later.
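                 * (vr_txeof() releases it once the Rhine hands the
                 * descriptor back to us.)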
                 */
                ds->ds_mbuf = m0;

                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                bpf_mtap(ifp, m0, BPF_D_OUT);

                /*
                 * Fill in the transmit descriptor.
                 */
                d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
                d->vr_ctl = htole32(m0->m_pkthdr.len);
                d->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG | VR_TXCTL_LASTFRAG);

                /*
                 * If this is the first descriptor we're enqueuing,
                 * don't give it to the Rhine yet.  That could cause
                 * a race condition.  We'll do it below.
                 */
                if (nexttx == firsttx)
                        d->vr_status = 0;
                else
                        d->vr_status = htole32(VR_TXSTAT_OWN);

                VR_CDTXSYNC(sc, nexttx,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

                /* Advance the tx pointer. */
                sc->vr_txpending++;
                sc->vr_txlast = nexttx;
        }

        if (sc->vr_txpending != opending) {
                /*
                 * We enqueued packets.  If the transmitter was idle,
                 * reset the txdirty pointer.
                 */
                if (opending == 0)
                        sc->vr_txdirty = firsttx;

                /*
                 * Cause a transmit interrupt to happen on the
                 * last packet we enqueued.
                 */
                VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
                VR_CDTXSYNC(sc, sc->vr_txlast,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

                /*
                 * The entire packet chain is set up.  Give the
                 * first descriptor to the Rhine now.
                 */
                VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
                VR_CDTXSYNC(sc, firsttx,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

                /* Start the transmitter. */
                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);

                /* Set the watchdog timer in case the chip flakes out. */
                ifp->if_timer = 5;
        }
}

/*
 * Initialize the interface.  Must be called at splnet.
 */
static int
vr_init(struct ifnet *ifp)
{
        struct vr_softc *sc = ifp->if_softc;
        struct vr_desc *d;
        struct vr_descsoft *ds;
        int i, error = 0;

        /* Cancel pending I/O. */
        vr_stop(ifp, 0);

        /* Reset the Rhine to a known state. */
        vr_reset(sc);

        /* set DMA length in BCR0 and BCR1 */
        VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
        VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

        VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
        VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTH_128BYTES);

        VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
        VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTH_STORENFWD);

        /* set DMA threshold length in RXCFG and TXCFG */
        VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
        VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

        VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
        VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

        /*
         * Initialize the transmit descriptor ring.  txlast is initialized
         * to the end of the list so that it will wrap around to the first
         * descriptor when the first packet is transmitted.
         */
        for (i = 0; i < VR_NTXDESC; i++) {
                d = VR_CDTX(sc, i);
                memset(d, 0, sizeof(struct vr_desc));
                d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
                VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        }
        sc->vr_txpending = 0;
        sc->vr_txdirty = 0;
        sc->vr_txlast = VR_NTXDESC - 1;

        /*
         * Initialize the receive descriptor ring.
         */
        for (i = 0; i < VR_NRXDESC; i++) {
                ds = VR_DSRX(sc, i);
                if (ds->ds_mbuf == NULL) {
                        if ((error = vr_add_rxbuf(sc, i)) != 0) {
                                aprint_error_dev(sc->vr_dev,
                                    "unable to allocate or map rx buffer %d, "
                                    "error = %d\n", i, error);
                                /*
                                 * XXX Should attempt to run with fewer receive
                                 * XXX buffers instead of just failing.
                                 */
                                vr_rxdrain(sc);
                                goto out;
                        }
                } else
                        VR_INIT_RXDESC(sc, i);
        }
        sc->vr_rxptr = 0;

        /* If we want promiscuous mode, set the allframes bit. */
        if (ifp->if_flags & IFF_PROMISC)
                VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
        else
                VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

        /* Set capture broadcast bit to capture broadcast frames. */
        if (ifp->if_flags & IFF_BROADCAST)
                VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
        else
                VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

        /* Program the multicast filter, if necessary. */
        vr_setmulti(sc);

        /* Give the transmit and receive rings to the Rhine. */
        CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
        CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));
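
        /*
         * At this point vr_txlast == VR_NTXDESC - 1, so VR_NEXTTX(vr_txlast)
         * is descriptor 0: the chip's TX list pointer and the first
         * descriptor handed out by vr_start() start out in step.
         */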
        /* Set current media. */
        sc->vr_link = true;
        if ((error = ether_mediachange(ifp)) != 0)
                goto out;

        /* Enable receiver and transmitter. */
        CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL | VR_CMD_START |
            VR_CMD_TX_ON | VR_CMD_RX_ON |
            VR_CMD_RX_GO);

        /* Enable interrupts. */
        CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
        CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

        ifp->if_flags |= IFF_RUNNING;

        /* Start one second timer. */
        callout_schedule(&sc->vr_tick_ch, hz);

        /* Attempt to start output on the interface. */
        vr_start(ifp);

 out:
        if (error)
                aprint_error_dev(sc->vr_dev, "interface not running\n");
        return (error);
}

static int
vr_ioctl(struct ifnet *ifp, u_long command, void *data)
{
        struct vr_softc *sc = ifp->if_softc;
        int s, error = 0;

        s = splnet();

        switch (command) {
        case SIOCSIFFLAGS:
                if ((error = ifioctl_common(ifp, command, data)) != 0)
                        break;

                switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
                case IFF_RUNNING:
                        vr_stop(ifp, 1);
                        break;
                case IFF_UP:
                        vr_init(ifp);
                        break;
                case IFF_UP | IFF_RUNNING:
                        if ((ifp->if_flags ^ sc->vr_if_flags) == IFF_PROMISC)
                                vr_setmulti(sc);
                        else
                                vr_init(ifp);
                        break;
                }
                sc->vr_if_flags = ifp->if_flags;
                break;
        default:
                if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
                        break;
                error = 0;
                if (command == SIOCADDMULTI || command == SIOCDELMULTI)
                        vr_setmulti(sc);
        }
        splx(s);

        return error;
}

static void
vr_watchdog(struct ifnet *ifp)
{
        struct vr_softc *sc = ifp->if_softc;

        aprint_error_dev(sc->vr_dev, "device timeout\n");
        if_statinc(ifp, if_oerrors);

        (void) vr_init(ifp);
}

/*
 * One second timer, used to tick MII.
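 * It also restarts the chip (via vr_init()) if vr_mii_statchg() failed to
 * idle the DMA engines and set VR_F_RESTART.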
 */
static void
vr_tick(void *arg)
{
        struct vr_softc *sc = arg;
        int s;

        s = splnet();
        if (sc->vr_flags & VR_F_RESTART) {
                aprint_normal_dev(sc->vr_dev, "restarting\n");
                vr_init(&sc->vr_ec.ec_if);
                sc->vr_flags &= ~VR_F_RESTART;
        }
        mii_tick(&sc->vr_mii);
        splx(s);

        callout_schedule(&sc->vr_tick_ch, hz);
}

/*
 * Drain the receive queue.
 */
static void
vr_rxdrain(struct vr_softc *sc)
{
        struct vr_descsoft *ds;
        int i;

        for (i = 0; i < VR_NRXDESC; i++) {
                ds = VR_DSRX(sc, i);
                if (ds->ds_mbuf != NULL) {
                        bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
                        m_freem(ds->ds_mbuf);
                        ds->ds_mbuf = NULL;
                }
        }
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(struct ifnet *ifp, int disable)
{
        struct vr_softc *sc = ifp->if_softc;
        struct vr_descsoft *ds;
        int i;

        /* Cancel one second timer. */
        callout_stop(&sc->vr_tick_ch);

        /* Down the MII. */
        mii_down(&sc->vr_mii);

        ifp = &sc->vr_ec.ec_if;
        ifp->if_timer = 0;

        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
        VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON | VR_CMD_TX_ON));
        CSR_WRITE_2(sc, VR_IMR, 0x0000);
        CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
        CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

        /*
         * Release any queued transmit buffers.
         */
        for (i = 0; i < VR_NTXDESC; i++) {
                ds = VR_DSTX(sc, i);
                if (ds->ds_mbuf != NULL) {
                        bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
                        m_freem(ds->ds_mbuf);
                        ds->ds_mbuf = NULL;
                }
        }

        /*
         * Mark the interface down and cancel the watchdog timer.
1420 1.1 sakamoto          */
1421 1.136 thorpej         ifp->if_flags &= ~IFF_RUNNING;
1422 1.18 thorpej         ifp->if_timer = 0;
1423 1.90 dyoung
1424 1.90 dyoung         if (disable)
1425 1.90 dyoung                 vr_rxdrain(sc);
1426 1.1 sakamoto }
1427 1.1 sakamoto
1428 1.96 cegger static int vr_probe(device_t, cfdata_t, void *);
1429 1.91 dyoung static void vr_attach(device_t, device_t, void *);
1430 1.98 tsutsui static bool vr_shutdown(device_t, int);
1431 1.2 sakamoto
1432 1.95 joerg CFATTACH_DECL_NEW(vr, sizeof (struct vr_softc),
1433 1.57 thorpej     vr_probe, vr_attach, NULL, NULL);
1434 1.2 sakamoto
1435 1.2 sakamoto static int
1436 1.96 cegger vr_probe(device_t parent, cfdata_t match, void *aux)
1437 1.2 sakamoto {
1438 1.2 sakamoto         struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1439 1.2 sakamoto
1440 1.134 thorpej         return pci_compatible_match(pa, compat_data);
1441 1.2 sakamoto }
1442 1.2 sakamoto
1443 1.2 sakamoto /*
1444 1.2 sakamoto  * Stop all chip I/O so that the kernel's probe routines don't
1445 1.2 sakamoto  * get confused by errant DMAs when rebooting.
1446 1.2 sakamoto  */
1447 1.98 tsutsui static bool
1448 1.98 tsutsui vr_shutdown(device_t self, int howto)
1449 1.2 sakamoto {
1450 1.98 tsutsui         struct vr_softc *sc = device_private(self);
1451 1.2 sakamoto
1452 1.39 thorpej         vr_stop(&sc->vr_ec.ec_if, 1);
1453 1.98 tsutsui
1454 1.98 tsutsui         return true;
1455 1.2 sakamoto }
1456 1.2 sakamoto
1457 1.2 sakamoto /*
1458 1.2 sakamoto  * Attach the interface. Allocate softc structures, do ifmedia
1459 1.2 sakamoto  * setup and ethernet/BPF attach.
1460 1.2 sakamoto  */
1461 1.2 sakamoto static void
1462 1.91 dyoung vr_attach(device_t parent, device_t self, void *aux)
1463 1.2 sakamoto {
1464 1.91 dyoung         struct vr_softc *sc = device_private(self);
1465 1.15 thorpej         struct pci_attach_args *pa = (struct pci_attach_args *) aux;
1466 1.18 thorpej         bus_dma_segment_t seg;
1467 1.83 tsutsui         uint32_t reg;
1468 1.15 thorpej         struct ifnet *ifp;
1469 1.128 msaitoh         struct mii_data * const mii = &sc->vr_mii;
1470 1.83 tsutsui         uint8_t eaddr[ETHER_ADDR_LEN], mac;
1471 1.18 thorpej         int i, rseg, error;
1472 1.113 christos         char intrbuf[PCI_INTRSTR_LEN];
1473 1.15 thorpej
1474 1.76 christos #define PCI_CONF_WRITE(r, v) pci_conf_write(sc->vr_pc, sc->vr_tag, (r), (v))
1475 1.76 christos #define PCI_CONF_READ(r) pci_conf_read(sc->vr_pc, sc->vr_tag, (r))
1476 1.34 thorpej
1477 1.95 joerg         sc->vr_dev = self;
1478 1.76 christos         sc->vr_pc = pa->pa_pc;
1479 1.76 christos         sc->vr_tag = pa->pa_tag;
1480 1.99 jmcneill         sc->vr_id = pa->pa_id;
1481 1.87 ad         callout_init(&sc->vr_tick_ch, 0);
1482 1.133 thorpej         callout_setfunc(&sc->vr_tick_ch, vr_tick, sc);
1483 1.2 sakamoto
1484 1.109 drochner         pci_aprint_devinfo(pa, NULL);
1485 1.2 sakamoto
1486 1.2 sakamoto         /*
1487 1.2 sakamoto          * Handle power management nonsense.
1488 1.2 sakamoto          */
1489 1.2 sakamoto
1490 1.76 christos         sc->vr_save_iobase = PCI_CONF_READ(VR_PCI_LOIO);
1491 1.76 christos         sc->vr_save_membase = PCI_CONF_READ(VR_PCI_LOMEM);
1492 1.76 christos         sc->vr_save_irq = PCI_CONF_READ(PCI_INTERRUPT_REG);
1493 1.76 christos
1494 1.76 christos         /* power up chip */
1495 1.91 dyoung         if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1496 1.76 christos             vr_restore_state)) && error != EOPNOTSUPP) {
1497 1.119 msaitoh                 aprint_error_dev(self, "cannot activate %d\n", error);
1498 1.76 christos                 return;
1499 1.2 sakamoto         }
1500 1.2 sakamoto
1501 1.19 thorpej         /* Make sure bus mastering is enabled. */
1502 1.63 tsutsui         reg = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1503 1.63 tsutsui         reg |= PCI_COMMAND_MASTER_ENABLE;
1504 1.63 tsutsui         PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, reg);
1505 1.19 thorpej
1506 1.59 lha         /* Get revision */
1507 1.63 tsutsui         sc->vr_revid = PCI_REVISION(pa->pa_class);
1508 1.64 tsutsui
1509 1.2 sakamoto         /*
1510 1.2 sakamoto          * Map control/status registers.
1511 1.2 sakamoto          */
1512 1.2 sakamoto         {
1513 1.2 sakamoto                 bus_space_tag_t iot, memt;
1514 1.2 sakamoto                 bus_space_handle_t ioh, memh;
1515 1.2 sakamoto                 int ioh_valid, memh_valid;
1516 1.2 sakamoto                 pci_intr_handle_t intrhandle;
1517 1.2 sakamoto                 const char *intrstr;
1518 1.2 sakamoto
1519 1.2 sakamoto                 ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
1520 1.2 sakamoto                     PCI_MAPREG_TYPE_IO, 0,
1521 1.2 sakamoto                     &iot, &ioh, NULL, NULL) == 0);
1522 1.2 sakamoto                 memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
1523 1.2 sakamoto                     PCI_MAPREG_TYPE_MEM |
1524 1.2 sakamoto                     PCI_MAPREG_MEM_TYPE_32BIT,
1525 1.2 sakamoto                     0, &memt, &memh, NULL, NULL) == 0);
1526 1.2 sakamoto #if defined(VR_USEIOSPACE)
1527 1.2 sakamoto                 if (ioh_valid) {
1528 1.14 thorpej                         sc->vr_bst = iot;
1529 1.14 thorpej                         sc->vr_bsh = ioh;
1530 1.2 sakamoto                 } else if (memh_valid) {
1531 1.14 thorpej                         sc->vr_bst = memt;
1532 1.14 thorpej                         sc->vr_bsh = memh;
1533 1.2 sakamoto                 }
1534 1.2 sakamoto #else
1535 1.2 sakamoto                 if (memh_valid) {
1536 1.14 thorpej                         sc->vr_bst = memt;
1537 1.14 thorpej                         sc->vr_bsh = memh;
1538 1.2 sakamoto                 } else if (ioh_valid) {
1539 1.14 thorpej                         sc->vr_bst = iot;
1540 1.14 thorpej                         sc->vr_bsh = ioh;
1541 1.2 sakamoto                 }
1542 1.2 sakamoto #endif
1543 1.2 sakamoto                 else {
1544 1.123 flxd                         aprint_error_dev(self,
1545 1.123 flxd                             "unable to map device registers\n");
1546 1.2 sakamoto                         return;
1547 1.2 sakamoto                 }
1548 1.2 sakamoto
1549 1.2 sakamoto                 /* Allocate interrupt */
1550 1.44 sommerfe                 if (pci_intr_map(pa, &intrhandle)) {
1551 1.95 joerg                         aprint_error_dev(self, "couldn't map interrupt\n");
1552 1.15 thorpej                         return;
1553 1.2 sakamoto                 }
1554 1.119 msaitoh                 intrstr = pci_intr_string(pa->pa_pc, intrhandle, intrbuf,
1555 1.119 msaitoh                     sizeof(intrbuf));
1556 1.126 jdolecek                 sc->vr_ih = pci_intr_establish_xname(pa->pa_pc, intrhandle,
1557 1.126 jdolecek                     IPL_NET, vr_intr, sc, device_xname(self));
1558 1.2 sakamoto                 if (sc->vr_ih == NULL) {
1559 1.95 joerg                         aprint_error_dev(self, "couldn't establish interrupt");
1560 1.2 sakamoto                         if (intrstr != NULL)
1561 1.100 njoly                                 aprint_error(" at %s", intrstr);
1562 1.100 njoly                         aprint_error("\n");
1563 1.123 flxd                         return;
1564 1.2 sakamoto                 }
1565 1.100 njoly                 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
1566 1.2 sakamoto         }
1567 1.59 lha
1568 1.59 lha         /*
1569 1.59 lha          * Windows may put the chip in suspend mode when it
1570 1.59 lha          * shuts down. Be sure to kick it in the head to wake it
1571 1.59 lha          * up again.
1572 1.81 tsutsui          *
1573 1.81 tsutsui          * Don't touch this register on VT3043 since it causes
1574 1.81 tsutsui          * kernel MCHK trap on macppc.
1575 1.81 tsutsui          * (Note some VT86C100A chip returns a product ID of VT3043)
1576 1.59 lha          */
1577 1.81 tsutsui         if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT3043)
1578 1.128 msaitoh                 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0 | VR_STICKHW_DS1));
1579 1.2 sakamoto
1580 1.2 sakamoto         /* Reset the adapter. */
1581 1.2 sakamoto         vr_reset(sc);
1582 1.2 sakamoto
1583 1.2 sakamoto         /*
1584 1.2 sakamoto          * Get station address. The way the Rhine chips work,
1585 1.2 sakamoto          * you're not allowed to directly access the EEPROM once
1586 1.2 sakamoto          * they've been programmed a special way. Consequently,
1587 1.122 flxd          * we need to read the node address from the PAR registers.
1588 1.66 scw          *
1589 1.66 scw          * XXXSCW: On the Rhine III, setting VR_EECSR_LOAD forces a reload
1590 1.66 scw          * of the *whole* EEPROM, not just the MAC address. This is
1591 1.66 scw          * pretty pointless since the chip does this automatically
1592 1.66 scw          * at powerup/reset.
1593 1.66 scw          * I suspect the same thing applies to the other Rhine
1594 1.66 scw          * variants, but in the absence of a data sheet for those
1595 1.66 scw          * (and the lack of anyone else noticing the problems this
1596 1.66 scw          * causes) I'm going to retain the old behaviour for the
1597 1.66 scw          * other parts.
1598 1.78 scw          * In some cases, the chip really does startup without having
1599 1.78 scw          * read the EEPROM (kern/34812). To handle this case, we force
1600 1.78 scw          * a reload if we see an all-zeroes MAC address.
1601 1.2 sakamoto          */
1602 1.78 scw         for (mac = 0, i = 0; i < ETHER_ADDR_LEN; i++)
1603 1.78 scw                 mac |= (eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i));
1604 1.78 scw
1605 1.78 scw         if (mac == 0 || (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6105 &&
1606 1.78 scw             PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6102)) {
1607 1.66 scw                 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1608 1.66 scw                 DELAY(200);
1609 1.78 scw                 for (i = 0; i < ETHER_ADDR_LEN; i++)
1610 1.78 scw                         eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1611 1.66 scw         }
1612 1.2 sakamoto
1613 1.2 sakamoto         /*
1614 1.2 sakamoto          * A Rhine chip was detected. Inform the world.
1615 1.2 sakamoto          */
1616 1.125 sevan         aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
1617 1.2 sakamoto
1618 1.49 thorpej         memcpy(sc->vr_enaddr, eaddr, ETHER_ADDR_LEN);
1619 1.2 sakamoto
1620 1.18 thorpej         sc->vr_dmat = pa->pa_dmat;
1621 1.18 thorpej
1622 1.18 thorpej         /*
1623 1.18 thorpej          * Allocate the control data structures, and create and load
1624 1.18 thorpej          * the DMA map for it.
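         * This is the usual bus_dma(9) four-step: bus_dmamem_alloc(),
         * bus_dmamem_map(), bus_dmamap_create() and bus_dmamap_load().
         * The fail_0..fail_3 labels at the end of vr_attach() unwind the
         * sequence in reverse order if any step fails.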
1625 1.18 thorpej          */
1626 1.18 thorpej         if ((error = bus_dmamem_alloc(sc->vr_dmat,
1627 1.18 thorpej             sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
1628 1.18 thorpej             0)) != 0) {
1629 1.118 msaitoh                 aprint_error_dev(self,
1630 1.118 msaitoh                     "unable to allocate control data, error = %d\n", error);
1631 1.18 thorpej                 goto fail_0;
1632 1.18 thorpej         }
1633 1.18 thorpej
1634 1.18 thorpej         if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
1635 1.85 christos             sizeof(struct vr_control_data), (void **)&sc->vr_control_data,
1636 1.18 thorpej             BUS_DMA_COHERENT)) != 0) {
1637 1.118 msaitoh                 aprint_error_dev(self,
1638 1.118 msaitoh                     "unable to map control data, error = %d\n", error);
1639 1.18 thorpej                 goto fail_1;
1640 1.18 thorpej         }
1641 1.18 thorpej
1642 1.18 thorpej         if ((error = bus_dmamap_create(sc->vr_dmat,
1643 1.18 thorpej             sizeof(struct vr_control_data), 1,
1644 1.18 thorpej             sizeof(struct vr_control_data), 0, 0,
1645 1.18 thorpej             &sc->vr_cddmamap)) != 0) {
1646 1.118 msaitoh                 aprint_error_dev(self,
1647 1.118 msaitoh                     "unable to create control data DMA map, error = %d\n",
1648 1.118 msaitoh                     error);
1649 1.18 thorpej                 goto fail_2;
1650 1.18 thorpej         }
1651 1.18 thorpej
1652 1.18 thorpej         if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
1653 1.18 thorpej             sc->vr_control_data, sizeof(struct vr_control_data), NULL,
1654 1.18 thorpej             0)) != 0) {
1655 1.119 msaitoh                 aprint_error_dev(self,
1656 1.119 msaitoh                     "unable to load control data DMA map, error = %d\n",
1657 1.92 cegger                     error);
1658 1.18 thorpej                 goto fail_3;
1659 1.18 thorpej         }
1660 1.18 thorpej
1661 1.18 thorpej         /*
1662 1.18 thorpej          * Create the transmit buffer DMA maps.
1663 1.18 thorpej          */
1664 1.18 thorpej         for (i = 0; i < VR_NTXDESC; i++) {
1665 1.18 thorpej                 if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
1666 1.18 thorpej                     1, MCLBYTES, 0, 0,
1667 1.18 thorpej                     &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
1668 1.119 msaitoh                         aprint_error_dev(self,
1669 1.119 msaitoh                             "unable to create tx DMA map %d, error = %d\n", i,
1670 1.119 msaitoh                             error);
1671 1.18 thorpej                         goto fail_4;
1672 1.18 thorpej                 }
1673 1.18 thorpej         }
1674 1.18 thorpej
1675 1.18 thorpej         /*
1676 1.18 thorpej          * Create the receive buffer DMA maps.
1677 1.18 thorpej          */
1678 1.18 thorpej         for (i = 0; i < VR_NRXDESC; i++) {
1679 1.18 thorpej                 if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
1680 1.18 thorpej                     MCLBYTES, 0, 0,
1681 1.18 thorpej                     &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
1682 1.119 msaitoh                         aprint_error_dev(self,
1683 1.119 msaitoh                             "unable to create rx DMA map %d, error = %d\n", i,
1684 1.119 msaitoh                             error);
1685 1.18 thorpej                         goto fail_5;
1686 1.18 thorpej                 }
1687 1.23 thorpej                 VR_DSRX(sc, i)->ds_mbuf = NULL;
1688 1.2 sakamoto         }
1689 1.2 sakamoto
1690 1.6 thorpej         ifp = &sc->vr_ec.ec_if;
1691 1.2 sakamoto         ifp->if_softc = sc;
1692 1.2 sakamoto         ifp->if_mtu = ETHERMTU;
1693 1.2 sakamoto         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1694 1.2 sakamoto         ifp->if_ioctl = vr_ioctl;
1695 1.2 sakamoto         ifp->if_start = vr_start;
1696 1.2 sakamoto         ifp->if_watchdog = vr_watchdog;
1697 1.39 thorpej         ifp->if_init = vr_init;
1698 1.39 thorpej         ifp->if_stop = vr_stop;
1699 1.42 thorpej         IFQ_SET_READY(&ifp->if_snd);
1700 1.42 thorpej
1701 1.95 joerg         strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1702 1.2 sakamoto
1703 1.2 sakamoto         /*
1704 1.11 thorpej          * Initialize MII/media info.
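         * ether_mediachange()/ether_mediastatus() serve as the ifmedia
         * hooks (ec_mii points at our mii_data); if mii_attach() finds
         * no PHY, a manual "none" medium is installed instead of
         * autoselect.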
1705 1.2 sakamoto          */
1706 1.128 msaitoh         mii->mii_ifp = ifp;
1707 1.128 msaitoh         mii->mii_readreg = vr_mii_readreg;
1708 1.128 msaitoh         mii->mii_writereg = vr_mii_writereg;
1709 1.128 msaitoh         mii->mii_statchg = vr_mii_statchg;
1710 1.89 dyoung
1711 1.128 msaitoh         sc->vr_ec.ec_mii = mii;
1712 1.128 msaitoh         ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
1713 1.89 dyoung             ether_mediastatus);
1714 1.128 msaitoh         mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
1715 1.61 christos             MII_OFFSET_ANY, MIIF_FORCEANEG);
1716 1.11 thorpej         if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
1717 1.128 msaitoh                 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
1718 1.128 msaitoh                 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
1719 1.11 thorpej         } else
1720 1.128 msaitoh                 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
1721 1.2 sakamoto
1722 1.107 jakllsch         sc->vr_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
1723 1.107 jakllsch
1724 1.2 sakamoto         /*
1725 1.2 sakamoto          * Call MI attach routines.
1726 1.2 sakamoto          */
1727 1.2 sakamoto         if_attach(ifp);
1728 1.120 ozaki         if_deferred_start_init(ifp, NULL);
1729 1.2 sakamoto         ether_ifattach(ifp, sc->vr_enaddr);
1730 1.110 tls
1731 1.95 joerg         rnd_attach_source(&sc->rnd_source, device_xname(self),
1732 1.114 tls             RND_TYPE_NET, RND_FLAG_DEFAULT);
1733 1.2 sakamoto
1734 1.99 jmcneill         if (pmf_device_register1(self, NULL, vr_resume, vr_shutdown))
1735 1.98 tsutsui                 pmf_class_network_register(self, ifp);
1736 1.98 tsutsui         else
1737 1.98 tsutsui                 aprint_error_dev(self, "couldn't establish power handler\n");
1738 1.98 tsutsui
1739 1.18 thorpej         return;
1740 1.18 thorpej
1741 1.18 thorpej  fail_5:
1742 1.18 thorpej         for (i = 0; i < VR_NRXDESC; i++) {
1743 1.18 thorpej                 if (sc->vr_rxsoft[i].ds_dmamap != NULL)
1744 1.18 thorpej                         bus_dmamap_destroy(sc->vr_dmat,
1745 1.18 thorpej                             sc->vr_rxsoft[i].ds_dmamap);
1746 1.18 thorpej         }
1747 1.18 thorpej  fail_4:
1748 1.18 thorpej         for (i = 0; i < VR_NTXDESC; i++) {
1749 1.18 thorpej                 if (sc->vr_txsoft[i].ds_dmamap != NULL)
1750 1.18 thorpej                         bus_dmamap_destroy(sc->vr_dmat,
1751 1.18 thorpej                             sc->vr_txsoft[i].ds_dmamap);
1752 1.18 thorpej         }
1753 1.18 thorpej         bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
1754 1.18 thorpej  fail_3:
1755 1.18 thorpej         bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
1756 1.18 thorpej  fail_2:
1757 1.85 christos         bus_dmamem_unmap(sc->vr_dmat, (void *)sc->vr_control_data,
1758 1.18 thorpej             sizeof(struct vr_control_data));
1759 1.18 thorpej  fail_1:
1760 1.18 thorpej         bus_dmamem_free(sc->vr_dmat, &seg, rseg);
1761 1.18 thorpej  fail_0:
1762 1.18 thorpej         return;
1763 1.2 sakamoto }
1764 1.76 christos
1765 1.76 christos static int
1766 1.91 dyoung vr_restore_state(pci_chipset_tag_t pc, pcitag_t tag, device_t self,
1767 1.91 dyoung     pcireg_t state)
1768 1.76 christos {
1769 1.91 dyoung         struct vr_softc *sc = device_private(self);
1770 1.76 christos         int error;
1771 1.76 christos
1772 1.76 christos         if (state == PCI_PMCSR_STATE_D0)
1773 1.76 christos                 return 0;
1774 1.76 christos         if ((error = pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0)))
1775 1.76 christos                 return error;
1776 1.76 christos
1777 1.76 christos         /* Restore PCI config data. */
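        /*
         * The I/O and memory BARs and the interrupt line were saved in
         * vr_attach() before pci_activate(); writing them back here is
         * what restores the device's config space after a wake from a
         * low-power state.
         */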
1778 1.76 christos         PCI_CONF_WRITE(VR_PCI_LOIO, sc->vr_save_iobase);
1779 1.76 christos         PCI_CONF_WRITE(VR_PCI_LOMEM, sc->vr_save_membase);
1780 1.76 christos         PCI_CONF_WRITE(PCI_INTERRUPT_REG, sc->vr_save_irq);
1781 1.76 christos         return 0;
1782 1.76 christos }
1783 1.99 jmcneill
1784 1.99 jmcneill static bool
1785 1.103 dyoung vr_resume(device_t self, const pmf_qual_t *qual)
1786 1.99 jmcneill {
1787 1.99 jmcneill         struct vr_softc *sc = device_private(self);
1788 1.99 jmcneill
1789 1.99 jmcneill         if (PCI_PRODUCT(sc->vr_id) != PCI_PRODUCT_VIATECH_VT3043)
1790 1.128 msaitoh                 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0 | VR_STICKHW_DS1));
1791 1.99 jmcneill
1792 1.99 jmcneill         return true;
1793 1.99 jmcneill }
1794