/* $NetBSD: hd64570.c,v 1.62 2024/09/14 21:22:37 andvar Exp $ */

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer (at) flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 *	o  teach the receive logic about errors, and about long frames that
 *	   span more than one input buffer.  (Right now, receive/transmit is
 *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	   This is currently 1504, which is large enough to hold the HDLC
 *	   header and the packet itself.  Packets which are too long are
 *	   silently dropped on both transmit and receive.)
 *	o  write code to handle the msci interrupts, needed only for CD
 *	   and CTS changes.
 *	o  consider switching back to a "queue tx with DMA active" model which
 *	   should help sustain outgoing traffic
 *	o  through clever use of bus_dma*() functions, it should be possible
 *	   to map the mbuf's data area directly into a descriptor transmit
 *	   buffer, removing the need to allocate extra memory.  If, however,
 *	   we run out of descriptors for this, we will need to then allocate
 *	   one large mbuf, copy the fragmented chain into it, and put it onto
 *	   a single descriptor.
 *	o  use bus_dmamap_sync() with the right offset and lengths, rather
 *	   than cheating and always sync'ing the whole region.
 *
 *	o  perhaps allow rx and tx to be in more than one page
 *	   if not using DMA.  currently the assumption is that
 *	   rx uses a page and tx uses a page.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.62 2024/09/14 21:22:37 andvar Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#endif
#endif

#include <net/bpf.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX	0x0001
#define SCA_DEBUG_TX	0x0002
#define SCA_DEBUG_CISCO	0x0004
#define SCA_DEBUG_DMA	0x0008
#define SCA_DEBUG_RXPKT	0x0010
#define SCA_DEBUG_TXPKT	0x0020
#define SCA_DEBUG_INTR	0x0040
#define SCA_DEBUG_CLOCK	0x0080

#if 0
#define SCA_DEBUG_LEVEL	( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL	0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x;\
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#if 0
#define SCA_USE_FASTQ	/* use a split queue, one for fast traffic */
#endif

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(sca_port_t *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static int sca_frame_avail(sca_port_t *);
static void sca_frame_process(sca_port_t *);
static void sca_frame_read_done(sca_port_t *);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
    const struct rtentry *);
static int sca_ioctl(struct ifnet *, u_long, void *);
static void sca_start(struct ifnet *);
static void sca_watchdog(struct ifnet *);

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif

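/*
 * Register access layering (descriptive note added for clarity, not from
 * the original sources): the card front-end supplies sc_read_1/sc_read_2
 * and sc_write_1/sc_write_2, which the sca_read_*()/sca_write_*() macros
 * below call.  The msci_*() and dmac_*() inlines add the per-port register
 * offset (msci_off/dmac_off), and the sca_desc_*() accessors read or write
 * descriptor fields either directly (when descriptors live in host memory
 * and sc_usedma is set) or through the card's paged memory window via
 * bus_space and sca_page_addr().
 */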
#define sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

#if SCA_DEBUG_LEVEL > 0
/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}
#endif

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

#if SCA_DEBUG_LEVEL > 0
/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}
#endif

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle more than 2 or less than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

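/*
 * Typical bring-up from a bus front-end (illustrative sketch only, not
 * taken from a real attachment): the front-end fills in the sca_softc
 * access methods and memory layout (sc_read_*()/sc_write_*(), and either
 * the DMA map or the paged-window callbacks scu_page_on/scu_page_off/
 * scu_set_page), then does roughly
 *
 *	sc->sc_numports = 2;
 *	sca_init(sc);
 *	for (port = 0; port < sc->sc_numports; port++)
 *		sca_port_attach(sc, port);
 *
 * and calls sca_hardintr(sc) from the card's interrupt handler.
 */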
/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	struct timeval now;
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN);
	bpf_mtap_softint_init(ifp);

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, device_xname(sc->sc_parent), port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 * 1 <= TD <= 256		TD is inc of 1
	 * 2 <= TD <= 512		TD is inc of 2
	 * 4 <= TD <= 1024		TD is inc of 4
	 * ...
	 * 512 <= TD <= 256*512		TD is inc of 512
	 *
	 * so note there are overlaps.  We lose precision as div
	 * increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 * tmc = chip / hz, but have tmc <= 256
	 */

	/* assume system clock is 9.8304MHz or 9830400Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif

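/*
 * Worked example for the disabled routine above (illustrative only):
 * asking for hz = 64000 with the assumed 9.8304MHz oscillator gives
 * clock = 4915200 and tmc = 76 on the first pass, so div stays 0; the
 * round-up step then bumps tmc to 77, for an actual bit clock of about
 * 4915200 / 77 = 63834Hz.
 */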
/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
		     (  SCA_MD0_CRC_1
		      | SCA_MD0_CRC_CCITT
		      | SCA_MD0_CRC_ENABLE
		      | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
		     (SCA_CTL_IDLC_PATTERN
		     | SCA_CTL_UDRNC_AFTER_FCS
		     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns.
	 * For any value less than or equal to TRC0, txrdy is activated,
	 * which will start the dmac transfer to the fifo.
	 * For buffer size >= TRC1 + 1, txrdy is cleared, which will stop DMA.
	 *
	 * Thus, if we are using a very fast clock that empties the fifo
	 * quickly, delays in the dmac starting to fill the fifo can
	 * lead to underruns, so we want a fairly full fifo to still
	 * cause the dmac to start.  For cards with on board ram this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}

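/*
 * Note on the per-port memory layout assumed below (added for clarity,
 * not from the original sources): the bus front-end is expected to have
 * set up sp_txdesc/sp_txdesc_p and sp_rxdesc/sp_rxdesc_p pointing at
 * sp_ntxdesc and sp_nrxdesc descriptors respectively, with sp_txbuf_p
 * and sp_rxbuf_p pointing at SCA_BSIZE bytes of buffer space per
 * descriptor, either in DMA-able host memory (sc_usedma) or in the
 * card's on-board window.
 */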
/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * because cda can't be eda when starting we always
	 * have a single buffer gap between cda and eda
	 */
	dmac_write_1(scp, SCA_CPB0,
		     (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
		     (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(
    struct ifnet *ifp,
    struct mbuf *m,
    const struct sockaddr *dst,
    const struct rtentry *rt0)
{
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;

	error = 0;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, error);
	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if (error != 0) {
		if_statinc_ref(ifp, nsr, if_oerrors);
		if_statinc_ref(ifp, nsr, if_collisions);
		IF_STAT_PUTREF(ifp);
		splx(s);
		return (error);
	}
	if_statadd_ref(ifp, nsr, if_obytes, len);
	if (mflags & M_MCAST)
		if_statinc_ref(ifp, nsr, if_omcasts);
	IF_STAT_PUTREF(ifp);

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	m_freem(m);
	return (error);
}

static int
sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *)data;
	error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == 0) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(struct ifnet *ifp)
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	if_statinc(ifp, if_opackets);

	/*
	 * Pass packet to bpf if there is a listener.
	 */
	bpf_mtap(ifp, mb_head, BPF_D_OUT);

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this used to limit us to 1?! - multi may be untested
	 * sp_ntxdesc used to be hard coded to 2 with claim of a too hard
	 * to find bug
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(struct ifnet *ifp)
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
					     (isr1 & 0xf0) >> 4);

		/*
		 * msci interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
					     (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
				     dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error. It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("Transmit completed. cda %x eda %x dsr %x\n",
				    dmac_read_2(scp, SCA_CDAL1),
				    dmac_read_2(scp, SCA_EDAL1),
				    dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				if_schedule_deferred_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
				| SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :)*/
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
1476 1.1 explorer */ 1477 1.8 chopps desc = &scp->sp_rxdesc[scp->sp_rxstart]; 1478 1.8 chopps rxstat = sca_desc_read_stat(scp->sca, desc); 1479 1.8 chopps 1480 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n", 1481 1.8 chopps scp->sp_port, scp->sp_rxstart, rxstat)); 1482 1.1 explorer 1483 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n", 1484 1.8 chopps scp->sp_port, sca_desc_read_buflen(scp->sca, desc))); 1485 1.1 explorer 1486 1.1 explorer /* 1487 1.1 explorer * check for errors 1488 1.1 explorer */ 1489 1.8 chopps if (rxstat & SCA_DESC_ERRORS) { 1490 1.8 chopps /* 1491 1.8 chopps * consider an error condition the end 1492 1.8 chopps * of a frame 1493 1.8 chopps */ 1494 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors); 1495 1.8 chopps toolong = 0; 1496 1.8 chopps continue; 1497 1.8 chopps } 1498 1.1 explorer 1499 1.1 explorer /* 1500 1.8 chopps * if we aren't skipping overlong frames 1501 1.8 chopps * we are done, otherwise reset and look for 1502 1.8 chopps * another good frame 1503 1.1 explorer */ 1504 1.1 explorer if (rxstat & SCA_DESC_EOM) { 1505 1.8 chopps if (!toolong) 1506 1.8 chopps return (1); 1507 1.8 chopps toolong = 0; 1508 1.8 chopps } else if (!toolong) { 1509 1.8 chopps /* 1510 1.8 chopps * we currently don't deal with frames 1511 1.8 chopps * larger than a single buffer (fixed MTU) 1512 1.8 chopps */ 1513 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors); 1514 1.8 chopps toolong = 1; 1515 1.1 explorer } 1516 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n", 1517 1.8 chopps scp->sp_rxstart)); 1518 1.1 explorer } 1519 1.1 explorer 1520 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n")); 1521 1.1 explorer return 0; 1522 1.1 explorer } 1523 1.1 explorer 1524 1.1 explorer /* 1525 1.1 explorer * Pass the packet up to the kernel if it is a packet we want to pay 1526 1.1 explorer * attention to. 
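 *
 * IP and IPv6 frames have the HDLC header stripped and are handed to
 * the stack through pktq_enqueue(); Cisco keepalive requests are
 * answered directly from here via the link queue and sca_start();
 * other Cisco messages and unknown protocols are counted and dropped.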
1527 1.1 explorer *
1528 1.2 mycroft * MUST BE CALLED AT splnet()
1529 1.1 explorer */
1530 1.1 explorer static void
1531 1.8 chopps sca_frame_process(sca_port_t *scp)
1532 1.1 explorer {
1533 1.47 rmind pktqueue_t *pktq = NULL;
1534 1.9 chopps struct hdlc_header *hdlc;
1535 1.9 chopps struct cisco_pkt *cisco;
1536 1.8 chopps sca_desc_t *desc;
1537 1.8 chopps struct mbuf *m;
1538 1.8 chopps u_int8_t *bufp;
1539 1.8 chopps u_int16_t len;
1540 1.8 chopps u_int32_t t;
1541 1.8 chopps
1542 1.32 kardel t = time_uptime * 1000;
1543 1.8 chopps desc = &scp->sp_rxdesc[scp->sp_rxstart];
1544 1.8 chopps bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
1545 1.8 chopps len = sca_desc_read_buflen(scp->sca, desc);
1546 1.8 chopps
1547 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX,
1548 1.8 chopps ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
1549 1.8 chopps (bus_addr_t)bufp, len));
1550 1.1 explorer
1551 1.8 chopps #if SCA_DEBUG_LEVEL > 0
1552 1.8 chopps if (sca_debug & SCA_DEBUG_RXPKT)
1553 1.8 chopps sca_frame_print(scp, desc, bufp);
1554 1.8 chopps #endif
1555 1.1 explorer /*
1556 1.1 explorer * skip packets that are too short
1557 1.1 explorer */
1558 1.9 chopps if (len < sizeof(struct hdlc_header)) {
1559 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors);
1560 1.1 explorer return;
1561 1.9 chopps }
1562 1.1 explorer
1563 1.8 chopps m = sca_mbuf_alloc(scp->sca, bufp, len);
1564 1.8 chopps if (m == NULL) {
1565 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
1566 1.8 chopps return;
1567 1.8 chopps }
1568 1.1 explorer
1569 1.1 explorer /*
1570 1.1 explorer * read and then strip off the HDLC information
1571 1.1 explorer */
1572 1.9 chopps m = m_pullup(m, sizeof(struct hdlc_header));
1573 1.8 chopps if (m == NULL) {
1574 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1575 1.9 chopps return;
1576 1.8 chopps }
1577 1.8 chopps
1578 1.52 ozaki bpf_mtap_softint(&scp->sp_if, m);
1579 1.1 explorer
1580 1.55 thorpej if_statinc(&scp->sp_if, if_ipackets);
1581 1.1 explorer
1582 1.9 chopps hdlc = mtod(m, struct hdlc_header *);
1583 1.9 chopps switch (ntohs(hdlc->h_proto)) {
1584 1.9 chopps #ifdef INET
1585 1.1 explorer case HDLC_PROTOCOL_IP:
1586 1.1 explorer SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
1587 1.50 ozaki m_set_rcvif(m, &scp->sp_if);
1588 1.9 chopps m->m_pkthdr.len -= sizeof(struct hdlc_header);
1589 1.9 chopps m->m_data += sizeof(struct hdlc_header);
1590 1.9 chopps m->m_len -= sizeof(struct hdlc_header);
1591 1.47 rmind pktq = ip_pktq;
1592 1.9 chopps break;
1593 1.9 chopps #endif /* INET */
1594 1.15 itojun #ifdef INET6
1595 1.15 itojun case HDLC_PROTOCOL_IPV6:
1596 1.15 itojun SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
1597 1.50 ozaki m_set_rcvif(m, &scp->sp_if);
1598 1.15 itojun m->m_pkthdr.len -= sizeof(struct hdlc_header);
1599 1.15 itojun m->m_data += sizeof(struct hdlc_header);
1600 1.15 itojun m->m_len -= sizeof(struct hdlc_header);
1601 1.47 rmind pktq = ip6_pktq;
1602 1.15 itojun break;
1603 1.15 itojun #endif /* INET6 */
1604 1.1 explorer case CISCO_KEEPALIVE:
1605 1.1 explorer SCA_DPRINTF(SCA_DEBUG_CISCO,
1606 1.1 explorer ("Received CISCO keepalive packet\n"));
1607 1.1 explorer
1608 1.1 explorer if (len < CISCO_PKT_LEN) {
1609 1.1 explorer SCA_DPRINTF(SCA_DEBUG_CISCO,
1610 1.1 explorer ("short CISCO packet %d, wanted %d\n",
1611 1.1 explorer len, CISCO_PKT_LEN));
1612 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors);
1613 1.9 chopps goto dropit;
1614 1.1 explorer }
1615 1.1 explorer
1616 1.9 chopps m = m_pullup(m, sizeof(struct cisco_pkt));
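/*
 * If the pullup succeeds, the keepalive request is answered in place
 * below: the peer's sequence number is echoed back in par2, our own
 * (cka_lasttx) goes in par1, the reply is stamped with the uptime in
 * milliseconds split across time0/time1, and it is queued on linkq and
 * kicked out with sca_start().
 */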
1617 1.8 chopps if (m == NULL) {
1618 1.8 chopps SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1619 1.9 chopps return;
1620 1.8 chopps }
1621 1.1 explorer
1622 1.9 chopps cisco = (struct cisco_pkt *)
1623 1.9 chopps (mtod(m, u_int8_t *) + HDLC_HDRLEN);
1624 1.50 ozaki m_set_rcvif(m, &scp->sp_if);
1625 1.1 explorer
1626 1.1 explorer switch (ntohl(cisco->type)) {
1627 1.1 explorer case CISCO_ADDR_REQ:
1628 1.1 explorer printf("Got CISCO addr_req, ignoring\n");
1629 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors);
1630 1.9 chopps goto dropit;
1631 1.1 explorer
1632 1.1 explorer case CISCO_ADDR_REPLY:
1633 1.1 explorer printf("Got CISCO addr_reply, ignoring\n");
1634 1.55 thorpej if_statinc(&scp->sp_if, if_ierrors);
1635 1.9 chopps goto dropit;
1636 1.1 explorer
1637 1.1 explorer case CISCO_KEEPALIVE_REQ:
1638 1.8 chopps
1639 1.1 explorer SCA_DPRINTF(SCA_DEBUG_CISCO,
1640 1.1 explorer ("Received KA, mseq %d,"
1641 1.1 explorer " yseq %d, rel 0x%04x, t0"
1642 1.1 explorer " %04x, t1 %04x\n",
1643 1.1 explorer ntohl(cisco->par1), ntohl(cisco->par2),
1644 1.1 explorer ntohs(cisco->rel), ntohs(cisco->time0),
1645 1.1 explorer ntohs(cisco->time1)));
1646 1.1 explorer
1647 1.1 explorer scp->cka_lastrx = ntohl(cisco->par1);
1648 1.1 explorer scp->cka_lasttx++;
1649 1.1 explorer
1650 1.1 explorer /*
1651 1.1 explorer * schedule the transmit right here.
1652 1.1 explorer */
1653 1.8 chopps cisco->par2 = cisco->par1;
1654 1.8 chopps cisco->par1 = htonl(scp->cka_lasttx);
1655 1.8 chopps cisco->time0 = htons((u_int16_t)(t >> 16));
1656 1.8 chopps cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));
1657 1.1 explorer
1658 1.57 thorpej if (IF_QFULL(&scp->linkq)) {
1659 1.57 thorpej IF_DROP(&scp->linkq);
1660 1.9 chopps goto dropit;
1661 1.1 explorer }
1662 1.57 thorpej IF_ENQUEUE(&scp->linkq, m);
1663 1.1 explorer
1664 1.1 explorer sca_start(&scp->sp_if);
1665 1.1 explorer
1666 1.8 chopps /* since sca_start() may have reset the page, restore it */
1667 1.8 chopps if (!scp->sca->sc_usedma) {
1668 1.8 chopps scp->sca->scu_set_page(scp->sca,
1669 1.8 chopps scp->sp_rxdesc_p);
1670 1.8 chopps scp->sca->scu_page_on(scp->sca);
1671 1.8 chopps }
1672 1.9 chopps return;
1673 1.1 explorer default:
1674 1.1 explorer SCA_DPRINTF(SCA_DEBUG_CISCO,
1675 1.1 explorer ("Unknown CISCO keepalive protocol 0x%04x\n",
1676 1.1 explorer ntohl(cisco->type)));
1677 1.29 perry
1678 1.55 thorpej if_statinc(&scp->sp_if, if_noproto);
1679 1.9 chopps goto dropit;
1680 1.1 explorer }
1681 1.9 chopps return;
1682 1.1 explorer default:
1683 1.1 explorer SCA_DPRINTF(SCA_DEBUG_RX,
1684 1.1 explorer ("Unknown/unexpected ethertype 0x%04x\n",
1685 1.9 chopps ntohs(hdlc->h_proto)));
1686 1.55 thorpej if_statinc(&scp->sp_if, if_noproto);
1687 1.9 chopps goto dropit;
1688 1.9 chopps }
1689 1.9 chopps
1690 1.47 rmind /* Queue the packet */
1691 1.57 thorpej KASSERT(pktq != NULL);
1692 1.57 thorpej if (__predict_false(!pktq_enqueue(pktq, m, 0))) {
1693 1.55 thorpej if_statinc(&scp->sp_if, if_iqdrops);
1694 1.9 chopps goto dropit;
1695 1.9 chopps }
1696 1.9 chopps return;
1697 1.9 chopps dropit:
1698 1.60 rin m_freem(m);
1699 1.9 chopps return;
1700 1.1 explorer }
1701 1.1 explorer
1702 1.1 explorer #if SCA_DEBUG_LEVEL > 0
1703 1.1 explorer /*
1704 1.1 explorer * do a hex dump of the packet received into descriptor "desc" with
1705 1.1 explorer * data buffer "p"
1706 1.1 explorer */
1707 1.1 explorer static void
1708 1.1 explorer sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
1709 1.1 explorer {
1710 1.1 explorer int i;
1711 1.1 explorer int
nothing_yet = 1; 1712 1.8 chopps struct sca_softc *sc; 1713 1.8 chopps u_int len; 1714 1.1 explorer 1715 1.8 chopps sc = scp->sca; 1716 1.8 chopps printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n", 1717 1.8 chopps desc, 1718 1.8 chopps sca_desc_read_chainp(sc, desc), 1719 1.8 chopps sca_desc_read_bufp(sc, desc), 1720 1.8 chopps sca_desc_read_stat(sc, desc), 1721 1.8 chopps (len = sca_desc_read_buflen(sc, desc))); 1722 1.8 chopps 1723 1.8 chopps for (i = 0 ; i < len && i < 256; i++) { 1724 1.8 chopps if (nothing_yet == 1 && 1725 1.8 chopps (sc->sc_usedma ? *p 1726 1.8 chopps : bus_space_read_1(sc->scu_memt, sc->scu_memh, 1727 1.8 chopps sca_page_addr(sc, p))) == 0) { 1728 1.1 explorer p++; 1729 1.1 explorer continue; 1730 1.1 explorer } 1731 1.1 explorer nothing_yet = 0; 1732 1.1 explorer if (i % 16 == 0) 1733 1.1 explorer printf("\n"); 1734 1.29 perry printf("%02x ", 1735 1.8 chopps (sc->sc_usedma ? *p 1736 1.8 chopps : bus_space_read_1(sc->scu_memt, sc->scu_memh, 1737 1.8 chopps sca_page_addr(sc, p)))); 1738 1.8 chopps p++; 1739 1.1 explorer } 1740 1.1 explorer 1741 1.1 explorer if (i % 16 != 1) 1742 1.1 explorer printf("\n"); 1743 1.1 explorer } 1744 1.1 explorer #endif 1745 1.1 explorer 1746 1.1 explorer /* 1747 1.26 wiz * adjust things because we have just read the current starting 1748 1.8 chopps * frame 1749 1.8 chopps * 1750 1.8 chopps * must be called at splnet() 1751 1.1 explorer */ 1752 1.1 explorer static void 1753 1.8 chopps sca_frame_read_done(sca_port_t *scp) 1754 1.1 explorer { 1755 1.8 chopps u_int16_t edesc_p; 1756 1.1 explorer 1757 1.58 andvar /* update where our indices are */ 1758 1.8 chopps scp->sp_rxend = scp->sp_rxstart; 1759 1.8 chopps scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc; 1760 1.8 chopps 1761 1.8 chopps /* update the error [end] descriptor */ 1762 1.8 chopps edesc_p = (u_int16_t)scp->sp_rxdesc_p + 1763 1.8 chopps (sizeof(sca_desc_t) * scp->sp_rxend); 1764 1.8 chopps dmac_write_2(scp, SCA_EDAL0, edesc_p); 1765 1.1 explorer } 1766 1.1 explorer 1767 1.1 explorer /* 1768 1.1 explorer * set a port to the "up" state 1769 1.1 explorer */ 1770 1.1 explorer static void 1771 1.1 explorer sca_port_up(sca_port_t *scp) 1772 1.1 explorer { 1773 1.1 explorer struct sca_softc *sc = scp->sca; 1774 1.32 kardel struct timeval now; 1775 1.8 chopps #if 0 1776 1.8 chopps u_int8_t ier0, ier1; 1777 1.8 chopps #endif 1778 1.1 explorer 1779 1.1 explorer /* 1780 1.1 explorer * reset things 1781 1.1 explorer */ 1782 1.1 explorer #if 0 1783 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET); 1784 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET); 1785 1.1 explorer #endif 1786 1.1 explorer /* 1787 1.1 explorer * clear in-use flag 1788 1.1 explorer */ 1789 1.1 explorer scp->sp_if.if_flags &= ~IFF_OACTIVE; 1790 1.8 chopps scp->sp_if.if_flags |= IFF_RUNNING; 1791 1.1 explorer 1792 1.1 explorer /* 1793 1.1 explorer * raise DTR 1794 1.1 explorer */ 1795 1.8 chopps sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1); 1796 1.1 explorer 1797 1.1 explorer /* 1798 1.1 explorer * raise RTS 1799 1.1 explorer */ 1800 1.1 explorer msci_write_1(scp, SCA_CTL0, 1801 1.8 chopps (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK) 1802 1.8 chopps | SCA_CTL_RTS_HIGH); 1803 1.1 explorer 1804 1.8 chopps #if 0 1805 1.1 explorer /* 1806 1.8 chopps * enable interrupts (no timer IER2) 1807 1.1 explorer */ 1808 1.8 chopps ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0 1809 1.8 chopps | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0; 1810 1.8 chopps ier1 = SCA_IER1_DMAC_RX0A | 
SCA_IER1_DMAC_RX0B 1811 1.8 chopps | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B; 1812 1.8 chopps if (scp->sp_port == 1) { 1813 1.8 chopps ier0 <<= 4; 1814 1.8 chopps ier1 <<= 4; 1815 1.8 chopps } 1816 1.8 chopps sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0); 1817 1.8 chopps sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1); 1818 1.8 chopps #else 1819 1.1 explorer if (scp->sp_port == 0) { 1820 1.1 explorer sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f); 1821 1.1 explorer sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f); 1822 1.29 perry } else { 1823 1.1 explorer sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0); 1824 1.1 explorer sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0); 1825 1.1 explorer } 1826 1.8 chopps #endif 1827 1.1 explorer 1828 1.1 explorer /* 1829 1.1 explorer * enable transmit and receive 1830 1.1 explorer */ 1831 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE); 1832 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE); 1833 1.1 explorer 1834 1.1 explorer /* 1835 1.1 explorer * reset internal state 1836 1.1 explorer */ 1837 1.8 chopps scp->sp_txinuse = 0; 1838 1.8 chopps scp->sp_txcur = 0; 1839 1.32 kardel getmicrotime(&now); 1840 1.32 kardel scp->cka_lasttx = now.tv_usec; 1841 1.1 explorer scp->cka_lastrx = 0; 1842 1.1 explorer } 1843 1.1 explorer 1844 1.1 explorer /* 1845 1.1 explorer * set a port to the "down" state 1846 1.1 explorer */ 1847 1.1 explorer static void 1848 1.1 explorer sca_port_down(sca_port_t *scp) 1849 1.1 explorer { 1850 1.1 explorer struct sca_softc *sc = scp->sca; 1851 1.8 chopps #if 0 1852 1.8 chopps u_int8_t ier0, ier1; 1853 1.8 chopps #endif 1854 1.1 explorer 1855 1.1 explorer /* 1856 1.1 explorer * lower DTR 1857 1.1 explorer */ 1858 1.8 chopps sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0); 1859 1.1 explorer 1860 1.1 explorer /* 1861 1.1 explorer * lower RTS 1862 1.1 explorer */ 1863 1.1 explorer msci_write_1(scp, SCA_CTL0, 1864 1.8 chopps (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK) 1865 1.8 chopps | SCA_CTL_RTS_LOW); 1866 1.1 explorer 1867 1.1 explorer /* 1868 1.1 explorer * disable interrupts 1869 1.1 explorer */ 1870 1.8 chopps #if 0 1871 1.8 chopps ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0 1872 1.8 chopps | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0; 1873 1.8 chopps ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B 1874 1.8 chopps | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B; 1875 1.8 chopps if (scp->sp_port == 1) { 1876 1.8 chopps ier0 <<= 4; 1877 1.8 chopps ier1 <<= 4; 1878 1.8 chopps } 1879 1.8 chopps sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0); 1880 1.8 chopps sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1); 1881 1.8 chopps #else 1882 1.1 explorer if (scp->sp_port == 0) { 1883 1.1 explorer sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0); 1884 1.1 explorer sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0); 1885 1.29 perry } else { 1886 1.1 explorer sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f); 1887 1.1 explorer sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f); 1888 1.1 explorer } 1889 1.8 chopps #endif 1890 1.1 explorer 1891 1.1 explorer /* 1892 1.1 explorer * disable transmit and receive 1893 1.1 explorer */ 1894 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE); 1895 1.1 explorer msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE); 1896 1.1 explorer 1897 1.1 explorer /* 1898 1.1 explorer * no, we're not in use anymore 1899 1.1 explorer */ 1900 1.8 chopps scp->sp_if.if_flags 
&= ~(IFF_OACTIVE|IFF_RUNNING); 1901 1.1 explorer } 1902 1.1 explorer 1903 1.1 explorer /* 1904 1.1 explorer * disable all DMA and interrupts for all ports at once. 1905 1.1 explorer */ 1906 1.1 explorer void 1907 1.1 explorer sca_shutdown(struct sca_softc *sca) 1908 1.1 explorer { 1909 1.1 explorer /* 1910 1.1 explorer * disable DMA and interrupts 1911 1.1 explorer */ 1912 1.1 explorer sca_write_1(sca, SCA_DMER, 0); 1913 1.1 explorer sca_write_1(sca, SCA_IER0, 0); 1914 1.1 explorer sca_write_1(sca, SCA_IER1, 0); 1915 1.1 explorer } 1916 1.1 explorer 1917 1.1 explorer /* 1918 1.1 explorer * If there are packets to transmit, start the transmit DMA logic. 1919 1.1 explorer */ 1920 1.1 explorer static void 1921 1.1 explorer sca_port_starttx(sca_port_t *scp) 1922 1.1 explorer { 1923 1.1 explorer u_int32_t startdesc_p, enddesc_p; 1924 1.1 explorer int enddesc; 1925 1.1 explorer 1926 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n")); 1927 1.8 chopps 1928 1.1 explorer if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE) 1929 1.8 chopps || scp->sp_txinuse == 0) 1930 1.1 explorer return; 1931 1.8 chopps 1932 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n")); 1933 1.8 chopps 1934 1.1 explorer scp->sp_if.if_flags |= IFF_OACTIVE; 1935 1.1 explorer 1936 1.1 explorer /* 1937 1.1 explorer * We have something to do, since we have at least one packet 1938 1.1 explorer * waiting, and we are not already marked as active. 1939 1.1 explorer */ 1940 1.8 chopps enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc; 1941 1.8 chopps startdesc_p = scp->sp_txdesc_p; 1942 1.8 chopps enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc; 1943 1.1 explorer 1944 1.8 chopps SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n", 1945 1.8 chopps startdesc_p, enddesc_p)); 1946 1.1 explorer 1947 1.1 explorer dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff)); 1948 1.1 explorer dmac_write_2(scp, SCA_CDAL1, 1949 1.1 explorer (u_int16_t)(startdesc_p & 0x0000ffff)); 1950 1.1 explorer 1951 1.1 explorer /* 1952 1.1 explorer * enable the DMA 1953 1.1 explorer */ 1954 1.1 explorer dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE); 1955 1.1 explorer } 1956 1.1 explorer 1957 1.1 explorer /* 1958 1.1 explorer * allocate an mbuf at least long enough to hold "len" bytes. 1959 1.1 explorer * If "p" is non-NULL, copy "len" bytes from it into the new mbuf, 1960 1.1 explorer * otherwise let the caller handle copying the data in. 1961 1.1 explorer */ 1962 1.1 explorer static struct mbuf * 1963 1.36 christos sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len) 1964 1.1 explorer { 1965 1.1 explorer struct mbuf *m; 1966 1.1 explorer 1967 1.1 explorer /* 1968 1.1 explorer * allocate an mbuf and copy the important bits of data 1969 1.1 explorer * into it. If the packet won't fit in the header, 1970 1.1 explorer * allocate a cluster for it and store it there. 1971 1.1 explorer */ 1972 1.1 explorer MGETHDR(m, M_DONTWAIT, MT_DATA); 1973 1.1 explorer if (m == NULL) 1974 1.1 explorer return NULL; 1975 1.1 explorer if (len > MHLEN) { 1976 1.1 explorer if (len > MCLBYTES) { 1977 1.1 explorer m_freem(m); 1978 1.1 explorer return NULL; 1979 1.1 explorer } 1980 1.1 explorer MCLGET(m, M_DONTWAIT); 1981 1.1 explorer if ((m->m_flags & M_EXT) == 0) { 1982 1.1 explorer m_freem(m); 1983 1.1 explorer return NULL; 1984 1.1 explorer } 1985 1.1 explorer } 1986 1.8 chopps if (p != NULL) { 1987 1.8 chopps /* XXX do we need to sync here? 
*/ 1988 1.8 chopps if (sc->sc_usedma) 1989 1.36 christos memcpy(mtod(m, void *), p, len); 1990 1.8 chopps else 1991 1.8 chopps bus_space_read_region_1(sc->scu_memt, sc->scu_memh, 1992 1.8 chopps sca_page_addr(sc, p), mtod(m, u_int8_t *), len); 1993 1.8 chopps } 1994 1.1 explorer m->m_len = len; 1995 1.1 explorer m->m_pkthdr.len = len; 1996 1.1 explorer 1997 1.1 explorer return (m); 1998 1.1 explorer } 1999 1.8 chopps 2000 1.8 chopps /* 2001 1.8 chopps * get the base clock 2002 1.8 chopps */ 2003 1.29 perry void 2004 1.8 chopps sca_get_base_clock(struct sca_softc *sc) 2005 1.8 chopps { 2006 1.8 chopps struct timeval btv, ctv, dtv; 2007 1.8 chopps u_int64_t bcnt; 2008 1.8 chopps u_int32_t cnt; 2009 1.8 chopps u_int16_t subcnt; 2010 1.8 chopps 2011 1.8 chopps /* disable the timer, set prescale to 0 */ 2012 1.8 chopps sca_write_1(sc, SCA_TCSR0, 0); 2013 1.8 chopps sca_write_1(sc, SCA_TEPR0, 0); 2014 1.8 chopps 2015 1.8 chopps /* reset the counter */ 2016 1.8 chopps (void)sca_read_1(sc, SCA_TCSR0); 2017 1.8 chopps subcnt = sca_read_2(sc, SCA_TCNTL0); 2018 1.8 chopps 2019 1.8 chopps /* count to max */ 2020 1.8 chopps sca_write_2(sc, SCA_TCONRL0, 0xffff); 2021 1.8 chopps 2022 1.8 chopps cnt = 0; 2023 1.8 chopps microtime(&btv); 2024 1.8 chopps /* start the timer -- no interrupt enable */ 2025 1.8 chopps sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME); 2026 1.8 chopps for (;;) { 2027 1.8 chopps microtime(&ctv); 2028 1.8 chopps 2029 1.8 chopps /* end around 3/4 of a second */ 2030 1.8 chopps timersub(&ctv, &btv, &dtv); 2031 1.8 chopps if (dtv.tv_usec >= 750000) 2032 1.8 chopps break; 2033 1.8 chopps 2034 1.8 chopps /* spin */ 2035 1.8 chopps while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF)) 2036 1.8 chopps ; 2037 1.8 chopps /* reset the timer */ 2038 1.8 chopps (void)sca_read_2(sc, SCA_TCNTL0); 2039 1.8 chopps cnt++; 2040 1.8 chopps } 2041 1.8 chopps 2042 1.8 chopps /* stop the timer */ 2043 1.8 chopps sca_write_1(sc, SCA_TCSR0, 0); 2044 1.8 chopps 2045 1.8 chopps subcnt = sca_read_2(sc, SCA_TCNTL0); 2046 1.8 chopps /* add the slop in and get the total timer ticks */ 2047 1.8 chopps cnt = (cnt << 16) | subcnt; 2048 1.8 chopps 2049 1.8 chopps /* cnt is 1/8 the actual time */ 2050 1.8 chopps bcnt = cnt * 8; 2051 1.8 chopps /* make it proportional to 3/4 of a second */ 2052 1.8 chopps bcnt *= (u_int64_t)750000; 2053 1.8 chopps bcnt /= (u_int64_t)dtv.tv_usec; 2054 1.8 chopps cnt = bcnt; 2055 1.8 chopps 2056 1.8 chopps /* make it Hz */ 2057 1.8 chopps cnt *= 4; 2058 1.8 chopps cnt /= 3; 2059 1.8 chopps 2060 1.8 chopps SCA_DPRINTF(SCA_DEBUG_CLOCK, 2061 1.8 chopps ("sca: unadjusted base %lu Hz\n", (u_long)cnt)); 2062 1.8 chopps 2063 1.8 chopps /* 2064 1.8 chopps * round to the nearest 200 -- this allows for +-3 ticks error 2065 1.8 chopps */ 2066 1.8 chopps sc->sc_baseclock = ((cnt + 100) / 200) * 200; 2067 1.8 chopps } 2068 1.8 chopps 2069 1.8 chopps /* 2070 1.8 chopps * print the information about the clock on the ports 2071 1.8 chopps */ 2072 1.8 chopps void 2073 1.8 chopps sca_print_clock_info(struct sca_softc *sc) 2074 1.8 chopps { 2075 1.8 chopps struct sca_port *scp; 2076 1.8 chopps u_int32_t mhz, div; 2077 1.8 chopps int i; 2078 1.8 chopps 2079 1.39 cegger printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent), 2080 1.8 chopps sc->sc_baseclock); 2081 1.8 chopps 2082 1.8 chopps /* print the information about the port clock selection */ 2083 1.8 chopps for (i = 0; i < sc->sc_numports; i++) { 2084 1.8 chopps scp = &sc->sc_ports[i]; 2085 1.8 chopps mhz = sc->sc_baseclock / (scp->sp_tmc ? 
scp->sp_tmc : 256); 2086 1.8 chopps div = scp->sp_rxs & SCA_RXS_DIV_MASK; 2087 1.8 chopps 2088 1.8 chopps printf("%s: rx clock: ", scp->sp_if.if_xname); 2089 1.8 chopps switch (scp->sp_rxs & SCA_RXS_CLK_MASK) { 2090 1.8 chopps case SCA_RXS_CLK_LINE: 2091 1.8 chopps printf("line"); 2092 1.8 chopps break; 2093 1.8 chopps case SCA_RXS_CLK_LINE_SN: 2094 1.8 chopps printf("line with noise suppression"); 2095 1.8 chopps break; 2096 1.8 chopps case SCA_RXS_CLK_INTERNAL: 2097 1.8 chopps printf("internal %d Hz", (mhz >> div)); 2098 1.8 chopps break; 2099 1.8 chopps case SCA_RXS_CLK_ADPLL_OUT: 2100 1.8 chopps printf("adpll using internal %d Hz", (mhz >> div)); 2101 1.8 chopps break; 2102 1.8 chopps case SCA_RXS_CLK_ADPLL_IN: 2103 1.8 chopps printf("adpll using line clock"); 2104 1.8 chopps break; 2105 1.8 chopps } 2106 1.8 chopps printf(" tx clock: "); 2107 1.8 chopps div = scp->sp_txs & SCA_TXS_DIV_MASK; 2108 1.8 chopps switch (scp->sp_txs & SCA_TXS_CLK_MASK) { 2109 1.8 chopps case SCA_TXS_CLK_LINE: 2110 1.8 chopps printf("line\n"); 2111 1.8 chopps break; 2112 1.8 chopps case SCA_TXS_CLK_INTERNAL: 2113 1.8 chopps printf("internal %d Hz\n", (mhz >> div)); 2114 1.8 chopps break; 2115 1.8 chopps case SCA_TXS_CLK_RXCLK: 2116 1.8 chopps printf("rxclock\n"); 2117 1.8 chopps break; 2118 1.8 chopps } 2119 1.8 chopps if (scp->sp_eclock) 2120 1.8 chopps printf("%s: outputting line clock\n", 2121 1.8 chopps scp->sp_if.if_xname); 2122 1.8 chopps } 2123 1.8 chopps } 2124 1.8 chopps 2125
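/*
 * Worked example of the base-clock arithmetic in sca_get_base_clock()
 * above, using hypothetical numbers rather than a measured value: if
 * the timer accumulates cnt = 921600 ticks and dtv.tv_usec works out
 * to exactly 750000, then
 *
 *      bcnt = 921600 * 8             = 7372800  (cnt is 1/8 the real count)
 *      bcnt = bcnt * 750000 / 750000 = 7372800  (normalize to 3/4 second)
 *      cnt  = bcnt * 4 / 3           = 9830400  (scale 3/4 second to 1 second)
 *      sc_baseclock = ((9830400 + 100) / 200) * 200 = 9830400
 *
 * i.e. a 9.8304 MHz base clock, which rounding to the nearest 200 Hz
 * leaves unchanged.
 */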