/* $NetBSD: mtd803.c,v 1.16 2007/12/05 07:58:30 ad Exp $ */
2
3 /*-
4 *
5 * Copyright (c) 2002 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Peter Bex <Peter.Bex (at) student.kun.nl>.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * TODO:
42 * - Most importantly, get some bus_dmamap_syncs in the correct places.
43 * I don't have access to a computer with PCI other than i386, and i386
44 * is just such a machine where dmamap_syncs don't do anything.
45 * - Powerhook for when resuming after standby.
46 * - Watchdog stuff doesn't work yet, the system crashes.
47 * - There seems to be a CardBus version of the card. (see datasheet)
48 * Perhaps a detach function is necessary then? (free buffs, stop rx/tx etc)
49 * - When you enable the TXBUN (Tx buffer unavailable) interrupt, it gets
50 * raised every time a packet is sent. Strange, since everything works anyway
51 */
52
53 #include <sys/cdefs.h>
54 __KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.16 2007/12/05 07:58:30 ad Exp $");
55
56 #include "bpfilter.h"
57
58 #include <sys/param.h>
59 #include <sys/mbuf.h>
60 #include <sys/systm.h>
61 #include <sys/device.h>
62 #include <sys/socket.h>
63 #include <sys/ioctl.h>
64 #include <sys/syslog.h>
65
66 #include <net/if.h>
67 #include <net/if_ether.h>
68 #include <net/if_media.h>
69
70 #ifdef INET
71 #include <netinet/in.h>
72 #include <netinet/if_inarp.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/in_var.h>
75 #include <netinet/ip.h>
76 #endif
77
78 #if NBPFILTER > 0
79 #include <net/bpf.h>
80 #include <net/bpfdesc.h>
81 #endif
82
83 #include <sys/bus.h>
84
85 #include <dev/ic/mtd803reg.h>
86 #include <dev/ic/mtd803var.h>
87 #include <dev/mii/mii.h>
88 #include <dev/mii/miivar.h>
89
90 /*
91 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
92 * Written by Peter Bex (peter.bex (at) student.kun.nl)
93 *
94 * Datasheet at: http://www.myson.com.tw or http://www.century-semi.com
95 */
96
97 #define MTD_READ_1(sc, reg) \
98 bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
99 #define MTD_WRITE_1(sc, reg, data) \
100 bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))
101
102 #define MTD_READ_2(sc, reg) \
103 bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
104 #define MTD_WRITE_2(sc, reg, data) \
105 bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))
106
107 #define MTD_READ_4(sc, reg) \
108 bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
109 #define MTD_WRITE_4(sc, reg, data) \
110 bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))
111
112 #define MTD_SETBIT(sc, reg, x) \
113 MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
114 #define MTD_CLRBIT(sc, reg, x) \
115 MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))
116
117 #define ETHER_CRC32(buf, len) (ether_crc32_be((buf), (len)))
118
119 int mtd_mii_readreg(struct device *, int, int);
120 void mtd_mii_writereg(struct device *, int, int, int);
121 void mtd_mii_statchg(struct device *);
122
123 void mtd_start(struct ifnet *);
124 void mtd_stop(struct ifnet *, int);
125 int mtd_ioctl(struct ifnet *, u_long, void *);
126 void mtd_setmulti(struct mtd_softc *);
127 void mtd_watchdog(struct ifnet *);
128 int mtd_mediachange(struct ifnet *);
129 void mtd_mediastatus(struct ifnet *, struct ifmediareq *);
130
131 int mtd_init(struct ifnet *);
132 void mtd_reset(struct mtd_softc *);
133 void mtd_shutdown(void *);
134 int mtd_init_desc(struct mtd_softc *);
135 int mtd_put(struct mtd_softc *, int, struct mbuf *);
136 struct mbuf *mtd_get(struct mtd_softc *, int, int);
137
138 int mtd_rxirq(struct mtd_softc *);
139 int mtd_txirq(struct mtd_softc *);
140 int mtd_bufirq(struct mtd_softc *);
141
142
/*
 * mtd_config:
 *	Finish attaching the device: read the station address out of the
 *	chip, fill in the ifnet and MII structures, allocate the DMA
 *	descriptors/buffers and attach the network interface.
 *
 *	Returns 0 on success, 1 on failure.
 */
int
mtd_config(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address from the PAR0..PAR5 registers. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	ifmedia_init(&sc->mii.mii_media, 0, mtd_mediachange, mtd_mediastatus);

	/* Probe for PHYs; the attach fails if none is found. */
	mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		printf("%s: Unable to configure MII\n", sc->dev.dv_xname);
		return 1;
	} else {
		/* Default to autonegotiation. */
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	/* Allocate DMA descriptor rings and packet buffers. */
	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

#if NRND > 0
	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, sc->dev.dv_xname, RND_TYPE_NET, 0);
#endif

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}
199
200
/*
 * mtd_init:
 *	Reset and (re)program the chip: bus configuration, rx/tx modes,
 *	interrupt masks and the descriptor list base addresses, then
 *	enable the receiver and transmitter.
 *	Must be called at splnet()
 */
int
mtd_init(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	/* Store-and-forward tx, full duplex (per the flag names). */
	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	/* Program the multicast hash filter. */
	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/*
	 * Set descriptor base addresses.  The tx ring starts right
	 * after the MTD_NUM_RXD rx descriptors in the same DMA segment
	 * (see mtd_init_desc()).
	 */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
				+ sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
		htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
255
256
257 int
258 mtd_init_desc(sc)
259 struct mtd_softc *sc;
260 {
261 int rseg, err, i;
262 bus_dma_segment_t seg;
263 bus_size_t size;
264
265 /* Allocate memory for descriptors */
266 size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);
267
268 /* Allocate DMA-safe memory */
269 if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
270 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
271 printf("%s: unable to allocate DMA buffer, error = %d\n",
272 sc->dev.dv_xname, err);
273 return 1;
274 }
275
276 /* Map memory to kernel addressable space */
277 if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
278 (void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
279 printf("%s: unable to map DMA buffer, error = %d\n",
280 sc->dev.dv_xname, err);
281 bus_dmamem_free(sc->dma_tag, &seg, rseg);
282 return 1;
283 }
284
285 /* Create a DMA map */
286 if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
287 size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
288 printf("%s: unable to create DMA map, error = %d\n",
289 sc->dev.dv_xname, err);
290 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
291 bus_dmamem_free(sc->dma_tag, &seg, rseg);
292 return 1;
293 }
294
295 /* Load the DMA map */
296 if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
297 size, NULL, BUS_DMA_NOWAIT)) != 0) {
298 printf("%s: unable to load DMA map, error = %d\n",
299 sc->dev.dv_xname, err);
300 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
301 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
302 bus_dmamem_free(sc->dma_tag, &seg, rseg);
303 return 1;
304 }
305
306 /* Allocate memory for the buffers */
307 size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;
308
309 /* Allocate DMA-safe memory */
310 if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
311 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
312 printf("%s: unable to allocate DMA buffer, error = %d\n",
313 sc->dev.dv_xname, err);
314
315 /* Undo DMA map for descriptors */
316 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
317 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
318 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
319 bus_dmamem_free(sc->dma_tag, &seg, rseg);
320 return 1;
321 }
322
323 /* Map memory to kernel addressable space */
324 if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
325 &sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
326 printf("%s: unable to map DMA buffer, error = %d\n",
327 sc->dev.dv_xname, err);
328 bus_dmamem_free(sc->dma_tag, &seg, rseg);
329
330 /* Undo DMA map for descriptors */
331 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
332 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
333 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
334 bus_dmamem_free(sc->dma_tag, &seg, rseg);
335 return 1;
336 }
337
338 /* Create a DMA map */
339 if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
340 size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
341 printf("%s: unable to create DMA map, error = %d\n",
342 sc->dev.dv_xname, err);
343 bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
344 bus_dmamem_free(sc->dma_tag, &seg, rseg);
345
346 /* Undo DMA map for descriptors */
347 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
348 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
349 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
350 bus_dmamem_free(sc->dma_tag, &seg, rseg);
351 return 1;
352 }
353
354 /* Load the DMA map */
355 if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
356 size, NULL, BUS_DMA_NOWAIT)) != 0) {
357 printf("%s: unable to load DMA map, error = %d\n",
358 sc->dev.dv_xname, err);
359 bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
360 bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
361 bus_dmamem_free(sc->dma_tag, &seg, rseg);
362
363 /* Undo DMA map for descriptors */
364 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
365 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
366 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
367 bus_dmamem_free(sc->dma_tag, &seg, rseg);
368 return 1;
369 }
370
371 /* Descriptors are stored as a circular linked list */
372 /* Fill in rx descriptors */
373 for (i = 0; i < MTD_NUM_RXD; ++i) {
374 sc->desc[i].stat = MTD_RXD_OWNER;
375 if (i == MTD_NUM_RXD - 1) { /* Last descriptor */
376 /* Link back to first rx descriptor */
377 sc->desc[i].next =
378 htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
379 } else {
380 /* Link forward to next rx descriptor */
381 sc->desc[i].next =
382 htole32(sc->desc_dma_map->dm_segs[0].ds_addr
383 + (i + 1) * sizeof(struct mtd_desc));
384 }
385 sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
386 /* Set buffer's address */
387 sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
388 + i * MTD_RXBUF_SIZE);
389 }
390
391 /* Fill in tx descriptors */
392 for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
393 sc->desc[i].stat = 0; /* At least, NOT MTD_TXD_OWNER! */
394 if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) { /* Last descr */
395 /* Link back to first tx descriptor */
396 sc->desc[i].next =
397 htole32(sc->desc_dma_map->dm_segs[0].ds_addr
398 +MTD_NUM_RXD * sizeof(struct mtd_desc));
399 } else {
400 /* Link forward to next tx descriptor */
401 sc->desc[i].next =
402 htole32(sc->desc_dma_map->dm_segs[0].ds_addr
403 + (i + 1) * sizeof(struct mtd_desc));
404 }
405 /* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
406 /* Set buffer's address */
407 sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
408 + MTD_NUM_RXD * MTD_RXBUF_SIZE
409 + (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
410 }
411
412 return 0;
413 }
414
415
/*
 * mtd_mii_statchg:
 *	MII status-change callback.  Intentionally empty: nothing in the
 *	chip is reprogrammed here when the media status changes.
 */
void
mtd_mii_statchg(struct device *self)
{
}
423
424
425 int
426 mtd_mii_readreg(struct device *self, int phy, int reg)
427 {
428 struct mtd_softc *sc = (void *)self;
429
430 return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
431 }
432
433
434 void
435 mtd_mii_writereg(struct device *self, int phy, int reg, int val)
436 {
437 struct mtd_softc *sc = (void *)self;
438
439 MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
440 }
441
442
443 int
444 mtd_put(sc, index, m)
445 struct mtd_softc *sc;
446 int index;
447 struct mbuf *m;
448 {
449 int len, tlen;
450 char *buf = (char *)sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
451 + index * MTD_TXBUF_SIZE;
452 struct mbuf *n;
453
454 for (tlen = 0; m != NULL; m = n) {
455 len = m->m_len;
456 if (len == 0) {
457 MFREE(m, n);
458 continue;
459 } else if (tlen > MTD_TXBUF_SIZE) {
460 /* XXX FIXME: No idea what to do here. */
461 printf("%s: packet too large! Size = %i\n",
462 sc->dev.dv_xname, tlen);
463 MFREE(m, n);
464 continue;
465 }
466 memcpy(buf, mtod(m, void *), len);
467 buf += len;
468 tlen += len;
469 MFREE(m, n);
470 }
471 sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
472 | MTD_TXD_CONF_IRQC
473 | ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
474 | (tlen & MTD_TXD_CONF_BUFS);
475
476 return tlen;
477 }
478
479
/*
 * mtd_start:
 *	Drain the interface send queue, copying each mbuf chain into its
 *	tx buffer.  Ownership of every descriptor except the first is
 *	given to the chip inside the loop; the first descriptor is
 *	released only after the whole chain is marked, so the chip never
 *	starts on a partially built chain.
 */
void
mtd_start(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int len;
	int first_tx = sc->cur_tx;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

#if NBPFILTER > 0
		/* Let packet filters see the outgoing frame. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Copy mbuf chain into tx buffer */
		len = mtd_put(sc, sc->cur_tx, m);
		/* NOTE(review): len is not used beyond this point. */

		/* First descriptor stays ours until the chain is done. */
		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}
	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0) {
		/* cur_tx wrapped, so the last used slot is the ring's end. */
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |=MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}
531
532
533 void
534 mtd_stop (ifp, disable)
535 struct ifnet *ifp;
536 int disable;
537 {
538 struct mtd_softc *sc = ifp->if_softc;
539
540 /* Disable transmitter and receiver */
541 MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
542 MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
543
544 /* Disable interrupts */
545 MTD_WRITE_4(sc, MTD_IMR, 0x00000000);
546
547 /* Must do more at disable??... */
548 if (disable) {
549 /* Delete tx and rx descriptor base addresses */
550 MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
551 MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
552 }
553
554 ifp->if_timer = 0;
555 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
556 }
557
558
559 void
560 mtd_watchdog(ifp)
561 struct ifnet *ifp;
562 {
563 struct mtd_softc *sc = ifp->if_softc;
564 int s;
565
566 log(LOG_ERR, "%s: device timeout\n", sc->dev.dv_xname);
567 ++sc->ethercom.ec_if.if_oerrors;
568
569 mtd_stop(ifp, 0);
570
571 s = splnet();
572 mtd_init(ifp);
573 splx(s);
574
575 return;
576 }
577
578
579 int
580 mtd_ioctl(ifp, cmd, data)
581 struct ifnet * ifp;
582 u_long cmd;
583 void *data;
584 {
585 struct mtd_softc *sc = ifp->if_softc;
586 int s, error = 0;
587
588 s = splnet();
589
590 /* Don't do anything special */
591 switch(cmd) {
592 case SIOCADDMULTI:
593 case SIOCDELMULTI:
594 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
595 /*
596 * Multicast list has changed; set the hardware
597 * filter accordingly.
598 */
599 if (ifp->if_flags & IFF_RUNNING)
600 mtd_setmulti(sc);
601 error = 0;
602 }
603 break;
604
605 default:
606 error = ether_ioctl(ifp, cmd, data);
607 break;
608 }
609
610 splx(s);
611 return error;
612 }
613
614
/*
 * mtd_get:
 *	Copy a received packet of "totlen" bytes out of rx buffer
 *	"index" into a freshly allocated mbuf chain.  Returns the chain,
 *	or NULL if mbuf allocation failed (the partial chain is freed).
 */
struct mbuf *
mtd_get(sc, index, totlen)
	struct mtd_softc *sc;
	int index;
	int totlen;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	char *buf = (char *)sc->buf + index * MTD_RXBUF_SIZE;

	/* First mbuf carries the packet header. */
	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		/* Attach a cluster when enough data remains to warrant it. */
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

		if (m == m0) {
			/*
			 * Shift the payload so the data following the
			 * Ethernet header ends up ALIGN()ed, at the
			 * cost of a few bytes of this mbuf's storage.
			 */
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		/* Copy as much as fits into this mbuf. */
		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			/* Chain another mbuf for the remainder. */
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}
671
672
/*
 * mtd_rxirq:
 *	Receive interrupt: walk the rx ring, passing every descriptor we
 *	now own up to the network stack (or counting it as an input
 *	error), then returning ownership of the descriptor to the chip.
 *	Returns 1.
 */
int
mtd_rxirq(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	/* Stop at the first descriptor still owned by the chip. */
	for (; !(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER);) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			printf("%s: received packet with errors\n",
			    sc->dev.dv_xname);
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length (frame-length field, minus the CRC). */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
		    >> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			printf("%s: invalid packet size %d; dropping\n",
			    sc->dev.dv_xname, len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		/* Copy the packet into an mbuf chain (may return NULL). */
		m = mtd_get(sc, (sc->cur_rx), len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			printf("%s: error pulling packet off interface\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

#if NBPFILTER > 0
		/* Let packet filters see the frame. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass the packet up */
		(*ifp->if_input)(ifp, m);
	}

	return 1;
}
741
742
743 int
744 mtd_txirq(sc)
745 struct mtd_softc *sc;
746 {
747 struct ifnet *ifp = &sc->ethercom.ec_if;
748
749 /* Clear timeout */
750 ifp->if_timer = 0;
751
752 ifp->if_flags &= ~IFF_OACTIVE;
753 ++ifp->if_opackets;
754
755 /* XXX FIXME If there is some queued, do an mtd_start? */
756
757 return 1;
758 }
759
760
761 int
762 mtd_bufirq(sc)
763 struct mtd_softc *sc;
764 {
765 struct ifnet *ifp = &sc->ethercom.ec_if;
766
767 /* Clear timeout */
768 ifp->if_timer = 0;
769
770 /* XXX FIXME: Do something here to make sure we get some buffers! */
771
772 return 1;
773 }
774
775
/*
 * mtd_irq_h:
 *	Interrupt handler.  Masks the chip's interrupts, then loops
 *	reading/acknowledging the status register and servicing each bit
 *	until no bits we care about remain, and finally unmasks
 *	interrupts again.  Returns nonzero if the interrupt was ours.
 */
int
mtd_irq_h(args)
	void *args;
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	/* Ignore interrupts while down, suspended or detached. */
	if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(&sc->dev))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for(;;) {
		status = MTD_READ_4(sc, MTD_ISR);
#if NRND > 0
		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);
#endif
		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		/* Acknowledge the bits we are about to service. */
		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			printf("%s: receive buffer unavailable\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			printf("%s: receive error\n", sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			printf("%s: transmit buffer unavailable\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if ((status & MTD_ISR_PDF)) {
			printf("%s: parallel detection fault\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			printf("%s: fatal bus error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			printf("%s: target error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			printf("%s: master error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			printf("%s: parity error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);

	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}
872
873
874 void
875 mtd_setmulti(sc)
876 struct mtd_softc *sc;
877 {
878 struct ifnet *ifp = &sc->ethercom.ec_if;
879 u_int32_t rxtx_stat;
880 u_int32_t hash[2] = {0, 0};
881 u_int32_t crc;
882 struct ether_multi *enm;
883 struct ether_multistep step;
884 int mcnt = 0;
885
886 /* Get old status */
887 rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);
888
889 if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
890 rxtx_stat |= MTD_RX_AMULTI;
891 MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
892 MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
893 MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
894 return;
895 }
896
897 ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
898 while (enm != NULL) {
899 /* We need the 6 most significant bits of the CRC */
900 crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
901
902 hash[crc >> 5] |= 1 << (crc & 0xf);
903
904 ++mcnt;
905 ETHER_NEXT_MULTI(step, enm);
906 }
907
908 /* Accept multicast bit needs to be on? */
909 if (mcnt)
910 rxtx_stat |= MTD_RX_AMULTI;
911 else
912 rxtx_stat &= ~MTD_RX_AMULTI;
913
914 /* Write out the hash */
915 MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
916 MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
917 MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
918 }
919
920
921 void
922 mtd_reset(sc)
923 struct mtd_softc *sc;
924 {
925 int i;
926
927 MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);
928
929 /* Reset descriptor status */
930 sc->cur_tx = 0;
931 sc->cur_rx = 0;
932
933 /* Wait until done with reset */
934 for (i = 0; i < MTD_TIMEOUT; ++i) {
935 DELAY(10);
936 if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
937 break;
938 }
939
940 if (i == MTD_TIMEOUT) {
941 printf("%s: reset timed out\n", sc->dev.dv_xname);
942 }
943
944 /* Wait a little so chip can stabilize */
945 DELAY(1000);
946 }
947
948
949 int
950 mtd_mediachange(ifp)
951 struct ifnet *ifp;
952 {
953 struct mtd_softc *sc = ifp->if_softc;
954
955 if (IFM_TYPE(sc->mii.mii_media.ifm_media) != IFM_ETHER)
956 return EINVAL;
957
958 return mii_mediachg(&sc->mii);
959 }
960
961
962 void
963 mtd_mediastatus(ifp, ifmr)
964 struct ifnet *ifp;
965 struct ifmediareq *ifmr;
966 {
967 struct mtd_softc *sc = ifp->if_softc;
968
969 if ((ifp->if_flags & IFF_UP) == 0)
970 return;
971
972 mii_pollstat(&sc->mii);
973 ifmr->ifm_active = sc->mii.mii_media_active;
974 ifmr->ifm_status = sc->mii.mii_media_status;
975 }
976
977
978 void
979 mtd_shutdown (arg)
980 void *arg;
981 {
982 struct mtd_softc *sc = arg;
983 struct ifnet *ifp = &sc->ethercom.ec_if;
984
985 #if NRND > 0
986 rnd_detach_source(&sc->rnd_src);
987 #endif
988 mtd_stop(ifp, 1);
989 }
990