/*	$NetBSD: mtd803.c,v 1.3 2003/07/14 15:47:12 lukem Exp $	*/
2
3 /*-
4 *
5 * Copyright (c) 2002 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Peter Bex <Peter.Bex (at) student.kun.nl>.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * TODO:
42 * - Most importantly, get some bus_dmamap_syncs in the correct places.
43 * I don't have access to a computer with PCI other than i386, and i386
44 * is just such a machine where dmamap_syncs don't do anything.
45 * - Powerhook for when resuming after standby.
46 * - Watchdog stuff doesn't work yet, the system crashes.(lockmgr: no context)
47 * - There seems to be a CardBus version of the card. (see datasheet)
48 * Perhaps a detach function is necessary then? (free buffs, stop rx/tx etc)
49 * - When you enable the TXBUN (Tx buffer unavailable) interrupt, it gets
50 * raised every time a packet is sent. Strange, since everything works anyway
51 */
52
53 #include <sys/cdefs.h>
54 __KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.3 2003/07/14 15:47:12 lukem Exp $");
55
56 #include "bpfilter.h"
57
58 #include <sys/param.h>
59 #include <sys/mbuf.h>
60 #include <sys/systm.h>
61 #include <sys/device.h>
62 #include <sys/socket.h>
63 #include <sys/ioctl.h>
64 #include <sys/syslog.h>
65
66 #include <net/if.h>
67 #include <net/if_ether.h>
68 #include <net/if_media.h>
69
70 #ifdef INET
71 #include <netinet/in.h>
72 #include <netinet/if_inarp.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/in_var.h>
75 #include <netinet/ip.h>
76 #endif
77
78 #if NBPFILTER > 0
79 #include <net/bpf.h>
80 #include <net/bpfdesc.h>
81 #endif
82
83 #include <machine/bus.h>
84
85 #include <dev/ic/mtd803reg.h>
86 #include <dev/ic/mtd803var.h>
87 #include <dev/mii/mii.h>
88 #include <dev/mii/miivar.h>
89
90 /*
91 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
92 * Written by Peter Bex (peter.bex (at) student.kun.nl)
93 *
94 * Datasheet at: http://www.myson.com.tw or http://www.century-semi.com
95 */
96
/*
 * Register access helpers: all chip registers are reached through the
 * bus space tag/handle kept in the softc.
 */
#define MTD_READ_1(sc, reg) \
	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_1(sc, reg, data) \
	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_2(sc, reg) \
	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_2(sc, reg, data) \
	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_4(sc, reg, data) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

/* Read-modify-write helpers; note these are NOT atomic wrt interrupts. */
#define MTD_SETBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
#define MTD_CLRBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))

/* Big-endian CRC32, as used for the multicast hash filter. */
#define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))

/* MII access and status callbacks */
int mtd_mii_readreg __P((struct device *, int, int));
void mtd_mii_writereg __P((struct device *, int, int, int));
void mtd_mii_statchg __P((struct device *));

/* ifnet entry points */
void mtd_start __P((struct ifnet *));
void mtd_stop __P((struct ifnet *, int));
int mtd_ioctl __P((struct ifnet *, u_long, caddr_t));
void mtd_setmulti __P((struct mtd_softc *));
void mtd_watchdog __P((struct ifnet *));
int mtd_mediachange __P((struct ifnet *));
void mtd_mediastatus __P((struct ifnet *, struct ifmediareq *));

/* Initialisation / teardown helpers */
int mtd_init __P((struct ifnet *));
void mtd_reset __P((struct mtd_softc *));
void mtd_shutdown __P((void *));
int mtd_init_desc __P((struct mtd_softc *));
int mtd_put __P((struct mtd_softc *, int, struct mbuf *));
struct mbuf *mtd_get __P((struct mtd_softc *, int, int));

/* Per-cause interrupt service helpers */
int mtd_rxirq __P((struct mtd_softc *));
int mtd_txirq __P((struct mtd_softc *));
int mtd_bufirq __P((struct mtd_softc *));
142
/*
 * mtd_config
 *	Attach-time configuration: read the station address from the chip,
 *	fill in the ifnet structure, attach the MII/PHY layer, set up the
 *	DMA descriptor rings and attach the interface to the network stack.
 *	Returns 0 on success, 1 on failure.
 */
int
mtd_config(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address out of the PAR0..PAR5 registers */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	ifmedia_init(&sc->mii.mii_media, 0, mtd_mediachange, mtd_mediastatus);

	mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	/* Default to autoselect if any PHY was found; fail otherwise */
	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		printf("%s: Unable to configure MII\n", sc->dev.dv_xname);
		return 1;
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	/* Allocate descriptor rings and packet buffers before attaching */
	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

#if NRND > 0
	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, sc->dev.dv_xname, RND_TYPE_NET, 0);
#endif

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}
199
200
/*
 * mtd_init
 *	(Re)initialise the chip: reset it, program bus/rx/tx configuration,
 *	install the multicast filter, enable interrupts, point the chip at
 *	the descriptor rings and start the receiver and transmitter.
 *	Always returns 0.
 *
 *	Must be called at splnet().
 */
int
mtd_init(ifp)
	struct ifnet *ifp;
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	/* Store-and-forward tx, rx burst/threshold, full duplex */
	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_RX_BLEN | MTD_RX_512
	    | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	/* Program the multicast hash filter */
	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/*
	 * Set descriptor base addresses: the rx ring starts at the
	 * beginning of the descriptor DMA area, the tx ring directly
	 * behind the MTD_NUM_RXD rx descriptors (see mtd_init_desc()).
	 */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
	    + sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
	    htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
256
257
258 int
259 mtd_init_desc(sc)
260 struct mtd_softc *sc;
261 {
262 int rseg, err, i;
263 bus_dma_segment_t seg;
264 bus_size_t size;
265
266 /* Allocate memory for descriptors */
267 size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);
268
269 /* Allocate DMA-safe memory */
270 if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
271 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
272 printf("%s: unable to allocate DMA buffer, error = %d\n",
273 sc->dev.dv_xname, err);
274 return 1;
275 }
276
277 /* Map memory to kernel addressable space */
278 if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
279 (caddr_t *)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
280 printf("%s: unable to map DMA buffer, error = %d\n",
281 sc->dev.dv_xname, err);
282 bus_dmamem_free(sc->dma_tag, &seg, rseg);
283 return 1;
284 }
285
286 /* Create a DMA map */
287 if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
288 size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
289 printf("%s: unable to create DMA map, error = %d\n",
290 sc->dev.dv_xname, err);
291 bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
292 bus_dmamem_free(sc->dma_tag, &seg, rseg);
293 return 1;
294 }
295
296 /* Load the DMA map */
297 if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
298 size, NULL, BUS_DMA_NOWAIT)) != 0) {
299 printf("%s: unable to load DMA map, error = %d\n",
300 sc->dev.dv_xname, err);
301 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
302 bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
303 bus_dmamem_free(sc->dma_tag, &seg, rseg);
304 return 1;
305 }
306
307 /* Allocate memory for the buffers */
308 size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;
309
310 /* Allocate DMA-safe memory */
311 if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
312 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
313 printf("%s: unable to allocate DMA buffer, error = %d\n",
314 sc->dev.dv_xname, err);
315
316 /* Undo DMA map for descriptors */
317 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
318 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
319 bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
320 bus_dmamem_free(sc->dma_tag, &seg, rseg);
321 return 1;
322 }
323
324 /* Map memory to kernel addressable space */
325 if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
326 &sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
327 printf("%s: unable to map DMA buffer, error = %d\n",
328 sc->dev.dv_xname, err);
329 bus_dmamem_free(sc->dma_tag, &seg, rseg);
330
331 /* Undo DMA map for descriptors */
332 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
333 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
334 bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
335 bus_dmamem_free(sc->dma_tag, &seg, rseg);
336 return 1;
337 }
338
339 /* Create a DMA map */
340 if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
341 size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
342 printf("%s: unable to create DMA map, error = %d\n",
343 sc->dev.dv_xname, err);
344 bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
345 bus_dmamem_free(sc->dma_tag, &seg, rseg);
346
347 /* Undo DMA map for descriptors */
348 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
349 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
350 bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
351 bus_dmamem_free(sc->dma_tag, &seg, rseg);
352 return 1;
353 }
354
355 /* Load the DMA map */
356 if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
357 size, NULL, BUS_DMA_NOWAIT)) != 0) {
358 printf("%s: unable to load DMA map, error = %d\n",
359 sc->dev.dv_xname, err);
360 bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
361 bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
362 bus_dmamem_free(sc->dma_tag, &seg, rseg);
363
364 /* Undo DMA map for descriptors */
365 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
366 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
367 bus_dmamem_unmap(sc->dma_tag, (caddr_t)sc->desc, size);
368 bus_dmamem_free(sc->dma_tag, &seg, rseg);
369 return 1;
370 }
371
372 /* Descriptors are stored as a circular linked list */
373 /* Fill in rx descriptors */
374 for (i = 0; i < MTD_NUM_RXD; ++i) {
375 sc->desc[i].stat = MTD_RXD_OWNER;
376 if (i == MTD_NUM_RXD - 1) { /* Last descriptor */
377 /* Link back to first rx descriptor */
378 sc->desc[i].next =
379 htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
380 } else {
381 /* Link forward to next rx descriptor */
382 sc->desc[i].next =
383 htole32(sc->desc_dma_map->dm_segs[0].ds_addr
384 + (i + 1) * sizeof(struct mtd_desc));
385 }
386 sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
387 /* Set buffer's address */
388 sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
389 + i * MTD_RXBUF_SIZE);
390 }
391
392 /* Fill in tx descriptors */
393 for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
394 sc->desc[i].stat = 0; /* At least, NOT MTD_TXD_OWNER! */
395 if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) { /* Last descr */
396 /* Link back to first tx descriptor */
397 sc->desc[i].next =
398 htole32(sc->desc_dma_map->dm_segs[0].ds_addr
399 +MTD_NUM_RXD * sizeof(struct mtd_desc));
400 } else {
401 /* Link forward to next tx descriptor */
402 sc->desc[i].next =
403 htole32(sc->desc_dma_map->dm_segs[0].ds_addr
404 + (i + 1) * sizeof(struct mtd_desc));
405 }
406 /* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
407 /* Set buffer's address */
408 sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
409 + MTD_NUM_RXD * MTD_RXBUF_SIZE
410 + (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
411 }
412
413 return 0;
414 }
415
416
/*
 * mtd_mii_statchg
 *	MII status change callback.  Intentionally a no-op: nothing is
 *	reprogrammed when the PHY's speed/duplex status changes.
 *	NOTE(review): verify against the datasheet whether the MAC needs
 *	its duplex bit updated here.
 */
void
mtd_mii_statchg(self)
	struct device *self;
{
	/*struct mtd_softc *sc = (void *)self;*/

	/* Should we do something here? :) */
}
425
426
/*
 * mtd_mii_readreg
 *	Read PHY register `reg'.  The registers are accessed as chip
 *	registers at MTD_PHYBASE + reg * 2; the `phy' argument is ignored
 *	(presumably a single built-in PHY -- confirm against datasheet).
 */
int
mtd_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct mtd_softc *sc = (void *)self;

	return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
}
436
437
/*
 * mtd_mii_writereg
 *	Write `val' to PHY register `reg' at MTD_PHYBASE + reg * 2.
 *	As in mtd_mii_readreg(), the `phy' argument is ignored.
 */
void
mtd_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct mtd_softc *sc = (void *)self;

	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
}
447
448
449 int
450 mtd_put(sc, index, m)
451 struct mtd_softc *sc;
452 int index;
453 struct mbuf *m;
454 {
455 int len, tlen;
456 caddr_t buf = sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
457 + index * MTD_TXBUF_SIZE;
458 struct mbuf *n;
459
460 for (tlen = 0; m != NULL; m = n) {
461 len = m->m_len;
462 if (len == 0) {
463 MFREE(m, n);
464 continue;
465 } else if (tlen > MTD_TXBUF_SIZE) {
466 /* XXX FIXME: No idea what to do here. */
467 printf("%s: packet too large!\n",
468 sc->dev.dv_xname);
469 MFREE(m, n);
470 continue;
471 }
472 memcpy(buf, mtod(m, caddr_t), len);
473 buf += len;
474 tlen += len;
475 MFREE(m, n);
476 }
477 sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
478 | MTD_TXD_CONF_IRQC
479 | ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
480 | (tlen & MTD_TXD_CONF_BUFS);
481
482 return tlen;
483 }
484
485
486 void
487 mtd_start(ifp)
488 struct ifnet *ifp;
489 {
490 struct mtd_softc *sc = ifp->if_softc;
491 struct mbuf *m;
492 int len;
493 int first_tx = sc->cur_tx;
494
495 /* Don't transmit when the interface is busy or inactive */
496 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
497 return;
498
499 for (;;) {
500 IF_DEQUEUE(&ifp->if_snd, m);
501
502 if (m == NULL)
503 break;
504
505 #if NBPFILTER > 0
506 if (ifp->if_bpf)
507 bpf_mtap(ifp->if_bpf, m);
508 #endif
509
510 /* Copy mbuf chain into tx buffer */
511 len = mtd_put(sc, sc->cur_tx, m);
512
513 if (sc->cur_tx != first_tx)
514 sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;
515
516 if (++sc->cur_tx >= MTD_NUM_TXD)
517 sc->cur_tx = 0;
518 }
519 /* Mark first & last descriptor */
520 sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;
521
522 if (sc->cur_tx == 0) {
523 sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |=MTD_TXD_CONF_LSD;
524 } else {
525 sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
526 }
527
528 /* Give first descriptor to chip to complete transaction */
529 sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;
530
531 /* Transmit polling demand */
532 MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);
533
534 /* XXX FIXME: Set up a watchdog timer */
535 /* ifp->if_timer = 5; */
536 }
537
538
/*
 * mtd_stop
 *	Stop the interface: disable rx/tx and mask all interrupts.  If
 *	`disable' is set, additionally clear the descriptor base address
 *	registers so the chip forgets the rings.  Marks the interface as
 *	no longer running.
 */
void
mtd_stop (ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver */
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	/* Must do more at disable??... */
	if (disable) {
		/* Delete tx and rx descriptor base adresses */
		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
	}

	/* Cancel watchdog and mark the interface down */
	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
563
564
565 void
566 mtd_watchdog(ifp)
567 struct ifnet *ifp;
568 {
569 struct mtd_softc *sc = ifp->if_softc;
570 int s;
571
572 log(LOG_ERR, "%s: device timeout\n", sc->dev.dv_xname);
573 ++sc->ethercom.ec_if.if_oerrors;
574
575 mtd_stop(ifp, 0);
576
577 s = splnet();
578 mtd_init(ifp);
579 splx(s);
580
581 return;
582 }
583
584
585 int
586 mtd_ioctl(ifp, cmd, data)
587 struct ifnet * ifp;
588 u_long cmd;
589 caddr_t data;
590 {
591 struct mtd_softc *sc = ifp->if_softc;
592 struct ifreq *ifr = (struct ifreq *)data;
593 int s, error = 0;
594
595 s = splnet();
596
597 /* Don't do anything special */
598 switch(cmd) {
599 case SIOCADDMULTI:
600 case SIOCDELMULTI:
601 error = (cmd == SIOCADDMULTI) ?
602 ether_addmulti(ifr, &sc->ethercom) :
603 ether_delmulti(ifr, &sc->ethercom);
604
605 if (error == ENETRESET) {
606 /*
607 * Multicast list has changed; set the hardware
608 * filter accordingly.
609 */
610 mtd_setmulti(sc);
611 error = 0;
612 }
613 break;
614
615 default:
616 error = ether_ioctl(ifp, cmd, data);
617 break;
618 }
619
620 splx(s);
621 return error;
622 }
623
624
/*
 * mtd_get
 *	Copy a received packet of `totlen' bytes out of rx buffer `index'
 *	into a freshly allocated mbuf chain.  Returns the chain, or NULL
 *	if mbuf/cluster allocation fails (any partially built chain is
 *	freed).
 */
struct mbuf *
mtd_get(sc, index, totlen)
	struct mtd_softc *sc;
	int index;
	int totlen;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	caddr_t buf = sc->buf + index * MTD_RXBUF_SIZE;

	/* Packet header mbuf first */
	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		/* Large remainder: try to use a cluster instead */
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

		if (m == m0) {
			/*
			 * Offset the data in the first mbuf so the payload
			 * after the Ethernet header ends up aligned.
			 */
			caddr_t newdata = (caddr_t)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		/* Copy as much as fits into this mbuf */
		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, caddr_t), buf, len);
		buf += len;

		/* Extend the chain if there is more to copy */
		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}
681
682
/*
 * mtd_rxirq
 *	Receive interrupt service: walk the rx ring starting at cur_rx,
 *	processing every descriptor the chip has handed back (OWNER bit
 *	clear).  Good packets are copied into mbufs via mtd_get() and
 *	passed up the stack; erroneous or undersized ones are dropped.
 *	Each descriptor is returned to the chip before moving on.
 *	Always returns 1.
 */
int
mtd_rxirq(sc)
	struct mtd_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	for (; !(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER);) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			printf("%s: received packet with errors\n",
			    sc->dev.dv_xname);
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length (frame length field), minus the CRC */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
		    >> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			printf("%s: invalid packet size %d; dropping\n",
			    sc->dev.dv_xname, len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		/* Copy the packet out of the rx buffer into an mbuf chain */
		m = mtd_get(sc, (sc->cur_rx), len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		/* mtd_get() failed to allocate mbufs; packet is lost */
		if (m == NULL) {
			printf("%s: error pulling packet off interface\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass the packet up */
		(*ifp->if_input)(ifp, m);
	}

	return 1;
}
751
752
753 int
754 mtd_txirq(sc)
755 struct mtd_softc *sc;
756 {
757 struct ifnet *ifp = &sc->ethercom.ec_if;
758
759 /* Clear timeout */
760 ifp->if_timer = 0;
761
762 ifp->if_flags &= ~IFF_OACTIVE;
763 ++ifp->if_opackets;
764
765 /* XXX FIXME If there is some queued, do an mtd_start? */
766
767 return 1;
768 }
769
770
771 int
772 mtd_bufirq(sc)
773 struct mtd_softc *sc;
774 {
775 struct ifnet *ifp = &sc->ethercom.ec_if;
776
777 /* Clear timeout */
778 ifp->if_timer = 0;
779
780 /* XXX FIXME: Do something here to make sure we get some buffers! */
781
782 return 1;
783 }
784
785
/*
 * mtd_irq_h
 *	Interrupt handler.  With chip interrupts masked, repeatedly read
 *	and acknowledge the interrupt status register, logging error
 *	conditions and dispatching rx/tx/buffer service routines, until
 *	no interesting bits remain.  Returns nonzero if any interrupt
 *	was serviced (for the caller's "was it ours" accounting).
 */
int
mtd_irq_h(args)
	void *args;
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	/* Ignore interrupts while the interface is down or detached */
	if (!(ifp->if_flags & IFF_RUNNING) ||
	    !(sc->dev.dv_flags & DVF_ACTIVE))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for(;;) {
		status = MTD_READ_4(sc, MTD_ISR);
#if NRND > 0
		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);
#endif
		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		/* Acknowledge the bits we are about to handle */
		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			printf("%s: receive buffer unavailable\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			printf("%s: receive error\n", sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			printf("%s: transmit buffer unavailable\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if ((status & MTD_ISR_PDF)) {
			printf("%s: parallel detection fault\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			printf("%s: fatal bus error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			printf("%s: target error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			printf("%s: master error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			printf("%s: parity error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		/*
		 * Note TXBUN is both counted as an error above and
		 * serviced here (see the TODO at the top of the file).
		 */
		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);

	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}
883
884
885 void
886 mtd_setmulti(sc)
887 struct mtd_softc *sc;
888 {
889 struct ifnet *ifp = &sc->ethercom.ec_if;
890 u_int32_t rxtx_stat;
891 u_int32_t hash[2] = {0, 0};
892 u_int32_t crc;
893 struct ether_multi *enm;
894 struct ether_multistep step;
895 int mcnt = 0;
896
897 /* Get old status */
898 rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);
899
900 if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
901 rxtx_stat |= MTD_RX_AMULTI;
902 MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
903 MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
904 MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
905 return;
906 }
907
908 ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
909 while (enm != NULL) {
910 /* We need the 6 most significant bits of the CRC */
911 crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
912
913 hash[crc >> 5] |= 1 << (crc & 0xf);
914
915 ++mcnt;
916 ETHER_NEXT_MULTI(step, enm);
917 }
918
919 /* Accept multicast bit needs to be on? */
920 if (mcnt)
921 rxtx_stat |= MTD_RX_AMULTI;
922 else
923 rxtx_stat &= ~MTD_RX_AMULTI;
924
925 /* Write out the hash */
926 MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
927 MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
928 MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
929 }
930
931
932 void
933 mtd_reset(sc)
934 struct mtd_softc *sc;
935 {
936 int i;
937
938 MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);
939
940 /* Reset descriptor status */
941 sc->cur_tx = 0;
942 sc->cur_rx = 0;
943
944 /* Wait until done with reset */
945 for (i = 0; i < MTD_TIMEOUT; ++i) {
946 DELAY(10);
947 if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
948 break;
949 }
950
951 if (i == MTD_TIMEOUT) {
952 printf("%s: reset timed out\n", sc->dev.dv_xname);
953 }
954
955 /* Wait a little so chip can stabilize */
956 DELAY(1000);
957 }
958
959
960 int
961 mtd_mediachange(ifp)
962 struct ifnet *ifp;
963 {
964 struct mtd_softc *sc = ifp->if_softc;
965
966 if (IFM_TYPE(sc->mii.mii_media.ifm_media) != IFM_ETHER)
967 return EINVAL;
968
969 return mii_mediachg(&sc->mii);
970 }
971
972
973 void
974 mtd_mediastatus(ifp, ifmr)
975 struct ifnet *ifp;
976 struct ifmediareq *ifmr;
977 {
978 struct mtd_softc *sc = ifp->if_softc;
979
980 if ((ifp->if_flags & IFF_UP) == 0)
981 return;
982
983 mii_pollstat(&sc->mii);
984 ifmr->ifm_active = sc->mii.mii_media_active;
985 ifmr->ifm_status = sc->mii.mii_media_status;
986 }
987
988
989 void
990 mtd_shutdown (arg)
991 void *arg;
992 {
993 struct mtd_softc *sc = arg;
994 struct ifnet *ifp = &sc->ethercom.ec_if;
995
996 #if NRND > 0
997 rnd_detach_source(&sc->rnd_src);
998 #endif
999 mtd_stop(ifp, 1);
1000 }
1001