/* $NetBSD: mtd803.c,v 1.24 2010/04/05 07:19:35 joerg Exp $ */

/*-
 *
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Peter Bex <Peter.Bex (at) student.kun.nl>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 * - Most importantly, get some bus_dmamap_syncs in the correct places.
 *   I don't have access to a computer with PCI other than i386, and i386
 *   is just such a machine where dmamap_syncs don't do anything.
 * - Powerhook for when resuming after standby.
 * - Watchdog stuff doesn't work yet; the system crashes.
 * - There seems to be a CardBus version of the card. (see datasheet)
 *   Perhaps a detach function is necessary then? (free buffs, stop rx/tx, etc.)
 * - When you enable the TXBUN (Tx buffer unavailable) interrupt, it gets
 *   raised every time a packet is sent, which is strange since everything
 *   works anyway.
 */
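/*
 * A minimal sketch of where the missing syncs from the TODO above would
 * go (untested; the offset assumes the rx ring starts at offset 0 of the
 * descriptor map, as set up in mtd_init_desc below): before inspecting a
 * descriptor the CPU must observe the device's writes, e.g.
 *
 *	bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map,
 *	    sc->cur_rx * sizeof(struct mtd_desc), sizeof(struct mtd_desc),
 *	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *
 * and the matching BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE sync
 * belongs just before handing the descriptor back to the chip.
 */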

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.24 2010/04/05 07:19:35 joerg Exp $");


#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/bus.h>

#include <dev/ic/mtd803reg.h>
#include <dev/ic/mtd803var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/*
 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
 * Written by Peter Bex (peter.bex (at) student.kun.nl)
 *
 * Datasheet at: http://www.myson.com.tw or http://www.century-semi.com
 */

#define MTD_READ_1(sc, reg) \
	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_1(sc, reg, data) \
	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_2(sc, reg) \
	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_2(sc, reg, data) \
	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_4(sc, reg, data) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_SETBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
#define MTD_CLRBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))

#define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))

int mtd_mii_readreg(device_t, int, int);
void mtd_mii_writereg(device_t, int, int, int);
void mtd_mii_statchg(device_t);

void mtd_start(struct ifnet *);
void mtd_stop(struct ifnet *, int);
int mtd_ioctl(struct ifnet *, u_long, void *);
void mtd_setmulti(struct mtd_softc *);
void mtd_watchdog(struct ifnet *);

int mtd_init(struct ifnet *);
void mtd_reset(struct mtd_softc *);
void mtd_shutdown(void *);
int mtd_init_desc(struct mtd_softc *);
int mtd_put(struct mtd_softc *, int, struct mbuf *);
struct mbuf *mtd_get(struct mtd_softc *, int, int);

int mtd_rxirq(struct mtd_softc *);
int mtd_txirq(struct mtd_softc *);
int mtd_bufirq(struct mtd_softc *);

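/*
 * mtd_config:
 *	Read the station address from the chip, set up the ifnet and MII
 *	layers, allocate the descriptor rings and attach the interface.
 *	Returns 0 on success, nonzero if MII or descriptor setup fails.
 */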
int
mtd_config(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	strlcpy(ifp->if_xname, device_xname(&sc->dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		aprint_error_dev(&sc->dev, "Unable to configure MII\n");
		return 1;
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

#if NRND > 0
	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, device_xname(&sc->dev),
	    RND_TYPE_NET, 0);
#endif

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}


/*
 * mtd_init
 *	Must be called at splnet()
 */
int
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/* Set descriptor base addresses */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
	    + sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
	    htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

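/*
 * mtd_init_desc:
 *	Allocate, map and load DMA memory for the descriptor rings (one
 *	allocation, rx descriptors first) and for the packet buffers (a
 *	second allocation), then link each ring into a circular list.
 */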
int
mtd_init_desc(struct mtd_softc *sc)
{
	int rseg, desc_rseg, err, i;
	bus_dma_segment_t seg, desc_seg;
	bus_size_t size, desc_size;

	/*
	 * Allocate memory for descriptors. Remember the size and segment
	 * of this allocation ("desc_*"): "size", "seg" and "rseg" are
	 * reused for the packet buffers below, but the buffer error paths
	 * must still be able to undo the descriptor setup.
	 */
	size = desc_size =
	    (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to allocate DMA buffer, error = %d\n", err);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
	    (void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to map DMA buffer, error = %d\n", err);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
	    size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to create DMA map, error = %d\n", err);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to load DMA map, error = %d\n", err);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);
		return 1;
	}
	/* Save the descriptor allocation; seg/rseg are reused below */
	desc_seg = seg;
	desc_rseg = rseg;

	/* Allocate memory for the buffers */
	size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to allocate DMA buffer, error = %d\n", err);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
	    &sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to map DMA buffer, error = %d\n", err);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
	    size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to create DMA map, error = %d\n", err);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->dev,
		    "unable to load DMA map, error = %d\n", err);
		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Descriptors are stored as a circular linked list */
	/* Fill in rx descriptors */
	for (i = 0; i < MTD_NUM_RXD; ++i) {
		sc->desc[i].stat = MTD_RXD_OWNER;
		if (i == MTD_NUM_RXD - 1) {	/* Last descriptor */
			/* Link back to first rx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
		} else {
			/* Link forward to next rx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + (i + 1) * sizeof(struct mtd_desc));
		}
		sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
		    + i * MTD_RXBUF_SIZE);
	}

	/* Fill in tx descriptors */
	for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
		sc->desc[i].stat = 0;	/* At least, NOT MTD_TXD_OWNER! */
		if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) {	/* Last descr */
			/* Link back to first tx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + MTD_NUM_RXD * sizeof(struct mtd_desc));
		} else {
			/* Link forward to next tx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + (i + 1) * sizeof(struct mtd_desc));
		}
		/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
		    + MTD_NUM_RXD * MTD_RXBUF_SIZE
		    + (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
	}

	return 0;
}

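/*
 * MII access. The MTD803's internal PHY registers are memory-mapped
 * starting at MTD_PHYBASE, so the usual MDIO protocol (and hence the
 * phy address argument) is not used.
 */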
void
mtd_mii_statchg(device_t self)
{
	/* Should we do something here? :) */
}


int
mtd_mii_readreg(device_t self, int phy, int reg)
{
	struct mtd_softc *sc = device_private(self);

	return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
}


void
mtd_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct mtd_softc *sc = device_private(self);

	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
}

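/*
 * mtd_put:
 *	Copy an mbuf chain into the tx buffer belonging to descriptor
 *	"index" and fill in that descriptor's config word. Frees the
 *	chain and returns the number of bytes copied.
 */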
int
mtd_put(struct mtd_softc *sc, int index, struct mbuf *m)
{
	int len, tlen;
	char *buf = (char *)sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
	    + index * MTD_TXBUF_SIZE;
	struct mbuf *n;

	for (tlen = 0; m != NULL; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		} else if (tlen + len > MTD_TXBUF_SIZE) {
			/* Would overflow the tx buffer; drop the rest. */
			/* XXX FIXME: No idea what to do here. */
			aprint_error_dev(&sc->dev,
			    "packet too large! Size = %i\n", tlen + len);
			MFREE(m, n);
			continue;
		}
		memcpy(buf, mtod(m, void *), len);
		buf += len;
		tlen += len;
		MFREE(m, n);
	}
	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
	    | MTD_TXD_CONF_IRQC
	    | ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
	    | (tlen & MTD_TXD_CONF_BUFS);

	return tlen;
}

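/*
 * mtd_start:
 *	Transmit routine: drain the send queue, copying each packet into
 *	its tx buffer, then hand the first descriptor to the chip and
 *	issue a polling demand.
 */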
void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int len;
	int sent = 0;
	int first_tx = sc->cur_tx;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

		bpf_mtap(ifp, m);

		/* Copy mbuf chain into tx buffer */
		len = mtd_put(sc, sc->cur_tx, m);
		sent = 1;

		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}

	/* Don't hand an empty descriptor chain to the chip */
	if (!sent)
		return;

	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0) {
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |= MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}

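/*
 * mtd_stop:
 *	Halt the transmitter and receiver and mask all interrupts. If
 *	"disable" is set, also clear the descriptor base addresses.
 */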
void
mtd_stop(struct ifnet *ifp, int disable)
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver */
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	/* Must do more at disable??... */
	if (disable) {
		/* Delete tx and rx descriptor base addresses */
		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

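/*
 * mtd_watchdog:
 *	Device timeout handler: log the timeout, count it as an output
 *	error and reinitialize the interface.
 */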
void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->dev));
	++sc->ethercom.ec_if.if_oerrors;

	mtd_stop(ifp, 0);

	s = splnet();
	mtd_init(ifp);
	splx(s);
}

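/*
 * mtd_ioctl:
 *	Handle interface ioctls. Everything is delegated to ether_ioctl();
 *	ENETRESET only requires reprogramming the multicast filter.
 */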
int
mtd_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			mtd_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}

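/*
 * mtd_get:
 *	Copy a received packet of "totlen" bytes out of the rx buffer
 *	belonging to descriptor "index" into a freshly allocated mbuf
 *	chain. Returns NULL if mbuf allocation fails.
 */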
struct mbuf *
mtd_get(struct mtd_softc *sc, int index, int totlen)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	char *buf = (char *)sc->buf + index * MTD_RXBUF_SIZE;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

		if (m == m0) {
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}

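/*
 * mtd_rxirq:
 *	Receive interrupt handler: walk the rx ring, passing each good
 *	packet up the stack and returning every descriptor to the chip.
 */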
int
mtd_rxirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	while (!(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER)) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			aprint_error_dev(&sc->dev,
			    "received packet with errors\n");
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
		    >> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size (cast: len may have gone negative) */
		if (len <= (int)sizeof(struct ether_header)) {
			aprint_error_dev(&sc->dev,
			    "invalid packet size %d; dropping\n", len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		m = mtd_get(sc, (sc->cur_rx), len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			aprint_error_dev(&sc->dev,
			    "error pulling packet off interface\n");
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

		bpf_mtap(ifp, m);
		/* Pass the packet up */
		(*ifp->if_input)(ifp, m);
	}

	return 1;
}

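/*
 * mtd_txirq:
 *	Transmit-complete interrupt handler: clear the watchdog timer,
 *	mark the interface ready for more output and count the packet.
 */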
int
mtd_txirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	ifp->if_flags &= ~IFF_OACTIVE;
	++ifp->if_opackets;

	/* XXX FIXME: If there is some queued, do an mtd_start? */

	return 1;
}


int
mtd_bufirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	/* XXX FIXME: Do something here to make sure we get some buffers! */

	return 1;
}

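/*
 * mtd_irq_h:
 *	Main interrupt handler: ack and dispatch every pending interrupt
 *	source until the status register reads clear. Returns nonzero if
 *	the interrupt was ours.
 */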
int
mtd_irq_h(void *args)
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(&sc->dev))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for (;;) {
		status = MTD_READ_4(sc, MTD_ISR);
#if NRND > 0
		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);
#endif
		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			aprint_error_dev(&sc->dev,
			    "receive buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			aprint_error_dev(&sc->dev, "receive error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			aprint_error_dev(&sc->dev,
			    "transmit buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PDF) {
			aprint_error_dev(&sc->dev,
			    "parallel detection fault\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			aprint_error_dev(&sc->dev, "fatal bus error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			aprint_error_dev(&sc->dev, "target error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			aprint_error_dev(&sc->dev, "master error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			aprint_error_dev(&sc->dev, "parity error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);
	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}

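/*
 * mtd_setmulti:
 *	Program the 64-bit multicast hash filter (MAR0/MAR1) from the
 *	interface's multicast list, or accept all multicasts when
 *	IFF_ALLMULTI or IFF_PROMISC is set.
 */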
void
mtd_setmulti(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t rxtx_stat;
	u_int32_t hash[2] = {0, 0};
	u_int32_t crc;
	struct ether_multi *enm;
	struct ether_multistep step;
	int mcnt = 0;

	/* Get old status */
	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);

	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
		rxtx_stat |= MTD_RX_AMULTI;
		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
		return;
	}

	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
	while (enm != NULL) {
		/* We need the 6 most significant bits of the CRC */
		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		/* The top bit selects the MAR word, the low 5 the bit */
		hash[crc >> 5] |= 1 << (crc & 0x1f);

		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Accept multicast bit needs to be on? */
	if (mcnt)
		rxtx_stat |= MTD_RX_AMULTI;
	else
		rxtx_stat &= ~MTD_RX_AMULTI;

	/* Write out the hash */
	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
}

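/*
 * mtd_reset:
 *	Issue a software reset, clear the ring indices and busy-wait
 *	(up to MTD_TIMEOUT * 10us) for the chip to clear the reset bit.
 */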
void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);

	/* Reset descriptor status */
	sc->cur_tx = 0;
	sc->cur_rx = 0;

	/* Wait until done with reset */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
			break;
	}

	if (i == MTD_TIMEOUT)
		aprint_error_dev(&sc->dev, "reset timed out\n");

	/* Wait a little so chip can stabilize */
	DELAY(1000);
}

void
mtd_shutdown(void *arg)
{
	struct mtd_softc *sc = arg;
	struct ifnet *ifp = &sc->ethercom.ec_if;

#if NRND > 0
	rnd_detach_source(&sc->rnd_src);
#endif
	mtd_stop(ifp, 1);
}