/* mtd803.c,v 1.13.16.2 2008/01/09 01:52:56 matt Exp */

/*-
 *
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Peter Bex <Peter.Bex (at) student.kun.nl>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 * - Most importantly, get some bus_dmamap_syncs in the correct places.
 *   I don't have access to a non-i386 machine with PCI, and on i386
 *   the dmamap_syncs don't do anything.
 * - Powerhook for reinitializing the card when resuming after standby.
 * - Watchdog support doesn't work yet; the system crashes.
 * - There seems to be a CardBus version of the card (see the datasheet),
 *   which would probably need a detach function (free buffers, stop
 *   rx/tx, etc.).
 * - When the TXBUN (Tx buffer unavailable) interrupt is enabled, it gets
 *   raised every time a packet is sent.  Strange, since everything works
 *   anyway.
 */

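/*
 * A sketch of where the missing bus_dmamap_sync calls from the TODO above
 * might go, assuming the layout set up in mtd_init_desc (rx ring followed
 * by tx ring in one descriptor map).  Untested, since the syncs are no-ops
 * on i386:
 *
 *	(in mtd_rxirq, before inspecting an rx descriptor:)
 *	bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map,
 *	    sc->cur_rx * sizeof(struct mtd_desc), sizeof(struct mtd_desc),
 *	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *
 *	(after mtd_put has filled a tx buffer, before setting MTD_TXD_OWNER:)
 *	bus_dmamap_sync(sc->dma_tag, sc->buf_dma_map,
 *	    MTD_NUM_RXD * MTD_RXBUF_SIZE + index * MTD_TXBUF_SIZE, len,
 *	    BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map,
 *	    (MTD_NUM_RXD + index) * sizeof(struct mtd_desc),
 *	    sizeof(struct mtd_desc),
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 */
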
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "mtd803.c,v 1.13.16.2 2008/01/09 01:52:56 matt Exp");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <sys/bus.h>

#include <dev/ic/mtd803reg.h>
#include <dev/ic/mtd803var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/*
 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
 * Written by Peter Bex (peter.bex (at) student.kun.nl)
 *
 * Datasheet at: http://www.myson.com.tw or http://www.century-semi.com
 */

#define MTD_READ_1(sc, reg) \
	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_1(sc, reg, data) \
	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_2(sc, reg) \
	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_2(sc, reg, data) \
	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_4(sc, reg, data) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_SETBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
#define MTD_CLRBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))

#define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))

int mtd_mii_readreg(device_t, int, int);
void mtd_mii_writereg(device_t, int, int, int);
void mtd_mii_statchg(device_t);

void mtd_start(struct ifnet *);
void mtd_stop(struct ifnet *, int);
int mtd_ioctl(struct ifnet *, u_long, void *);
void mtd_setmulti(struct mtd_softc *);
void mtd_watchdog(struct ifnet *);

int mtd_init(struct ifnet *);
void mtd_reset(struct mtd_softc *);
void mtd_shutdown(void *);
int mtd_init_desc(struct mtd_softc *);
int mtd_put(struct mtd_softc *, int, struct mbuf *);
struct mbuf *mtd_get(struct mtd_softc *, int, int);

int mtd_rxirq(struct mtd_softc *);
int mtd_txirq(struct mtd_softc *);
int mtd_bufirq(struct mtd_softc *);


int
mtd_config(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		printf("%s: Unable to configure MII\n", sc->dev.dv_xname);
		return 1;
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

#if NRND > 0
	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, sc->dev.dv_xname, RND_TYPE_NET, 0);
#endif

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}


/*
 * mtd_init
 * Must be called at splnet()
 */
int
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length.  Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/* Set descriptor base addresses */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
	    + sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
	    htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}


int
mtd_init_desc(struct mtd_softc *sc)
{
	int drseg, brseg, err, i;
	bus_dma_segment_t dseg, bseg;
	bus_size_t dsize, bsize;

	/* Allocate memory for descriptors */
	dsize = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, dsize, MTD_DMA_ALIGN,
	    0, &dseg, 1, &drseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate DMA buffer, error = %d\n",
		    sc->dev.dv_xname, err);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &dseg, 1, dsize,
	    (void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map DMA buffer, error = %d\n",
		    sc->dev.dv_xname, err);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, dsize, 1,
	    dsize, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->dev.dv_xname, err);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
	    dsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load DMA map, error = %d\n",
		    sc->dev.dv_xname, err);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Allocate memory for the buffers */
	bsize = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, bsize, MTD_DMA_ALIGN,
	    0, &bseg, 1, &brseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate DMA buffer, error = %d\n",
		    sc->dev.dv_xname, err);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &bseg, 1, bsize,
	    &sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map DMA buffer, error = %d\n",
		    sc->dev.dv_xname, err);
		bus_dmamem_free(sc->dma_tag, &bseg, brseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, bsize, 1,
	    bsize, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->dev.dv_xname, err);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, bsize);
		bus_dmamem_free(sc->dma_tag, &bseg, brseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
	    bsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load DMA map, error = %d\n",
		    sc->dev.dv_xname, err);
		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, bsize);
		bus_dmamem_free(sc->dma_tag, &bseg, brseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, dsize);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

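	/*
	 * Layout of the DMA area set up above: the descriptor area holds
	 * the rx ring followed by the tx ring, and the buffer area is
	 * carved up the same way, one buffer per descriptor:
	 *
	 *	desc: [ rx 0 .. rx MTD_NUM_RXD-1 | tx 0 .. tx MTD_NUM_TXD-1 ]
	 *	buf:  [ MTD_NUM_RXD * MTD_RXBUF_SIZE | MTD_NUM_TXD * MTD_TXBUF_SIZE ]
	 *
	 * Each ring is closed by linking its last descriptor back to its
	 * first one.
	 */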
	/* Descriptors are stored as a circular linked list */
	/* Fill in rx descriptors */
	for (i = 0; i < MTD_NUM_RXD; ++i) {
		sc->desc[i].stat = MTD_RXD_OWNER;
		if (i == MTD_NUM_RXD - 1) {	/* Last descriptor */
			/* Link back to first rx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
		} else {
			/* Link forward to next rx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + (i + 1) * sizeof(struct mtd_desc));
		}
		sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
		    + i * MTD_RXBUF_SIZE);
	}

	/* Fill in tx descriptors */
	for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
		sc->desc[i].stat = 0;	/* At least, NOT MTD_TXD_OWNER! */
		if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) {	/* Last descr */
			/* Link back to first tx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + MTD_NUM_RXD * sizeof(struct mtd_desc));
		} else {
			/* Link forward to next tx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + (i + 1) * sizeof(struct mtd_desc));
		}
		/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
		    + MTD_NUM_RXD * MTD_RXBUF_SIZE
		    + (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
	}

	return 0;
}


void
mtd_mii_statchg(device_t self)
{
	/* Should we do something here? :) */
}


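/*
 * The MTD803's PHY registers are mapped into the chip's own register
 * space starting at MTD_PHYBASE, so MII access is a plain register
 * read/write; no MDIO bit-banging is needed and the phy number is
 * ignored.
 */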
int
mtd_mii_readreg(device_t self, int phy, int reg)
{
	struct mtd_softc *sc = device_private(self);

	return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
}


void
mtd_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct mtd_softc *sc = device_private(self);

	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
}


int
mtd_put(struct mtd_softc *sc, int index, struct mbuf *m)
{
	int len, tlen;
	char *buf = (char *)sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
	    + index * MTD_TXBUF_SIZE;
	struct mbuf *n;

	for (tlen = 0; m != NULL; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		} else if (tlen + len > MTD_TXBUF_SIZE) {
			/* XXX FIXME: Copying any more would overflow the
			   tx buffer, so drop the rest of the chain. */
			printf("%s: packet too large! Size = %i\n",
			    sc->dev.dv_xname, tlen + len);
			MFREE(m, n);
			continue;
		}
		memcpy(buf, mtod(m, void *), len);
		buf += len;
		tlen += len;
		MFREE(m, n);
	}
	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
	    | MTD_TXD_CONF_IRQC
	    | ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
	    | (tlen & MTD_TXD_CONF_BUFS);

	return tlen;
}


void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int first_tx = sc->cur_tx;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Copy mbuf chain into tx buffer */
		mtd_put(sc, sc->cur_tx, m);

		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}
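	/*
	 * Note that the first descriptor of the chain has not been handed
	 * to the chip yet: intermediate descriptors got their OWNER bit in
	 * the loop above, and the first one is given away only below, after
	 * the FSD/LSD marks are in place, so the chip cannot start on a
	 * partially built chain.
	 */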
	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0) {
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |= MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}


void
mtd_stop(struct ifnet *ifp, int disable)
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver */
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	/* Must we do more when disabling? */
	if (disable) {
		/* Delete tx and rx descriptor base addresses */
		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}


void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	log(LOG_ERR, "%s: device timeout\n", sc->dev.dv_xname);
	++sc->ethercom.ec_if.if_oerrors;

	mtd_stop(ifp, 0);

	s = splnet();
	mtd_init(ifp);
	splx(s);
}


int
mtd_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			mtd_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}


struct mbuf *
mtd_get(struct mtd_softc *sc, int index, int totlen)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	char *buf = (char *)sc->buf + index * MTD_RXBUF_SIZE;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

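		/*
		 * For the first mbuf, offset m_data so that the payload
		 * following the 14-byte Ethernet header is ALIGN()ed; this
		 * keeps the IP header aligned on strict-alignment machines.
		 */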
		if (m == m0) {
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}


int
mtd_rxirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	while (!(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER)) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			printf("%s: received packet with errors\n",
			    sc->dev.dv_xname);
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
		    >> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			printf("%s: invalid packet size %d; dropping\n",
			    sc->dev.dv_xname, len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		m = mtd_get(sc, sc->cur_rx, len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			printf("%s: error pulling packet off interface\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass the packet up */
		(*ifp->if_input)(ifp, m);
	}

	return 1;
}


int
mtd_txirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	ifp->if_flags &= ~IFF_OACTIVE;
	++ifp->if_opackets;

	/* XXX FIXME If there is some queued, do an mtd_start? */

	return 1;
}


int
mtd_bufirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	/* XXX FIXME: Do something here to make sure we get some buffers! */

	return 1;
}


int
mtd_irq_h(void *args)
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(&sc->dev))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for (;;) {
		status = MTD_READ_4(sc, MTD_ISR);
#if NRND > 0
		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);
#endif
		status &= MTD_ISR_MASK;
		if (!status)		/* We didn't ask for this */
			break;

		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			printf("%s: receive buffer unavailable\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			printf("%s: receive error\n", sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			printf("%s: transmit buffer unavailable\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PDF) {
			printf("%s: parallel detection fault\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			printf("%s: fatal bus error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			printf("%s: target error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			printf("%s: master error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			printf("%s: parity error\n",
			    sc->dev.dv_xname);
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);
	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}


void
mtd_setmulti(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t rxtx_stat;
	u_int32_t hash[2] = {0, 0};
	u_int32_t crc;
	struct ether_multi *enm;
	struct ether_multistep step;
	int mcnt = 0;

	/* Get old status */
	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);

	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
		rxtx_stat |= MTD_RX_AMULTI;
		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
		return;
	}

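	/*
	 * Build the 64-bit multicast hash: the top 6 bits of the big-endian
	 * CRC32 of each address pick one of 64 filter bits.  Bit 5 of that
	 * value selects the register (hash[0]/MAR0 or hash[1]/MAR1) and the
	 * low 5 bits select the bit within it; e.g. a hash of 0x2b
	 * (101011b) sets bit 11 of MAR1.
	 */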
	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
	while (enm != NULL) {
		/* We need the 6 most significant bits of the CRC */
		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		hash[crc >> 5] |= 1 << (crc & 0x1f);

		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Accept multicast bit needs to be on? */
	if (mcnt)
		rxtx_stat |= MTD_RX_AMULTI;
	else
		rxtx_stat &= ~MTD_RX_AMULTI;

	/* Write out the hash */
	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
}


void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);

	/* Reset descriptor status */
	sc->cur_tx = 0;
	sc->cur_rx = 0;

	/* Wait until done with reset */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
			break;
	}

	if (i == MTD_TIMEOUT)
		printf("%s: reset timed out\n", sc->dev.dv_xname);

	/* Wait a little so chip can stabilize */
	DELAY(1000);
}


void
mtd_shutdown(void *arg)
{
	struct mtd_softc *sc = arg;
	struct ifnet *ifp = &sc->ethercom.ec_if;

#if NRND > 0
	rnd_detach_source(&sc->rnd_src);
#endif
	mtd_stop(ifp, 1);
}