/* $NetBSD: mtd803.c,v 1.35 2018/06/26 06:48:00 msaitoh Exp $ */

/*-
 *
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Peter Bex <Peter.Bex (at) student.kun.nl>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * TODO:
 * - Most importantly, insert bus_dmamap_sync calls in the correct places.
 *   The only PCI machine I have access to is an i386, and on i386 the
 *   dmamap_sync operations are no-ops, so I cannot test them.
 * - Powerhook for resuming after standby.
 * - Watchdog handling doesn't work yet; the system crashes.
 * - There appears to be a CardBus version of this card (see the datasheet),
 *   so a detach function may be needed (free buffers, stop rx/tx, etc.).
 * - When the TXBUN (Tx buffer unavailable) interrupt is enabled, it is
 *   raised every time a packet is sent, even though everything works anyway.
 */
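/*
 * A rough, untested sketch of where the missing syncs would likely go,
 * following the usual bus_dma(9) conventions (the offsets below are
 * illustrative names, not verified against this driver's buffer layout):
 *
 *	// before handing a filled tx buffer/descriptor to the chip
 *	bus_dmamap_sync(sc->dma_tag, sc->buf_dma_map, txbuf_offset, tlen,
 *	    BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map, txdesc_offset,
 *	    sizeof(struct mtd_desc),
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *
 *	// before reading a received packet out of an rx buffer
 *	bus_dmamap_sync(sc->dma_tag, sc->buf_dma_map, rxbuf_offset, len,
 *	    BUS_DMASYNC_POSTREAD);
 */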

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.35 2018/06/26 06:48:00 msaitoh Exp $");


#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <sys/bus.h>

#include <dev/ic/mtd803reg.h>
#include <dev/ic/mtd803var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/*
 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
 * Written by Peter Bex (peter.bex (at) student.kun.nl)
 *
 * Datasheet at: http://www.myson.com.tw or http://www.century-semi.com
 */

#define MTD_READ_1(sc, reg) \
	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_1(sc, reg, data) \
	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_2(sc, reg) \
	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_2(sc, reg, data) \
	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_4(sc, reg, data) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_SETBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
#define MTD_CLRBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))

#define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))

int mtd_mii_readreg(device_t, int, int);
void mtd_mii_writereg(device_t, int, int, int);
void mtd_mii_statchg(struct ifnet *);

void mtd_start(struct ifnet *);
void mtd_stop(struct ifnet *, int);
int mtd_ioctl(struct ifnet *, u_long, void *);
void mtd_setmulti(struct mtd_softc *);
void mtd_watchdog(struct ifnet *);

int mtd_init(struct ifnet *);
void mtd_reset(struct mtd_softc *);
void mtd_shutdown(void *);
int mtd_init_desc(struct mtd_softc *);
int mtd_put(struct mtd_softc *, int, struct mbuf *);
struct mbuf *mtd_get(struct mtd_softc *, int, int);

int mtd_rxirq(struct mtd_softc *);
int mtd_txirq(struct mtd_softc *);
int mtd_bufirq(struct mtd_softc *);

int
mtd_config(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, device_xname(sc->dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		aprint_error_dev(sc->dev, "Unable to configure MII\n");
		return 1;
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, device_xname(sc->dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}


/*
 * mtd_init
 * Must be called at splnet()
 */
int
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length. Don't really know what these
	 * mean, so their values are probably suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/* Set descriptor base addresses */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
	    + sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
	    htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

int
mtd_init_desc(struct mtd_softc *sc)
{
	int drseg, brseg, err, i;
	bus_dma_segment_t dseg, bseg;
	bus_size_t desc_size, buf_size;

	/* Allocate memory for descriptors */
	desc_size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, desc_size, MTD_DMA_ALIGN,
	    0, &dseg, 1, &drseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to allocate DMA buffer, error = %d\n", err);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &dseg, 1, desc_size,
	    (void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to map DMA buffer, error = %d\n", err);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, desc_size, 1,
	    desc_size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to create DMA map, error = %d\n", err);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
	    desc_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to load DMA map, error = %d\n", err);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Allocate memory for the buffers */
	buf_size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, buf_size, MTD_DMA_ALIGN,
	    0, &bseg, 1, &brseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to allocate DMA buffer, error = %d\n", err);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &bseg, 1, buf_size,
	    &sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to map DMA buffer, error = %d\n", err);
		bus_dmamem_free(sc->dma_tag, &bseg, brseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, buf_size, 1,
	    buf_size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to create DMA map, error = %d\n", err);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, buf_size);
		bus_dmamem_free(sc->dma_tag, &bseg, brseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
	    buf_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to load DMA map, error = %d\n", err);
		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, buf_size);
		bus_dmamem_free(sc->dma_tag, &bseg, brseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &dseg, drseg);
		return 1;
	}

	/* Descriptors are stored as a circular linked list */
	/* Fill in rx descriptors */
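	/*
	 * Memory layout, as implied by the offsets used throughout this
	 * file (a sketch derived from the code, not from the datasheet):
	 *
	 *	desc[0 .. MTD_NUM_RXD-1]			rx ring
	 *	desc[MTD_NUM_RXD .. MTD_NUM_RXD+MTD_NUM_TXD-1]	tx ring
	 *	buf: MTD_NUM_RXD buffers of MTD_RXBUF_SIZE bytes,
	 *	     then MTD_NUM_TXD buffers of MTD_TXBUF_SIZE bytes
	 */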
	for (i = 0; i < MTD_NUM_RXD; ++i) {
		sc->desc[i].stat = MTD_RXD_OWNER;
		if (i == MTD_NUM_RXD - 1) {	/* Last descriptor */
			/* Link back to first rx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr);
		} else {
			/* Link forward to next rx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + (i + 1) * sizeof(struct mtd_desc));
		}
		sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
		    + i * MTD_RXBUF_SIZE);
	}

	/* Fill in tx descriptors */
	for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) {
		sc->desc[i].stat = 0;	/* At least, NOT MTD_TXD_OWNER! */
		if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) {	/* Last descr */
			/* Link back to first tx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + MTD_NUM_RXD * sizeof(struct mtd_desc));
		} else {
			/* Link forward to next tx descriptor */
			sc->desc[i].next =
			    htole32(sc->desc_dma_map->dm_segs[0].ds_addr
			    + (i + 1) * sizeof(struct mtd_desc));
		}
		/* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */
		/* Set buffer's address */
		sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr
		    + MTD_NUM_RXD * MTD_RXBUF_SIZE
		    + (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE);
	}

	return 0;
}


void
mtd_mii_statchg(struct ifnet *ifp)
{
	/* Should we do something here? :) */
}


int
mtd_mii_readreg(device_t self, int phy, int reg)
{
	struct mtd_softc *sc = device_private(self);

	return MTD_READ_2(sc, MTD_PHYBASE + reg * 2);
}


void
mtd_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct mtd_softc *sc = device_private(self);

	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
}

int
mtd_put(struct mtd_softc *sc, int index, struct mbuf *m)
{
	int len, tlen;
	char *buf = (char *)sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
	    + index * MTD_TXBUF_SIZE;
	struct mbuf *n;

	for (tlen = 0; m != NULL; m = n) {
		len = m->m_len;
		if (len == 0) {
			n = m_free(m);
			continue;
		} else if (tlen + len > MTD_TXBUF_SIZE) {
			/* XXX FIXME: No idea what to do here. */
			aprint_error_dev(sc->dev,
			    "packet too large! Size = %d\n", tlen + len);
			n = m_free(m);
			continue;
		}
		memcpy(buf, mtod(m, void *), len);
		buf += len;
		tlen += len;
		n = m_free(m);
	}
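	/*
	 * Describe the frame to the chip: ask it to append the CRC and pad
	 * short frames, and to raise a completion interrupt. As far as I can
	 * tell from the macro names, the total packet length goes in the
	 * PKTS field and this buffer's length in the BUFS field.
	 */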
	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC
	    | MTD_TXD_CONF_IRQC
	    | ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
	    | (tlen & MTD_TXD_CONF_BUFS);

	return tlen;
}

void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int first_tx = sc->cur_tx;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

		bpf_mtap(ifp, m, BPF_D_OUT);

		/* Copy mbuf chain into tx buffer */
		(void)mtd_put(sc, sc->cur_tx, m);

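		/*
		 * Hand every descriptor except the first one to the chip
		 * right away; the first descriptor is given away last (see
		 * below), so the chip cannot start transmitting a chain
		 * that is still being built.
		 */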
		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}

	/* If nothing was dequeued, don't hand a stale descriptor to chip */
	if (sc->cur_tx == first_tx)
		return;

	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0) {
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |= MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}

void
mtd_stop(struct ifnet *ifp, int disable)
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver */
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	/* Is more needed when fully disabling the device?... */
	if (disable) {
		/* Clear tx and rx descriptor base addresses */
		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->dev));
	++ifp->if_oerrors;

	mtd_stop(ifp, 0);

	s = splnet();
	mtd_init(ifp);
	splx(s);
}

int
mtd_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			mtd_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}

struct mbuf *
mtd_get(struct mtd_softc *sc, int index, int totlen)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	char *buf = (char *)sc->buf + index * MTD_RXBUF_SIZE;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m_set_rcvif(m0, ifp);
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

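		/*
		 * For the first mbuf, push m_data forward so that the
		 * payload that follows the 14-byte Ethernet header lands
		 * on a machine-friendly alignment boundary.
		 */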
		if (m == m0) {
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}

int
mtd_rxirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

	while (!(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER)) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			aprint_error_dev(sc->dev,
			    "received packet with errors\n");
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
		    >> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size (cast so a negative len is caught too) */
		if (len <= (int)sizeof(struct ether_header)) {
			aprint_error_dev(sc->dev,
			    "invalid packet size %d; dropping\n", len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		m = mtd_get(sc, sc->cur_rx, len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			aprint_error_dev(sc->dev,
			    "error pulling packet off interface\n");
			++ifp->if_ierrors;
			continue;
		}

		/* Pass the packet up */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	return 1;
}

int
mtd_txirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	ifp->if_flags &= ~IFF_OACTIVE;
	++ifp->if_opackets;

	/* XXX FIXME If there is some queued, do an mtd_start? */

	return 1;
}


int
mtd_bufirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	/* XXX FIXME: Do something here to make sure we get some buffers! */

	return 1;
}

int
mtd_irq_h(void *args)
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(sc->dev))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for (;;) {
		status = MTD_READ_4(sc, MTD_ISR);

		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);

		status &= MTD_ISR_MASK;
		if (!status)	/* We didn't ask for this */
			break;

		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			aprint_error_dev(sc->dev,
			    "receive buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			aprint_error_dev(sc->dev, "receive error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			aprint_error_dev(sc->dev,
			    "transmit buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PDF) {
			aprint_error_dev(sc->dev,
			    "parallel detection fault\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			aprint_error_dev(sc->dev, "fatal bus error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			aprint_error_dev(sc->dev, "target error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			aprint_error_dev(sc->dev, "master error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			aprint_error_dev(sc->dev, "parity error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);
	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}

void
mtd_setmulti(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t rxtx_stat;
	u_int32_t hash[2] = {0, 0};
	u_int32_t crc;
	struct ether_multi *enm;
	struct ether_multistep step;
	int mcnt = 0;

	/* Get old status */
	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);

	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
		rxtx_stat |= MTD_RX_AMULTI;
		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
		return;
	}

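	/*
	 * The hardware filter is a 64-bit hash table split over MAR0/MAR1.
	 * As implemented below, the top 6 bits of the big-endian CRC of
	 * each address select one of the 64 bits: bit 5 picks the 32-bit
	 * word, bits 0-4 pick the bit within that word. E.g. a CRC whose
	 * top 6 bits are 100011 (0x23) sets bit 3 of hash[1], i.e. MAR1.
	 */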
	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
	while (enm != NULL) {
		/* We need the 6 most significant bits of the CRC */
		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		hash[crc >> 5] |= 1 << (crc & 0x1f);

		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Accept multicast bit needs to be on? */
	if (mcnt)
		rxtx_stat |= MTD_RX_AMULTI;
	else
		rxtx_stat &= ~MTD_RX_AMULTI;

	/* Write out the hash */
	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
}

void
mtd_reset(struct mtd_softc *sc)
{
	int i;

	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);

	/* Reset descriptor status */
	sc->cur_tx = 0;
	sc->cur_rx = 0;

	/* Wait until done with reset */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
			break;
	}

	if (i == MTD_TIMEOUT)
		aprint_error_dev(sc->dev, "reset timed out\n");

	/* Wait a little so chip can stabilize */
	DELAY(1000);
}


void
mtd_shutdown(void *arg)
{
	struct mtd_softc *sc = arg;
	struct ifnet *ifp = &sc->ethercom.ec_if;

	rnd_detach_source(&sc->rnd_src);
	mtd_stop(ifp, 1);
}