1 /* $NetBSD: sbmac.c,v 1.35 2009/12/14 00:46:08 matt Exp $ */
2
3 /*
4 * Copyright 2000, 2001, 2004
5 * Broadcom Corporation. All rights reserved.
6 *
7 * This software is furnished under license and may be used and copied only
8 * in accordance with the following terms and conditions. Subject to these
9 * conditions, you may download, copy, install, use, modify and distribute
10 * modified or unmodified copies of this software in source and/or binary
11 * form. No title or ownership is transferred hereby.
12 *
13 * 1) Any source code used, modified or distributed must reproduce and
14 * retain this copyright notice and list of conditions as they appear in
15 * the source file.
16 *
17 * 2) No right is granted to use any trade name, trademark, or logo of
18 * Broadcom Corporation. The "Broadcom Corporation" name may not be
19 * used to endorse or promote products derived from this software
20 * without the prior written permission of Broadcom Corporation.
21 *
22 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
25 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
26 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
27 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: sbmac.c,v 1.35 2009/12/14 00:46:08 matt Exp $");
37
38 #include "bpfilter.h"
39 #include "opt_inet.h"
40 #include "opt_ns.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sockio.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/socket.h>
49 #include <sys/queue.h>
50 #include <sys/device.h>
51
52 #include <net/if.h>
53 #include <net/if_arp.h>
54 #include <net/if_ether.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57
58 #if NBPFILTER > 0
59 #include <net/bpf.h>
60 #endif
61
62 #ifdef INET
63 #include <netinet/in.h>
64 #include <netinet/if_inarp.h>
65 #endif
66
67 #ifdef NS
68 #include <netns/ns.h>
69 #include <netns/ns_if.h>
70 #endif
71
72 #include <machine/locore.h>
73
74 #include "sbobiovar.h"
75
76 #include <dev/mii/mii.h>
77 #include <dev/mii/miivar.h>
78 #include <dev/mii/mii_bitbang.h>
79
80 #include <mips/sibyte/include/sb1250_defs.h>
81 #include <mips/sibyte/include/sb1250_regs.h>
82 #include <mips/sibyte/include/sb1250_mac.h>
83 #include <mips/sibyte/include/sb1250_dma.h>
84 #include <mips/sibyte/include/sb1250_scd.h>
85
86 /* Simple types */
87
88 typedef u_long sbmac_port_t;
89 typedef uint64_t sbmac_physaddr_t;
90 typedef uint64_t sbmac_enetaddr_t;
91
92 typedef enum { sbmac_speed_auto, sbmac_speed_10,
93 sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
94
95 typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
96 sbmac_duplex_full } sbmac_duplex_t;
97
98 typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
99 sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
100
101 typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
102 sbmac_state_broken } sbmac_state_t;
103
104
105 /* Macros */
106
107 #define SBMAC_EVENT_COUNTERS /* Include counters for various events */
108
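/*
 * SBDMA_NEXTBUF advances a descriptor ring index using a power-of-two
 * mask: e.g. with 256 descriptors sbdma_dscr_mask is 0xff, so index 255
 * wraps back to 0. The ring sizes below must therefore be powers of two.
 */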
109 #define SBDMA_NEXTBUF(d, f) (((f) + 1) & (d)->sbdma_dscr_mask)
110
111 #define CACHELINESIZE 32
112 #define NUMCACHEBLKS(x) (((x)+CACHELINESIZE-1)/CACHELINESIZE)
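/*
 * NUMCACHEBLKS rounds a byte count up to 32-byte cache blocks, the unit
 * used by the DMA descriptors' buffer-size fields: for example,
 * NUMCACHEBLKS(1500) == 47 blocks (1504 bytes).
 */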
113 #define KMALLOC(x) malloc((x), M_DEVBUF, M_DONTWAIT)
114 #define KVTOPHYS(x) kvtophys((vaddr_t)(x))
115
116 #ifdef SBMACDEBUG
117 #define dprintf(x) printf x
118 #else
119 #define dprintf(x)
120 #endif
121
122 #define SBMAC_READCSR(t) mips3_ld((volatile uint64_t *) (t))
123 #define SBMAC_WRITECSR(t, v) mips3_sd((volatile uint64_t *) (t), (v))
124
125 #define PKSEG1(x) ((sbmac_port_t) MIPS_PHYS_TO_KSEG1(x))
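/*
 * PKSEG1 turns a physical CSR address into an uncached KSEG1 virtual
 * address, so SBMAC_READCSR/SBMAC_WRITECSR above access the device
 * registers directly with 64-bit loads and stores.
 */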
126
127 /* These are limited to fit within one virtual page, and must be 2**N. */
128 #define SBMAC_MAX_TXDESCR 256 /* should be 1024 */
129 #define SBMAC_MAX_RXDESCR 256 /* should be 512 */
130
131 #define ETHER_ALIGN 2
132
133 /* DMA Descriptor structure */
134
135 typedef struct sbdmadscr_s {
136 uint64_t dscr_a;
137 uint64_t dscr_b;
138 } sbdmadscr_t;
139
140
141 /* DMA Controller structure */
142
143 typedef struct sbmacdma_s {
144
145 /*
146 * This stuff is used to identify the channel and the registers
147 * associated with it.
148 */
149
150 struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */
151 int sbdma_channel; /* channel number */
152 int sbdma_txdir; /* direction (1=transmit) */
153 int sbdma_maxdescr; /* total # of descriptors in ring */
154 sbmac_port_t sbdma_config0; /* DMA config register 0 */
155 sbmac_port_t sbdma_config1; /* DMA config register 1 */
156 sbmac_port_t sbdma_dscrbase; /* Descriptor base address */
157 sbmac_port_t sbdma_dscrcnt; /* Descriptor count register */
158 sbmac_port_t sbdma_curdscr; /* current descriptor address */
159
160 /*
161 * This stuff is for maintenance of the ring
162 */
163 sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */
164 struct mbuf **sbdma_ctxtable; /* context table, one per descr */
165 unsigned int sbdma_dscr_mask; /* sbdma_maxdescr - 1 */
166 paddr_t sbdma_dscrtable_phys; /* and also the phys addr */
167 unsigned int sbdma_add_index; /* next dscr for sw to add */
168 unsigned int sbdma_rem_index; /* next dscr for sw to remove */
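	/*
	 * The ring is treated as full when advancing sbdma_add_index
	 * would make it equal to sbdma_rem_index, so one descriptor
	 * slot is always left unused.
	 */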
169 } sbmacdma_t;
170
171
172 /* Ethernet softc structure */
173
174 struct sbmac_softc {
175
176 /*
177 * NetBSD-specific things
178 */
179 struct device sc_dev; /* base device (must be first) */
180 struct ethercom sc_ethercom; /* Ethernet common part */
181 struct mii_data sc_mii;
182 struct callout sc_tick_ch;
183
184 int sbm_if_flags;
185 void *sbm_intrhand;
186
187 /*
188 * Controller-specific things
189 */
190
191 sbmac_port_t sbm_base; /* MAC's base address */
192 sbmac_state_t sbm_state; /* current state */
193
194 sbmac_port_t sbm_macenable; /* MAC Enable Register */
195 sbmac_port_t sbm_maccfg; /* MAC Configuration Register */
196 sbmac_port_t sbm_fifocfg; /* FIFO configuration register */
197 sbmac_port_t sbm_framecfg; /* Frame configuration register */
198 sbmac_port_t sbm_rxfilter; /* receive filter register */
199 sbmac_port_t sbm_isr; /* Interrupt status register */
200 sbmac_port_t sbm_imr; /* Interrupt mask register */
201
202 sbmac_speed_t sbm_speed; /* current speed */
203 sbmac_duplex_t sbm_duplex; /* current duplex */
204 sbmac_fc_t sbm_fc; /* current flow control setting */
205 int sbm_rxflags; /* received packet flags */
206
207 u_char sbm_hwaddr[ETHER_ADDR_LEN];
208
209 sbmacdma_t sbm_txdma; /* for now, only use channel 0 */
210 sbmacdma_t sbm_rxdma;
211
212 int sbm_pass3_dma; /* chip has pass3 SOC DMA features */
213
214 #ifdef SBMAC_EVENT_COUNTERS
215 struct evcnt sbm_ev_rxintr; /* Rx interrupts */
216 struct evcnt sbm_ev_txintr; /* Tx interrupts */
217 struct evcnt sbm_ev_txdrop; /* Tx dropped: mbuf allocation failed */
218 struct evcnt sbm_ev_txstall; /* Tx stalled: no free descriptors */
219
220 struct evcnt sbm_ev_txsplit; /* pass3 Tx split mbuf */
221 struct evcnt sbm_ev_txkeep; /* pass3 Tx didn't split mbuf */
222 #endif
223 };
224
225
226 #ifdef SBMAC_EVENT_COUNTERS
227 #define SBMAC_EVCNT_INCR(ev) (ev).ev_count++
228 #else
229 #define SBMAC_EVCNT_INCR(ev) do { /* nothing */ } while (0)
230 #endif
231
232 /* Externs */
233
234 extern paddr_t kvtophys(vaddr_t);
235
236 /* Prototypes */
237
238 static void sbdma_initctx(sbmacdma_t *d, struct sbmac_softc *s, int chan,
239 int txrx, int maxdescr);
240 static void sbdma_channel_start(sbmacdma_t *d);
241 static int sbdma_add_rcvbuffer(sbmacdma_t *d, struct mbuf *m);
242 static int sbdma_add_txbuffer(sbmacdma_t *d, struct mbuf *m);
243 static void sbdma_emptyring(sbmacdma_t *d);
244 static void sbdma_fillring(sbmacdma_t *d);
245 static void sbdma_rx_process(struct sbmac_softc *sc, sbmacdma_t *d);
246 static void sbdma_tx_process(struct sbmac_softc *sc, sbmacdma_t *d);
247 static void sbmac_initctx(struct sbmac_softc *s);
248 static void sbmac_channel_start(struct sbmac_softc *s);
249 static void sbmac_channel_stop(struct sbmac_softc *s);
250 static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,
251 sbmac_state_t);
252 static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff);
253 static void sbmac_init_and_start(struct sbmac_softc *sc);
254 static uint64_t sbmac_addr2reg(u_char *ptr);
255 static void sbmac_intr(void *xsc, uint32_t status, vaddr_t pc);
256 static void sbmac_start(struct ifnet *ifp);
257 static void sbmac_setmulti(struct sbmac_softc *sc);
258 static int sbmac_ether_ioctl(struct ifnet *ifp, u_long cmd, void *data);
259 static int sbmac_ioctl(struct ifnet *, u_long, void *);
260 static void sbmac_watchdog(struct ifnet *ifp);
261 static int sbmac_match(struct device *parent, struct cfdata *match, void *aux);
262 static void sbmac_attach(struct device *parent, struct device *self, void *aux);
263 static int sbmac_set_speed(struct sbmac_softc *s, sbmac_speed_t speed);
264 static int sbmac_set_duplex(struct sbmac_softc *s, sbmac_duplex_t duplex,
265 sbmac_fc_t fc);
266 static void sbmac_tick(void *arg);
267
268
269 /* Globals */
270
271 CFATTACH_DECL(sbmac, sizeof(struct sbmac_softc),
272 sbmac_match, sbmac_attach, NULL, NULL);
273
274 static uint32_t sbmac_mii_bitbang_read(struct device *self);
275 static void sbmac_mii_bitbang_write(struct device *self, uint32_t val);
276
277 static const struct mii_bitbang_ops sbmac_mii_bitbang_ops = {
278 sbmac_mii_bitbang_read,
279 sbmac_mii_bitbang_write,
280 {
281 (uint32_t)M_MAC_MDIO_OUT, /* MII_BIT_MDO */
282 (uint32_t)M_MAC_MDIO_IN, /* MII_BIT_MDI */
283 (uint32_t)M_MAC_MDC, /* MII_BIT_MDC */
284 0, /* MII_BIT_DIR_HOST_PHY */
285 (uint32_t)M_MAC_MDIO_DIR /* MII_BIT_DIR_PHY_HOST */
286 }
287 };
288
289 static uint32_t
290 sbmac_mii_bitbang_read(struct device *self)
291 {
292 struct sbmac_softc *sc = (void *) self;
293 sbmac_port_t reg;
294
295 reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
296 return (uint32_t) SBMAC_READCSR(reg);
297 }
298
299 static void
300 sbmac_mii_bitbang_write(struct device *self, uint32_t val)
301 {
302 struct sbmac_softc *sc = (void *) self;
303 sbmac_port_t reg;
304
305 reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
306
307 SBMAC_WRITECSR(reg, (val &
308 (M_MAC_MDC|M_MAC_MDIO_DIR|M_MAC_MDIO_OUT|M_MAC_MDIO_IN)));
309 }
310
311 /*
312 * Read a PHY register through the MII.
313 */
314 static int
315 sbmac_mii_readreg(struct device *self, int phy, int reg)
316 {
317
318 return (mii_bitbang_readreg(self, &sbmac_mii_bitbang_ops, phy, reg));
319 }
320
321 /*
322 * Write to a PHY register through the MII.
323 */
324 static void
325 sbmac_mii_writereg(struct device *self, int phy, int reg, int val)
326 {
327
328 mii_bitbang_writereg(self, &sbmac_mii_bitbang_ops, phy, reg, val);
329 }
330
331 static void
332 sbmac_mii_statchg(struct device *self)
333 {
334 struct sbmac_softc *sc = (struct sbmac_softc *)self;
335 sbmac_state_t oldstate;
336
337 /* Stop the MAC in preparation for changing all of the parameters. */
338 oldstate = sbmac_set_channel_state(sc, sbmac_state_off);
339
340 switch (sc->sc_ethercom.ec_if.if_baudrate) {
341 default: /* if autonegotiation fails, assume 10Mbit */
342 case IF_Mbps(10):
343 sbmac_set_speed(sc, sbmac_speed_10);
344 break;
345
346 case IF_Mbps(100):
347 sbmac_set_speed(sc, sbmac_speed_100);
348 break;
349
350 case IF_Mbps(1000):
351 sbmac_set_speed(sc, sbmac_speed_1000);
352 break;
353 }
354
355 if (sc->sc_mii.mii_media_active & IFM_FDX) {
356 /* Configure for full-duplex */
357 /* XXX: is flow control right for 10, 100? */
358 sbmac_set_duplex(sc, sbmac_duplex_full, sbmac_fc_frame);
359 } else {
360 /* Configure for half-duplex */
361 /* XXX: is flow control right? */
362 sbmac_set_duplex(sc, sbmac_duplex_half, sbmac_fc_disabled);
363 }
364
365 /* And put it back into its former state. */
366 sbmac_set_channel_state(sc, oldstate);
367 }
368
369 /*
370 * SBDMA_INITCTX(d, s, chan, txrx, maxdescr)
371 *
372 * Initialize a DMA channel context. Since there are potentially
373 * eight DMA channels per MAC, it's nice to do this in a standard
374 * way.
375 *
376 * Input parameters:
377 * d - sbmacdma_t structure (DMA channel context)
378 * s - sbmac_softc structure (pointer to a MAC)
379 * chan - channel number (0..1 right now)
380 * txrx - Identifies DMA_TX or DMA_RX for channel direction
381 * maxdescr - number of descriptors
382 *
383 * Return value:
384 * nothing
385 */
386
387 static void
388 sbdma_initctx(sbmacdma_t *d, struct sbmac_softc *s, int chan, int txrx,
389 int maxdescr)
390 {
391 /*
392 * Save away interesting stuff in the structure
393 */
394
395 d->sbdma_eth = s;
396 d->sbdma_channel = chan;
397 d->sbdma_txdir = txrx;
398
399 /*
400 * initialize register pointers
401 */
402
403 d->sbdma_config0 = PKSEG1(s->sbm_base +
404 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG0));
405 d->sbdma_config1 = PKSEG1(s->sbm_base +
406 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG1));
407 d->sbdma_dscrbase = PKSEG1(s->sbm_base +
408 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_BASE));
409 d->sbdma_dscrcnt = PKSEG1(s->sbm_base +
410 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_CNT));
411 d->sbdma_curdscr = PKSEG1(s->sbm_base +
412 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CUR_DSCRADDR));
413
414 /*
415 * Allocate memory for the ring
416 */
417
418 d->sbdma_maxdescr = maxdescr;
419 d->sbdma_dscr_mask = d->sbdma_maxdescr - 1;
420
421 d->sbdma_dscrtable = (sbdmadscr_t *)
422 KMALLOC(d->sbdma_maxdescr * sizeof(sbdmadscr_t));
423
424 memset(d->sbdma_dscrtable, 0, d->sbdma_maxdescr*sizeof(sbdmadscr_t));
425
426 d->sbdma_dscrtable_phys = KVTOPHYS(d->sbdma_dscrtable);
427
428 /*
429 * And context table
430 */
431
432 d->sbdma_ctxtable = (struct mbuf **)
433 KMALLOC(d->sbdma_maxdescr*sizeof(struct mbuf *));
434
435 memset(d->sbdma_ctxtable, 0, d->sbdma_maxdescr*sizeof(struct mbuf *));
436 }
437
438 /*
439 * SBDMA_CHANNEL_START(d)
440 *
441 * Initialize the hardware registers for a DMA channel.
442 *
443 * Input parameters:
444 * d - DMA channel to init (context must be previously init'd)
445 *
446 * Return value:
447 * nothing
448 */
449
450 static void
451 sbdma_channel_start(sbmacdma_t *d)
452 {
453 /*
454 * Turn on the DMA channel
455 */
456
457 SBMAC_WRITECSR(d->sbdma_config1, 0);
458
459 SBMAC_WRITECSR(d->sbdma_dscrbase, d->sbdma_dscrtable_phys);
460
461 SBMAC_WRITECSR(d->sbdma_config0, V_DMA_RINGSZ(d->sbdma_maxdescr) | 0);
462
463 /*
464 * Initialize ring pointers
465 */
466
467 d->sbdma_add_index = 0;
468 d->sbdma_rem_index = 0;
469 }
470
471 /*
472 * SBDMA_ADD_RCVBUFFER(d, m)
473 *
474 * Add a buffer to the specified DMA channel. For receive channels,
475 * this queues a buffer for inbound packets.
476 *
477 * Input parameters:
478 * d - DMA channel descriptor
479 * m - mbuf to add, or NULL if we should allocate one.
480 *
481 * Return value:
482 * 0 if buffer added successfully
483 * ENOSPC or ENOBUFS if the buffer could not be added
484 */
485
486 static int
487 sbdma_add_rcvbuffer(sbmacdma_t *d, struct mbuf *m)
488 {
489 unsigned int dsc, nextdsc;
490 struct mbuf *m_new = NULL;
491
492 /* get pointer to our current place in the ring */
493
494 dsc = d->sbdma_add_index;
495 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
496
497 /*
498 * figure out if the ring is full - if the next descriptor
499 * is the same as the one that we're going to remove from
500 * the ring, the ring is full
501 */
502
503 if (nextdsc == d->sbdma_rem_index)
504 return ENOSPC;
505
506 /*
507 * Allocate an mbuf if we don't already have one.
508 * If we do have an mbuf, reset it so that it's empty.
509 */
510
511 if (m == NULL) {
512 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
513 if (m_new == NULL) {
514 printf("%s: mbuf allocation failed\n",
515 d->sbdma_eth->sc_dev.dv_xname);
516 return ENOBUFS;
517 }
518
519 MCLGET(m_new, M_DONTWAIT);
520 if (!(m_new->m_flags & M_EXT)) {
521 printf("%s: mbuf cluster allocation failed\n",
522 d->sbdma_eth->sc_dev.dv_xname);
523 m_freem(m_new);
524 return ENOBUFS;
525 }
526
527 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
528 m_adj(m_new, ETHER_ALIGN);
529 } else {
530 m_new = m;
531 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
532 m_new->m_data = m_new->m_ext.ext_buf;
533 m_adj(m_new, ETHER_ALIGN);
534 }
535
536 /*
537 * fill in the descriptor
538 */
539
540 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new, void *)) |
541 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(ETHER_ALIGN + m_new->m_len)) |
542 M_DMA_DSCRA_INTERRUPT;
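	/*
	 * ETHER_ALIGN is added back into the size so that the cache-block
	 * count also covers the partial block in front of the 2-byte-offset
	 * payload (cf. the XXX note in sbdma_add_txbuffer).
	 */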
543
544 /* receiving: no options */
545 d->sbdma_dscrtable[dsc].dscr_b = 0;
546
547 /*
548 * fill in the context
549 */
550
551 d->sbdma_ctxtable[dsc] = m_new;
552
553 /*
554 * point at next packet
555 */
556
557 d->sbdma_add_index = nextdsc;
558
559 /*
560 * Give the buffer to the DMA engine.
561 */
562
563 SBMAC_WRITECSR(d->sbdma_dscrcnt, 1);
564
565 return 0; /* we did it */
566 }
567
568 /*
569 * SBDMA_ADD_TXBUFFER(d, m)
570 *
571 * Add a transmit buffer to the specified DMA channel, causing a
572 * transmit to start.
573 *
574 * Input parameters:
575 * d - DMA channel descriptor
576 * m - mbuf to add
577 *
578 * Return value:
579 * 0 transmit queued successfully
580 * otherwise error code
581 */
582
583 static int
584 sbdma_add_txbuffer(sbmacdma_t *d, struct mbuf *m)
585 {
586 unsigned int dsc, nextdsc, prevdsc, origdesc;
587 int length;
588 int num_mbufs = 0;
589 struct sbmac_softc *sc = d->sbdma_eth;
590
591 /* get pointer to our current place in the ring */
592
593 dsc = d->sbdma_add_index;
594 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
595
596 /*
597 * figure out if the ring is full - if the next descriptor
598 * is the same as the one that we're going to remove from
599 * the ring, the ring is full
600 */
601
602 if (nextdsc == d->sbdma_rem_index) {
603 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
604 return ENOSPC;
605 }
606
607 /*
608 * PASS3 parts do not have the buffer alignment restriction,
609 * so there is no need to copy/coalesce into a new mbuf. They
610 * also use a different descriptor format.
611 */
612 if (sc->sbm_pass3_dma) {
613 struct mbuf *m_temp = NULL;
614
615 /*
616 * Loop thru this mbuf record.
617 * The head mbuf will have SOP set.
618 */
619 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m,void *)) |
620 M_DMA_ETHTX_SOP;
621
622 /*
623 * transmitting: set outbound options, buffer A size (+ low 5
624 * bits of start addr), and packet length.
625 */
626 d->sbdma_dscrtable[dsc].dscr_b =
627 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
628 V_DMA_DSCRB_A_SIZE((m->m_len +
629 (mtod(m,uintptr_t) & 0x0000001F))) |
630 V_DMA_DSCRB_PKT_SIZE_MSB((m->m_pkthdr.len & 0xc000) >> 14) |
631 V_DMA_DSCRB_PKT_SIZE(m->m_pkthdr.len & 0x3fff);
632
633 d->sbdma_add_index = nextdsc;
634 origdesc = prevdsc = dsc;
635 dsc = d->sbdma_add_index;
636 num_mbufs++;
637
638 /* Start with first non-head mbuf */
639 for(m_temp = m->m_next; m_temp != 0; m_temp = m_temp->m_next) {
640 int len, next_len;
641 uint64_t addr;
642
643 if (m_temp->m_len == 0)
644 continue; /* Skip 0-length mbufs */
645
646 len = m_temp->m_len;
647 addr = KVTOPHYS(mtod(m_temp, void *));
648
649 /*
650 * Check to see if the mbuf spans a page boundary. If
651 * it does, and the physical pages behind the virtual
652 * pages are not contiguous, split it so that each
653 * virtual page uses its own Tx descriptor.
654 */
655 if (trunc_page(addr) != trunc_page(addr + len - 1)) {
656 next_len = (addr + len) - trunc_page(addr + len);
657
658 len -= next_len;
659
660 if (addr + len ==
661 KVTOPHYS(mtod(m_temp, char *) + len)) {
662 SBMAC_EVCNT_INCR(sc->sbm_ev_txkeep);
663 len += next_len;
664 next_len = 0;
665 } else {
666 SBMAC_EVCNT_INCR(sc->sbm_ev_txsplit);
667 }
668 } else {
669 next_len = 0;
670 }
671
672 again:
673 /*
674 * fill in the descriptor
675 */
676 d->sbdma_dscrtable[dsc].dscr_a = addr;
677
678 /*
679 * transmitting: set outbound options, buffer A
680 * size (+ low 5 bits of start addr)
681 */
682 d->sbdma_dscrtable[dsc].dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_NOTSOP) |
683 V_DMA_DSCRB_A_SIZE((len + (addr & 0x0000001F)));
684
685 d->sbdma_ctxtable[dsc] = NULL;
686
687 /*
688 * point at next descriptor
689 */
690 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
691 if (nextdsc == d->sbdma_rem_index) {
692 d->sbdma_add_index = origdesc;
693 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
694 return ENOSPC;
695 }
696 d->sbdma_add_index = nextdsc;
697
698 prevdsc = dsc;
699 dsc = d->sbdma_add_index;
700 num_mbufs++;
701
702 if (next_len != 0) {
703 addr = KVTOPHYS(mtod(m_temp, char *) + len);
704 len = next_len;
705
706 next_len = 0;
707 goto again;
708 }
709
710 }
711 /* Set head mbuf to last context index */
712 d->sbdma_ctxtable[prevdsc] = m;
713
714 /* Interrupt on last dscr of packet. */
715 d->sbdma_dscrtable[prevdsc].dscr_a |= M_DMA_DSCRA_INTERRUPT;
716 } else {
717 struct mbuf *m_new = NULL;
718 /*
719 * [BEGIN XXX]
720 * XXX Copy/coalesce the mbufs into a single mbuf cluster (we
721 * assume it will fit). This is a temporary hack to get us
722 * going.
723 */
724
725 MGETHDR(m_new,M_DONTWAIT,MT_DATA);
726 if (m_new == NULL) {
727 printf("%s: mbuf allocation failed\n",
728 d->sbdma_eth->sc_dev.dv_xname);
729 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
730 return ENOBUFS;
731 }
732
733 MCLGET(m_new,M_DONTWAIT);
734 if (!(m_new->m_flags & M_EXT)) {
735 printf("%s: mbuf cluster allocation failed\n",
736 d->sbdma_eth->sc_dev.dv_xname);
737 m_freem(m_new);
738 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
739 return ENOBUFS;
740 }
741
742 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
743 /*m_adj(m_new,ETHER_ALIGN);*/
744
745 /*
746 * XXX Don't forget to include the offset portion in the
747 * XXX cache block calculation when this code is rewritten!
748 */
749
750 /*
751 * Copy data
752 */
753
754 m_copydata(m,0,m->m_pkthdr.len,mtod(m_new,void *));
755 m_new->m_len = m_new->m_pkthdr.len = m->m_pkthdr.len;
756
757 /* Free old mbuf 'm', actual mbuf is now 'm_new' */
758
759 // XXX: CALLERS WILL FREE, they might have to bpf_mtap() if this
760 // XXX: function succeeds.
761 // m_freem(m);
762 length = m_new->m_len;
763
764 /* [END XXX] */
765 /*
766 * fill in the descriptor
767 */
768
769 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new,void *)) |
770 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(m_new->m_len)) |
771 M_DMA_DSCRA_INTERRUPT |
772 M_DMA_ETHTX_SOP;
773
774 /* transmitting: set outbound options and length */
775 d->sbdma_dscrtable[dsc].dscr_b =
776 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
777 V_DMA_DSCRB_PKT_SIZE(length);
778
779 num_mbufs++;
780
781 /*
782 * fill in the context
783 */
784
785 d->sbdma_ctxtable[dsc] = m_new;
786
787 /*
788 * point at next packet
789 */
790 d->sbdma_add_index = nextdsc;
791 }
792
793 /*
794 * Give the buffer to the DMA engine.
795 */
796
797 SBMAC_WRITECSR(d->sbdma_dscrcnt, num_mbufs);
798
799 return 0; /* we did it */
800 }
801
802 /*
803 * SBDMA_EMPTYRING(d)
804 *
805 * Free all allocated mbufs on the specified DMA channel.
806 *
807 * Input parameters:
808 * d - DMA channel
809 *
810 * Return value:
811 * nothing
812 */
813
814 static void
815 sbdma_emptyring(sbmacdma_t *d)
816 {
817 int idx;
818 struct mbuf *m;
819
820 for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
821 m = d->sbdma_ctxtable[idx];
822 if (m) {
823 m_freem(m);
824 d->sbdma_ctxtable[idx] = NULL;
825 }
826 }
827 }
828
829 /*
830 * SBDMA_FILLRING(d)
831 *
832 * Fill the specified DMA channel (must be receive channel)
833 * with mbufs
834 *
835 * Input parameters:
836 * d - DMA channel
837 *
838 * Return value:
839 * nothing
840 */
841
842 static void
843 sbdma_fillring(sbmacdma_t *d)
844 {
845 int idx;
846
847 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++)
848 if (sbdma_add_rcvbuffer(d, NULL) != 0)
849 break;
850 }
851
852 /*
853 * SBDMA_RX_PROCESS(sc, d)
854 *
855 * Process "completed" receive buffers on the specified DMA channel.
856 * Note that this isn't really ideal for priority channels, since
857 * it processes all of the packets on a given channel before
858 * returning.
859 *
860 * Input parameters:
861 * sc - softc structure
862 * d - DMA channel context
863 *
864 * Return value:
865 * nothing
866 */
867
868 static void
869 sbdma_rx_process(struct sbmac_softc *sc, sbmacdma_t *d)
870 {
871 int curidx;
872 int hwidx;
873 sbdmadscr_t *dscp;
874 struct mbuf *m;
875 int len;
876
877 struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
878
879 for (;;) {
880 /*
881 * figure out where we are (as an index) and where
882 * the hardware is (also as an index)
883 *
884 * This could be done faster if (for example) the
885 * descriptor table was page-aligned and contiguous in
886 * both virtual and physical memory -- you could then
887 * just compare the low-order bits of the virtual address
888 * (sbdma_rem_index) and the physical address
889 * (sbdma_curdscr CSR).
890 */
891
892 curidx = d->sbdma_rem_index;
893 hwidx = (int)
894 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
895 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
896
897 /*
898 * If they're the same, that means we've processed all
899 * of the descriptors up to (but not including) the one that
900 * the hardware is working on right now.
901 */
902
903 if (curidx == hwidx)
904 break;
905
906 /*
907 * Otherwise, get the packet's mbuf ptr back
908 */
909
910 dscp = &(d->sbdma_dscrtable[curidx]);
911 m = d->sbdma_ctxtable[curidx];
912 d->sbdma_ctxtable[curidx] = NULL;
913
914 len = (int)G_DMA_DSCRB_PKT_SIZE(dscp->dscr_b) - 4;
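		/*
		 * The packet size reported by the hardware appears to include
		 * the 4-byte Ethernet FCS, hence the subtraction (see the CRC
		 * question in the XXX comment below).
		 */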
915
916 /*
917 * Check packet status. If good, process it.
918 * If not, silently drop it and put it back on the
919 * receive ring.
920 */
921
922 if (! (dscp->dscr_a & M_DMA_ETHRX_BAD)) {
923
924 /*
925 * Set length into the packet
926 * XXX do we remove the CRC here?
927 */
928 m->m_pkthdr.len = m->m_len = len;
929
930 ifp->if_ipackets++;
931 m->m_pkthdr.rcvif = ifp;
932
933
934 /*
935 * Add a new buffer to replace the old one.
936 */
937 sbdma_add_rcvbuffer(d, NULL);
938
939 #if (NBPFILTER > 0)
940 /*
941 * Handle BPF listeners. Let the BPF user see the
942 * packet, but don't pass it up to the ether_input()
943 * layer unless it's a broadcast packet, multicast
944 * packet, matches our ethernet address or the
945 * interface is in promiscuous mode.
946 */
947
948 if (ifp->if_bpf)
949 bpf_mtap(ifp->if_bpf, m);
950 #endif
951 /*
952 * Pass the buffer to the kernel
953 */
954 (*ifp->if_input)(ifp, m);
955 } else {
956 /*
957 * Packet was mangled somehow. Just drop it and
958 * put it back on the receive ring.
959 */
960 sbdma_add_rcvbuffer(d, m);
961 }
962
963 /*
964 * .. and advance to the next buffer.
965 */
966
967 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
968 }
969 }
970
971 /*
972 * SBDMA_TX_PROCESS(sc, d)
973 *
974 * Process "completed" transmit buffers on the specified DMA channel.
975 * This is normally called within the interrupt service routine.
976 * Note that this isn't really ideal for priority channels, since
977 * it processes all of the packets on a given channel before
978 * returning.
979 *
980 * Input parameters:
981 * sc - softc structure
982 * d - DMA channel context
983 *
984 * Return value:
985 * nothing
986 */
987
988 static void
989 sbdma_tx_process(struct sbmac_softc *sc, sbmacdma_t *d)
990 {
991 int curidx;
992 int hwidx;
993 struct mbuf *m;
994
995 struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
996
997 for (;;) {
998 /*
999 * figure out where we are (as an index) and where
1000 * the hardware is (also as an index)
1001 *
1002 * This could be done faster if (for example) the
1003 * descriptor table was page-aligned and contiguous in
1004 * both virtual and physical memory -- you could then
1005 * just compare the low-order bits of the virtual address
1006 * (sbdma_rem_index) and the physical address
1007 * (sbdma_curdscr CSR).
1008 */
1009
1010 curidx = d->sbdma_rem_index;
1011 hwidx = (int)
1012 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1013 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1014
1015 /*
1016 * If they're the same, that means we've processed all
1017 * of the descriptors up to (but not including) the one that
1018 * the hardware is working on right now.
1019 */
1020
1021 if (curidx == hwidx)
1022 break;
1023
1024 /*
1025 * Otherwise, get the packet's mbuf ptr back
1026 */
1027
1028 m = d->sbdma_ctxtable[curidx];
1029 d->sbdma_ctxtable[curidx] = NULL;
1030
1031 /*
1032 * for transmits we just free buffers and count packets.
1033 */
1034 ifp->if_opackets++;
1035 m_freem(m);
1036
1037 /*
1038 * .. and advance to the next buffer.
1039 */
1040
1041 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
1042 }
1043
1044 /*
1045 * Decide how to set the IFF_OACTIVE bit on the interface.
1046 * It's supposed to reflect whether the interface is actively
1047 * transmitting, but that's really hard to determine quickly.
1048 */
1049
1050 ifp->if_flags &= ~IFF_OACTIVE;
1051 }
1052
1053 /*
1054 * SBMAC_INITCTX(s)
1055 *
1056 * Initialize an Ethernet context structure - this is called
1057 * once per MAC on the 1250. Memory is allocated here, so don't
1058 * call it again from inside the ioctl routines that bring the
1059 * interface up/down
1060 *
1061 * Input parameters:
1062 * s - sbmac context structure
1063 *
1064 * Return value:
1065 * nothing
1066 */
1067
1068 static void
1069 sbmac_initctx(struct sbmac_softc *s)
1070 {
1071 uint64_t sysrev;
1072
1073 /*
1074 * figure out the addresses of some ports
1075 */
1076
1077 s->sbm_macenable = PKSEG1(s->sbm_base + R_MAC_ENABLE);
1078 s->sbm_maccfg = PKSEG1(s->sbm_base + R_MAC_CFG);
1079 s->sbm_fifocfg = PKSEG1(s->sbm_base + R_MAC_THRSH_CFG);
1080 s->sbm_framecfg = PKSEG1(s->sbm_base + R_MAC_FRAMECFG);
1081 s->sbm_rxfilter = PKSEG1(s->sbm_base + R_MAC_ADFILTER_CFG);
1082 s->sbm_isr = PKSEG1(s->sbm_base + R_MAC_STATUS);
1083 s->sbm_imr = PKSEG1(s->sbm_base + R_MAC_INT_MASK);
1084
1085 /*
1086 * Initialize the DMA channels. Right now, only one per MAC is used
1087 * Note: Only do this _once_, as it allocates memory from the kernel!
1088 */
1089
1090 sbdma_initctx(&(s->sbm_txdma), s, 0, DMA_TX, SBMAC_MAX_TXDESCR);
1091 sbdma_initctx(&(s->sbm_rxdma), s, 0, DMA_RX, SBMAC_MAX_RXDESCR);
1092
1093 /*
1094 * initial state is OFF
1095 */
1096
1097 s->sbm_state = sbmac_state_off;
1098
1099 /*
1100 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
1101 */
1102
1103 s->sbm_speed = sbmac_speed_10;
1104 s->sbm_duplex = sbmac_duplex_half;
1105 s->sbm_fc = sbmac_fc_disabled;
1106
1107 /*
1108 * Determine SOC type. 112x has Pass3 SOC features.
1109 */
1110 sysrev = SBMAC_READCSR( PKSEG1(A_SCD_SYSTEM_REVISION) );
1111 s->sbm_pass3_dma = (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1120 ||
1112 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125 ||
1113 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125H ||
1114 (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1250 &&
1115 G_SYS_REVISION(sysrev) >= K_SYS_REVISION_BCM1250_PASS3));
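	/*
	 * "Pass3" DMA here means the transmit channel can use the
	 * unaligned-buffer descriptor format, so mbuf chains can be handed
	 * to the hardware without first being copied into an aligned
	 * cluster (see sbdma_add_txbuffer and sbmac_channel_start).
	 */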
1116 #ifdef SBMAC_EVENT_COUNTERS
1117 evcnt_attach_dynamic(&s->sbm_ev_rxintr, EVCNT_TYPE_INTR,
1118 NULL, s->sc_dev.dv_xname, "rxintr");
1119 evcnt_attach_dynamic(&s->sbm_ev_txintr, EVCNT_TYPE_INTR,
1120 NULL, s->sc_dev.dv_xname, "txintr");
1121 evcnt_attach_dynamic(&s->sbm_ev_txdrop, EVCNT_TYPE_MISC,
1122 NULL, s->sc_dev.dv_xname, "txdrop");
1123 evcnt_attach_dynamic(&s->sbm_ev_txstall, EVCNT_TYPE_MISC,
1124 NULL, s->sc_dev.dv_xname, "txstall");
1125 if (s->sbm_pass3_dma) {
1126 evcnt_attach_dynamic(&s->sbm_ev_txsplit, EVCNT_TYPE_MISC,
1127 NULL, s->sc_dev.dv_xname, "pass3tx-split");
1128 evcnt_attach_dynamic(&s->sbm_ev_txkeep, EVCNT_TYPE_MISC,
1129 NULL, s->sc_dev.dv_xname, "pass3tx-keep");
1130 }
1131 #endif
1132 }
1133
1134 /*
1135 * SBMAC_CHANNEL_START(s)
1136 *
1137 * Start packet processing on this MAC.
1138 *
1139 * Input parameters:
1140 * s - sbmac structure
1141 *
1142 * Return value:
1143 * nothing
1144 */
1145
1146 static void
1147 sbmac_channel_start(struct sbmac_softc *s)
1148 {
1149 uint64_t reg;
1150 sbmac_port_t port;
1151 uint64_t cfg, fifo, framecfg;
1152 int idx;
1153 uint64_t dma_cfg0, fifo_cfg;
1154 sbmacdma_t *txdma;
1155
1156 /*
1157 * Don't do this if running
1158 */
1159
1160 if (s->sbm_state == sbmac_state_on)
1161 return;
1162
1163 /*
1164 * Bring the controller out of reset, but leave it off.
1165 */
1166
1167 SBMAC_WRITECSR(s->sbm_macenable, 0);
1168
1169 /*
1170 * Ignore all received packets
1171 */
1172
1173 SBMAC_WRITECSR(s->sbm_rxfilter, 0);
1174
1175 /*
1176 * Calculate values for various control registers.
1177 */
1178
1179 cfg = M_MAC_RETRY_EN |
1180 M_MAC_TX_HOLD_SOP_EN |
1181 V_MAC_TX_PAUSE_CNT_16K |
1182 M_MAC_AP_STAT_EN |
1183 M_MAC_SS_EN |
1184 0;
1185
1186 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
1187 V_MAC_TX_RD_THRSH(4) |
1188 V_MAC_TX_RL_THRSH(4) |
1189 V_MAC_RX_PL_THRSH(4) |
1190 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
1191 V_MAC_RX_PL_THRSH(4) |
1192 V_MAC_RX_RL_THRSH(8) |
1193 0;
1194
1195 framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
1196 V_MAC_MAX_FRAMESZ_DEFAULT |
1197 V_MAC_BACKOFF_SEL(1);
1198
1199 /*
1200 * Clear out the hash address map
1201 */
1202
1203 port = PKSEG1(s->sbm_base + R_MAC_HASH_BASE);
1204 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1205 SBMAC_WRITECSR(port, 0);
1206 port += sizeof(uint64_t);
1207 }
1208
1209 /*
1210 * Clear out the exact-match table
1211 */
1212
1213 port = PKSEG1(s->sbm_base + R_MAC_ADDR_BASE);
1214 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
1215 SBMAC_WRITECSR(port, 0);
1216 port += sizeof(uint64_t);
1217 }
1218
1219 /*
1220 * Clear out the DMA Channel mapping table registers
1221 */
1222
1223 port = PKSEG1(s->sbm_base + R_MAC_CHUP0_BASE);
1224 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1225 SBMAC_WRITECSR(port, 0);
1226 port += sizeof(uint64_t);
1227 }
1228
1229 port = PKSEG1(s->sbm_base + R_MAC_CHLO0_BASE);
1230 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1231 SBMAC_WRITECSR(port, 0);
1232 port += sizeof(uint64_t);
1233 }
1234
1235 /*
1236 * Program the hardware address. It goes into the hardware-address
1237 * register as well as the first filter register.
1238 */
1239
1240 reg = sbmac_addr2reg(s->sbm_hwaddr);
1241
1242 port = PKSEG1(s->sbm_base + R_MAC_ADDR_BASE);
1243 SBMAC_WRITECSR(port, reg);
1244 port = PKSEG1(s->sbm_base + R_MAC_ETHERNET_ADDR);
1245 SBMAC_WRITECSR(port, 0); // pass1 workaround
1246
1247 /*
1248 * Set the receive filter for no packets, and write values
1249 * to the various config registers
1250 */
1251
1252 SBMAC_WRITECSR(s->sbm_rxfilter, 0);
1253 SBMAC_WRITECSR(s->sbm_imr, 0);
1254 SBMAC_WRITECSR(s->sbm_framecfg, framecfg);
1255 SBMAC_WRITECSR(s->sbm_fifocfg, fifo);
1256 SBMAC_WRITECSR(s->sbm_maccfg, cfg);
1257
1258 /*
1259 * Initialize DMA channels (rings should be ok now)
1260 */
1261
1262 sbdma_channel_start(&(s->sbm_rxdma));
1263 sbdma_channel_start(&(s->sbm_txdma));
1264
1265 /*
1266 * Configure the speed, duplex, and flow control
1267 */
1268
1269 sbmac_set_speed(s, s->sbm_speed);
1270 sbmac_set_duplex(s, s->sbm_duplex, s->sbm_fc);
1271
1272 /*
1273 * Fill the receive ring
1274 */
1275
1276 sbdma_fillring(&(s->sbm_rxdma));
1277
1278 /*
1279 * Turn on the rest of the bits in the enable register
1280 */
1281
1282 SBMAC_WRITECSR(s->sbm_macenable, M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0 |
1283 M_MAC_RX_ENABLE | M_MAC_TX_ENABLE);
1284
1285
1286 /*
1287 * Accept any kind of interrupt on TX and RX DMA channel 0
1288 */
1289 SBMAC_WRITECSR(s->sbm_imr,
1290 (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1291 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0));
1292
1293 /*
1294 * Enable receiving unicasts and broadcasts
1295 */
1296
1297 SBMAC_WRITECSR(s->sbm_rxfilter, M_MAC_UCAST_EN | M_MAC_BCAST_EN);
1298
1299 /*
1300 * On chips which support unaligned DMA features, set the descriptor
1301 * ring for transmit channels to use the unaligned buffer format.
1302 */
1303 txdma = &(s->sbm_txdma);
1304
1305 if (s->sbm_pass3_dma) {
1306 dma_cfg0 = SBMAC_READCSR(txdma->sbdma_config0);
1307 dma_cfg0 |= V_DMA_DESC_TYPE(K_DMA_DESC_TYPE_RING_UAL_RMW) |
1308 M_DMA_TBX_EN | M_DMA_TDX_EN;
1309 SBMAC_WRITECSR(txdma->sbdma_config0,dma_cfg0);
1310
1311 fifo_cfg = SBMAC_READCSR(s->sbm_fifocfg);
1312 fifo_cfg |= V_MAC_TX_WR_THRSH(8) |
1313 V_MAC_TX_RD_THRSH(8) | V_MAC_TX_RL_THRSH(8);
1314 SBMAC_WRITECSR(s->sbm_fifocfg,fifo_cfg);
1315 }
1316
1317 /*
1318 * we're running now.
1319 */
1320
1321 s->sbm_state = sbmac_state_on;
1322 s->sc_ethercom.ec_if.if_flags |= IFF_RUNNING;
1323
1324 /*
1325 * Program multicast addresses
1326 */
1327
1328 sbmac_setmulti(s);
1329
1330 /*
1331 * If channel was in promiscuous mode before, turn that on
1332 */
1333
1334 if (s->sc_ethercom.ec_if.if_flags & IFF_PROMISC)
1335 sbmac_promiscuous_mode(s, 1);
1336
1337 /*
1338 * Turn on the once-per-second timer
1339 */
1340
1341 callout_reset(&(s->sc_tick_ch), hz, sbmac_tick, s);
1342 }
1343
1344 /*
1345 * SBMAC_CHANNEL_STOP(s)
1346 *
1347 * Stop packet processing on this MAC.
1348 *
1349 * Input parameters:
1350 * s - sbmac structure
1351 *
1352 * Return value:
1353 * nothing
1354 */
1355
1356 static void
1357 sbmac_channel_stop(struct sbmac_softc *s)
1358 {
1359 uint64_t ctl;
1360
1361 /* don't do this if already stopped */
1362
1363 if (s->sbm_state == sbmac_state_off)
1364 return;
1365
1366 /* don't accept any packets, disable all interrupts */
1367
1368 SBMAC_WRITECSR(s->sbm_rxfilter, 0);
1369 SBMAC_WRITECSR(s->sbm_imr, 0);
1370
1371 /* Turn off ticker */
1372
1373 callout_stop(&(s->sc_tick_ch));
1374
1375 /* turn off receiver and transmitter */
1376
1377 ctl = SBMAC_READCSR(s->sbm_macenable);
1378 ctl &= ~(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0);
1379 SBMAC_WRITECSR(s->sbm_macenable, ctl);
1380
1381 /* We're stopped now. */
1382
1383 s->sbm_state = sbmac_state_off;
1384 s->sc_ethercom.ec_if.if_flags &= ~IFF_RUNNING;
1385
1386 /* Empty the receive and transmit rings */
1387
1388 sbdma_emptyring(&(s->sbm_rxdma));
1389 sbdma_emptyring(&(s->sbm_txdma));
1390 }
1391
1392 /*
1393 * SBMAC_SET_CHANNEL_STATE(sc, state)
1394 *
1395 * Set the channel's state ON or OFF
1396 *
1397 * Input parameters:
1398 * sc - softc structure
 * state - new state
1399 *
1400 * Return value:
1401 * old state
1402 */
1403
1404 static sbmac_state_t
1405 sbmac_set_channel_state(struct sbmac_softc *sc, sbmac_state_t state)
1406 {
1407 sbmac_state_t oldstate = sc->sbm_state;
1408
1409 /*
1410 * If same as previous state, return
1411 */
1412
1413 if (state == oldstate)
1414 return oldstate;
1415
1416 /*
1417 * If new state is ON, turn channel on
1418 */
1419
1420 if (state == sbmac_state_on)
1421 sbmac_channel_start(sc);
1422 else
1423 sbmac_channel_stop(sc);
1424
1425 /*
1426 * Return previous state
1427 */
1428
1429 return oldstate;
1430 }
1431
1432 /*
1433 * SBMAC_PROMISCUOUS_MODE(sc, onoff)
1434 *
1435 * Turn on or off promiscuous mode
1436 *
1437 * Input parameters:
1438 * sc - softc
1439 * onoff - 1 to turn on, 0 to turn off
1440 *
1441 * Return value:
1442 * nothing
1443 */
1444
1445 static void
1446 sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff)
1447 {
1448 uint64_t reg;
1449
1450 if (sc->sbm_state != sbmac_state_on)
1451 return;
1452
1453 if (onoff) {
1454 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1455 reg |= M_MAC_ALLPKT_EN;
1456 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1457 } else {
1458 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1459 reg &= ~M_MAC_ALLPKT_EN;
1460 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1461 }
1462 }
1463
1464 /*
1465 * SBMAC_INIT_AND_START(sc)
1466 *
1467 * Stop the channel and restart it. This is generally used
1468 * when we have to do something to the channel that requires
1469 * a swift kick.
1470 *
1471 * Input parameters:
1472 * sc - softc
1473 */
1474
1475 static void
1476 sbmac_init_and_start(struct sbmac_softc *sc)
1477 {
1478 int s;
1479
1480 s = splnet();
1481
1482 mii_pollstat(&sc->sc_mii); /* poll phy for current speed */
1483 sbmac_mii_statchg((struct device *) sc); /* set state to new speed */
1484 sbmac_set_channel_state(sc, sbmac_state_on);
1485
1486 splx(s);
1487 }
1488
1489 /*
1490 * SBMAC_ADDR2REG(ptr)
1491 *
1492 * Convert six bytes into the 64-bit register value that
1493 * we typically write into the SBMAC's address/mcast registers
1494 *
1495 * Input parameters:
1496 * ptr - pointer to 6 bytes
1497 *
1498 * Return value:
1499 * register value
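 *
 * For example, the address 00:11:22:33:44:55 is packed with byte 0 in
 * the low-order bits, giving the value 0x0000554433221100.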
1500 */
1501
1502 static uint64_t
1503 sbmac_addr2reg(u_char *ptr)
1504 {
1505 uint64_t reg = 0;
1506
1507 ptr += 6;
1508
1509 reg |= (uint64_t) *(--ptr);
1510 reg <<= 8;
1511 reg |= (uint64_t) *(--ptr);
1512 reg <<= 8;
1513 reg |= (uint64_t) *(--ptr);
1514 reg <<= 8;
1515 reg |= (uint64_t) *(--ptr);
1516 reg <<= 8;
1517 reg |= (uint64_t) *(--ptr);
1518 reg <<= 8;
1519 reg |= (uint64_t) *(--ptr);
1520
1521 return reg;
1522 }
1523
1524 /*
1525 * SBMAC_SET_SPEED(s, speed)
1526 *
1527 * Configure LAN speed for the specified MAC.
1528 * Warning: must be called when MAC is off!
1529 *
1530 * Input parameters:
1531 * s - sbmac structure
1532 * speed - speed to set MAC to (see sbmac_speed_t enum)
1533 *
1534 * Return value:
1535 * 1 if successful
1536 * 0 indicates invalid parameters
1537 */
1538
1539 static int
1540 sbmac_set_speed(struct sbmac_softc *s, sbmac_speed_t speed)
1541 {
1542 uint64_t cfg;
1543 uint64_t framecfg;
1544
1545 /*
1546 * Save new current values
1547 */
1548
1549 s->sbm_speed = speed;
1550
1551 if (s->sbm_state != sbmac_state_off)
1552 panic("sbmac_set_speed while MAC not off");
1553
1554 /*
1555 * Read current register values
1556 */
1557
1558 cfg = SBMAC_READCSR(s->sbm_maccfg);
1559 framecfg = SBMAC_READCSR(s->sbm_framecfg);
1560
1561 /*
1562 * Mask out the stuff we want to change
1563 */
1564
1565 cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
1566 framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
1567 M_MAC_SLOT_SIZE);
1568
1569 /*
1570 * Now add in the new bits
1571 */
1572
1573 switch (speed) {
1574 case sbmac_speed_10:
1575 framecfg |= V_MAC_IFG_RX_10 |
1576 V_MAC_IFG_TX_10 |
1577 K_MAC_IFG_THRSH_10 |
1578 V_MAC_SLOT_SIZE_10;
1579 cfg |= V_MAC_SPEED_SEL_10MBPS;
1580 break;
1581
1582 case sbmac_speed_100:
1583 framecfg |= V_MAC_IFG_RX_100 |
1584 V_MAC_IFG_TX_100 |
1585 V_MAC_IFG_THRSH_100 |
1586 V_MAC_SLOT_SIZE_100;
1587 cfg |= V_MAC_SPEED_SEL_100MBPS ;
1588 break;
1589
1590 case sbmac_speed_1000:
1591 framecfg |= V_MAC_IFG_RX_1000 |
1592 V_MAC_IFG_TX_1000 |
1593 V_MAC_IFG_THRSH_1000 |
1594 V_MAC_SLOT_SIZE_1000;
1595 cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
1596 break;
1597
1598 case sbmac_speed_auto: /* XXX not implemented */
1599 /* fall through */
1600 default:
1601 return 0;
1602 }
1603
1604 /*
1605 * Send the bits back to the hardware
1606 */
1607
1608 SBMAC_WRITECSR(s->sbm_framecfg, framecfg);
1609 SBMAC_WRITECSR(s->sbm_maccfg, cfg);
1610
1611 return 1;
1612 }
1613
1614 /*
1615 * SBMAC_SET_DUPLEX(s, duplex, fc)
1616 *
1617 * Set Ethernet duplex and flow control options for this MAC
1618 * Warning: must be called when MAC is off!
1619 *
1620 * Input parameters:
1621 * s - sbmac structure
1622 * duplex - duplex setting (see sbmac_duplex_t)
1623 * fc - flow control setting (see sbmac_fc_t)
1624 *
1625 * Return value:
1626 * 1 if ok
1627 * 0 if an invalid parameter combination was specified
1628 */
1629
1630 static int
1631 sbmac_set_duplex(struct sbmac_softc *s, sbmac_duplex_t duplex, sbmac_fc_t fc)
1632 {
1633 uint64_t cfg;
1634
1635 /*
1636 * Save new current values
1637 */
1638
1639 s->sbm_duplex = duplex;
1640 s->sbm_fc = fc;
1641
1642 if (s->sbm_state != sbmac_state_off)
1643 panic("sbmac_set_duplex while MAC not off");
1644
1645 /*
1646 * Read current register values
1647 */
1648
1649 cfg = SBMAC_READCSR(s->sbm_maccfg);
1650
1651 /*
1652 * Mask off the stuff we're about to change
1653 */
1654
1655 cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
1656
1657 switch (duplex) {
1658 case sbmac_duplex_half:
1659 switch (fc) {
1660 case sbmac_fc_disabled:
1661 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
1662 break;
1663
1664 case sbmac_fc_collision:
1665 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
1666 break;
1667
1668 case sbmac_fc_carrier:
1669 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
1670 break;
1671
1672 case sbmac_fc_auto: /* XXX not implemented */
1673 /* fall through */
1674 case sbmac_fc_frame: /* not valid in half duplex */
1675 default: /* invalid selection */
1676 panic("%s: invalid half duplex fc selection %d",
1677 s->sc_dev.dv_xname, fc);
1678 return 0;
1679 }
1680 break;
1681
1682 case sbmac_duplex_full:
1683 switch (fc) {
1684 case sbmac_fc_disabled:
1685 cfg |= V_MAC_FC_CMD_DISABLED;
1686 break;
1687
1688 case sbmac_fc_frame:
1689 cfg |= V_MAC_FC_CMD_ENABLED;
1690 break;
1691
1692 case sbmac_fc_collision: /* not valid in full duplex */
1693 case sbmac_fc_carrier: /* not valid in full duplex */
1694 case sbmac_fc_auto: /* XXX not implemented */
1695 /* fall through */
1696 default:
1697 panic("%s: invalid full duplex fc selection %d",
1698 s->sc_dev.dv_xname, fc);
1699 return 0;
1700 }
1701 break;
1702
1703 default:
1704 /* fall through */
1705 case sbmac_duplex_auto:
1706 panic("%s: bad duplex %d", s->sc_dev.dv_xname, duplex);
1707 /* XXX not implemented */
1708 break;
1709 }
1710
1711 /*
1712 * Send the bits back to the hardware
1713 */
1714
1715 SBMAC_WRITECSR(s->sbm_maccfg, cfg);
1716
1717 return 1;
1718 }
1719
1720 /*
1721 * SBMAC_INTR()
1722 *
1723 * Interrupt handler for MAC interrupts
1724 *
1725 * Input parameters:
1726 * MAC structure
1727 *
1728 * Return value:
1729 * nothing
1730 */
1731
1732 /* ARGSUSED */
1733 static void
1734 sbmac_intr(void *xsc, uint32_t status, vaddr_t pc)
1735 {
1736 struct sbmac_softc *sc = (struct sbmac_softc *) xsc;
1737 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1738 uint64_t isr;
1739
1740 for (;;) {
1741
1742 /*
1743 * Read the ISR (this clears the bits in the real register)
1744 */
1745
1746 isr = SBMAC_READCSR(sc->sbm_isr);
1747
1748 if (isr == 0)
1749 break;
1750
1751 /*
1752 * Transmits on channel 0
1753 */
1754
1755 if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
1756 sbdma_tx_process(sc, &(sc->sbm_txdma));
1757 SBMAC_EVCNT_INCR(sc->sbm_ev_txintr);
1758 }
1759
1760 /*
1761 * Receives on channel 0
1762 */
1763
1764 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
1765 sbdma_rx_process(sc, &(sc->sbm_rxdma));
1766 SBMAC_EVCNT_INCR(sc->sbm_ev_rxintr);
1767 }
1768 }
1769
1770 /* try to get more packets going */
1771 sbmac_start(ifp);
1772 }
1773
1774
1775 /*
1776 * SBMAC_START(ifp)
1777 *
1778 * Start output on the specified interface. Basically, we
1779 * queue as many buffers as we can until the ring fills up, or
1780 * we run off the end of the queue, whichever comes first.
1781 *
1782 * Input parameters:
1783 * ifp - interface
1784 *
1785 * Return value:
1786 * nothing
1787 */
1788
1789 static void
1790 sbmac_start(struct ifnet *ifp)
1791 {
1792 struct sbmac_softc *sc;
1793 struct mbuf *m_head = NULL;
1794 int rv;
1795
1796 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1797 return;
1798
1799 sc = ifp->if_softc;
1800
1801 for (;;) {
1802
1803 IF_DEQUEUE(&ifp->if_snd, m_head);
1804 if (m_head == NULL)
1805 break;
1806
1807 /*
1808 * Put the buffer on the transmit ring. If we
1809 * don't have room, set the OACTIVE flag and wait
1810 * for the NIC to drain the ring.
1811 */
1812
1813 rv = sbdma_add_txbuffer(&(sc->sbm_txdma), m_head);
1814
1815 if (rv == 0) {
1816 /*
1817 * If there's a BPF listener, bounce a copy of this
1818 * frame to it.
1819 */
1820 #if (NBPFILTER > 0)
1821 if (ifp->if_bpf)
1822 bpf_mtap(ifp->if_bpf, m_head);
1823 #endif
1824 if (!sc->sbm_pass3_dma) {
1825 /*
1826 * Don't free mbuf if we're not copying to new
1827 * mbuf in sbdma_add_txbuffer. It will be
1828 * freed in sbdma_tx_process.
1829 */
1830 m_freem(m_head);
1831 }
1832 } else {
1833 IF_PREPEND(&ifp->if_snd, m_head);
1834 ifp->if_flags |= IFF_OACTIVE;
1835 break;
1836 }
1837 }
1838 }
1839
1840 /*
1841 * SBMAC_SETMULTI(sc)
1842 *
1843 * Reprogram the multicast table into the hardware, given
1844 * the list of multicasts associated with the interface
1845 * structure.
1846 *
1847 * Input parameters:
1848 * sc - softc
1849 *
1850 * Return value:
1851 * nothing
1852 */
1853
1854 static void
1855 sbmac_setmulti(struct sbmac_softc *sc)
1856 {
1857 struct ifnet *ifp;
1858 uint64_t reg;
1859 sbmac_port_t port;
1860 int idx;
1861 struct ether_multi *enm;
1862 struct ether_multistep step;
1863
1864 ifp = &sc->sc_ethercom.ec_if;
1865
1866 /*
1867 * Clear out entire multicast table. We do this by nuking
1868 * the entire hash table and all the direct matches except
1869 * the first one, which is used for our station address
1870 */
1871
1872 for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
1873 port = PKSEG1(sc->sbm_base +
1874 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1875 SBMAC_WRITECSR(port, 0);
1876 }
1877
1878 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1879 port = PKSEG1(sc->sbm_base +
1880 R_MAC_HASH_BASE+(idx*sizeof(uint64_t)));
1881 SBMAC_WRITECSR(port, 0);
1882 }
1883
1884 /*
1885 * Clear the filter to say we don't want any multicasts.
1886 */
1887
1888 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1889 reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1890 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1891
1892 if (ifp->if_flags & IFF_ALLMULTI) {
1893 /*
1894 * Enable ALL multicasts. Do this by inverting the
1895 * multicast enable bit.
1896 */
1897 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1898 reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1899 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1900 return;
1901 }
1902
1903 /*
1904 * Program new multicast entries. For now, only use the
1905 * perfect filter. In the future we'll need to use the
1906 * hash filter if the perfect filter overflows
1907 */
1908
1909 /*
1910 * XXX only using perfect filter for now, need to use hash
1911 * XXX if the table overflows
1912 */
1913
1914 idx = 1; /* skip station address */
1915 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
1916 while ((enm != NULL) && (idx < MAC_ADDR_COUNT)) {
1917 reg = sbmac_addr2reg(enm->enm_addrlo);
1918 port = PKSEG1(sc->sbm_base +
1919 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1920 SBMAC_WRITECSR(port, reg);
1921 idx++;
1922 ETHER_NEXT_MULTI(step, enm);
1923 }
1924
1925 /*
1926 * Enable the "accept multicast bits" if we programmed at least one
1927 * multicast.
1928 */
1929
1930 if (idx > 1) {
1931 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1932 reg |= M_MAC_MCAST_EN;
1933 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1934 }
1935 }
1936
1937 /*
1938 * SBMAC_ETHER_IOCTL(ifp, cmd, data)
1939 *
1940 * Generic IOCTL requests for this interface. The basic
1941 * stuff is handled here for bringing the interface up,
1942 * handling multicasts, etc.
1943 *
1944 * Input parameters:
1945 * ifp - interface structure
1946 * cmd - command code
1947 * data - pointer to data
1948 *
1949 * Return value:
1950 * return value (0 is success)
1951 */
1952
1953 static int
1954 sbmac_ether_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1955 {
1956 struct ifaddr *ifa = (struct ifaddr *) data;
1957 struct sbmac_softc *sc = ifp->if_softc;
1958
1959 switch (cmd) {
1960 case SIOCINITIFADDR:
1961 ifp->if_flags |= IFF_UP;
1962
1963 switch (ifa->ifa_addr->sa_family) {
1964 #ifdef INET
1965 case AF_INET:
1966 sbmac_init_and_start(sc);
1967 arp_ifinit(ifp, ifa);
1968 break;
1969 #endif
1970 #ifdef NS
1971 case AF_NS:
1972 {
1973 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
1974
1975 if (ns_nullhost(*ina))
1976 ina->x_host =
1977 *(union ns_host *)LLADDR(ifp->if_sadl);
1978 else
1979 memcpy(LLADDR(ifp->if_sadl), ina->x_host.c_host,
1980 ifp->if_addrlen);
1981 /* Set new address. */
1982 sbmac_init_and_start(sc);
1983 break;
1984 }
1985 #endif
1986 default:
1987 sbmac_init_and_start(sc);
1988 break;
1989 }
1990 break;
1991
1992 default:
1993 return ENOTTY;
1994 }
1995
1996 return (0);
1997 }
1998
1999 /*
2000 * SBMAC_IOCTL(ifp, cmd, data)
2001 *
2002 * Main IOCTL handler - dispatches to other IOCTLs for various
2003 * types of requests.
2004 *
2005 * Input parameters:
2006 * ifp - interface pointer
2007 * cmd - command code
2008 * data - pointer to argument data
2009 *
2010 * Return value:
2011 * 0 if ok
2012 * else error code
2013 */
2014
2015 static int
2016 sbmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2017 {
2018 struct sbmac_softc *sc = ifp->if_softc;
2019 struct ifreq *ifr = (struct ifreq *) data;
2020 int s, error = 0;
2021
2022 s = splnet();
2023
2024 switch (cmd) {
2025 case SIOCINITIFADDR:
2026 error = sbmac_ether_ioctl(ifp, cmd, data);
2027 break;
2028 case SIOCSIFMTU:
2029 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
2030 error = EINVAL;
2031 else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
2032 /* XXX Program new MTU here */
2033 error = 0;
2034 break;
2035 case SIOCSIFFLAGS:
2036 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2037 break;
2038 if (ifp->if_flags & IFF_UP) {
2039 /*
2040 * If only the state of the PROMISC flag changed,
2041 * just tweak the hardware registers.
2042 */
2043 if ((ifp->if_flags & IFF_RUNNING) &&
2044 (ifp->if_flags & IFF_PROMISC)) {
2045 /* turn on promiscuous mode */
2046 sbmac_promiscuous_mode(sc, 1);
2047 } else if (ifp->if_flags & IFF_RUNNING &&
2048 !(ifp->if_flags & IFF_PROMISC)) {
2049 /* turn off promiscuous mode */
2050 sbmac_promiscuous_mode(sc, 0);
2051 } else
2052 sbmac_set_channel_state(sc, sbmac_state_on);
2053 } else {
2054 if (ifp->if_flags & IFF_RUNNING)
2055 sbmac_set_channel_state(sc, sbmac_state_off);
2056 }
2057
2058 sc->sbm_if_flags = ifp->if_flags;
2059 error = 0;
2060 break;
2061
2062 case SIOCADDMULTI:
2063 case SIOCDELMULTI:
2064 case SIOCSIFMEDIA:
2065 case SIOCGIFMEDIA:
2066 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
2067 error = 0;
2068 if (ifp->if_flags & IFF_RUNNING)
2069 sbmac_setmulti(sc);
2070 }
2071 break;
2072 default:
2073 error = ether_ioctl(ifp, cmd, data);
2074 break;
2075 }
2076
2077 (void)splx(s);
2078
2079 return(error);
2080 }
2081
2082 /*
2083 * SBMAC_IFMEDIA_UPD(ifp)
2084 *
2085 * Configure an appropriate media type for this interface,
2086 * given the data in the interface structure
2087 *
2088 * Input parameters:
2089 * ifp - interface
2090 *
2091 * Return value:
2092 * 0 if ok
2093 * else error code
2094 */
2095
2096 /*
2097 * SBMAC_IFMEDIA_STS(ifp, ifmr)
2098 *
2099 * Report current media status (used by ifconfig, for example)
2100 *
2101 * Input parameters:
2102 * ifp - interface structure
2103 * ifmr - media request structure
2104 *
2105 * Return value:
2106 * nothing
2107 */
2108
2109 /*
2110 * SBMAC_WATCHDOG(ifp)
2111 *
2112 * Called periodically to make sure we're still happy.
2113 *
2114 * Input parameters:
2115 * ifp - interface structure
2116 *
2117 * Return value:
2118 * nothing
2119 */
2120
2121 static void
2122 sbmac_watchdog(struct ifnet *ifp)
2123 {
2124
2125 /* XXX do something */
2126 }
2127
2128 /*
2129 * One second timer, used to tick MII.
2130 */
2131 static void
2132 sbmac_tick(void *arg)
2133 {
2134 struct sbmac_softc *sc = arg;
2135 int s;
2136
2137 s = splnet();
2138 mii_tick(&sc->sc_mii);
2139 splx(s);
2140
2141 callout_reset(&sc->sc_tick_ch, hz, sbmac_tick, sc);
2142 }
2143
2144
2145 /*
2146 * SBMAC_MATCH(parent, match, aux)
2147 *
2148 * Part of the config process - see if this device matches the
2149 * info about what we expect to find on the bus.
2150 *
2151 * Input parameters:
2152 * parent - parent bus structure
2153 * match - configuration match data (not used here)
2154 * aux - bus-specific args
2155 *
2156 * Return value:
2157 * 1 if we match
2158 * 0 if we don't match
2159 */
2160
2161 static int
2162 sbmac_match(struct device *parent, struct cfdata *match, void *aux)
2163 {
2164 struct sbobio_attach_args *sap = aux;
2165
2166 /*
2167 * Make sure it's a MAC
2168 */
2169
2170 if (sap->sa_locs.sa_type != SBOBIO_DEVTYPE_MAC)
2171 return 0;
2172
2173 /*
2174 * Yup, it is.
2175 */
2176
2177 return 1;
2178 }
2179
2180 /*
2181 * SBMAC_PARSE_XDIGIT(str)
2182 *
2183 * Parse a hex digit, returning its value
2184 *
2185 * Input parameters:
2186 * str - character
2187 *
2188 * Return value:
2189 * hex value, or -1 if invalid
2190 */
2191
2192 static int
2193 sbmac_parse_xdigit(char str)
2194 {
2195 int digit;
2196
2197 if ((str >= '0') && (str <= '9'))
2198 digit = str - '0';
2199 else if ((str >= 'a') && (str <= 'f'))
2200 digit = str - 'a' + 10;
2201 else if ((str >= 'A') && (str <= 'F'))
2202 digit = str - 'A' + 10;
2203 else
2204 digit = -1;
2205
2206 return digit;
2207 }
2208
2209 /*
2210 * SBMAC_PARSE_HWADDR(str, hwaddr)
2211 *
2212 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2213 * Ethernet address.
2214 *
2215 * Input parameters:
2216 * str - string
2217 * hwaddr - pointer to hardware address
2218 *
2219 * Return value:
2220 * 0 if ok, else -1
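 *
 * For example, sbmac_parse_hwaddr("40:00:00:00:01:00", hwaddr) returns 0
 * and fills hwaddr with { 0x40, 0x00, 0x00, 0x00, 0x01, 0x00 }.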
2221 */
2222
2223 static int
2224 sbmac_parse_hwaddr(const char *str, u_char *hwaddr)
2225 {
2226 int digit1, digit2;
2227 int idx = 6;
2228
2229 while (*str && (idx > 0)) {
2230 digit1 = sbmac_parse_xdigit(*str);
2231 if (digit1 < 0)
2232 return -1;
2233 str++;
2234 if (!*str)
2235 return -1;
2236
2237 if ((*str == ':') || (*str == '-')) {
2238 digit2 = digit1;
2239 digit1 = 0;
2240 } else {
2241 digit2 = sbmac_parse_xdigit(*str);
2242 if (digit2 < 0)
2243 return -1;
2244 str++;
2245 }
2246
2247 *hwaddr++ = (digit1 << 4) | digit2;
2248 idx--;
2249
2250 if (*str == '-')
2251 str++;
2252 if (*str == ':')
2253 str++;
2254 }
2255 return 0;
2256 }
2257
2258 /*
2259 * SBMAC_ATTACH(parent, self, aux)
2260 *
2261 * Attach routine - init hardware and hook ourselves into NetBSD.
2262 *
2263 * Input parameters:
2264 * parent - parent bus device
2265 * self - our softc
2266 * aux - attach data
2267 *
2268 * Return value:
2269 * nothing
2270 */
2271
2272 static void
2273 sbmac_attach(struct device *parent, struct device *self, void *aux)
2274 {
2275 struct ifnet *ifp;
2276 struct sbmac_softc *sc;
2277 struct sbobio_attach_args *sap = aux;
2278 u_char *eaddr;
2279 static int unit = 0; /* XXX */
2280 uint64_t ea_reg;
2281 int idx;
2282
2283 sc = (struct sbmac_softc *)self;
2284
2285 /* Determine controller base address */
2286
2287 sc->sbm_base = sap->sa_locs.sa_addr;
2288
2289 eaddr = sc->sbm_hwaddr;
2290
2291 /*
2292 * Initialize context (get pointers to registers and stuff), then
2293 * allocate the memory for the descriptor tables.
2294 */
2295
2296 sbmac_initctx(sc);
2297
2298 callout_init(&(sc->sc_tick_ch), 0);
2299
2300 /*
2301 * Read the Ethernet address. The firmware left this programmed
2302 * for us in the Ethernet address register for each MAC.
2303 */
2304
2305 ea_reg = SBMAC_READCSR(PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR));
2306 for (idx = 0; idx < 6; idx++) {
2307 eaddr[idx] = (uint8_t) (ea_reg & 0xFF);
2308 ea_reg >>= 8;
2309 }
2310
2311 #define SBMAC_DEFAULT_HWADDR "40:00:00:00:01:00"
2312 if (eaddr[0] == 0 && eaddr[1] == 0 && eaddr[2] == 0 &&
2313 eaddr[3] == 0 && eaddr[4] == 0 && eaddr[5] == 0) {
2314 sbmac_parse_hwaddr(SBMAC_DEFAULT_HWADDR, eaddr);
2315 eaddr[5] = unit;
2316 }
2317
2318 #ifdef SBMAC_ETH0_HWADDR
2319 if (unit == 0)
2320 sbmac_parse_hwaddr(SBMAC_ETH0_HWADDR, eaddr);
2321 #endif
2322 #ifdef SBMAC_ETH1_HWADDR
2323 if (unit == 1)
2324 sbmac_parse_hwaddr(SBMAC_ETH1_HWADDR, eaddr);
2325 #endif
2326 #ifdef SBMAC_ETH2_HWADDR
2327 if (unit == 2)
2328 sbmac_parse_hwaddr(SBMAC_ETH2_HWADDR, eaddr);
2329 #endif
2330 unit++;
2331
2332 /*
2333 * Display Ethernet address (this is called during the config process
2334 * so we need to finish off the config message that was being displayed)
2335 */
2336 printf(": Ethernet%s\n",
2337 sc->sbm_pass3_dma ? ", using unaligned tx DMA" : "");
2338 printf("%s: Ethernet address: %s\n", self->dv_xname,
2339 ether_sprintf(eaddr));
2340
2341
2342 /*
2343 * Set up ifnet structure
2344 */
2345
2346 ifp = &sc->sc_ethercom.ec_if;
2347 ifp->if_softc = sc;
2348 memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
2349 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
2350 IFF_NOTRAILERS;
2351 ifp->if_ioctl = sbmac_ioctl;
2352 ifp->if_start = sbmac_start;
2353 ifp->if_watchdog = sbmac_watchdog;
2354 ifp->if_snd.ifq_maxlen = SBMAC_MAX_TXDESCR - 1;
2355
2356 /*
2357 * Set up ifmedia support.
2358 */
2359
2360 /*
2361 * Initialize MII/media info.
2362 */
2363 sc->sc_mii.mii_ifp = ifp;
2364 sc->sc_mii.mii_readreg = sbmac_mii_readreg;
2365 sc->sc_mii.mii_writereg = sbmac_mii_writereg;
2366 sc->sc_mii.mii_statchg = sbmac_mii_statchg;
2367 sc->sc_ethercom.ec_mii = &sc->sc_mii;
2368 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
2369 ether_mediastatus);
2370 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2371 MII_OFFSET_ANY, 0);
2372
2373 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2374 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2375 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2376 } else {
2377 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2378 }
2379
2380
2381 /*
2382 * map/route interrupt
2383 */
2384
2385 sc->sbm_intrhand = cpu_intr_establish(sap->sa_locs.sa_intr[0], IPL_NET,
2386 sbmac_intr, sc);
2387
2388 /*
2389 * Call MI attach routines.
2390 */
2391 if_attach(ifp);
2392 ether_ifattach(ifp, eaddr);
2393 }
2394