1 /* $NetBSD: sbmac.c,v 1.40 2011/02/20 07:47:39 matt Exp $ */
2
3 /*
4 * Copyright 2000, 2001, 2004
5 * Broadcom Corporation. All rights reserved.
6 *
7 * This software is furnished under license and may be used and copied only
8 * in accordance with the following terms and conditions. Subject to these
9 * conditions, you may download, copy, install, use, modify and distribute
10 * modified or unmodified copies of this software in source and/or binary
11 * form. No title or ownership is transferred hereby.
12 *
13 * 1) Any source code used, modified or distributed must reproduce and
14 * retain this copyright notice and list of conditions as they appear in
15 * the source file.
16 *
17 * 2) No right is granted to use any trade name, trademark, or logo of
18 * Broadcom Corporation. The "Broadcom Corporation" name may not be
19 * used to endorse or promote products derived from this software
20 * without the prior written permission of Broadcom Corporation.
21 *
22 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
25 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
26 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
27 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: sbmac.c,v 1.40 2011/02/20 07:47:39 matt Exp $");
37
38 #include "opt_inet.h"
39 #include "opt_ns.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/queue.h>
49 #include <sys/device.h>
50
51 #include <net/if.h>
52 #include <net/if_arp.h>
53 #include <net/if_ether.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56
57 #include <net/bpf.h>
58
59 #ifdef INET
60 #include <netinet/in.h>
61 #include <netinet/if_inarp.h>
62 #endif
63
64 #ifdef NS
65 #include <netns/ns.h>
66 #include <netns/ns_if.h>
67 #endif
68
69 #include <machine/locore.h>
70
71 #include "sbobiovar.h"
72
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75 #include <dev/mii/mii_bitbang.h>
76
77 #include <mips/sibyte/include/sb1250_defs.h>
78 #include <mips/sibyte/include/sb1250_regs.h>
79 #include <mips/sibyte/include/sb1250_mac.h>
80 #include <mips/sibyte/include/sb1250_dma.h>
81 #include <mips/sibyte/include/sb1250_scd.h>
82
83 /* Simple types */
84
85 typedef u_long sbmac_port_t;
86 typedef uint64_t sbmac_physaddr_t;
87 typedef uint64_t sbmac_enetaddr_t;
88
89 typedef enum { sbmac_speed_auto, sbmac_speed_10,
90 sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
91
92 typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
93 sbmac_duplex_full } sbmac_duplex_t;
94
95 typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
96 sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
97
98 typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
99 sbmac_state_broken } sbmac_state_t;
100
101
102 /* Macros */
103
104 #define SBMAC_EVENT_COUNTERS /* Include counters for various events */
105
106 #define	SBDMA_NEXTBUF(d, f)	(((f) + 1) & (d)->sbdma_dscr_mask)
107
108 #define CACHELINESIZE 32
109 #define NUMCACHEBLKS(x) (((x)+CACHELINESIZE-1)/CACHELINESIZE)
110 #define KMALLOC(x) malloc((x), M_DEVBUF, M_DONTWAIT)
111 #define KVTOPHYS(x) kvtophys((vaddr_t)(x))
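/*
 * Illustrative examples (not used by the code): with SBMAC_MAX_TXDESCR
 * of 256 the ring mask (sbdma_dscr_mask) is 255, so SBDMA_NEXTBUF wraps
 * index 255 back to 0.  NUMCACHEBLKS() rounds a byte count up to whole
 * 32-byte cache lines, e.g. NUMCACHEBLKS(60) == 2 and
 * NUMCACHEBLKS(1518) == 48.
 */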
112
113 #ifdef SBMACDEBUG
114 #define dprintf(x) printf x
115 #else
116 #define dprintf(x)
117 #endif
118
119 #define SBMAC_READCSR(t) mips3_ld((volatile uint64_t *) (t))
120 #define SBMAC_WRITECSR(t, v) mips3_sd((volatile uint64_t *) (t), (v))
121
122 #define PKSEG1(x) ((sbmac_port_t) MIPS_PHYS_TO_KSEG1(x))
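/*
 * For reference: MAC registers are 64 bits wide and are accessed with
 * uncached 64-bit loads/stores.  PKSEG1() turns a physical register
 * address into the corresponding KSEG1 (unmapped, uncached) virtual
 * address, e.g. SBMAC_READCSR(PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR))
 * as used in sbmac_attach() below.
 */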
123
124 /* These are limited to fit within one virtual page, and must be 2**N. */
125 #define SBMAC_MAX_TXDESCR 256 /* should be 1024 */
126 #define SBMAC_MAX_RXDESCR 256 /* should be 512 */
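/*
 * For reference: each descriptor (sbdmadscr_t, below) is 16 bytes, so a
 * 256-entry ring is 256 * 16 = 4096 bytes, i.e. exactly one 4KB page,
 * which is why the counts above are capped at 256 for now.
 */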
127
128 #define ETHER_ALIGN 2
129
130 /* DMA Descriptor structure */
131
132 typedef struct sbdmadscr_s {
133 uint64_t dscr_a;
134 uint64_t dscr_b;
135 } sbdmadscr_t;
136
137
138 /* DMA Controller structure */
139
140 typedef struct sbmacdma_s {
141
142 /*
143 * This stuff is used to identify the channel and the registers
144 * associated with it.
145 */
146
147 struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */
148 int sbdma_channel; /* channel number */
149 int sbdma_txdir; /* direction (1=transmit) */
150 int sbdma_maxdescr; /* total # of descriptors in ring */
151 sbmac_port_t sbdma_config0; /* DMA config register 0 */
152 sbmac_port_t sbdma_config1; /* DMA config register 1 */
153 sbmac_port_t sbdma_dscrbase; /* Descriptor base address */
154 sbmac_port_t sbdma_dscrcnt; /* Descriptor count register */
155 sbmac_port_t sbdma_curdscr; /* current descriptor address */
156
157 /*
158 * This stuff is for maintenance of the ring
159 */
160 sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */
161 struct mbuf **sbdma_ctxtable; /* context table, one per descr */
162 unsigned int sbdma_dscr_mask; /* sbdma_maxdescr - 1 */
163 paddr_t sbdma_dscrtable_phys; /* and also the phys addr */
164 unsigned int sbdma_add_index; /* next dscr for sw to add */
165 unsigned int sbdma_rem_index; /* next dscr for sw to remove */
166 } sbmacdma_t;
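/*
 * Ring bookkeeping convention (for reference): the ring is empty when
 * sbdma_add_index == sbdma_rem_index, and it is treated as full when
 * advancing sbdma_add_index would make the two equal, so one descriptor
 * slot is always left unused.  See sbdma_add_rcvbuffer() and
 * sbdma_add_txbuffer().
 */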
167
168
169 /* Ethernet softc structure */
170
171 struct sbmac_softc {
172
173 /*
174 * NetBSD-specific things
175 */
176 struct ethercom sc_ethercom; /* Ethernet common part */
177 struct mii_data sc_mii;
178 struct callout sc_tick_ch;
179
180 device_t sc_dev; /* device */
181 int sbm_if_flags;
182 void *sbm_intrhand;
183
184 /*
185 * Controller-specific things
186 */
187
188 sbmac_port_t sbm_base; /* MAC's base address */
189 sbmac_state_t sbm_state; /* current state */
190
191 sbmac_port_t sbm_macenable; /* MAC Enable Register */
192 sbmac_port_t sbm_maccfg; /* MAC Configuration Register */
193 sbmac_port_t sbm_fifocfg; /* FIFO configuration register */
194 sbmac_port_t sbm_framecfg; /* Frame configuration register */
195 sbmac_port_t sbm_rxfilter; /* receive filter register */
196 sbmac_port_t sbm_isr; /* Interrupt status register */
197 sbmac_port_t sbm_imr; /* Interrupt mask register */
198
199 sbmac_speed_t sbm_speed; /* current speed */
200 sbmac_duplex_t sbm_duplex; /* current duplex */
201 sbmac_fc_t sbm_fc; /* current flow control setting */
202 int sbm_rxflags; /* received packet flags */
203
204 u_char sbm_hwaddr[ETHER_ADDR_LEN];
205
206 sbmacdma_t sbm_txdma; /* for now, only use channel 0 */
207 sbmacdma_t sbm_rxdma;
208
209 int sbm_pass3_dma; /* chip has pass3 SOC DMA features */
210
211 #ifdef SBMAC_EVENT_COUNTERS
212 struct evcnt sbm_ev_rxintr; /* Rx interrupts */
213 struct evcnt sbm_ev_txintr; /* Tx interrupts */
214 	struct evcnt sbm_ev_txdrop;	/* Tx dropped: mbuf allocation failed */
215 struct evcnt sbm_ev_txstall; /* Tx stalled due to no descriptors free */
216
217 struct evcnt sbm_ev_txsplit; /* pass3 Tx split mbuf */
218 struct evcnt sbm_ev_txkeep; /* pass3 Tx didn't split mbuf */
219 #endif
220 };
221
222
223 #ifdef SBMAC_EVENT_COUNTERS
224 #define SBMAC_EVCNT_INCR(ev) (ev).ev_count++
225 #else
226 #define SBMAC_EVCNT_INCR(ev) do { /* nothing */ } while (0)
227 #endif
228
229 /* Externs */
230
231 extern paddr_t kvtophys(vaddr_t);
232
233 /* Prototypes */
234
235 static void sbdma_initctx(sbmacdma_t *, struct sbmac_softc *, int, int, int);
236 static void sbdma_channel_start(sbmacdma_t *);
237 static int sbdma_add_rcvbuffer(sbmacdma_t *, struct mbuf *);
238 static int sbdma_add_txbuffer(sbmacdma_t *, struct mbuf *);
239 static void sbdma_emptyring(sbmacdma_t *);
240 static void sbdma_fillring(sbmacdma_t *);
241 static void sbdma_rx_process(struct sbmac_softc *, sbmacdma_t *);
242 static void sbdma_tx_process(struct sbmac_softc *, sbmacdma_t *);
243 static void sbmac_initctx(struct sbmac_softc *);
244 static void sbmac_channel_start(struct sbmac_softc *);
245 static void sbmac_channel_stop(struct sbmac_softc *);
246 static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,
247 sbmac_state_t);
248 static void sbmac_promiscuous_mode(struct sbmac_softc *, bool);
249 static void sbmac_init_and_start(struct sbmac_softc *);
250 static uint64_t sbmac_addr2reg(u_char *);
251 static void sbmac_intr(void *, uint32_t, vaddr_t);
252 static void sbmac_start(struct ifnet *);
253 static void sbmac_setmulti(struct sbmac_softc *);
254 static int sbmac_ether_ioctl(struct ifnet *, u_long, void *);
255 static int sbmac_ioctl(struct ifnet *, u_long, void *);
256 static void sbmac_watchdog(struct ifnet *);
257 static int sbmac_match(device_t, cfdata_t, void *);
258 static void sbmac_attach(device_t, device_t, void *);
259 static bool sbmac_set_speed(struct sbmac_softc *, sbmac_speed_t);
260 static bool sbmac_set_duplex(struct sbmac_softc *, sbmac_duplex_t, sbmac_fc_t);
261 static void sbmac_tick(void *);
262
263
264 /* Globals */
265
266 CFATTACH_DECL_NEW(sbmac, sizeof(struct sbmac_softc),
267 sbmac_match, sbmac_attach, NULL, NULL);
268
269 static uint32_t sbmac_mii_bitbang_read(device_t self);
270 static void sbmac_mii_bitbang_write(device_t self, uint32_t val);
271
272 static const struct mii_bitbang_ops sbmac_mii_bitbang_ops = {
273 sbmac_mii_bitbang_read,
274 sbmac_mii_bitbang_write,
275 {
276 (uint32_t)M_MAC_MDIO_OUT, /* MII_BIT_MDO */
277 (uint32_t)M_MAC_MDIO_IN, /* MII_BIT_MDI */
278 (uint32_t)M_MAC_MDC, /* MII_BIT_MDC */
279 0, /* MII_BIT_DIR_HOST_PHY */
280 (uint32_t)M_MAC_MDIO_DIR /* MII_BIT_DIR_PHY_HOST */
281 }
282 };
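/*
 * MII management frames are bit-banged through the MAC's MDIO register by
 * the machine-independent mii_bitbang code; the table above simply maps
 * the generic MDO/MDI/MDC/direction bits onto the M_MAC_MDIO_xxx and
 * M_MAC_MDC register bits, and sbmac_mii_readreg()/sbmac_mii_writereg()
 * below hand these ops to mii_bitbang_readreg()/mii_bitbang_writereg().
 */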
283
284 static uint32_t
285 sbmac_mii_bitbang_read(device_t self)
286 {
287 struct sbmac_softc *sc = device_private(self);
288 sbmac_port_t reg;
289
290 reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
291 return (uint32_t) SBMAC_READCSR(reg);
292 }
293
294 static void
295 sbmac_mii_bitbang_write(device_t self, uint32_t val)
296 {
297 struct sbmac_softc *sc = device_private(self);
298 sbmac_port_t reg;
299
300 reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
301
302 SBMAC_WRITECSR(reg, (val &
303 (M_MAC_MDC|M_MAC_MDIO_DIR|M_MAC_MDIO_OUT|M_MAC_MDIO_IN)));
304 }
305
306 /*
307  * Read a PHY register through the MII.
308 */
309 static int
310 sbmac_mii_readreg(device_t self, int phy, int reg)
311 {
312
313 return (mii_bitbang_readreg(self, &sbmac_mii_bitbang_ops, phy, reg));
314 }
315
316 /*
317 * Write to a PHY register through the MII.
318 */
319 static void
320 sbmac_mii_writereg(device_t self, int phy, int reg, int val)
321 {
322
323 mii_bitbang_writereg(self, &sbmac_mii_bitbang_ops, phy, reg, val);
324 }
325
326 static void
327 sbmac_mii_statchg(device_t self)
328 {
329 struct sbmac_softc *sc = device_private(self);
330 sbmac_state_t oldstate;
331
332 /* Stop the MAC in preparation for changing all of the parameters. */
333 oldstate = sbmac_set_channel_state(sc, sbmac_state_off);
334
335 switch (sc->sc_ethercom.ec_if.if_baudrate) {
336 default: /* if autonegotiation fails, assume 10Mbit */
337 case IF_Mbps(10):
338 sbmac_set_speed(sc, sbmac_speed_10);
339 break;
340
341 case IF_Mbps(100):
342 sbmac_set_speed(sc, sbmac_speed_100);
343 break;
344
345 case IF_Mbps(1000):
346 sbmac_set_speed(sc, sbmac_speed_1000);
347 break;
348 }
349
350 if (sc->sc_mii.mii_media_active & IFM_FDX) {
351 /* Configure for full-duplex */
352 /* XXX: is flow control right for 10, 100? */
353 sbmac_set_duplex(sc, sbmac_duplex_full, sbmac_fc_frame);
354 } else {
355 /* Configure for half-duplex */
356 /* XXX: is flow control right? */
357 sbmac_set_duplex(sc, sbmac_duplex_half, sbmac_fc_disabled);
358 }
359
360 /* And put it back into its former state. */
361 sbmac_set_channel_state(sc, oldstate);
362 }
363
364 /*
365 * SBDMA_INITCTX(d, sc, chan, txrx, maxdescr)
366 *
367 * Initialize a DMA channel context. Since there are potentially
368 * eight DMA channels per MAC, it's nice to do this in a standard
369 * way.
370 *
371 * Input parameters:
372 * d - sbmacdma_t structure (DMA channel context)
373 * sc - sbmac_softc structure (pointer to a MAC)
374 * chan - channel number (0..1 right now)
375 * txrx - Identifies DMA_TX or DMA_RX for channel direction
376 * maxdescr - number of descriptors
377 *
378 * Return value:
379 * nothing
380 */
381
382 static void
383 sbdma_initctx(sbmacdma_t *d, struct sbmac_softc *sc, int chan, int txrx,
384 int maxdescr)
385 {
386 /*
387 * Save away interesting stuff in the structure
388 */
389
390 d->sbdma_eth = sc;
391 d->sbdma_channel = chan;
392 d->sbdma_txdir = txrx;
393
394 /*
395 * initialize register pointers
396 */
397
398 d->sbdma_config0 = PKSEG1(sc->sbm_base +
399 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG0));
400 d->sbdma_config1 = PKSEG1(sc->sbm_base +
401 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG1));
402 d->sbdma_dscrbase = PKSEG1(sc->sbm_base +
403 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_BASE));
404 d->sbdma_dscrcnt = PKSEG1(sc->sbm_base +
405 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_CNT));
406 d->sbdma_curdscr = PKSEG1(sc->sbm_base +
407 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CUR_DSCRADDR));
408
409 /*
410 * Allocate memory for the ring
411 */
412
413 d->sbdma_maxdescr = maxdescr;
414 d->sbdma_dscr_mask = d->sbdma_maxdescr - 1;
415
416 d->sbdma_dscrtable = (sbdmadscr_t *)
417 KMALLOC(d->sbdma_maxdescr * sizeof(sbdmadscr_t));
418
419 memset(d->sbdma_dscrtable, 0, d->sbdma_maxdescr*sizeof(sbdmadscr_t));
420
421 d->sbdma_dscrtable_phys = KVTOPHYS(d->sbdma_dscrtable);
422
423 /*
424 * And context table
425 */
426
427 d->sbdma_ctxtable = (struct mbuf **)
428 KMALLOC(d->sbdma_maxdescr*sizeof(struct mbuf *));
429
430 memset(d->sbdma_ctxtable, 0, d->sbdma_maxdescr*sizeof(struct mbuf *));
431 }
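/*
 * Note on the allocation above: the descriptor table is at most
 * 256 * sizeof(sbdmadscr_t) = 4KB (see SBMAC_MAX_TXDESCR/RXDESCR), so it
 * fits within a single page and the single KVTOPHYS() of the base address
 * is sufficient for the hardware, which walks the ring by physical address.
 */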
432
433 /*
434 * SBDMA_CHANNEL_START(d)
435 *
436 * Initialize the hardware registers for a DMA channel.
437 *
438 * Input parameters:
439  *	   d - DMA channel to init (context must be previously init'd)
440 *
441 * Return value:
442 * nothing
443 */
444
445 static void
446 sbdma_channel_start(sbmacdma_t *d)
447 {
448 /*
449 * Turn on the DMA channel
450 */
451
452 SBMAC_WRITECSR(d->sbdma_config1, 0);
453
454 SBMAC_WRITECSR(d->sbdma_dscrbase, d->sbdma_dscrtable_phys);
455
456 SBMAC_WRITECSR(d->sbdma_config0, V_DMA_RINGSZ(d->sbdma_maxdescr) | 0);
457
458 /*
459 * Initialize ring pointers
460 */
461
462 d->sbdma_add_index = 0;
463 d->sbdma_rem_index = 0;
464 }
465
466 /*
467 * SBDMA_ADD_RCVBUFFER(d, m)
468 *
469 * Add a buffer to the specified DMA channel. For receive channels,
470 * this queues a buffer for inbound packets.
471 *
472 * Input parameters:
473 * d - DMA channel descriptor
474 * m - mbuf to add, or NULL if we should allocate one.
475 *
476 * Return value:
477  *	   0 if buffer added successfully
478  *	   ENOSPC if the ring is full, ENOBUFS if no mbuf could be allocated
479 */
480
481 static int
482 sbdma_add_rcvbuffer(sbmacdma_t *d, struct mbuf *m)
483 {
484 unsigned int dsc, nextdsc;
485 struct mbuf *m_new = NULL;
486
487 /* get pointer to our current place in the ring */
488
489 dsc = d->sbdma_add_index;
490 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
491
492 /*
493 * figure out if the ring is full - if the next descriptor
494 * is the same as the one that we're going to remove from
495 * the ring, the ring is full
496 */
497
498 if (nextdsc == d->sbdma_rem_index)
499 return ENOSPC;
500
501 /*
502 * Allocate an mbuf if we don't already have one.
503 * If we do have an mbuf, reset it so that it's empty.
504 */
505
506 if (m == NULL) {
507 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
508 if (m_new == NULL) {
509 aprint_error_dev(d->sbdma_eth->sc_dev,
510 "mbuf allocation failed\n");
511 return ENOBUFS;
512 }
513
514 MCLGET(m_new, M_DONTWAIT);
515 if (!(m_new->m_flags & M_EXT)) {
516 aprint_error_dev(d->sbdma_eth->sc_dev,
517 "mbuf cluster allocation failed\n");
518 m_freem(m_new);
519 return ENOBUFS;
520 }
521
522 m_new->m_len = m_new->m_pkthdr.len= MCLBYTES;
523 m_adj(m_new, ETHER_ALIGN);
524 } else {
525 m_new = m;
526 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
527 m_new->m_data = m_new->m_ext.ext_buf;
528 m_adj(m_new, ETHER_ALIGN);
529 }
530
531 /*
532 * fill in the descriptor
533 */
534
535 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new, void *)) |
536 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(ETHER_ALIGN + m_new->m_len)) |
537 M_DMA_DSCRA_INTERRUPT;
538
539 /* receiving: no options */
540 d->sbdma_dscrtable[dsc].dscr_b = 0;
541
542 /*
543 * fill in the context
544 */
545
546 d->sbdma_ctxtable[dsc] = m_new;
547
548 /*
549 * point at next packet
550 */
551
552 d->sbdma_add_index = nextdsc;
553
554 /*
555 * Give the buffer to the DMA engine.
556 */
557
558 SBMAC_WRITECSR(d->sbdma_dscrcnt, 1);
559
560 return 0; /* we did it */
561 }
562
563 /*
564 * SBDMA_ADD_TXBUFFER(d, m)
565 *
566 * Add a transmit buffer to the specified DMA channel, causing a
567 * transmit to start.
568 *
569 * Input parameters:
570 * d - DMA channel descriptor
571 * m - mbuf to add
572 *
573 * Return value:
574 * 0 transmit queued successfully
575 * otherwise error code
576 */
577
578 static int
579 sbdma_add_txbuffer(sbmacdma_t *d, struct mbuf *m)
580 {
581 unsigned int dsc, nextdsc, prevdsc, origdesc;
582 int length;
583 int num_mbufs = 0;
584 struct sbmac_softc *sc = d->sbdma_eth;
585
586 /* get pointer to our current place in the ring */
587
588 dsc = d->sbdma_add_index;
589 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
590
591 /*
592 * figure out if the ring is full - if the next descriptor
593 * is the same as the one that we're going to remove from
594 * the ring, the ring is full
595 */
596
597 if (nextdsc == d->sbdma_rem_index) {
598 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
599 return ENOSPC;
600 }
601
602 /*
603 	 * PASS3 parts do not have the buffer alignment restriction, so
604 	 * there is no need to copy/coalesce into a new mbuf.  They also
605 	 * use a different descriptor format.
606 */
607 if (sc->sbm_pass3_dma) {
608 struct mbuf *m_temp = NULL;
609
610 /*
611 * Loop thru this mbuf record.
612 * The head mbuf will have SOP set.
613 */
614 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m,void *)) |
615 M_DMA_ETHTX_SOP;
616
617 /*
618 		 * transmitting: set outbound options, buffer A size (+ low 5
619 		 * bits of start addr), and packet length.
620 */
621 d->sbdma_dscrtable[dsc].dscr_b =
622 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
623 V_DMA_DSCRB_A_SIZE((m->m_len +
624 (mtod(m,uintptr_t) & 0x0000001F))) |
625 V_DMA_DSCRB_PKT_SIZE_MSB((m->m_pkthdr.len & 0xc000) >> 14) |
626 V_DMA_DSCRB_PKT_SIZE(m->m_pkthdr.len & 0x3fff);
627
628 d->sbdma_add_index = nextdsc;
629 origdesc = prevdsc = dsc;
630 dsc = d->sbdma_add_index;
631 num_mbufs++;
632
633 /* Start with first non-head mbuf */
634 for(m_temp = m->m_next; m_temp != 0; m_temp = m_temp->m_next) {
635 int len, next_len;
636 uint64_t addr;
637
638 if (m_temp->m_len == 0)
639 continue; /* Skip 0-length mbufs */
640
641 len = m_temp->m_len;
642 addr = KVTOPHYS(mtod(m_temp, void *));
643
644 /*
645 * Check to see if the mbuf spans a page boundary. If
646 * it does, and the physical pages behind the virtual
647 * pages are not contiguous, split it so that each
648 			 * virtual page uses its own Tx descriptor.
649 */
650 if (trunc_page(addr) != trunc_page(addr + len - 1)) {
651 next_len = (addr + len) - trunc_page(addr + len);
652
653 len -= next_len;
654
655 if (addr + len ==
656 KVTOPHYS(mtod(m_temp, char *) + len)) {
657 SBMAC_EVCNT_INCR(sc->sbm_ev_txkeep);
658 len += next_len;
659 next_len = 0;
660 } else {
661 SBMAC_EVCNT_INCR(sc->sbm_ev_txsplit);
662 }
663 } else {
664 next_len = 0;
665 }
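			/*
			 * Worked example (illustrative): with 4KB pages, an
			 * mbuf whose data starts at physical ...ff0 with
			 * m_len 0x40 crosses a page boundary; next_len
			 * becomes 0x30 and len 0x10, so unless the two
			 * physical pages happen to be adjacent, the first
			 * descriptor covers the 0x10 bytes up to the page
			 * end and the "again" pass below emits a second
			 * descriptor for the remaining 0x30 bytes.
			 */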
666
667 again:
668 /*
669 * fill in the descriptor
670 */
671 d->sbdma_dscrtable[dsc].dscr_a = addr;
672
673 /*
674 			 * transmitting: set outbound options, buffer A
675 			 * size (+ low 5 bits of start addr)
676 */
677 d->sbdma_dscrtable[dsc].dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_NOTSOP) |
678 V_DMA_DSCRB_A_SIZE((len + (addr & 0x0000001F)));
679
680 d->sbdma_ctxtable[dsc] = NULL;
681
682 /*
683 * point at next descriptor
684 */
685 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
686 if (nextdsc == d->sbdma_rem_index) {
687 d->sbdma_add_index = origdesc;
688 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
689 return ENOSPC;
690 }
691 d->sbdma_add_index = nextdsc;
692
693 prevdsc = dsc;
694 dsc = d->sbdma_add_index;
695 num_mbufs++;
696
697 if (next_len != 0) {
698 addr = KVTOPHYS(mtod(m_temp, char *) + len);
699 len = next_len;
700
701 next_len = 0;
702 goto again;
703 }
704
705 }
706 /* Set head mbuf to last context index */
707 d->sbdma_ctxtable[prevdsc] = m;
708
709 /* Interrupt on last dscr of packet. */
710 d->sbdma_dscrtable[prevdsc].dscr_a |= M_DMA_DSCRA_INTERRUPT;
711 } else {
712 struct mbuf *m_new = NULL;
713 /*
714 * [BEGIN XXX]
715 * XXX Copy/coalesce the mbufs into a single mbuf cluster (we
716 * assume it will fit). This is a temporary hack to get us
717 * going.
718 */
719
720 MGETHDR(m_new,M_DONTWAIT,MT_DATA);
721 if (m_new == NULL) {
722 aprint_error_dev(d->sbdma_eth->sc_dev,
723 "mbuf allocation failed\n");
724 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
725 return ENOBUFS;
726 }
727
728 MCLGET(m_new,M_DONTWAIT);
729 if (!(m_new->m_flags & M_EXT)) {
730 aprint_error_dev(d->sbdma_eth->sc_dev,
731 "mbuf cluster allocation failed\n");
732 m_freem(m_new);
733 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
734 return ENOBUFS;
735 }
736
737 m_new->m_len = m_new->m_pkthdr.len= MCLBYTES;
738 /*m_adj(m_new,ETHER_ALIGN);*/
739
740 /*
741 * XXX Don't forget to include the offset portion in the
742 * XXX cache block calculation when this code is rewritten!
743 */
744
745 /*
746 * Copy data
747 */
748
749 m_copydata(m,0,m->m_pkthdr.len,mtod(m_new,void *));
750 m_new->m_len = m_new->m_pkthdr.len = m->m_pkthdr.len;
751
752 /* Free old mbuf 'm', actual mbuf is now 'm_new' */
753
754 // XXX: CALLERS WILL FREE, they might have to bpf_mtap() if this
755 // XXX: function succeeds.
756 // m_freem(m);
757 length = m_new->m_len;
758
759 /* [END XXX] */
760 /*
761 * fill in the descriptor
762 */
763
764 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new,void *)) |
765 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(m_new->m_len)) |
766 M_DMA_DSCRA_INTERRUPT |
767 M_DMA_ETHTX_SOP;
768
769 /* transmitting: set outbound options and length */
770 d->sbdma_dscrtable[dsc].dscr_b =
771 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
772 V_DMA_DSCRB_PKT_SIZE(length);
773
774 num_mbufs++;
775
776 /*
777 * fill in the context
778 */
779
780 d->sbdma_ctxtable[dsc] = m_new;
781
782 /*
783 * point at next packet
784 */
785 d->sbdma_add_index = nextdsc;
786 }
787
788 /*
789 * Give the buffer to the DMA engine.
790 */
791
792 SBMAC_WRITECSR(d->sbdma_dscrcnt, num_mbufs);
793
794 return 0; /* we did it */
795 }
796
797 /*
798 * SBDMA_EMPTYRING(d)
799 *
800  * Free all allocated mbufs on the specified DMA channel.
801 *
802 * Input parameters:
803 * d - DMA channel
804 *
805 * Return value:
806 * nothing
807 */
808
809 static void
810 sbdma_emptyring(sbmacdma_t *d)
811 {
812 int idx;
813 struct mbuf *m;
814
815 for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
816 m = d->sbdma_ctxtable[idx];
817 if (m) {
818 m_freem(m);
819 d->sbdma_ctxtable[idx] = NULL;
820 }
821 }
822 }
823
824 /*
825 * SBDMA_FILLRING(d)
826 *
827 * Fill the specified DMA channel (must be receive channel)
828 * with mbufs
829 *
830 * Input parameters:
831 * d - DMA channel
832 *
833 * Return value:
834 * nothing
835 */
836
837 static void
838 sbdma_fillring(sbmacdma_t *d)
839 {
840 int idx;
841
842 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++)
843 if (sbdma_add_rcvbuffer(d, NULL) != 0)
844 break;
845 }
846
847 /*
848 * SBDMA_RX_PROCESS(sc, d)
849 *
850 * Process "completed" receive buffers on the specified DMA channel.
851 * Note that this isn't really ideal for priority channels, since
852 * it processes all of the packets on a given channel before
853 * returning.
854 *
855 * Input parameters:
856 * sc - softc structure
857 * d - DMA channel context
858 *
859 * Return value:
860 * nothing
861 */
862
863 static void
864 sbdma_rx_process(struct sbmac_softc *sc, sbmacdma_t *d)
865 {
866 int curidx;
867 int hwidx;
868 sbdmadscr_t *dscp;
869 struct mbuf *m;
870 int len;
871
872 struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
873
874 for (;;) {
875 /*
876 * figure out where we are (as an index) and where
877 * the hardware is (also as an index)
878 *
879 * This could be done faster if (for example) the
880 * descriptor table was page-aligned and contiguous in
881 * both virtual and physical memory -- you could then
882 * just compare the low-order bits of the virtual address
883 * (sbdma_rem_index) and the physical address
884 * (sbdma_curdscr CSR).
885 */
886
887 curidx = d->sbdma_rem_index;
888 hwidx = (int)
889 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
890 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
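		/*
		 * (For example, if the CUR_DSCRADDR CSR is 5 * 16 bytes past
		 * sbdma_dscrtable_phys, hwidx is 5: descriptors from curidx
		 * up to, but not including, index 5 can be harvested.)
		 */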
891
892 /*
893 * If they're the same, that means we've processed all
894 * of the descriptors up to (but not including) the one that
895 * the hardware is working on right now.
896 */
897
898 if (curidx == hwidx)
899 break;
900
901 /*
902 * Otherwise, get the packet's mbuf ptr back
903 */
904
905 dscp = &(d->sbdma_dscrtable[curidx]);
906 m = d->sbdma_ctxtable[curidx];
907 d->sbdma_ctxtable[curidx] = NULL;
908
909 len = (int)G_DMA_DSCRB_PKT_SIZE(dscp->dscr_b) - 4;
910
911 /*
912 * Check packet status. If good, process it.
913 * If not, silently drop it and put it back on the
914 * receive ring.
915 */
916
917 if (! (dscp->dscr_a & M_DMA_ETHRX_BAD)) {
918
919 /*
920 * Set length into the packet
921 * XXX do we remove the CRC here?
922 */
923 m->m_pkthdr.len = m->m_len = len;
924
925 ifp->if_ipackets++;
926 m->m_pkthdr.rcvif = ifp;
927
928
929 /*
930 * Add a new buffer to replace the old one.
931 */
932 sbdma_add_rcvbuffer(d, NULL);
933
934 /*
935 * Handle BPF listeners. Let the BPF user see the
936 * packet, but don't pass it up to the ether_input()
937 * layer unless it's a broadcast packet, multicast
938 * packet, matches our ethernet address or the
939 * interface is in promiscuous mode.
940 */
941
942 bpf_mtap(ifp, m);
943 /*
944 * Pass the buffer to the kernel
945 */
946 (*ifp->if_input)(ifp, m);
947 } else {
948 /*
949 * Packet was mangled somehow. Just drop it and
950 * put it back on the receive ring.
951 */
952 sbdma_add_rcvbuffer(d, m);
953 }
954
955 /*
956 * .. and advance to the next buffer.
957 */
958
959 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
960 }
961 }
962
963 /*
964 * SBDMA_TX_PROCESS(sc, d)
965 *
966 * Process "completed" transmit buffers on the specified DMA channel.
967 * This is normally called within the interrupt service routine.
968 * Note that this isn't really ideal for priority channels, since
969 * it processes all of the packets on a given channel before
970 * returning.
971 *
972 * Input parameters:
973 * sc - softc structure
974 * d - DMA channel context
975 *
976 * Return value:
977 * nothing
978 */
979
980 static void
981 sbdma_tx_process(struct sbmac_softc *sc, sbmacdma_t *d)
982 {
983 int curidx;
984 int hwidx;
985 struct mbuf *m;
986
987 struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
988
989 for (;;) {
990 /*
991 * figure out where we are (as an index) and where
992 * the hardware is (also as an index)
993 *
994 * This could be done faster if (for example) the
995 * descriptor table was page-aligned and contiguous in
996 * both virtual and physical memory -- you could then
997 * just compare the low-order bits of the virtual address
998 * (sbdma_rem_index) and the physical address
999 * (sbdma_curdscr CSR).
1000 */
1001
1002 curidx = d->sbdma_rem_index;
1003 hwidx = (int)
1004 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1005 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1006
1007 /*
1008 * If they're the same, that means we've processed all
1009 * of the descriptors up to (but not including) the one that
1010 * the hardware is working on right now.
1011 */
1012
1013 if (curidx == hwidx)
1014 break;
1015
1016 /*
1017 * Otherwise, get the packet's mbuf ptr back
1018 */
1019
1020 m = d->sbdma_ctxtable[curidx];
1021 d->sbdma_ctxtable[curidx] = NULL;
1022
1023 /*
1024 * for transmits we just free buffers and count packets.
1025 */
1026 ifp->if_opackets++;
1027 m_freem(m);
1028
1029 /*
1030 * .. and advance to the next buffer.
1031 */
1032
1033 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
1034 }
1035
1036 /*
1037 	 * Decide how to set the IFF_OACTIVE bit for the interface.
1038 * It's supposed to reflect if the interface is actively
1039 * transmitting, but that's really hard to do quickly.
1040 */
1041
1042 ifp->if_flags &= ~IFF_OACTIVE;
1043 }
1044
1045 /*
1046 * SBMAC_INITCTX(s)
1047 *
1048 * Initialize an Ethernet context structure - this is called
1049 * once per MAC on the 1250. Memory is allocated here, so don't
1050 * call it again from inside the ioctl routines that bring the
1051 * interface up/down
1052 *
1053 * Input parameters:
1054 * sc - sbmac context structure
1055 *
1056 * Return value:
1057  *	   nothing
1058 */
1059
1060 static void
1061 sbmac_initctx(struct sbmac_softc *sc)
1062 {
1063 uint64_t sysrev;
1064
1065 /*
1066 * figure out the addresses of some ports
1067 */
1068
1069 sc->sbm_macenable = PKSEG1(sc->sbm_base + R_MAC_ENABLE);
1070 sc->sbm_maccfg = PKSEG1(sc->sbm_base + R_MAC_CFG);
1071 sc->sbm_fifocfg = PKSEG1(sc->sbm_base + R_MAC_THRSH_CFG);
1072 sc->sbm_framecfg = PKSEG1(sc->sbm_base + R_MAC_FRAMECFG);
1073 sc->sbm_rxfilter = PKSEG1(sc->sbm_base + R_MAC_ADFILTER_CFG);
1074 sc->sbm_isr = PKSEG1(sc->sbm_base + R_MAC_STATUS);
1075 sc->sbm_imr = PKSEG1(sc->sbm_base + R_MAC_INT_MASK);
1076
1077 /*
1078 * Initialize the DMA channels. Right now, only one per MAC is used
1079 * Note: Only do this _once_, as it allocates memory from the kernel!
1080 */
1081
1082 sbdma_initctx(&(sc->sbm_txdma), sc, 0, DMA_TX, SBMAC_MAX_TXDESCR);
1083 sbdma_initctx(&(sc->sbm_rxdma), sc, 0, DMA_RX, SBMAC_MAX_RXDESCR);
1084
1085 /*
1086 * initial state is OFF
1087 */
1088
1089 sc->sbm_state = sbmac_state_off;
1090
1091 /*
1092 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
1093 */
1094
1095 sc->sbm_speed = sbmac_speed_10;
1096 sc->sbm_duplex = sbmac_duplex_half;
1097 sc->sbm_fc = sbmac_fc_disabled;
1098
1099 /*
1100 * Determine SOC type. 112x has Pass3 SOC features.
1101 */
1102 sysrev = SBMAC_READCSR( PKSEG1(A_SCD_SYSTEM_REVISION) );
1103 sc->sbm_pass3_dma = (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1120 ||
1104 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125 ||
1105 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125H ||
1106 (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1250 &&
1107 G_SYS_REVISION(sysrev) >= K_SYS_REVISION_BCM1250_PASS3));
1108 #ifdef SBMAC_EVENT_COUNTERS
1109 const char * const xname = device_xname(sc->sc_dev);
1110 evcnt_attach_dynamic(&sc->sbm_ev_rxintr, EVCNT_TYPE_INTR,
1111 NULL, xname, "rxintr");
1112 evcnt_attach_dynamic(&sc->sbm_ev_txintr, EVCNT_TYPE_INTR,
1113 NULL, xname, "txintr");
1114 evcnt_attach_dynamic(&sc->sbm_ev_txdrop, EVCNT_TYPE_MISC,
1115 NULL, xname, "txdrop");
1116 evcnt_attach_dynamic(&sc->sbm_ev_txstall, EVCNT_TYPE_MISC,
1117 NULL, xname, "txstall");
1118 if (sc->sbm_pass3_dma) {
1119 evcnt_attach_dynamic(&sc->sbm_ev_txsplit, EVCNT_TYPE_MISC,
1120 NULL, xname, "pass3tx-split");
1121 evcnt_attach_dynamic(&sc->sbm_ev_txkeep, EVCNT_TYPE_MISC,
1122 NULL, xname, "pass3tx-keep");
1123 }
1124 #endif
1125 }
1126
1127 /*
1128 * SBMAC_CHANNEL_START(s)
1129 *
1130 * Start packet processing on this MAC.
1131 *
1132 * Input parameters:
1133 * sc - sbmac structure
1134 *
1135 * Return value:
1136 * nothing
1137 */
1138
1139 static void
1140 sbmac_channel_start(struct sbmac_softc *sc)
1141 {
1142 uint64_t reg;
1143 sbmac_port_t port;
1144 uint64_t cfg, fifo, framecfg;
1145 int idx;
1146 uint64_t dma_cfg0, fifo_cfg;
1147 sbmacdma_t *txdma;
1148
1149 /*
1150 * Don't do this if running
1151 */
1152
1153 if (sc->sbm_state == sbmac_state_on)
1154 return;
1155
1156 /*
1157 * Bring the controller out of reset, but leave it off.
1158 */
1159
1160 SBMAC_WRITECSR(sc->sbm_macenable, 0);
1161
1162 /*
1163 * Ignore all received packets
1164 */
1165
1166 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1167
1168 /*
1169 * Calculate values for various control registers.
1170 */
1171
1172 cfg = M_MAC_RETRY_EN |
1173 M_MAC_TX_HOLD_SOP_EN |
1174 V_MAC_TX_PAUSE_CNT_16K |
1175 M_MAC_AP_STAT_EN |
1176 M_MAC_SS_EN |
1177 0;
1178
1179 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
1180 V_MAC_TX_RD_THRSH(4) |
1181 V_MAC_TX_RL_THRSH(4) |
1182 V_MAC_RX_PL_THRSH(4) |
1183 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
1184 V_MAC_RX_PL_THRSH(4) |
1185 V_MAC_RX_RL_THRSH(8) |
1186 0;
1187
1188 framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
1189 V_MAC_MAX_FRAMESZ_DEFAULT |
1190 V_MAC_BACKOFF_SEL(1);
1191
1192 /*
1193 * Clear out the hash address map
1194 */
1195
1196 port = PKSEG1(sc->sbm_base + R_MAC_HASH_BASE);
1197 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1198 SBMAC_WRITECSR(port, 0);
1199 port += sizeof(uint64_t);
1200 }
1201
1202 /*
1203 * Clear out the exact-match table
1204 */
1205
1206 port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE);
1207 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
1208 SBMAC_WRITECSR(port, 0);
1209 port += sizeof(uint64_t);
1210 }
1211
1212 /*
1213 * Clear out the DMA Channel mapping table registers
1214 */
1215
1216 port = PKSEG1(sc->sbm_base + R_MAC_CHUP0_BASE);
1217 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1218 SBMAC_WRITECSR(port, 0);
1219 port += sizeof(uint64_t);
1220 }
1221
1222 port = PKSEG1(sc->sbm_base + R_MAC_CHLO0_BASE);
1223 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1224 SBMAC_WRITECSR(port, 0);
1225 port += sizeof(uint64_t);
1226 }
1227
1228 /*
1229 * Program the hardware address. It goes into the hardware-address
1230 * register as well as the first filter register.
1231 */
1232
1233 reg = sbmac_addr2reg(sc->sbm_hwaddr);
1234
1235 port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE);
1236 SBMAC_WRITECSR(port, reg);
1237 port = PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR);
1238 SBMAC_WRITECSR(port, 0); // pass1 workaround
1239
1240 /*
1241 * Set the receive filter for no packets, and write values
1242 * to the various config registers
1243 */
1244
1245 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1246 SBMAC_WRITECSR(sc->sbm_imr, 0);
1247 SBMAC_WRITECSR(sc->sbm_framecfg, framecfg);
1248 SBMAC_WRITECSR(sc->sbm_fifocfg, fifo);
1249 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1250
1251 /*
1252 * Initialize DMA channels (rings should be ok now)
1253 */
1254
1255 sbdma_channel_start(&(sc->sbm_rxdma));
1256 sbdma_channel_start(&(sc->sbm_txdma));
1257
1258 /*
1259 * Configure the speed, duplex, and flow control
1260 */
1261
1262 sbmac_set_speed(sc, sc->sbm_speed);
1263 sbmac_set_duplex(sc, sc->sbm_duplex, sc->sbm_fc);
1264
1265 /*
1266 * Fill the receive ring
1267 */
1268
1269 sbdma_fillring(&(sc->sbm_rxdma));
1270
1271 /*
1272 * Turn on the rest of the bits in the enable register
1273 */
1274
1275 SBMAC_WRITECSR(sc->sbm_macenable, M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0 |
1276 M_MAC_RX_ENABLE | M_MAC_TX_ENABLE);
1277
1278
1279 /*
1280 * Accept any kind of interrupt on TX and RX DMA channel 0
1281 */
1282 SBMAC_WRITECSR(sc->sbm_imr,
1283 (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1284 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0));
1285
1286 /*
1287 * Enable receiving unicasts and broadcasts
1288 */
1289
1290 SBMAC_WRITECSR(sc->sbm_rxfilter, M_MAC_UCAST_EN | M_MAC_BCAST_EN);
1291
1292 /*
1293 * On chips which support unaligned DMA features, set the descriptor
1294 * ring for transmit channels to use the unaligned buffer format.
1295 */
1296 txdma = &(sc->sbm_txdma);
1297
1298 if (sc->sbm_pass3_dma) {
1299 dma_cfg0 = SBMAC_READCSR(txdma->sbdma_config0);
1300 dma_cfg0 |= V_DMA_DESC_TYPE(K_DMA_DESC_TYPE_RING_UAL_RMW) |
1301 M_DMA_TBX_EN | M_DMA_TDX_EN;
1302 SBMAC_WRITECSR(txdma->sbdma_config0,dma_cfg0);
1303
1304 fifo_cfg = SBMAC_READCSR(sc->sbm_fifocfg);
1305 fifo_cfg |= V_MAC_TX_WR_THRSH(8) |
1306 V_MAC_TX_RD_THRSH(8) | V_MAC_TX_RL_THRSH(8);
1307 SBMAC_WRITECSR(sc->sbm_fifocfg,fifo_cfg);
1308 }
1309
1310 /*
1311 * we're running now.
1312 */
1313
1314 sc->sbm_state = sbmac_state_on;
1315 sc->sc_ethercom.ec_if.if_flags |= IFF_RUNNING;
1316
1317 /*
1318 * Program multicast addresses
1319 */
1320
1321 sbmac_setmulti(sc);
1322
1323 /*
1324 * If channel was in promiscuous mode before, turn that on
1325 */
1326
1327 if (sc->sc_ethercom.ec_if.if_flags & IFF_PROMISC)
1328 sbmac_promiscuous_mode(sc, true);
1329
1330 /*
1331 * Turn on the once-per-second timer
1332 */
1333
1334 callout_reset(&(sc->sc_tick_ch), hz, sbmac_tick, sc);
1335 }
1336
1337 /*
1338 * SBMAC_CHANNEL_STOP(s)
1339 *
1340 * Stop packet processing on this MAC.
1341 *
1342 * Input parameters:
1343 * sc - sbmac structure
1344 *
1345 * Return value:
1346 * nothing
1347 */
1348
1349 static void
1350 sbmac_channel_stop(struct sbmac_softc *sc)
1351 {
1352 uint64_t ctl;
1353
1354 /* don't do this if already stopped */
1355
1356 if (sc->sbm_state == sbmac_state_off)
1357 return;
1358
1359 /* don't accept any packets, disable all interrupts */
1360
1361 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1362 SBMAC_WRITECSR(sc->sbm_imr, 0);
1363
1364 /* Turn off ticker */
1365
1366 callout_stop(&(sc->sc_tick_ch));
1367
1368 /* turn off receiver and transmitter */
1369
1370 ctl = SBMAC_READCSR(sc->sbm_macenable);
1371 ctl &= ~(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0);
1372 SBMAC_WRITECSR(sc->sbm_macenable, ctl);
1373
1374 /* We're stopped now. */
1375
1376 sc->sbm_state = sbmac_state_off;
1377 sc->sc_ethercom.ec_if.if_flags &= ~IFF_RUNNING;
1378
1379 /* Empty the receive and transmit rings */
1380
1381 sbdma_emptyring(&(sc->sbm_rxdma));
1382 sbdma_emptyring(&(sc->sbm_txdma));
1383 }
1384
1385 /*
1386  * SBMAC_SET_CHANNEL_STATE(sc, state)
1387 *
1388 * Set the channel's state ON or OFF
1389 *
1390 * Input parameters:
1391 * state - new state
1392 *
1393 * Return value:
1394 * old state
1395 */
1396
1397 static sbmac_state_t
1398 sbmac_set_channel_state(struct sbmac_softc *sc, sbmac_state_t state)
1399 {
1400 sbmac_state_t oldstate = sc->sbm_state;
1401
1402 /*
1403 * If same as previous state, return
1404 */
1405
1406 if (state == oldstate)
1407 return oldstate;
1408
1409 /*
1410 * If new state is ON, turn channel on
1411 */
1412
1413 if (state == sbmac_state_on)
1414 sbmac_channel_start(sc);
1415 else
1416 sbmac_channel_stop(sc);
1417
1418 /*
1419 * Return previous state
1420 */
1421
1422 return oldstate;
1423 }
1424
1425 /*
1426 * SBMAC_PROMISCUOUS_MODE(sc, enabled)
1427 *
1428 * Turn on or off promiscuous mode
1429 *
1430 * Input parameters:
1431 * sc - softc
1432 * enabled - true to turn on, false to turn off
1433 *
1434 * Return value:
1435 * nothing
1436 */
1437
1438 static void
1439 sbmac_promiscuous_mode(struct sbmac_softc *sc, bool enabled)
1440 {
1441 uint64_t reg;
1442
1443 if (sc->sbm_state != sbmac_state_on)
1444 return;
1445
1446 if (enabled) {
1447 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1448 reg |= M_MAC_ALLPKT_EN;
1449 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1450 } else {
1451 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1452 reg &= ~M_MAC_ALLPKT_EN;
1453 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1454 }
1455 }
1456
1457 /*
1458 * SBMAC_INIT_AND_START(sc)
1459 *
1460 * Stop the channel and restart it. This is generally used
1461 * when we have to do something to the channel that requires
1462 * a swift kick.
1463 *
1464 * Input parameters:
1465 * sc - softc
1466 */
1467
1468 static void
1469 sbmac_init_and_start(struct sbmac_softc *sc)
1470 {
1471 int s;
1472
1473 s = splnet();
1474
1475 mii_pollstat(&sc->sc_mii); /* poll phy for current speed */
1476 sbmac_mii_statchg(sc->sc_dev); /* set state to new speed */
1477 sbmac_set_channel_state(sc, sbmac_state_on);
1478
1479 splx(s);
1480 }
1481
1482 /*
1483 * SBMAC_ADDR2REG(ptr)
1484 *
1485 * Convert six bytes into the 64-bit register value that
1486 * we typically write into the SBMAC's address/mcast registers
1487 *
1488 * Input parameters:
1489 * ptr - pointer to 6 bytes
1490 *
1491 * Return value:
1492 * register value
1493 */
1494
1495 static uint64_t
1496 sbmac_addr2reg(u_char *ptr)
1497 {
1498 uint64_t reg = 0;
1499
1500 ptr += 6;
1501
1502 reg |= (uint64_t) *(--ptr);
1503 reg <<= 8;
1504 reg |= (uint64_t) *(--ptr);
1505 reg <<= 8;
1506 reg |= (uint64_t) *(--ptr);
1507 reg <<= 8;
1508 reg |= (uint64_t) *(--ptr);
1509 reg <<= 8;
1510 reg |= (uint64_t) *(--ptr);
1511 reg <<= 8;
1512 reg |= (uint64_t) *(--ptr);
1513
1514 return reg;
1515 }
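/*
 * For example (illustrative), the address 00:11:22:33:44:55 is packed with
 * its first octet in the least-significant byte:
 *
 *	sbmac_addr2reg(ea) == 0x0000554433221100
 *
 * which is the same byte order sbmac_attach() assumes when it reads the
 * address back out of R_MAC_ETHERNET_ADDR.
 */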
1516
1517 /*
1518 * SBMAC_SET_SPEED(sc, speed)
1519 *
1520 * Configure LAN speed for the specified MAC.
1521 * Warning: must be called when MAC is off!
1522 *
1523 * Input parameters:
1524 * sc - sbmac structure
1525 * speed - speed to set MAC to (see sbmac_speed_t enum)
1526 *
1527 * Return value:
1528 * true if successful
1529 * false indicates invalid parameters
1530 */
1531
1532 static bool
1533 sbmac_set_speed(struct sbmac_softc *sc, sbmac_speed_t speed)
1534 {
1535 uint64_t cfg;
1536 uint64_t framecfg;
1537
1538 /*
1539 * Save new current values
1540 */
1541
1542 sc->sbm_speed = speed;
1543
1544 if (sc->sbm_state != sbmac_state_off)
1545 panic("sbmac_set_speed while MAC not off");
1546
1547 /*
1548 * Read current register values
1549 */
1550
1551 cfg = SBMAC_READCSR(sc->sbm_maccfg);
1552 framecfg = SBMAC_READCSR(sc->sbm_framecfg);
1553
1554 /*
1555 * Mask out the stuff we want to change
1556 */
1557
1558 cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
1559 framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
1560 M_MAC_SLOT_SIZE);
1561
1562 /*
1563 * Now add in the new bits
1564 */
1565
1566 switch (speed) {
1567 case sbmac_speed_10:
1568 framecfg |= V_MAC_IFG_RX_10 |
1569 V_MAC_IFG_TX_10 |
1570 K_MAC_IFG_THRSH_10 |
1571 V_MAC_SLOT_SIZE_10;
1572 cfg |= V_MAC_SPEED_SEL_10MBPS;
1573 break;
1574
1575 case sbmac_speed_100:
1576 framecfg |= V_MAC_IFG_RX_100 |
1577 V_MAC_IFG_TX_100 |
1578 V_MAC_IFG_THRSH_100 |
1579 V_MAC_SLOT_SIZE_100;
1580 cfg |= V_MAC_SPEED_SEL_100MBPS ;
1581 break;
1582
1583 case sbmac_speed_1000:
1584 framecfg |= V_MAC_IFG_RX_1000 |
1585 V_MAC_IFG_TX_1000 |
1586 V_MAC_IFG_THRSH_1000 |
1587 V_MAC_SLOT_SIZE_1000;
1588 cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
1589 break;
1590
1591 case sbmac_speed_auto: /* XXX not implemented */
1592 /* fall through */
1593 default:
1594 return false;
1595 }
1596
1597 /*
1598 * Send the bits back to the hardware
1599 */
1600
1601 SBMAC_WRITECSR(sc->sbm_framecfg, framecfg);
1602 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1603
1604 return true;
1605 }
1606
1607 /*
1608 * SBMAC_SET_DUPLEX(sc, duplex, fc)
1609 *
1610 * Set Ethernet duplex and flow control options for this MAC
1611 * Warning: must be called when MAC is off!
1612 *
1613 * Input parameters:
1614 * sc - sbmac structure
1615 * duplex - duplex setting (see sbmac_duplex_t)
1616 * fc - flow control setting (see sbmac_fc_t)
1617 *
1618 * Return value:
1619 * true if ok
1620 * false if an invalid parameter combination was specified
1621 */
1622
1623 static bool
1624 sbmac_set_duplex(struct sbmac_softc *sc, sbmac_duplex_t duplex, sbmac_fc_t fc)
1625 {
1626 uint64_t cfg;
1627
1628 /*
1629 * Save new current values
1630 */
1631
1632 sc->sbm_duplex = duplex;
1633 sc->sbm_fc = fc;
1634
1635 if (sc->sbm_state != sbmac_state_off)
1636 panic("sbmac_set_duplex while MAC not off");
1637
1638 /*
1639 * Read current register values
1640 */
1641
1642 cfg = SBMAC_READCSR(sc->sbm_maccfg);
1643
1644 /*
1645 * Mask off the stuff we're about to change
1646 */
1647
1648 cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
1649
1650 switch (duplex) {
1651 case sbmac_duplex_half:
1652 switch (fc) {
1653 case sbmac_fc_disabled:
1654 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
1655 break;
1656
1657 case sbmac_fc_collision:
1658 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
1659 break;
1660
1661 case sbmac_fc_carrier:
1662 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
1663 break;
1664
1665 case sbmac_fc_auto: /* XXX not implemented */
1666 /* fall through */
1667 case sbmac_fc_frame: /* not valid in half duplex */
1668 default: /* invalid selection */
1669 panic("%s: invalid half duplex fc selection %d",
1670 device_xname(sc->sc_dev), fc);
1671 return false;
1672 }
1673 break;
1674
1675 case sbmac_duplex_full:
1676 switch (fc) {
1677 case sbmac_fc_disabled:
1678 cfg |= V_MAC_FC_CMD_DISABLED;
1679 break;
1680
1681 case sbmac_fc_frame:
1682 cfg |= V_MAC_FC_CMD_ENABLED;
1683 break;
1684
1685 case sbmac_fc_collision: /* not valid in full duplex */
1686 case sbmac_fc_carrier: /* not valid in full duplex */
1687 case sbmac_fc_auto: /* XXX not implemented */
1688 /* fall through */
1689 default:
1690 panic("%s: invalid full duplex fc selection %d",
1691 device_xname(sc->sc_dev), fc);
1692 return false;
1693 }
1694 break;
1695
1696 default:
1697 /* fall through */
1698 case sbmac_duplex_auto:
1699 panic("%s: bad duplex %d", device_xname(sc->sc_dev), duplex);
1700 /* XXX not implemented */
1701 break;
1702 }
1703
1704 /*
1705 * Send the bits back to the hardware
1706 */
1707
1708 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1709
1710 return true;
1711 }
1712
1713 /*
1714 * SBMAC_INTR()
1715 *
1716 * Interrupt handler for MAC interrupts
1717 *
1718 * Input parameters:
1719  *	   xsc - sbmac softc pointer (the status and pc arguments are unused)
1720 *
1721 * Return value:
1722 * nothing
1723 */
1724
1725 /* ARGSUSED */
1726 static void
1727 sbmac_intr(void *xsc, uint32_t status, vaddr_t pc)
1728 {
1729 struct sbmac_softc *sc = xsc;
1730 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1731 uint64_t isr;
1732
1733 for (;;) {
1734
1735 /*
1736 * Read the ISR (this clears the bits in the real register)
1737 */
1738
1739 isr = SBMAC_READCSR(sc->sbm_isr);
1740
1741 if (isr == 0)
1742 break;
1743
1744 /*
1745 * Transmits on channel 0
1746 */
1747
1748 if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
1749 sbdma_tx_process(sc, &(sc->sbm_txdma));
1750 SBMAC_EVCNT_INCR(sc->sbm_ev_txintr);
1751 }
1752
1753 /*
1754 * Receives on channel 0
1755 */
1756
1757 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
1758 sbdma_rx_process(sc, &(sc->sbm_rxdma));
1759 SBMAC_EVCNT_INCR(sc->sbm_ev_rxintr);
1760 }
1761 }
1762
1763 /* try to get more packets going */
1764 sbmac_start(ifp);
1765 }
1766
1767
1768 /*
1769 * SBMAC_START(ifp)
1770 *
1771 * Start output on the specified interface. Basically, we
1772 * queue as many buffers as we can until the ring fills up, or
1773 * we run off the end of the queue, whichever comes first.
1774 *
1775 * Input parameters:
1776 * ifp - interface
1777 *
1778 * Return value:
1779 * nothing
1780 */
1781
1782 static void
1783 sbmac_start(struct ifnet *ifp)
1784 {
1785 struct sbmac_softc *sc;
1786 struct mbuf *m_head = NULL;
1787 int rv;
1788
1789 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1790 return;
1791
1792 sc = ifp->if_softc;
1793
1794 for (;;) {
1795
1796 IF_DEQUEUE(&ifp->if_snd, m_head);
1797 if (m_head == NULL)
1798 break;
1799
1800 /*
1801 * Put the buffer on the transmit ring. If we
1802 * don't have room, set the OACTIVE flag and wait
1803 * for the NIC to drain the ring.
1804 */
1805
1806 rv = sbdma_add_txbuffer(&(sc->sbm_txdma), m_head);
1807
1808 if (rv == 0) {
1809 /*
1810 * If there's a BPF listener, bounce a copy of this
1811 * frame to it.
1812 */
1813 bpf_mtap(ifp, m_head);
1814 if (!sc->sbm_pass3_dma) {
1815 /*
1816 * Don't free mbuf if we're not copying to new
1817 * mbuf in sbdma_add_txbuffer. It will be
1818 * freed in sbdma_tx_process.
1819 */
1820 m_freem(m_head);
1821 }
1822 } else {
1823 IF_PREPEND(&ifp->if_snd, m_head);
1824 ifp->if_flags |= IFF_OACTIVE;
1825 break;
1826 }
1827 }
1828 }
1829
1830 /*
1831 * SBMAC_SETMULTI(sc)
1832 *
1833 * Reprogram the multicast table into the hardware, given
1834 * the list of multicasts associated with the interface
1835 * structure.
1836 *
1837 * Input parameters:
1838 * sc - softc
1839 *
1840 * Return value:
1841 * nothing
1842 */
1843
1844 static void
1845 sbmac_setmulti(struct sbmac_softc *sc)
1846 {
1847 struct ifnet *ifp;
1848 uint64_t reg;
1849 sbmac_port_t port;
1850 int idx;
1851 struct ether_multi *enm;
1852 struct ether_multistep step;
1853
1854 ifp = &sc->sc_ethercom.ec_if;
1855
1856 /*
1857 * Clear out entire multicast table. We do this by nuking
1858 * the entire hash table and all the direct matches except
1859 * the first one, which is used for our station address
1860 */
1861
1862 for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
1863 port = PKSEG1(sc->sbm_base +
1864 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1865 SBMAC_WRITECSR(port, 0);
1866 }
1867
1868 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1869 port = PKSEG1(sc->sbm_base +
1870 R_MAC_HASH_BASE+(idx*sizeof(uint64_t)));
1871 SBMAC_WRITECSR(port, 0);
1872 }
1873
1874 /*
1875 * Clear the filter to say we don't want any multicasts.
1876 */
1877
1878 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1879 reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1880 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1881
1882 if (ifp->if_flags & IFF_ALLMULTI) {
1883 /*
1884 * Enable ALL multicasts. Do this by inverting the
1885 * multicast enable bit.
1886 */
1887 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1888 reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1889 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1890 return;
1891 }
1892
1893 /*
1894  * Program new multicast entries.  For now, only use the
1895 * perfect filter. In the future we'll need to use the
1896 * hash filter if the perfect filter overflows
1897 */
1898
1899 /*
1900 * XXX only using perfect filter for now, need to use hash
1901 * XXX if the table overflows
1902 */
1903
1904 idx = 1; /* skip station address */
1905 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
1906 while ((enm != NULL) && (idx < MAC_ADDR_COUNT)) {
1907 reg = sbmac_addr2reg(enm->enm_addrlo);
1908 port = PKSEG1(sc->sbm_base +
1909 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1910 SBMAC_WRITECSR(port, reg);
1911 idx++;
1912 ETHER_NEXT_MULTI(step, enm);
1913 }
1914
1915 /*
1916 * Enable the "accept multicast bits" if we programmed at least one
1917 * multicast.
1918 */
1919
1920 if (idx > 1) {
1921 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1922 reg |= M_MAC_MCAST_EN;
1923 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1924 }
1925 }
1926
1927 /*
1928 * SBMAC_ETHER_IOCTL(ifp, cmd, data)
1929 *
1930 * Generic IOCTL requests for this interface. The basic
1931 * stuff is handled here for bringing the interface up,
1932 * handling multicasts, etc.
1933 *
1934 * Input parameters:
1935 * ifp - interface structure
1936 * cmd - command code
1937 * data - pointer to data
1938 *
1939 * Return value:
1940 * return value (0 is success)
1941 */
1942
1943 static int
1944 sbmac_ether_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1945 {
1946 struct ifaddr *ifa = (struct ifaddr *) data;
1947 struct sbmac_softc *sc = ifp->if_softc;
1948
1949 switch (cmd) {
1950 case SIOCINITIFADDR:
1951 ifp->if_flags |= IFF_UP;
1952
1953 switch (ifa->ifa_addr->sa_family) {
1954 #ifdef INET
1955 case AF_INET:
1956 sbmac_init_and_start(sc);
1957 arp_ifinit(ifp, ifa);
1958 break;
1959 #endif
1960 #ifdef NS
1961 case AF_NS:
1962 {
1963 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
1964
1965 if (ns_nullhost(*ina))
1966 ina->x_host =
1967 *(union ns_host *)LLADDR(ifp->if_sadl);
1968 else
1969 memcpy(LLADDR(ifp->if_sadl), ina->x_host.c_host,
1970 ifp->if_addrlen);
1971 /* Set new address. */
1972 sbmac_init_and_start(sc);
1973 break;
1974 }
1975 #endif
1976 default:
1977 sbmac_init_and_start(sc);
1978 break;
1979 }
1980 break;
1981
1982 default:
1983 return ENOTTY;
1984 }
1985
1986 return (0);
1987 }
1988
1989 /*
1990 * SBMAC_IOCTL(ifp, cmd, data)
1991 *
1992 * Main IOCTL handler - dispatches to other IOCTLs for various
1993 * types of requests.
1994 *
1995 * Input parameters:
1996 * ifp - interface pointer
1997 * cmd - command code
1998 * data - pointer to argument data
1999 *
2000 * Return value:
2001 * 0 if ok
2002 * else error code
2003 */
2004
2005 static int
2006 sbmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2007 {
2008 struct sbmac_softc *sc = ifp->if_softc;
2009 struct ifreq *ifr = (struct ifreq *) data;
2010 int s, error = 0;
2011
2012 s = splnet();
2013
2014 switch (cmd) {
2015 case SIOCINITIFADDR:
2016 error = sbmac_ether_ioctl(ifp, cmd, data);
2017 break;
2018 case SIOCSIFMTU:
2019 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
2020 error = EINVAL;
2021 else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
2022 /* XXX Program new MTU here */
2023 error = 0;
2024 break;
2025 case SIOCSIFFLAGS:
2026 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2027 break;
2028 if (ifp->if_flags & IFF_UP) {
2029 /*
2030 * If only the state of the PROMISC flag changed,
2031 * just tweak the hardware registers.
2032 */
2033 if ((ifp->if_flags & IFF_RUNNING) &&
2034 (ifp->if_flags & IFF_PROMISC)) {
2035 /* turn on promiscuous mode */
2036 sbmac_promiscuous_mode(sc, true);
2037 } else if (ifp->if_flags & IFF_RUNNING &&
2038 !(ifp->if_flags & IFF_PROMISC)) {
2039 /* turn off promiscuous mode */
2040 sbmac_promiscuous_mode(sc, false);
2041 } else
2042 sbmac_set_channel_state(sc, sbmac_state_on);
2043 } else {
2044 if (ifp->if_flags & IFF_RUNNING)
2045 sbmac_set_channel_state(sc, sbmac_state_off);
2046 }
2047
2048 sc->sbm_if_flags = ifp->if_flags;
2049 error = 0;
2050 break;
2051
2052 case SIOCADDMULTI:
2053 case SIOCDELMULTI:
2054 case SIOCSIFMEDIA:
2055 case SIOCGIFMEDIA:
2056 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
2057 error = 0;
2058 if (ifp->if_flags & IFF_RUNNING)
2059 sbmac_setmulti(sc);
2060 }
2061 break;
2062 default:
2063 error = ether_ioctl(ifp, cmd, data);
2064 break;
2065 }
2066
2067 (void)splx(s);
2068
2069 return(error);
2070 }
2071
2072 /*
2073 * SBMAC_IFMEDIA_UPD(ifp)
2074 *
2075 * Configure an appropriate media type for this interface,
2076 * given the data in the interface structure
2077 *
2078 * Input parameters:
2079 * ifp - interface
2080 *
2081 * Return value:
2082 * 0 if ok
2083 * else error code
2084 */
2085
2086 /*
2087 * SBMAC_IFMEDIA_STS(ifp, ifmr)
2088 *
2089 * Report current media status (used by ifconfig, for example)
2090 *
2091 * Input parameters:
2092 * ifp - interface structure
2093 * ifmr - media request structure
2094 *
2095 * Return value:
2096 * nothing
2097 */
2098
2099 /*
2100 * SBMAC_WATCHDOG(ifp)
2101 *
2102 * Called periodically to make sure we're still happy.
2103 *
2104 * Input parameters:
2105 * ifp - interface structure
2106 *
2107 * Return value:
2108 * nothing
2109 */
2110
2111 static void
2112 sbmac_watchdog(struct ifnet *ifp)
2113 {
2114
2115 /* XXX do something */
2116 }
2117
2118 /*
2119 * One second timer, used to tick MII.
2120 */
2121 static void
2122 sbmac_tick(void *arg)
2123 {
2124 struct sbmac_softc *sc = arg;
2125 int s;
2126
2127 s = splnet();
2128 mii_tick(&sc->sc_mii);
2129 splx(s);
2130
2131 callout_reset(&sc->sc_tick_ch, hz, sbmac_tick, sc);
2132 }
2133
2134
2135 /*
2136 * SBMAC_MATCH(parent, match, aux)
2137 *
2138 * Part of the config process - see if this device matches the
2139 * info about what we expect to find on the bus.
2140 *
2141 * Input parameters:
2142 * parent - parent bus structure
2143  *	   match - config data (not used)
2144 * aux - bus-specific args
2145 *
2146 * Return value:
2147 * 1 if we match
2148 * 0 if we don't match
2149 */
2150
2151 static int
2152 sbmac_match(device_t parent, cfdata_t match, void *aux)
2153 {
2154 struct sbobio_attach_args *sa = aux;
2155
2156 /*
2157 * Make sure it's a MAC
2158 */
2159 if (sa->sa_locs.sa_type != SBOBIO_DEVTYPE_MAC)
2160 return 0;
2161
2162 /*
2163 * Yup, it is.
2164 */
2165
2166 return 1;
2167 }
2168
2169 /*
2170 * SBMAC_PARSE_XDIGIT(str)
2171 *
2172 * Parse a hex digit, returning its value
2173 *
2174 * Input parameters:
2175 * str - character
2176 *
2177 * Return value:
2178 * hex value, or -1 if invalid
2179 */
2180
2181 static int
2182 sbmac_parse_xdigit(char str)
2183 {
2184 int digit;
2185
2186 if ((str >= '0') && (str <= '9'))
2187 digit = str - '0';
2188 else if ((str >= 'a') && (str <= 'f'))
2189 digit = str - 'a' + 10;
2190 else if ((str >= 'A') && (str <= 'F'))
2191 digit = str - 'A' + 10;
2192 else
2193 digit = -1;
2194
2195 return digit;
2196 }
2197
2198 /*
2199 * SBMAC_PARSE_HWADDR(str, hwaddr)
2200 *
2201 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2202 * Ethernet address.
2203 *
2204 * Input parameters:
2205 * str - string
2206 * hwaddr - pointer to hardware address
2207 *
2208 * Return value:
2209 * 0 if ok, else -1
2210 */
2211
2212 static int
2213 sbmac_parse_hwaddr(const char *str, u_char *hwaddr)
2214 {
2215 int digit1, digit2;
2216 int idx = 6;
2217
2218 while (*str && (idx > 0)) {
2219 digit1 = sbmac_parse_xdigit(*str);
2220 if (digit1 < 0)
2221 return -1;
2222 str++;
2223 if (!*str)
2224 return -1;
2225
2226 if ((*str == ':') || (*str == '-')) {
2227 digit2 = digit1;
2228 digit1 = 0;
2229 } else {
2230 digit2 = sbmac_parse_xdigit(*str);
2231 if (digit2 < 0)
2232 return -1;
2233 str++;
2234 }
2235
2236 *hwaddr++ = (digit1 << 4) | digit2;
2237 idx--;
2238
2239 if (*str == '-')
2240 str++;
2241 if (*str == ':')
2242 str++;
2243 }
2244 return 0;
2245 }
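/*
 * For example, sbmac_parse_hwaddr("40:00:00:00:01:00", ea) (the default
 * address used below) fills ea[] with { 0x40, 0x00, 0x00, 0x00, 0x01, 0x00 };
 * either ':' or '-' is accepted as the separator.
 */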
2246
2247 /*
2248 * SBMAC_ATTACH(parent, self, aux)
2249 *
2250 * Attach routine - init hardware and hook ourselves into NetBSD.
2251 *
2252 * Input parameters:
2253 * parent - parent bus device
2254 * self - our softc
2255 * aux - attach data
2256 *
2257 * Return value:
2258 * nothing
2259 */
2260
2261 static void
2262 sbmac_attach(device_t parent, device_t self, void *aux)
2263 {
2264 struct sbmac_softc * const sc = device_private(self);
2265 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
2266 struct sbobio_attach_args * const sa = aux;
2267 u_char *eaddr;
2268 static int unit = 0; /* XXX */
2269 uint64_t ea_reg;
2270 int idx;
2271
2272 sc->sc_dev = self;
2273
2274 /* Determine controller base address */
2275
2276 sc->sbm_base = sa->sa_base + sa->sa_locs.sa_offset;
2277
2278 eaddr = sc->sbm_hwaddr;
2279
2280 /*
2281 * Initialize context (get pointers to registers and stuff), then
2282 * allocate the memory for the descriptor tables.
2283 */
2284
2285 sbmac_initctx(sc);
2286
2287 callout_init(&(sc->sc_tick_ch), 0);
2288
2289 /*
2290 	 * Read the ethernet address.  The firmware left this programmed
2291 * for us in the ethernet address register for each mac.
2292 */
2293
2294 ea_reg = SBMAC_READCSR(PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR));
2295 for (idx = 0; idx < 6; idx++) {
2296 eaddr[idx] = (uint8_t) (ea_reg & 0xFF);
2297 ea_reg >>= 8;
2298 }
2299
2300 #define SBMAC_DEFAULT_HWADDR "40:00:00:00:01:00"
2301 if (eaddr[0] == 0 && eaddr[1] == 0 && eaddr[2] == 0 &&
2302 eaddr[3] == 0 && eaddr[4] == 0 && eaddr[5] == 0) {
2303 sbmac_parse_hwaddr(SBMAC_DEFAULT_HWADDR, eaddr);
2304 eaddr[5] = unit;
2305 }
2306
2307 #ifdef SBMAC_ETH0_HWADDR
2308 if (unit == 0)
2309 sbmac_parse_hwaddr(SBMAC_ETH0_HWADDR, eaddr);
2310 #endif
2311 #ifdef SBMAC_ETH1_HWADDR
2312 if (unit == 1)
2313 sbmac_parse_hwaddr(SBMAC_ETH1_HWADDR, eaddr);
2314 #endif
2315 #ifdef SBMAC_ETH2_HWADDR
2316 if (unit == 2)
2317 sbmac_parse_hwaddr(SBMAC_ETH2_HWADDR, eaddr);
2318 #endif
2319 unit++;
2320
2321 /*
2322 * Display Ethernet address (this is called during the config process
2323 * so we need to finish off the config message that was being displayed)
2324 */
2325 aprint_normal(": Ethernet%s\n",
2326 sc->sbm_pass3_dma ? ", using unaligned tx DMA" : "");
2327 aprint_normal_dev(self, "Ethernet address: %s\n", ether_sprintf(eaddr));
2328
2329
2330 /*
2331 * Set up ifnet structure
2332 */
2333
2334 ifp->if_softc = sc;
2335 memcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
2336 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
2337 IFF_NOTRAILERS;
2338 ifp->if_ioctl = sbmac_ioctl;
2339 ifp->if_start = sbmac_start;
2340 ifp->if_watchdog = sbmac_watchdog;
2341 ifp->if_snd.ifq_maxlen = SBMAC_MAX_TXDESCR - 1;
2342
2343 /*
2344 * Set up ifmedia support.
2345 */
2346
2347 /*
2348 * Initialize MII/media info.
2349 */
2350 sc->sc_mii.mii_ifp = ifp;
2351 sc->sc_mii.mii_readreg = sbmac_mii_readreg;
2352 sc->sc_mii.mii_writereg = sbmac_mii_writereg;
2353 sc->sc_mii.mii_statchg = sbmac_mii_statchg;
2354 sc->sc_ethercom.ec_mii = &sc->sc_mii;
2355 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
2356 ether_mediastatus);
2357 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2358 MII_OFFSET_ANY, 0);
2359
2360 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2361 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2362 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2363 } else {
2364 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2365 }
2366
2367
2368 /*
2369 * map/route interrupt
2370 */
2371
2372 sc->sbm_intrhand = cpu_intr_establish(sa->sa_locs.sa_intr[0], IPL_NET,
2373 sbmac_intr, sc);
2374
2375 /*
2376 * Call MI attach routines.
2377 */
2378 if_attach(ifp);
2379 ether_ifattach(ifp, eaddr);
2380 }
2381