1 /* $NetBSD: sbmac.c,v 1.50 2018/06/26 06:47:59 msaitoh Exp $ */
2
3 /*
4 * Copyright 2000, 2001, 2004
5 * Broadcom Corporation. All rights reserved.
6 *
7 * This software is furnished under license and may be used and copied only
8 * in accordance with the following terms and conditions. Subject to these
9 * conditions, you may download, copy, install, use, modify and distribute
10 * modified or unmodified copies of this software in source and/or binary
11 * form. No title or ownership is transferred hereby.
12 *
13 * 1) Any source code used, modified or distributed must reproduce and
14 * retain this copyright notice and list of conditions as they appear in
15 * the source file.
16 *
17 * 2) No right is granted to use any trade name, trademark, or logo of
18 * Broadcom Corporation. The "Broadcom Corporation" name may not be
19 * used to endorse or promote products derived from this software
20 * without the prior written permission of Broadcom Corporation.
21 *
22 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
25 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
26 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
27 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: sbmac.c,v 1.50 2018/06/26 06:47:59 msaitoh Exp $");
37
38 #include "opt_inet.h"
39 #include "opt_ns.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/queue.h>
49 #include <sys/device.h>
50
51 #include <net/if.h>
52 #include <net/if_arp.h>
53 #include <net/if_ether.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56
57 #include <net/bpf.h>
58
59 #ifdef INET
60 #include <netinet/in.h>
61 #include <netinet/if_inarp.h>
62 #endif
63
64 #include <mips/locore.h>
65
66 #include "sbobiovar.h"
67
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 #include <dev/mii/mii_bitbang.h>
71
72 #include <mips/sibyte/include/sb1250_defs.h>
73 #include <mips/sibyte/include/sb1250_regs.h>
74 #include <mips/sibyte/include/sb1250_mac.h>
75 #include <mips/sibyte/include/sb1250_dma.h>
76 #include <mips/sibyte/include/sb1250_scd.h>
77
78 #include <evbmips/sbmips/systemsw.h>
79
80 /* Simple types */
81
82 typedef u_long sbmac_port_t;
83 typedef uint64_t sbmac_physaddr_t;
84 typedef uint64_t sbmac_enetaddr_t;
85
86 typedef enum { sbmac_speed_auto, sbmac_speed_10,
87 sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
88
89 typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
90 sbmac_duplex_full } sbmac_duplex_t;
91
92 typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
93 sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
94
95 typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
96 sbmac_state_broken } sbmac_state_t;
97
98
99 /* Macros */
100
101 #define SBMAC_EVENT_COUNTERS /* Include counters for various events */
102
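/* Advance a ring index; this works only because ring sizes are powers of two (mask = size - 1). */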
103 #define SBDMA_NEXTBUF(d, f) (((f) + 1) & (d)->sbdma_dscr_mask)
104
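/* NUMCACHEBLKS rounds a byte count up to whole 32-byte cache blocks, the unit used for descriptor buffer sizes. */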
105 #define CACHELINESIZE 32
106 #define NUMCACHEBLKS(x) (((x)+CACHELINESIZE-1)/CACHELINESIZE)
107 #define KMALLOC(x) malloc((x), M_DEVBUF, M_DONTWAIT)
108 #define KVTOPHYS(x) kvtophys((vaddr_t)(x))
109
110 #ifdef SBMACDEBUG
111 #define dprintf(x) printf x
112 #else
113 #define dprintf(x)
114 #endif
115
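/* MAC CSRs are 64-bit registers, accessed with uncached 64-bit loads/stores through KSEG1 addresses (see PKSEG1 below). */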
116 #define SBMAC_READCSR(t) mips3_ld((register_t)(t))
117 #define SBMAC_WRITECSR(t, v) mips3_sd((register_t)(t), (v))
118
119 #define PKSEG1(x) ((sbmac_port_t) MIPS_PHYS_TO_KSEG1(x))
120
121 /* These are limited to fit within one virtual page, and must be 2**N. */
122 #define SBMAC_MAX_TXDESCR 256 /* should be 1024 */
123 #define SBMAC_MAX_RXDESCR 256 /* should be 512 */
124
125 #define ETHER_ALIGN 2
126
127 /* DMA Descriptor structure */
128
129 typedef struct sbdmadscr_s {
130 uint64_t dscr_a;
131 uint64_t dscr_b;
132 } sbdmadscr_t;
133
134
135 /* DMA Controller structure */
136
137 typedef struct sbmacdma_s {
138
139 /*
140 * This stuff is used to identify the channel and the registers
141 * associated with it.
142 */
143
144 struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */
145 int sbdma_channel; /* channel number */
146 int sbdma_txdir; /* direction (1=transmit) */
147 int sbdma_maxdescr; /* total # of descriptors in ring */
148 sbmac_port_t sbdma_config0; /* DMA config register 0 */
149 sbmac_port_t sbdma_config1; /* DMA config register 1 */
150 sbmac_port_t sbdma_dscrbase; /* Descriptor base address */
151 sbmac_port_t sbdma_dscrcnt; /* Descriptor count register */
152 sbmac_port_t sbdma_curdscr; /* current descriptor address */
153
154 /*
155 * This stuff is for maintenance of the ring
156 */
157 sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */
158 struct mbuf **sbdma_ctxtable; /* context table, one per descr */
159 unsigned int sbdma_dscr_mask; /* sbdma_maxdescr - 1 */
160 paddr_t sbdma_dscrtable_phys; /* and also the phys addr */
161 unsigned int sbdma_add_index; /* next dscr for sw to add */
162 unsigned int sbdma_rem_index; /* next dscr for sw to remove */
163 } sbmacdma_t;
164
165
166 /* Ethernet softc structure */
167
168 struct sbmac_softc {
169
170 /*
171 * NetBSD-specific things
172 */
173 struct ethercom sc_ethercom; /* Ethernet common part */
174 struct mii_data sc_mii;
175 struct callout sc_tick_ch;
176
177 device_t sc_dev; /* device */
178 int sbm_if_flags;
179 void *sbm_intrhand;
180
181 /*
182 * Controller-specific things
183 */
184
185 sbmac_port_t sbm_base; /* MAC's base address */
186 sbmac_state_t sbm_state; /* current state */
187
188 sbmac_port_t sbm_macenable; /* MAC Enable Register */
189 sbmac_port_t sbm_maccfg; /* MAC Configuration Register */
190 sbmac_port_t sbm_fifocfg; /* FIFO configuration register */
191 sbmac_port_t sbm_framecfg; /* Frame configuration register */
192 sbmac_port_t sbm_rxfilter; /* receive filter register */
193 sbmac_port_t sbm_isr; /* Interrupt status register */
194 sbmac_port_t sbm_imr; /* Interrupt mask register */
195
196 sbmac_speed_t sbm_speed; /* current speed */
197 sbmac_duplex_t sbm_duplex; /* current duplex */
198 sbmac_fc_t sbm_fc; /* current flow control setting */
199 int sbm_rxflags; /* received packet flags */
200
201 u_char sbm_hwaddr[ETHER_ADDR_LEN];
202
203 sbmacdma_t sbm_txdma; /* for now, only use channel 0 */
204 sbmacdma_t sbm_rxdma;
205
206 int sbm_pass3_dma; /* chip has pass3 SOC DMA features */
207
208 #ifdef SBMAC_EVENT_COUNTERS
209 struct evcnt sbm_ev_rxintr; /* Rx interrupts */
210 struct evcnt sbm_ev_txintr; /* Tx interrupts */
211 	struct evcnt sbm_ev_txdrop;	/* Tx dropped due to mbuf allocation failure */
212 struct evcnt sbm_ev_txstall; /* Tx stalled due to no descriptors free */
213
214 struct evcnt sbm_ev_txsplit; /* pass3 Tx split mbuf */
215 struct evcnt sbm_ev_txkeep; /* pass3 Tx didn't split mbuf */
216 #endif
217 };
218
219
220 #ifdef SBMAC_EVENT_COUNTERS
221 #define SBMAC_EVCNT_INCR(ev) (ev).ev_count++
222 #else
223 #define SBMAC_EVCNT_INCR(ev) do { /* nothing */ } while (0)
224 #endif
225
226 /* Externs */
227
228 extern paddr_t kvtophys(vaddr_t);
229
230 /* Prototypes */
231
232 static void sbdma_initctx(sbmacdma_t *, struct sbmac_softc *, int, int, int);
233 static void sbdma_channel_start(sbmacdma_t *);
234 static int sbdma_add_rcvbuffer(sbmacdma_t *, struct mbuf *);
235 static int sbdma_add_txbuffer(sbmacdma_t *, struct mbuf *);
236 static void sbdma_emptyring(sbmacdma_t *);
237 static void sbdma_fillring(sbmacdma_t *);
238 static void sbdma_rx_process(struct sbmac_softc *, sbmacdma_t *);
239 static void sbdma_tx_process(struct sbmac_softc *, sbmacdma_t *);
240 static void sbmac_initctx(struct sbmac_softc *);
241 static void sbmac_channel_start(struct sbmac_softc *);
242 static void sbmac_channel_stop(struct sbmac_softc *);
243 static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,
244 sbmac_state_t);
245 static void sbmac_promiscuous_mode(struct sbmac_softc *, bool);
246 static void sbmac_init_and_start(struct sbmac_softc *);
247 static uint64_t sbmac_addr2reg(u_char *);
248 static void sbmac_intr(void *, uint32_t, vaddr_t);
249 static void sbmac_start(struct ifnet *);
250 static void sbmac_setmulti(struct sbmac_softc *);
251 static int sbmac_ether_ioctl(struct ifnet *, u_long, void *);
252 static int sbmac_ioctl(struct ifnet *, u_long, void *);
253 static void sbmac_watchdog(struct ifnet *);
254 static int sbmac_match(device_t, cfdata_t, void *);
255 static void sbmac_attach(device_t, device_t, void *);
256 static bool sbmac_set_speed(struct sbmac_softc *, sbmac_speed_t);
257 static bool sbmac_set_duplex(struct sbmac_softc *, sbmac_duplex_t, sbmac_fc_t);
258 static void sbmac_tick(void *);
259
260
261 /* Globals */
262
263 CFATTACH_DECL_NEW(sbmac, sizeof(struct sbmac_softc),
264 sbmac_match, sbmac_attach, NULL, NULL);
265
266 static uint32_t sbmac_mii_bitbang_read(device_t self);
267 static void sbmac_mii_bitbang_write(device_t self, uint32_t val);
268
269 static const struct mii_bitbang_ops sbmac_mii_bitbang_ops = {
270 sbmac_mii_bitbang_read,
271 sbmac_mii_bitbang_write,
272 {
273 (uint32_t)M_MAC_MDIO_OUT, /* MII_BIT_MDO */
274 (uint32_t)M_MAC_MDIO_IN, /* MII_BIT_MDI */
275 (uint32_t)M_MAC_MDC, /* MII_BIT_MDC */
276 0, /* MII_BIT_DIR_HOST_PHY */
277 (uint32_t)M_MAC_MDIO_DIR /* MII_BIT_DIR_PHY_HOST */
278 }
279 };
280
281 static uint32_t
282 sbmac_mii_bitbang_read(device_t self)
283 {
284 struct sbmac_softc *sc = device_private(self);
285 sbmac_port_t reg;
286
287 reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
288 return (uint32_t) SBMAC_READCSR(reg);
289 }
290
291 static void
292 sbmac_mii_bitbang_write(device_t self, uint32_t val)
293 {
294 struct sbmac_softc *sc = device_private(self);
295 sbmac_port_t reg;
296
297 reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
298
299 SBMAC_WRITECSR(reg, (val &
300 (M_MAC_MDC|M_MAC_MDIO_DIR|M_MAC_MDIO_OUT|M_MAC_MDIO_IN)));
301 }
302
303 /*
304  * Read a PHY register through the MII.
305 */
306 static int
307 sbmac_mii_readreg(device_t self, int phy, int reg)
308 {
309
310 return (mii_bitbang_readreg(self, &sbmac_mii_bitbang_ops, phy, reg));
311 }
312
313 /*
314 * Write to a PHY register through the MII.
315 */
316 static void
317 sbmac_mii_writereg(device_t self, int phy, int reg, int val)
318 {
319
320 mii_bitbang_writereg(self, &sbmac_mii_bitbang_ops, phy, reg, val);
321 }
322
323 static void
324 sbmac_mii_statchg(struct ifnet *ifp)
325 {
326 struct sbmac_softc *sc = ifp->if_softc;
327 sbmac_state_t oldstate;
328
329 /* Stop the MAC in preparation for changing all of the parameters. */
330 oldstate = sbmac_set_channel_state(sc, sbmac_state_off);
331
332 switch (sc->sc_ethercom.ec_if.if_baudrate) {
333 default: /* if autonegotiation fails, assume 10Mbit */
334 case IF_Mbps(10):
335 sbmac_set_speed(sc, sbmac_speed_10);
336 break;
337
338 case IF_Mbps(100):
339 sbmac_set_speed(sc, sbmac_speed_100);
340 break;
341
342 case IF_Mbps(1000):
343 sbmac_set_speed(sc, sbmac_speed_1000);
344 break;
345 }
346
347 if (sc->sc_mii.mii_media_active & IFM_FDX) {
348 /* Configure for full-duplex */
349 /* XXX: is flow control right for 10, 100? */
350 sbmac_set_duplex(sc, sbmac_duplex_full, sbmac_fc_frame);
351 } else {
352 /* Configure for half-duplex */
353 /* XXX: is flow control right? */
354 sbmac_set_duplex(sc, sbmac_duplex_half, sbmac_fc_disabled);
355 }
356
357 /* And put it back into its former state. */
358 sbmac_set_channel_state(sc, oldstate);
359 }
360
361 /*
362 * SBDMA_INITCTX(d, sc, chan, txrx, maxdescr)
363 *
364 * Initialize a DMA channel context. Since there are potentially
365 * eight DMA channels per MAC, it's nice to do this in a standard
366 * way.
367 *
368 * Input parameters:
369 * d - sbmacdma_t structure (DMA channel context)
370 * sc - sbmac_softc structure (pointer to a MAC)
371 * chan - channel number (0..1 right now)
372 * txrx - Identifies DMA_TX or DMA_RX for channel direction
373 * maxdescr - number of descriptors
374 *
375 * Return value:
376 * nothing
377 */
378
379 static void
380 sbdma_initctx(sbmacdma_t *d, struct sbmac_softc *sc, int chan, int txrx,
381 int maxdescr)
382 {
383 /*
384 * Save away interesting stuff in the structure
385 */
386
387 d->sbdma_eth = sc;
388 d->sbdma_channel = chan;
389 d->sbdma_txdir = txrx;
390
391 /*
392 * initialize register pointers
393 */
394
395 d->sbdma_config0 = PKSEG1(sc->sbm_base +
396 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG0));
397 d->sbdma_config1 = PKSEG1(sc->sbm_base +
398 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG1));
399 d->sbdma_dscrbase = PKSEG1(sc->sbm_base +
400 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_BASE));
401 d->sbdma_dscrcnt = PKSEG1(sc->sbm_base +
402 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_CNT));
403 d->sbdma_curdscr = PKSEG1(sc->sbm_base +
404 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CUR_DSCRADDR));
405
406 /*
407 * Allocate memory for the ring
408 */
409
410 d->sbdma_maxdescr = maxdescr;
411 d->sbdma_dscr_mask = d->sbdma_maxdescr - 1;
412
413 d->sbdma_dscrtable = (sbdmadscr_t *)
414 KMALLOC(d->sbdma_maxdescr * sizeof(sbdmadscr_t));
415
416 memset(d->sbdma_dscrtable, 0, d->sbdma_maxdescr*sizeof(sbdmadscr_t));
417
418 d->sbdma_dscrtable_phys = KVTOPHYS(d->sbdma_dscrtable);
419
420 /*
421 * And context table
422 */
423
424 d->sbdma_ctxtable = (struct mbuf **)
425 KMALLOC(d->sbdma_maxdescr*sizeof(struct mbuf *));
426
427 memset(d->sbdma_ctxtable, 0, d->sbdma_maxdescr*sizeof(struct mbuf *));
428 }
429
430 /*
431 * SBDMA_CHANNEL_START(d)
432 *
433 * Initialize the hardware registers for a DMA channel.
434 *
435 * Input parameters:
436  * d - DMA channel to init (context must be previously init'd)
437 *
438 * Return value:
439 * nothing
440 */
441
442 static void
443 sbdma_channel_start(sbmacdma_t *d)
444 {
445 /*
446 * Turn on the DMA channel
447 */
448
449 SBMAC_WRITECSR(d->sbdma_config1, 0);
450
451 SBMAC_WRITECSR(d->sbdma_dscrbase, d->sbdma_dscrtable_phys);
452
453 SBMAC_WRITECSR(d->sbdma_config0, V_DMA_RINGSZ(d->sbdma_maxdescr) | 0);
454
455 /*
456 * Initialize ring pointers
457 */
458
459 d->sbdma_add_index = 0;
460 d->sbdma_rem_index = 0;
461 }
462
463 /*
464 * SBDMA_ADD_RCVBUFFER(d, m)
465 *
466 * Add a buffer to the specified DMA channel. For receive channels,
467 * this queues a buffer for inbound packets.
468 *
469 * Input parameters:
470 * d - DMA channel descriptor
471 * m - mbuf to add, or NULL if we should allocate one.
472 *
473 * Return value:
474  * 0 if buffer added successfully
475  * otherwise an error code (ring full or mbuf allocation failed)
476 */
477
478 static int
479 sbdma_add_rcvbuffer(sbmacdma_t *d, struct mbuf *m)
480 {
481 unsigned int dsc, nextdsc;
482 struct mbuf *m_new = NULL;
483
484 /* get pointer to our current place in the ring */
485
486 dsc = d->sbdma_add_index;
487 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
488
489 /*
490 * figure out if the ring is full - if the next descriptor
491 * is the same as the one that we're going to remove from
492 * the ring, the ring is full
493 */
494
495 if (nextdsc == d->sbdma_rem_index)
496 return ENOSPC;
497
498 /*
499 * Allocate an mbuf if we don't already have one.
500 * If we do have an mbuf, reset it so that it's empty.
501 */
502
503 if (m == NULL) {
504 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
505 if (m_new == NULL) {
506 aprint_error_dev(d->sbdma_eth->sc_dev,
507 "mbuf allocation failed\n");
508 return ENOBUFS;
509 }
510
511 MCLGET(m_new, M_DONTWAIT);
512 if (!(m_new->m_flags & M_EXT)) {
513 aprint_error_dev(d->sbdma_eth->sc_dev,
514 "mbuf cluster allocation failed\n");
515 m_freem(m_new);
516 return ENOBUFS;
517 }
518
519 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
520 m_adj(m_new, ETHER_ALIGN);
521 } else {
522 m_new = m;
523 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
524 m_new->m_data = m_new->m_ext.ext_buf;
525 m_adj(m_new, ETHER_ALIGN);
526 }
527
528 /*
529 * fill in the descriptor
530 */
531
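	/*
	 * The buffer start was advanced by ETHER_ALIGN above, so include
	 * that offset when counting the cache blocks the buffer spans.
	 */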
532 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new, void *)) |
533 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(ETHER_ALIGN + m_new->m_len)) |
534 M_DMA_DSCRA_INTERRUPT;
535
536 /* receiving: no options */
537 d->sbdma_dscrtable[dsc].dscr_b = 0;
538
539 /*
540 * fill in the context
541 */
542
543 d->sbdma_ctxtable[dsc] = m_new;
544
545 /*
546 * point at next packet
547 */
548
549 d->sbdma_add_index = nextdsc;
550
551 /*
552 * Give the buffer to the DMA engine.
553 */
554
555 SBMAC_WRITECSR(d->sbdma_dscrcnt, 1);
556
557 return 0; /* we did it */
558 }
559
560 /*
561 * SBDMA_ADD_TXBUFFER(d, m)
562 *
563 * Add a transmit buffer to the specified DMA channel, causing a
564 * transmit to start.
565 *
566 * Input parameters:
567 * d - DMA channel descriptor
568 * m - mbuf to add
569 *
570 * Return value:
571 * 0 transmit queued successfully
572 * otherwise error code
573 */
574
575 static int
576 sbdma_add_txbuffer(sbmacdma_t *d, struct mbuf *m)
577 {
578 unsigned int dsc, nextdsc, prevdsc, origdesc;
579 int length;
580 int num_mbufs = 0;
581 struct sbmac_softc *sc = d->sbdma_eth;
582
583 /* get pointer to our current place in the ring */
584
585 dsc = d->sbdma_add_index;
586 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
587
588 /*
589 * figure out if the ring is full - if the next descriptor
590 * is the same as the one that we're going to remove from
591 * the ring, the ring is full
592 */
593
594 if (nextdsc == d->sbdma_rem_index) {
595 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
596 return ENOSPC;
597 }
598
599 /*
600 * PASS3 parts do not have buffer alignment restriction.
601 * No need to copy/coalesce to new mbuf. Also has different
602 * descriptor format
603 */
604 if (sc->sbm_pass3_dma) {
605 struct mbuf *m_temp = NULL;
606
607 /*
608 * Loop thru this mbuf record.
609 * The head mbuf will have SOP set.
610 */
611 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m,void *)) |
612 M_DMA_ETHTX_SOP;
613
614 /*
615 		 * transmitting: set outbound options, buffer A size (+ low 5
616 		 * bits of start addr), and packet length.
617 */
618 d->sbdma_dscrtable[dsc].dscr_b =
619 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
620 V_DMA_DSCRB_A_SIZE((m->m_len +
621 (mtod(m,uintptr_t) & 0x0000001F))) |
622 V_DMA_DSCRB_PKT_SIZE_MSB((m->m_pkthdr.len & 0xc000) >> 14) |
623 V_DMA_DSCRB_PKT_SIZE(m->m_pkthdr.len & 0x3fff);
624
625 d->sbdma_add_index = nextdsc;
626 origdesc = prevdsc = dsc;
627 dsc = d->sbdma_add_index;
628 num_mbufs++;
629
630 /* Start with first non-head mbuf */
631 		for (m_temp = m->m_next; m_temp != NULL; m_temp = m_temp->m_next) {
632 int len, next_len;
633 uint64_t addr;
634
635 if (m_temp->m_len == 0)
636 continue; /* Skip 0-length mbufs */
637
638 len = m_temp->m_len;
639 addr = KVTOPHYS(mtod(m_temp, void *));
640
641 /*
642 * Check to see if the mbuf spans a page boundary. If
643 * it does, and the physical pages behind the virtual
644 * pages are not contiguous, split it so that each
645 * virtual page uses its own Tx descriptor.
646 */
647 if (trunc_page(addr) != trunc_page(addr + len - 1)) {
648 next_len = (addr + len) - trunc_page(addr + len);
649
650 len -= next_len;
651
652 if (addr + len ==
653 KVTOPHYS(mtod(m_temp, char *) + len)) {
654 SBMAC_EVCNT_INCR(sc->sbm_ev_txkeep);
655 len += next_len;
656 next_len = 0;
657 } else {
658 SBMAC_EVCNT_INCR(sc->sbm_ev_txsplit);
659 }
660 } else {
661 next_len = 0;
662 }
663
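			/*
			 * 'again' is re-entered at most once per mbuf, to emit
			 * a descriptor for the portion on the following page.
			 */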
664 again:
665 /*
666 * fill in the descriptor
667 */
668 d->sbdma_dscrtable[dsc].dscr_a = addr;
669
670 /*
671 			 * transmitting: set outbound options, buffer A
672 			 * size (+ low 5 bits of start addr)
673 */
674 d->sbdma_dscrtable[dsc].dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_NOTSOP) |
675 V_DMA_DSCRB_A_SIZE((len + (addr & 0x0000001F)));
676
677 d->sbdma_ctxtable[dsc] = NULL;
678
679 /*
680 * point at next descriptor
681 */
682 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
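			/*
			 * If the ring fills up mid-packet, roll back to this
			 * packet's first descriptor and fail the whole transmit.
			 */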
683 if (nextdsc == d->sbdma_rem_index) {
684 d->sbdma_add_index = origdesc;
685 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
686 return ENOSPC;
687 }
688 d->sbdma_add_index = nextdsc;
689
690 prevdsc = dsc;
691 dsc = d->sbdma_add_index;
692 num_mbufs++;
693
694 if (next_len != 0) {
695 addr = KVTOPHYS(mtod(m_temp, char *) + len);
696 len = next_len;
697
698 next_len = 0;
699 goto again;
700 }
701
702 }
703 /* Set head mbuf to last context index */
704 d->sbdma_ctxtable[prevdsc] = m;
705
706 /* Interrupt on last dscr of packet. */
707 d->sbdma_dscrtable[prevdsc].dscr_a |= M_DMA_DSCRA_INTERRUPT;
708 } else {
709 struct mbuf *m_new = NULL;
710 /*
711 * [BEGIN XXX]
712 * XXX Copy/coalesce the mbufs into a single mbuf cluster (we
713 * assume it will fit). This is a temporary hack to get us
714 * going.
715 */
716
717 MGETHDR(m_new,M_DONTWAIT,MT_DATA);
718 if (m_new == NULL) {
719 aprint_error_dev(d->sbdma_eth->sc_dev,
720 "mbuf allocation failed\n");
721 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
722 return ENOBUFS;
723 }
724
725 MCLGET(m_new,M_DONTWAIT);
726 if (!(m_new->m_flags & M_EXT)) {
727 aprint_error_dev(d->sbdma_eth->sc_dev,
728 "mbuf cluster allocation failed\n");
729 m_freem(m_new);
730 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
731 return ENOBUFS;
732 }
733
734 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
735 /*m_adj(m_new,ETHER_ALIGN);*/
736
737 /*
738 * XXX Don't forget to include the offset portion in the
739 * XXX cache block calculation when this code is rewritten!
740 */
741
742 /*
743 * Copy data
744 */
745
746 m_copydata(m,0,m->m_pkthdr.len,mtod(m_new,void *));
747 m_new->m_len = m_new->m_pkthdr.len = m->m_pkthdr.len;
748
749 /* Free old mbuf 'm', actual mbuf is now 'm_new' */
750
751 // XXX: CALLERS WILL FREE, they might have to bpf_mtap() if this
752 // XXX: function succeeds.
753 // m_freem(m);
754 length = m_new->m_len;
755
756 /* [END XXX] */
757 /*
758 * fill in the descriptor
759 */
760
761 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new,void *)) |
762 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(m_new->m_len)) |
763 M_DMA_DSCRA_INTERRUPT |
764 M_DMA_ETHTX_SOP;
765
766 /* transmitting: set outbound options and length */
767 d->sbdma_dscrtable[dsc].dscr_b =
768 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
769 V_DMA_DSCRB_PKT_SIZE(length);
770
771 num_mbufs++;
772
773 /*
774 * fill in the context
775 */
776
777 d->sbdma_ctxtable[dsc] = m_new;
778
779 /*
780 * point at next packet
781 */
782 d->sbdma_add_index = nextdsc;
783 }
784
785 /*
786 * Give the buffer to the DMA engine.
787 */
788
789 SBMAC_WRITECSR(d->sbdma_dscrcnt, num_mbufs);
790
791 return 0; /* we did it */
792 }
793
794 /*
795 * SBDMA_EMPTYRING(d)
796 *
797  * Free all allocated mbufs on the specified DMA channel.
798 *
799 * Input parameters:
800 * d - DMA channel
801 *
802 * Return value:
803 * nothing
804 */
805
806 static void
807 sbdma_emptyring(sbmacdma_t *d)
808 {
809 int idx;
810 struct mbuf *m;
811
812 for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
813 m = d->sbdma_ctxtable[idx];
814 if (m) {
815 m_freem(m);
816 d->sbdma_ctxtable[idx] = NULL;
817 }
818 }
819 }
820
821 /*
822 * SBDMA_FILLRING(d)
823 *
824 * Fill the specified DMA channel (must be receive channel)
825 * with mbufs
826 *
827 * Input parameters:
828 * d - DMA channel
829 *
830 * Return value:
831 * nothing
832 */
833
834 static void
835 sbdma_fillring(sbmacdma_t *d)
836 {
837 int idx;
838
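	/*
	 * Fill all but one descriptor; sbdma_add_rcvbuffer() never lets the
	 * ring become completely full, so one slot always stays free.
	 */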
839 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++)
840 if (sbdma_add_rcvbuffer(d, NULL) != 0)
841 break;
842 }
843
844 /*
845 * SBDMA_RX_PROCESS(sc, d)
846 *
847 * Process "completed" receive buffers on the specified DMA channel.
848 * Note that this isn't really ideal for priority channels, since
849 * it processes all of the packets on a given channel before
850 * returning.
851 *
852 * Input parameters:
853 * sc - softc structure
854 * d - DMA channel context
855 *
856 * Return value:
857 * nothing
858 */
859
860 static void
861 sbdma_rx_process(struct sbmac_softc *sc, sbmacdma_t *d)
862 {
863 int curidx;
864 int hwidx;
865 sbdmadscr_t *dscp;
866 struct mbuf *m;
867 int len;
868
869 struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
870
871 for (;;) {
872 /*
873 * figure out where we are (as an index) and where
874 * the hardware is (also as an index)
875 *
876 * This could be done faster if (for example) the
877 * descriptor table was page-aligned and contiguous in
878 * both virtual and physical memory -- you could then
879 * just compare the low-order bits of the virtual address
880 * (sbdma_rem_index) and the physical address
881 * (sbdma_curdscr CSR).
882 */
883
884 curidx = d->sbdma_rem_index;
885 hwidx = (int)
886 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
887 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
888
889 /*
890 * If they're the same, that means we've processed all
891 * of the descriptors up to (but not including) the one that
892 * the hardware is working on right now.
893 */
894
895 if (curidx == hwidx)
896 break;
897
898 /*
899 * Otherwise, get the packet's mbuf ptr back
900 */
901
902 dscp = &(d->sbdma_dscrtable[curidx]);
903 m = d->sbdma_ctxtable[curidx];
904 d->sbdma_ctxtable[curidx] = NULL;
905
906 len = (int)G_DMA_DSCRB_PKT_SIZE(dscp->dscr_b) - 4;
907
908 /*
909 * Check packet status. If good, process it.
910 * If not, silently drop it and put it back on the
911 * receive ring.
912 */
913
914 if (! (dscp->dscr_a & M_DMA_ETHRX_BAD)) {
915
916 /*
917 * Set length into the packet
918 * XXX do we remove the CRC here?
919 */
920 m->m_pkthdr.len = m->m_len = len;
921
922 m_set_rcvif(m, ifp);
923
924
925 /*
926 * Add a new buffer to replace the old one.
927 */
928 sbdma_add_rcvbuffer(d, NULL);
929
930 /*
931 * Handle BPF listeners. Let the BPF user see the
932 * packet, but don't pass it up to the ether_input()
933 * layer unless it's a broadcast packet, multicast
934 * packet, matches our ethernet address or the
935 * interface is in promiscuous mode.
936 */
937
938 /*
939 * Pass the buffer to the kernel
940 */
941 if_percpuq_enqueue(ifp->if_percpuq, m);
942 } else {
943 /*
944 * Packet was mangled somehow. Just drop it and
945 * put it back on the receive ring.
946 */
947 sbdma_add_rcvbuffer(d, m);
948 }
949
950 /*
951 * .. and advance to the next buffer.
952 */
953
954 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
955 }
956 }
957
958 /*
959 * SBDMA_TX_PROCESS(sc, d)
960 *
961 * Process "completed" transmit buffers on the specified DMA channel.
962 * This is normally called within the interrupt service routine.
963 * Note that this isn't really ideal for priority channels, since
964 * it processes all of the packets on a given channel before
965 * returning.
966 *
967 * Input parameters:
968 * sc - softc structure
969 * d - DMA channel context
970 *
971 * Return value:
972 * nothing
973 */
974
975 static void
976 sbdma_tx_process(struct sbmac_softc *sc, sbmacdma_t *d)
977 {
978 int curidx;
979 int hwidx;
980 struct mbuf *m;
981
982 struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
983
984 for (;;) {
985 /*
986 * figure out where we are (as an index) and where
987 * the hardware is (also as an index)
988 *
989 * This could be done faster if (for example) the
990 * descriptor table was page-aligned and contiguous in
991 * both virtual and physical memory -- you could then
992 * just compare the low-order bits of the virtual address
993 * (sbdma_rem_index) and the physical address
994 * (sbdma_curdscr CSR).
995 */
996
997 curidx = d->sbdma_rem_index;
998 hwidx = (int)
999 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1000 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1001
1002 /*
1003 * If they're the same, that means we've processed all
1004 * of the descriptors up to (but not including) the one that
1005 * the hardware is working on right now.
1006 */
1007
1008 if (curidx == hwidx)
1009 break;
1010
1011 /*
1012 * Otherwise, get the packet's mbuf ptr back
1013 */
1014
1015 m = d->sbdma_ctxtable[curidx];
1016 d->sbdma_ctxtable[curidx] = NULL;
1017
1018 /*
1019 * for transmits we just free buffers and count packets.
1020 */
1021 ifp->if_opackets++;
1022 m_freem(m);
1023
1024 /*
1025 * .. and advance to the next buffer.
1026 */
1027
1028 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
1029 }
1030
1031 /*
1032 	 * Clear IFF_OACTIVE. It's supposed to reflect whether the
1033 	 * interface is actively transmitting, but that's hard to track
1034 	 * precisely, so clear it whenever descriptors have been reaped.
1035 */
1036
1037 ifp->if_flags &= ~IFF_OACTIVE;
1038 }
1039
1040 /*
1041 * SBMAC_INITCTX(s)
1042 *
1043 * Initialize an Ethernet context structure - this is called
1044 * once per MAC on the 1250. Memory is allocated here, so don't
1045 * call it again from inside the ioctl routines that bring the
1046 * interface up/down
1047 *
1048 * Input parameters:
1049 * sc - sbmac context structure
1050 *
1051 * Return value:
1052  * nothing
1053 */
1054
1055 static void
1056 sbmac_initctx(struct sbmac_softc *sc)
1057 {
1058 uint64_t sysrev;
1059
1060 /*
1061 * figure out the addresses of some ports
1062 */
1063
1064 sc->sbm_macenable = PKSEG1(sc->sbm_base + R_MAC_ENABLE);
1065 sc->sbm_maccfg = PKSEG1(sc->sbm_base + R_MAC_CFG);
1066 sc->sbm_fifocfg = PKSEG1(sc->sbm_base + R_MAC_THRSH_CFG);
1067 sc->sbm_framecfg = PKSEG1(sc->sbm_base + R_MAC_FRAMECFG);
1068 sc->sbm_rxfilter = PKSEG1(sc->sbm_base + R_MAC_ADFILTER_CFG);
1069 sc->sbm_isr = PKSEG1(sc->sbm_base + R_MAC_STATUS);
1070 sc->sbm_imr = PKSEG1(sc->sbm_base + R_MAC_INT_MASK);
1071
1072 /*
1073 * Initialize the DMA channels. Right now, only one per MAC is used
1074 * Note: Only do this _once_, as it allocates memory from the kernel!
1075 */
1076
1077 sbdma_initctx(&(sc->sbm_txdma), sc, 0, DMA_TX, SBMAC_MAX_TXDESCR);
1078 sbdma_initctx(&(sc->sbm_rxdma), sc, 0, DMA_RX, SBMAC_MAX_RXDESCR);
1079
1080 /*
1081 * initial state is OFF
1082 */
1083
1084 sc->sbm_state = sbmac_state_off;
1085
1086 /*
1087 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
1088 */
1089
1090 sc->sbm_speed = sbmac_speed_10;
1091 sc->sbm_duplex = sbmac_duplex_half;
1092 sc->sbm_fc = sbmac_fc_disabled;
1093
1094 /*
1095 * Determine SOC type. 112x has Pass3 SOC features.
1096 */
1097 sysrev = SBMAC_READCSR( PKSEG1(A_SCD_SYSTEM_REVISION) );
1098 sc->sbm_pass3_dma = (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1120 ||
1099 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125 ||
1100 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125H ||
1101 (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1250 &&
1102 G_SYS_REVISION(sysrev) >= K_SYS_REVISION_BCM1250_PASS3));
1103 #ifdef SBMAC_EVENT_COUNTERS
1104 const char * const xname = device_xname(sc->sc_dev);
1105 evcnt_attach_dynamic(&sc->sbm_ev_rxintr, EVCNT_TYPE_INTR,
1106 NULL, xname, "rxintr");
1107 evcnt_attach_dynamic(&sc->sbm_ev_txintr, EVCNT_TYPE_INTR,
1108 NULL, xname, "txintr");
1109 evcnt_attach_dynamic(&sc->sbm_ev_txdrop, EVCNT_TYPE_MISC,
1110 NULL, xname, "txdrop");
1111 evcnt_attach_dynamic(&sc->sbm_ev_txstall, EVCNT_TYPE_MISC,
1112 NULL, xname, "txstall");
1113 if (sc->sbm_pass3_dma) {
1114 evcnt_attach_dynamic(&sc->sbm_ev_txsplit, EVCNT_TYPE_MISC,
1115 NULL, xname, "pass3tx-split");
1116 evcnt_attach_dynamic(&sc->sbm_ev_txkeep, EVCNT_TYPE_MISC,
1117 NULL, xname, "pass3tx-keep");
1118 }
1119 #endif
1120 }
1121
1122 /*
1123 * SBMAC_CHANNEL_START(s)
1124 *
1125 * Start packet processing on this MAC.
1126 *
1127 * Input parameters:
1128 * sc - sbmac structure
1129 *
1130 * Return value:
1131 * nothing
1132 */
1133
1134 static void
1135 sbmac_channel_start(struct sbmac_softc *sc)
1136 {
1137 uint64_t reg;
1138 sbmac_port_t port;
1139 uint64_t cfg, fifo, framecfg;
1140 int idx;
1141 uint64_t dma_cfg0, fifo_cfg;
1142 sbmacdma_t *txdma;
1143
1144 /*
1145 * Don't do this if running
1146 */
1147
1148 if (sc->sbm_state == sbmac_state_on)
1149 return;
1150
1151 /*
1152 * Bring the controller out of reset, but leave it off.
1153 */
1154
1155 SBMAC_WRITECSR(sc->sbm_macenable, 0);
1156
1157 /*
1158 * Ignore all received packets
1159 */
1160
1161 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1162
1163 /*
1164 * Calculate values for various control registers.
1165 */
1166
1167 cfg = M_MAC_RETRY_EN |
1168 M_MAC_TX_HOLD_SOP_EN |
1169 V_MAC_TX_PAUSE_CNT_16K |
1170 M_MAC_AP_STAT_EN |
1171 M_MAC_SS_EN |
1172 0;
1173
1174 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
1175 V_MAC_TX_RD_THRSH(4) |
1176 V_MAC_TX_RL_THRSH(4) |
1177 V_MAC_RX_PL_THRSH(4) |
1178 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
1179 V_MAC_RX_PL_THRSH(4) |
1180 V_MAC_RX_RL_THRSH(8) |
1181 0;
1182
1183 framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
1184 V_MAC_MAX_FRAMESZ_DEFAULT |
1185 V_MAC_BACKOFF_SEL(1);
1186
1187 /*
1188 * Clear out the hash address map
1189 */
1190
1191 port = PKSEG1(sc->sbm_base + R_MAC_HASH_BASE);
1192 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1193 SBMAC_WRITECSR(port, 0);
1194 port += sizeof(uint64_t);
1195 }
1196
1197 /*
1198 * Clear out the exact-match table
1199 */
1200
1201 port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE);
1202 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
1203 SBMAC_WRITECSR(port, 0);
1204 port += sizeof(uint64_t);
1205 }
1206
1207 /*
1208 * Clear out the DMA Channel mapping table registers
1209 */
1210
1211 port = PKSEG1(sc->sbm_base + R_MAC_CHUP0_BASE);
1212 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1213 SBMAC_WRITECSR(port, 0);
1214 port += sizeof(uint64_t);
1215 }
1216
1217 port = PKSEG1(sc->sbm_base + R_MAC_CHLO0_BASE);
1218 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1219 SBMAC_WRITECSR(port, 0);
1220 port += sizeof(uint64_t);
1221 }
1222
1223 /*
1224 * Program the hardware address. It goes into the hardware-address
1225 * register as well as the first filter register.
1226 */
1227
1228 reg = sbmac_addr2reg(sc->sbm_hwaddr);
1229
1230 port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE);
1231 SBMAC_WRITECSR(port, reg);
1232 port = PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR);
1233 SBMAC_WRITECSR(port, 0); // pass1 workaround
1234
1235 /*
1236 * Set the receive filter for no packets, and write values
1237 * to the various config registers
1238 */
1239
1240 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1241 SBMAC_WRITECSR(sc->sbm_imr, 0);
1242 SBMAC_WRITECSR(sc->sbm_framecfg, framecfg);
1243 SBMAC_WRITECSR(sc->sbm_fifocfg, fifo);
1244 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1245
1246 /*
1247 * Initialize DMA channels (rings should be ok now)
1248 */
1249
1250 sbdma_channel_start(&(sc->sbm_rxdma));
1251 sbdma_channel_start(&(sc->sbm_txdma));
1252
1253 /*
1254 * Configure the speed, duplex, and flow control
1255 */
1256
1257 sbmac_set_speed(sc, sc->sbm_speed);
1258 sbmac_set_duplex(sc, sc->sbm_duplex, sc->sbm_fc);
1259
1260 /*
1261 * Fill the receive ring
1262 */
1263
1264 sbdma_fillring(&(sc->sbm_rxdma));
1265
1266 /*
1267 * Turn on the rest of the bits in the enable register
1268 */
1269
1270 SBMAC_WRITECSR(sc->sbm_macenable, M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0 |
1271 M_MAC_RX_ENABLE | M_MAC_TX_ENABLE);
1272
1273
1274 /*
1275 * Accept any kind of interrupt on TX and RX DMA channel 0
1276 */
1277 SBMAC_WRITECSR(sc->sbm_imr,
1278 (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1279 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0));
1280
1281 /*
1282 * Enable receiving unicasts and broadcasts
1283 */
1284
1285 SBMAC_WRITECSR(sc->sbm_rxfilter, M_MAC_UCAST_EN | M_MAC_BCAST_EN);
1286
1287 /*
1288 * On chips which support unaligned DMA features, set the descriptor
1289 * ring for transmit channels to use the unaligned buffer format.
1290 */
1291 txdma = &(sc->sbm_txdma);
1292
1293 if (sc->sbm_pass3_dma) {
1294 dma_cfg0 = SBMAC_READCSR(txdma->sbdma_config0);
1295 dma_cfg0 |= V_DMA_DESC_TYPE(K_DMA_DESC_TYPE_RING_UAL_RMW) |
1296 M_DMA_TBX_EN | M_DMA_TDX_EN;
1297 SBMAC_WRITECSR(txdma->sbdma_config0,dma_cfg0);
1298
1299 fifo_cfg = SBMAC_READCSR(sc->sbm_fifocfg);
1300 fifo_cfg |= V_MAC_TX_WR_THRSH(8) |
1301 V_MAC_TX_RD_THRSH(8) | V_MAC_TX_RL_THRSH(8);
1302 SBMAC_WRITECSR(sc->sbm_fifocfg,fifo_cfg);
1303 }
1304
1305 /*
1306 * we're running now.
1307 */
1308
1309 sc->sbm_state = sbmac_state_on;
1310 sc->sc_ethercom.ec_if.if_flags |= IFF_RUNNING;
1311
1312 /*
1313 * Program multicast addresses
1314 */
1315
1316 sbmac_setmulti(sc);
1317
1318 /*
1319 * If channel was in promiscuous mode before, turn that on
1320 */
1321
1322 if (sc->sc_ethercom.ec_if.if_flags & IFF_PROMISC)
1323 sbmac_promiscuous_mode(sc, true);
1324
1325 /*
1326 * Turn on the once-per-second timer
1327 */
1328
1329 callout_reset(&(sc->sc_tick_ch), hz, sbmac_tick, sc);
1330 }
1331
1332 /*
1333 * SBMAC_CHANNEL_STOP(s)
1334 *
1335 * Stop packet processing on this MAC.
1336 *
1337 * Input parameters:
1338 * sc - sbmac structure
1339 *
1340 * Return value:
1341 * nothing
1342 */
1343
1344 static void
1345 sbmac_channel_stop(struct sbmac_softc *sc)
1346 {
1347 uint64_t ctl;
1348
1349 /* don't do this if already stopped */
1350
1351 if (sc->sbm_state == sbmac_state_off)
1352 return;
1353
1354 /* don't accept any packets, disable all interrupts */
1355
1356 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1357 SBMAC_WRITECSR(sc->sbm_imr, 0);
1358
1359 /* Turn off ticker */
1360
1361 callout_stop(&(sc->sc_tick_ch));
1362
1363 /* turn off receiver and transmitter */
1364
1365 ctl = SBMAC_READCSR(sc->sbm_macenable);
1366 ctl &= ~(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0);
1367 SBMAC_WRITECSR(sc->sbm_macenable, ctl);
1368
1369 /* We're stopped now. */
1370
1371 sc->sbm_state = sbmac_state_off;
1372 sc->sc_ethercom.ec_if.if_flags &= ~IFF_RUNNING;
1373
1374 /* Empty the receive and transmit rings */
1375
1376 sbdma_emptyring(&(sc->sbm_rxdma));
1377 sbdma_emptyring(&(sc->sbm_txdma));
1378 }
1379
1380 /*
1381 * SBMAC_SET_CHANNEL_STATE(state)
1382 *
1383 * Set the channel's state ON or OFF
1384 *
1385 * Input parameters:
1386 * state - new state
1387 *
1388 * Return value:
1389 * old state
1390 */
1391
1392 static sbmac_state_t
1393 sbmac_set_channel_state(struct sbmac_softc *sc, sbmac_state_t state)
1394 {
1395 sbmac_state_t oldstate = sc->sbm_state;
1396
1397 /*
1398 * If same as previous state, return
1399 */
1400
1401 if (state == oldstate)
1402 return oldstate;
1403
1404 /*
1405 * If new state is ON, turn channel on
1406 */
1407
1408 if (state == sbmac_state_on)
1409 sbmac_channel_start(sc);
1410 else
1411 sbmac_channel_stop(sc);
1412
1413 /*
1414 * Return previous state
1415 */
1416
1417 return oldstate;
1418 }
1419
1420 /*
1421 * SBMAC_PROMISCUOUS_MODE(sc, enabled)
1422 *
1423 * Turn on or off promiscuous mode
1424 *
1425 * Input parameters:
1426 * sc - softc
1427 * enabled - true to turn on, false to turn off
1428 *
1429 * Return value:
1430 * nothing
1431 */
1432
1433 static void
1434 sbmac_promiscuous_mode(struct sbmac_softc *sc, bool enabled)
1435 {
1436 uint64_t reg;
1437
1438 if (sc->sbm_state != sbmac_state_on)
1439 return;
1440
1441 if (enabled) {
1442 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1443 reg |= M_MAC_ALLPKT_EN;
1444 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1445 } else {
1446 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1447 reg &= ~M_MAC_ALLPKT_EN;
1448 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1449 }
1450 }
1451
1452 /*
1453 * SBMAC_INIT_AND_START(sc)
1454 *
1455 * Stop the channel and restart it. This is generally used
1456 * when we have to do something to the channel that requires
1457 * a swift kick.
1458 *
1459 * Input parameters:
1460 * sc - softc
1461 */
1462
1463 static void
1464 sbmac_init_and_start(struct sbmac_softc *sc)
1465 {
1466 int s;
1467
1468 s = splnet();
1469
1470 mii_pollstat(&sc->sc_mii); /* poll phy for current speed */
1471 sbmac_mii_statchg(&sc->sc_ethercom.ec_if); /* set state to new speed */
1472 sbmac_set_channel_state(sc, sbmac_state_on);
1473
1474 splx(s);
1475 }
1476
1477 /*
1478 * SBMAC_ADDR2REG(ptr)
1479 *
1480 * Convert six bytes into the 64-bit register value that
1481 * we typically write into the SBMAC's address/mcast registers
1482 *
1483 * Input parameters:
1484 * ptr - pointer to 6 bytes
1485 *
1486 * Return value:
1487 * register value
1488 */
1489
1490 static uint64_t
1491 sbmac_addr2reg(u_char *ptr)
1492 {
1493 uint64_t reg = 0;
1494
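	/*
	 * Walk the address back to front so that byte 0 ends up in the low
	 * 8 bits of the register and byte 5 in bits 47:40.
	 */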
1495 ptr += 6;
1496
1497 reg |= (uint64_t) *(--ptr);
1498 reg <<= 8;
1499 reg |= (uint64_t) *(--ptr);
1500 reg <<= 8;
1501 reg |= (uint64_t) *(--ptr);
1502 reg <<= 8;
1503 reg |= (uint64_t) *(--ptr);
1504 reg <<= 8;
1505 reg |= (uint64_t) *(--ptr);
1506 reg <<= 8;
1507 reg |= (uint64_t) *(--ptr);
1508
1509 return reg;
1510 }
1511
1512 /*
1513 * SBMAC_SET_SPEED(sc, speed)
1514 *
1515 * Configure LAN speed for the specified MAC.
1516 * Warning: must be called when MAC is off!
1517 *
1518 * Input parameters:
1519 * sc - sbmac structure
1520 * speed - speed to set MAC to (see sbmac_speed_t enum)
1521 *
1522 * Return value:
1523 * true if successful
1524 * false indicates invalid parameters
1525 */
1526
1527 static bool
1528 sbmac_set_speed(struct sbmac_softc *sc, sbmac_speed_t speed)
1529 {
1530 uint64_t cfg;
1531 uint64_t framecfg;
1532
1533 /*
1534 * Save new current values
1535 */
1536
1537 sc->sbm_speed = speed;
1538
1539 if (sc->sbm_state != sbmac_state_off)
1540 panic("sbmac_set_speed while MAC not off");
1541
1542 /*
1543 * Read current register values
1544 */
1545
1546 cfg = SBMAC_READCSR(sc->sbm_maccfg);
1547 framecfg = SBMAC_READCSR(sc->sbm_framecfg);
1548
1549 /*
1550 * Mask out the stuff we want to change
1551 */
1552
1553 cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
1554 framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
1555 M_MAC_SLOT_SIZE);
1556
1557 /*
1558 * Now add in the new bits
1559 */
1560
1561 switch (speed) {
1562 case sbmac_speed_10:
1563 framecfg |= V_MAC_IFG_RX_10 |
1564 V_MAC_IFG_TX_10 |
1565 K_MAC_IFG_THRSH_10 |
1566 V_MAC_SLOT_SIZE_10;
1567 cfg |= V_MAC_SPEED_SEL_10MBPS;
1568 break;
1569
1570 case sbmac_speed_100:
1571 framecfg |= V_MAC_IFG_RX_100 |
1572 V_MAC_IFG_TX_100 |
1573 V_MAC_IFG_THRSH_100 |
1574 V_MAC_SLOT_SIZE_100;
1575 cfg |= V_MAC_SPEED_SEL_100MBPS ;
1576 break;
1577
1578 case sbmac_speed_1000:
1579 framecfg |= V_MAC_IFG_RX_1000 |
1580 V_MAC_IFG_TX_1000 |
1581 V_MAC_IFG_THRSH_1000 |
1582 V_MAC_SLOT_SIZE_1000;
1583 cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
1584 break;
1585
1586 case sbmac_speed_auto: /* XXX not implemented */
1587 /* fall through */
1588 default:
1589 return false;
1590 }
1591
1592 /*
1593 * Send the bits back to the hardware
1594 */
1595
1596 SBMAC_WRITECSR(sc->sbm_framecfg, framecfg);
1597 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1598
1599 return true;
1600 }
1601
1602 /*
1603 * SBMAC_SET_DUPLEX(sc, duplex, fc)
1604 *
1605 * Set Ethernet duplex and flow control options for this MAC
1606 * Warning: must be called when MAC is off!
1607 *
1608 * Input parameters:
1609 * sc - sbmac structure
1610 * duplex - duplex setting (see sbmac_duplex_t)
1611 * fc - flow control setting (see sbmac_fc_t)
1612 *
1613 * Return value:
1614 * true if ok
1615 * false if an invalid parameter combination was specified
1616 */
1617
1618 static bool
1619 sbmac_set_duplex(struct sbmac_softc *sc, sbmac_duplex_t duplex, sbmac_fc_t fc)
1620 {
1621 uint64_t cfg;
1622
1623 /*
1624 * Save new current values
1625 */
1626
1627 sc->sbm_duplex = duplex;
1628 sc->sbm_fc = fc;
1629
1630 if (sc->sbm_state != sbmac_state_off)
1631 panic("sbmac_set_duplex while MAC not off");
1632
1633 /*
1634 * Read current register values
1635 */
1636
1637 cfg = SBMAC_READCSR(sc->sbm_maccfg);
1638
1639 /*
1640 * Mask off the stuff we're about to change
1641 */
1642
1643 cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
1644
1645 switch (duplex) {
1646 case sbmac_duplex_half:
1647 switch (fc) {
1648 case sbmac_fc_disabled:
1649 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
1650 break;
1651
1652 case sbmac_fc_collision:
1653 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
1654 break;
1655
1656 case sbmac_fc_carrier:
1657 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
1658 break;
1659
1660 case sbmac_fc_auto: /* XXX not implemented */
1661 /* fall through */
1662 case sbmac_fc_frame: /* not valid in half duplex */
1663 default: /* invalid selection */
1664 panic("%s: invalid half duplex fc selection %d",
1665 device_xname(sc->sc_dev), fc);
1666 return false;
1667 }
1668 break;
1669
1670 case sbmac_duplex_full:
1671 switch (fc) {
1672 case sbmac_fc_disabled:
1673 cfg |= V_MAC_FC_CMD_DISABLED;
1674 break;
1675
1676 case sbmac_fc_frame:
1677 cfg |= V_MAC_FC_CMD_ENABLED;
1678 break;
1679
1680 case sbmac_fc_collision: /* not valid in full duplex */
1681 case sbmac_fc_carrier: /* not valid in full duplex */
1682 case sbmac_fc_auto: /* XXX not implemented */
1683 /* fall through */
1684 default:
1685 panic("%s: invalid full duplex fc selection %d",
1686 device_xname(sc->sc_dev), fc);
1687 return false;
1688 }
1689 break;
1690
1691 default:
1692 /* fall through */
1693 case sbmac_duplex_auto:
1694 panic("%s: bad duplex %d", device_xname(sc->sc_dev), duplex);
1695 /* XXX not implemented */
1696 break;
1697 }
1698
1699 /*
1700 * Send the bits back to the hardware
1701 */
1702
1703 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1704
1705 return true;
1706 }
1707
1708 /*
1709 * SBMAC_INTR()
1710 *
1711 * Interrupt handler for MAC interrupts
1712 *
1713 * Input parameters:
1714  * xsc - MAC softc pointer (status and pc are unused)
1715 *
1716 * Return value:
1717 * nothing
1718 */
1719
1720 /* ARGSUSED */
1721 static void
1722 sbmac_intr(void *xsc, uint32_t status, vaddr_t pc)
1723 {
1724 struct sbmac_softc *sc = xsc;
1725 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1726 uint64_t isr;
1727
1728 for (;;) {
1729
1730 /*
1731 * Read the ISR (this clears the bits in the real register)
1732 */
1733
1734 isr = SBMAC_READCSR(sc->sbm_isr);
1735
1736 if (isr == 0)
1737 break;
1738
1739 /*
1740 * Transmits on channel 0
1741 */
1742
1743 if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
1744 sbdma_tx_process(sc, &(sc->sbm_txdma));
1745 SBMAC_EVCNT_INCR(sc->sbm_ev_txintr);
1746 }
1747
1748 /*
1749 * Receives on channel 0
1750 */
1751
1752 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
1753 sbdma_rx_process(sc, &(sc->sbm_rxdma));
1754 SBMAC_EVCNT_INCR(sc->sbm_ev_rxintr);
1755 }
1756 }
1757
1758 /* try to get more packets going */
1759 if_schedule_deferred_start(ifp);
1760 }
1761
1762
1763 /*
1764 * SBMAC_START(ifp)
1765 *
1766 * Start output on the specified interface. Basically, we
1767 * queue as many buffers as we can until the ring fills up, or
1768 * we run off the end of the queue, whichever comes first.
1769 *
1770 * Input parameters:
1771 * ifp - interface
1772 *
1773 * Return value:
1774 * nothing
1775 */
1776
1777 static void
1778 sbmac_start(struct ifnet *ifp)
1779 {
1780 struct sbmac_softc *sc;
1781 struct mbuf *m_head = NULL;
1782 int rv;
1783
1784 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1785 return;
1786
1787 sc = ifp->if_softc;
1788
1789 for (;;) {
1790
1791 IF_DEQUEUE(&ifp->if_snd, m_head);
1792 if (m_head == NULL)
1793 break;
1794
1795 /*
1796 * Put the buffer on the transmit ring. If we
1797 * don't have room, set the OACTIVE flag and wait
1798 * for the NIC to drain the ring.
1799 */
1800
1801 rv = sbdma_add_txbuffer(&(sc->sbm_txdma), m_head);
1802
1803 if (rv == 0) {
1804 /*
1805 * If there's a BPF listener, bounce a copy of this
1806 * frame to it.
1807 */
1808 bpf_mtap(ifp, m_head, BPF_D_OUT);
1809 if (!sc->sbm_pass3_dma) {
1810 /*
1811 * Don't free mbuf if we're not copying to new
1812 * mbuf in sbdma_add_txbuffer. It will be
1813 * freed in sbdma_tx_process.
1814 */
1815 m_freem(m_head);
1816 }
1817 } else {
1818 IF_PREPEND(&ifp->if_snd, m_head);
1819 ifp->if_flags |= IFF_OACTIVE;
1820 break;
1821 }
1822 }
1823 }
1824
1825 /*
1826 * SBMAC_SETMULTI(sc)
1827 *
1828 * Reprogram the multicast table into the hardware, given
1829 * the list of multicasts associated with the interface
1830 * structure.
1831 *
1832 * Input parameters:
1833 * sc - softc
1834 *
1835 * Return value:
1836 * nothing
1837 */
1838
1839 static void
1840 sbmac_setmulti(struct sbmac_softc *sc)
1841 {
1842 struct ifnet *ifp;
1843 uint64_t reg;
1844 sbmac_port_t port;
1845 int idx;
1846 struct ether_multi *enm;
1847 struct ether_multistep step;
1848
1849 ifp = &sc->sc_ethercom.ec_if;
1850
1851 /*
1852 * Clear out entire multicast table. We do this by nuking
1853 * the entire hash table and all the direct matches except
1854 * the first one, which is used for our station address
1855 */
1856
1857 for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
1858 port = PKSEG1(sc->sbm_base +
1859 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1860 SBMAC_WRITECSR(port, 0);
1861 }
1862
1863 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1864 port = PKSEG1(sc->sbm_base +
1865 R_MAC_HASH_BASE+(idx*sizeof(uint64_t)));
1866 SBMAC_WRITECSR(port, 0);
1867 }
1868
1869 /*
1870 * Clear the filter to say we don't want any multicasts.
1871 */
1872
1873 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1874 reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1875 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1876
1877 if (ifp->if_flags & IFF_ALLMULTI) {
1878 /*
1879 * Enable ALL multicasts. Do this by inverting the
1880 * multicast enable bit.
1881 */
1882 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1883 reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1884 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1885 return;
1886 }
1887
1888 /*
1889 	 * Program new multicast entries. For now, only use the
1890 * perfect filter. In the future we'll need to use the
1891 * hash filter if the perfect filter overflows
1892 */
1893
1894 /*
1895 * XXX only using perfect filter for now, need to use hash
1896 * XXX if the table overflows
1897 */
1898
1899 idx = 1; /* skip station address */
1900 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
1901 while ((enm != NULL) && (idx < MAC_ADDR_COUNT)) {
1902 reg = sbmac_addr2reg(enm->enm_addrlo);
1903 port = PKSEG1(sc->sbm_base +
1904 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1905 SBMAC_WRITECSR(port, reg);
1906 idx++;
1907 ETHER_NEXT_MULTI(step, enm);
1908 }
1909
1910 /*
1911 * Enable the "accept multicast bits" if we programmed at least one
1912 * multicast.
1913 */
1914
1915 if (idx > 1) {
1916 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1917 reg |= M_MAC_MCAST_EN;
1918 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1919 }
1920 }
1921
1922 /*
1923 * SBMAC_ETHER_IOCTL(ifp, cmd, data)
1924 *
1925 * Generic IOCTL requests for this interface. The basic
1926 * stuff is handled here for bringing the interface up,
1927 * handling multicasts, etc.
1928 *
1929 * Input parameters:
1930 * ifp - interface structure
1931 * cmd - command code
1932 * data - pointer to data
1933 *
1934 * Return value:
1935 * return value (0 is success)
1936 */
1937
1938 static int
1939 sbmac_ether_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1940 {
1941 struct ifaddr *ifa = (struct ifaddr *) data;
1942 struct sbmac_softc *sc = ifp->if_softc;
1943
1944 switch (cmd) {
1945 case SIOCINITIFADDR:
1946 ifp->if_flags |= IFF_UP;
1947
1948 switch (ifa->ifa_addr->sa_family) {
1949 #ifdef INET
1950 case AF_INET:
1951 sbmac_init_and_start(sc);
1952 arp_ifinit(ifp, ifa);
1953 break;
1954 #endif
1955 default:
1956 sbmac_init_and_start(sc);
1957 break;
1958 }
1959 break;
1960
1961 default:
1962 return ENOTTY;
1963 }
1964
1965 return (0);
1966 }
1967
1968 /*
1969 * SBMAC_IOCTL(ifp, cmd, data)
1970 *
1971 * Main IOCTL handler - dispatches to other IOCTLs for various
1972 * types of requests.
1973 *
1974 * Input parameters:
1975 * ifp - interface pointer
1976 * cmd - command code
1977 * data - pointer to argument data
1978 *
1979 * Return value:
1980 * 0 if ok
1981 * else error code
1982 */
1983
1984 static int
1985 sbmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1986 {
1987 struct sbmac_softc *sc = ifp->if_softc;
1988 struct ifreq *ifr = (struct ifreq *) data;
1989 int s, error = 0;
1990
1991 s = splnet();
1992
1993 switch (cmd) {
1994 case SIOCINITIFADDR:
1995 error = sbmac_ether_ioctl(ifp, cmd, data);
1996 break;
1997 case SIOCSIFMTU:
1998 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
1999 error = EINVAL;
2000 else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
2001 /* XXX Program new MTU here */
2002 error = 0;
2003 break;
2004 case SIOCSIFFLAGS:
2005 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2006 break;
2007 if (ifp->if_flags & IFF_UP) {
2008 /*
2009 * If only the state of the PROMISC flag changed,
2010 * just tweak the hardware registers.
2011 */
2012 if ((ifp->if_flags & IFF_RUNNING) &&
2013 (ifp->if_flags & IFF_PROMISC)) {
2014 /* turn on promiscuous mode */
2015 sbmac_promiscuous_mode(sc, true);
2016 } else if (ifp->if_flags & IFF_RUNNING &&
2017 !(ifp->if_flags & IFF_PROMISC)) {
2018 /* turn off promiscuous mode */
2019 sbmac_promiscuous_mode(sc, false);
2020 } else
2021 sbmac_set_channel_state(sc, sbmac_state_on);
2022 } else {
2023 if (ifp->if_flags & IFF_RUNNING)
2024 sbmac_set_channel_state(sc, sbmac_state_off);
2025 }
2026
2027 sc->sbm_if_flags = ifp->if_flags;
2028 error = 0;
2029 break;
2030
2031 case SIOCADDMULTI:
2032 case SIOCDELMULTI:
2033 case SIOCSIFMEDIA:
2034 case SIOCGIFMEDIA:
2035 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
2036 error = 0;
2037 if (ifp->if_flags & IFF_RUNNING)
2038 sbmac_setmulti(sc);
2039 }
2040 break;
2041 default:
2042 error = ether_ioctl(ifp, cmd, data);
2043 break;
2044 }
2045
2046 (void)splx(s);
2047
2048 return(error);
2049 }
2050
2051 /*
2052 * SBMAC_IFMEDIA_UPD(ifp)
2053 *
2054 * Configure an appropriate media type for this interface,
2055 * given the data in the interface structure
2056 *
2057 * Input parameters:
2058 * ifp - interface
2059 *
2060 * Return value:
2061 * 0 if ok
2062 * else error code
2063 */
2064
2065 /*
2066 * SBMAC_IFMEDIA_STS(ifp, ifmr)
2067 *
2068 * Report current media status (used by ifconfig, for example)
2069 *
2070 * Input parameters:
2071 * ifp - interface structure
2072 * ifmr - media request structure
2073 *
2074 * Return value:
2075 * nothing
2076 */
2077
2078 /*
2079 * SBMAC_WATCHDOG(ifp)
2080 *
2081 * Called periodically to make sure we're still happy.
2082 *
2083 * Input parameters:
2084 * ifp - interface structure
2085 *
2086 * Return value:
2087 * nothing
2088 */
2089
2090 static void
2091 sbmac_watchdog(struct ifnet *ifp)
2092 {
2093
2094 /* XXX do something */
2095 }
2096
2097 /*
2098 * One second timer, used to tick MII.
2099 */
2100 static void
2101 sbmac_tick(void *arg)
2102 {
2103 struct sbmac_softc *sc = arg;
2104 int s;
2105
2106 s = splnet();
2107 mii_tick(&sc->sc_mii);
2108 splx(s);
2109
2110 callout_reset(&sc->sc_tick_ch, hz, sbmac_tick, sc);
2111 }
2112
2113
2114 /*
2115 * SBMAC_MATCH(parent, match, aux)
2116 *
2117 * Part of the config process - see if this device matches the
2118 * info about what we expect to find on the bus.
2119 *
2120 * Input parameters:
2121 * parent - parent bus structure
2122  * match - device configuration data (cfdata)
2123 * aux - bus-specific args
2124 *
2125 * Return value:
2126 * 1 if we match
2127 * 0 if we don't match
2128 */
2129
2130 static int
2131 sbmac_match(device_t parent, cfdata_t match, void *aux)
2132 {
2133 struct sbobio_attach_args *sa = aux;
2134
2135 /*
2136 * Make sure it's a MAC
2137 */
2138 if (sa->sa_locs.sa_type != SBOBIO_DEVTYPE_MAC)
2139 return 0;
2140
2141 /*
2142 * Yup, it is.
2143 */
2144
2145 return 1;
2146 }
2147
2148 /*
2149 * SBMAC_PARSE_XDIGIT(str)
2150 *
2151 * Parse a hex digit, returning its value
2152 *
2153 * Input parameters:
2154 * str - character
2155 *
2156 * Return value:
2157 * hex value, or -1 if invalid
2158 */
2159
2160 static int
2161 sbmac_parse_xdigit(char str)
2162 {
2163 int digit;
2164
2165 if ((str >= '0') && (str <= '9'))
2166 digit = str - '0';
2167 else if ((str >= 'a') && (str <= 'f'))
2168 digit = str - 'a' + 10;
2169 else if ((str >= 'A') && (str <= 'F'))
2170 digit = str - 'A' + 10;
2171 else
2172 digit = -1;
2173
2174 return digit;
2175 }
2176
2177 /*
2178 * SBMAC_PARSE_HWADDR(str, hwaddr)
2179 *
2180 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2181 * Ethernet address.
2182 *
2183 * Input parameters:
2184 * str - string
2185 * hwaddr - pointer to hardware address
2186 *
2187 * Return value:
2188 * 0 if ok, else -1
2189 */
2190
2191 static int
2192 sbmac_parse_hwaddr(const char *str, u_char *hwaddr)
2193 {
2194 int digit1, digit2;
2195 int idx = 6;
2196
2197 while (*str && (idx > 0)) {
2198 digit1 = sbmac_parse_xdigit(*str);
2199 if (digit1 < 0)
2200 return -1;
2201 str++;
2202 if (!*str)
2203 return -1;
2204
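		/*
		 * A separator immediately after one digit means a
		 * single-digit byte, e.g. "0:1:2:3:4:5".
		 */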
2205 if ((*str == ':') || (*str == '-')) {
2206 digit2 = digit1;
2207 digit1 = 0;
2208 } else {
2209 digit2 = sbmac_parse_xdigit(*str);
2210 if (digit2 < 0)
2211 return -1;
2212 str++;
2213 }
2214
2215 *hwaddr++ = (digit1 << 4) | digit2;
2216 idx--;
2217
2218 if (*str == '-')
2219 str++;
2220 if (*str == ':')
2221 str++;
2222 }
2223 return 0;
2224 }
2225
2226 /*
2227 * SBMAC_ATTACH(parent, self, aux)
2228 *
2229 * Attach routine - init hardware and hook ourselves into NetBSD.
2230 *
2231 * Input parameters:
2232 * parent - parent bus device
2233 * self - our softc
2234 * aux - attach data
2235 *
2236 * Return value:
2237 * nothing
2238 */
2239
2240 static void
2241 sbmac_attach(device_t parent, device_t self, void *aux)
2242 {
2243 struct sbmac_softc * const sc = device_private(self);
2244 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
2245 struct sbobio_attach_args * const sa = aux;
2246 u_char *eaddr;
2247 static int unit = 0; /* XXX */
2248 uint64_t ea_reg;
2249 int idx;
2250
2251 sc->sc_dev = self;
2252
2253 /* Determine controller base address */
2254
2255 sc->sbm_base = sa->sa_base + sa->sa_locs.sa_offset;
2256
2257 eaddr = sc->sbm_hwaddr;
2258
2259 /*
2260 * Initialize context (get pointers to registers and stuff), then
2261 * allocate the memory for the descriptor tables.
2262 */
2263
2264 sbmac_initctx(sc);
2265
2266 callout_init(&(sc->sc_tick_ch), 0);
2267
2268 /*
2269 	 * Read the ethernet address. The firmware left this programmed
2270 * for us in the ethernet address register for each mac.
2271 */
2272
2273 ea_reg = SBMAC_READCSR(PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR));
2274 for (idx = 0; idx < 6; idx++) {
2275 eaddr[idx] = (uint8_t) (ea_reg & 0xFF);
2276 ea_reg >>= 8;
2277 }
2278
2279 #define SBMAC_DEFAULT_HWADDR "40:00:00:00:01:00"
2280 if (eaddr[0] == 0 && eaddr[1] == 0 && eaddr[2] == 0 &&
2281 eaddr[3] == 0 && eaddr[4] == 0 && eaddr[5] == 0) {
2282 sbmac_parse_hwaddr(SBMAC_DEFAULT_HWADDR, eaddr);
2283 eaddr[5] = unit;
2284 }
2285
2286 #ifdef SBMAC_ETH0_HWADDR
2287 if (unit == 0)
2288 sbmac_parse_hwaddr(SBMAC_ETH0_HWADDR, eaddr);
2289 #endif
2290 #ifdef SBMAC_ETH1_HWADDR
2291 if (unit == 1)
2292 sbmac_parse_hwaddr(SBMAC_ETH1_HWADDR, eaddr);
2293 #endif
2294 #ifdef SBMAC_ETH2_HWADDR
2295 if (unit == 2)
2296 sbmac_parse_hwaddr(SBMAC_ETH2_HWADDR, eaddr);
2297 #endif
2298 unit++;
2299
2300 /*
2301 * Display Ethernet address (this is called during the config process
2302 * so we need to finish off the config message that was being displayed)
2303 */
2304 aprint_normal(": Ethernet%s\n",
2305 sc->sbm_pass3_dma ? ", using unaligned tx DMA" : "");
2306 aprint_normal_dev(self, "Ethernet address: %s\n", ether_sprintf(eaddr));
2307
2308
2309 /*
2310 * Set up ifnet structure
2311 */
2312
2313 ifp->if_softc = sc;
2314 memcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
2315 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
2316 IFF_NOTRAILERS;
2317 ifp->if_ioctl = sbmac_ioctl;
2318 ifp->if_start = sbmac_start;
2319 ifp->if_watchdog = sbmac_watchdog;
2320 ifp->if_snd.ifq_maxlen = SBMAC_MAX_TXDESCR - 1;
2321
2322 /*
2323 * Set up ifmedia support.
2324 */
2325
2326 /*
2327 * Initialize MII/media info.
2328 */
2329 sc->sc_mii.mii_ifp = ifp;
2330 sc->sc_mii.mii_readreg = sbmac_mii_readreg;
2331 sc->sc_mii.mii_writereg = sbmac_mii_writereg;
2332 sc->sc_mii.mii_statchg = sbmac_mii_statchg;
2333 sc->sc_ethercom.ec_mii = &sc->sc_mii;
2334 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
2335 ether_mediastatus);
2336 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2337 MII_OFFSET_ANY, 0);
2338
2339 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2340 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2341 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2342 } else {
2343 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2344 }
2345
2346
2347 /*
2348 * map/route interrupt
2349 */
2350
2351 sc->sbm_intrhand = cpu_intr_establish(sa->sa_locs.sa_intr[0], IPL_NET,
2352 sbmac_intr, sc);
2353
2354 /*
2355 * Call MI attach routines.
2356 */
2357 if_attach(ifp);
2358 if_deferred_start_init(ifp, NULL);
2359 ether_ifattach(ifp, eaddr);
2360 }
2361