/* $NetBSD: sbmac.c,v 1.60 2019/05/28 07:41:47 msaitoh Exp $ */
2
3 /*
4 * Copyright 2000, 2001, 2004
5 * Broadcom Corporation. All rights reserved.
6 *
7 * This software is furnished under license and may be used and copied only
8 * in accordance with the following terms and conditions. Subject to these
9 * conditions, you may download, copy, install, use, modify and distribute
10 * modified or unmodified copies of this software in source and/or binary
11 * form. No title or ownership is transferred hereby.
12 *
13 * 1) Any source code used, modified or distributed must reproduce and
14 * retain this copyright notice and list of conditions as they appear in
15 * the source file.
16 *
17 * 2) No right is granted to use any trade name, trademark, or logo of
18 * Broadcom Corporation. The "Broadcom Corporation" name may not be
19 * used to endorse or promote products derived from this software
20 * without the prior written permission of Broadcom Corporation.
21 *
22 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
25 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
26 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
27 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: sbmac.c,v 1.60 2019/05/28 07:41:47 msaitoh Exp $");
37
38 #include "opt_inet.h"
39 #include "opt_ns.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/kmem.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/queue.h>
49 #include <sys/device.h>
50
51 #include <net/if.h>
52 #include <net/if_arp.h>
53 #include <net/if_ether.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56
57 #include <net/bpf.h>
58
59 #ifdef INET
60 #include <netinet/in.h>
61 #include <netinet/if_inarp.h>
62 #endif
63
64 #include <mips/locore.h>
65
66 #include "sbobiovar.h"
67
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 #include <dev/mii/mii_bitbang.h>
71
72 #include <mips/sibyte/include/sb1250_defs.h>
73 #include <mips/sibyte/include/sb1250_regs.h>
74 #include <mips/sibyte/include/sb1250_mac.h>
75 #include <mips/sibyte/include/sb1250_dma.h>
76 #include <mips/sibyte/include/sb1250_scd.h>
77
78 #include <evbmips/sbmips/systemsw.h>
79
80 /* Simple types */
81
82 typedef u_long sbmac_port_t;
83 typedef uint64_t sbmac_physaddr_t;
84 typedef uint64_t sbmac_enetaddr_t;
85
86 typedef enum { sbmac_speed_auto, sbmac_speed_10,
87 sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
88
89 typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
90 sbmac_duplex_full } sbmac_duplex_t;
91
92 typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
93 sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
94
95 typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
96 sbmac_state_broken } sbmac_state_t;
97
98
99 /* Macros */
100
101 #define SBMAC_EVENT_COUNTERS /* Include counters for various events */
102
#define SBDMA_NEXTBUF(d, f)	(((f) + 1) & (d)->sbdma_dscr_mask)
104
105 #define CACHELINESIZE 32
106 #define NUMCACHEBLKS(x) (((x)+CACHELINESIZE-1)/CACHELINESIZE)
107 #define KVTOPHYS(x) kvtophys((vaddr_t)(x))
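
/*
 * For example, with the 256-entry rings below the mask is 0xff, so
 * SBDMA_NEXTBUF() wraps index 255 back to 0; NUMCACHEBLKS(1518) is 48,
 * i.e. a maximum-sized Ethernet frame spans 48 cache lines.
 */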
108
109 #ifdef SBMACDEBUG
110 #define dprintf(x) printf x
111 #else
112 #define dprintf(x)
113 #endif
114
115 #define SBMAC_READCSR(t) mips3_ld((register_t)(t))
116 #define SBMAC_WRITECSR(t, v) mips3_sd((register_t)(t), (v))
117
118 #define PKSEG1(x) ((sbmac_port_t) MIPS_PHYS_TO_KSEG1(x))
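
/*
 * The MAC CSRs are 64 bits wide and are accessed uncached through KSEG1,
 * hence the mips3_ld()/mips3_sd() pairs on PKSEG1-mapped addresses.
 */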
119
120 /* These are limited to fit within one virtual page, and must be 2**N. */
121 #define SBMAC_MAX_TXDESCR 256 /* should be 1024 */
122 #define SBMAC_MAX_RXDESCR 256 /* should be 512 */
123
124 /* DMA Descriptor structure */
125
126 typedef struct sbdmadscr_s {
127 uint64_t dscr_a;
128 uint64_t dscr_b;
129 } sbdmadscr_t;
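
/*
 * dscr_a carries the buffer's physical address together with flag and
 * size bits; dscr_b carries the per-packet options and length fields
 * (see the Rx/Tx descriptor setup below).
 */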
130
131
132 /* DMA Controller structure */
133
134 typedef struct sbmacdma_s {
135
136 /*
137 * This stuff is used to identify the channel and the registers
138 * associated with it.
139 */
140
141 struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */
142 int sbdma_channel; /* channel number */
143 int sbdma_txdir; /* direction (1=transmit) */
144 int sbdma_maxdescr; /* total # of descriptors in ring */
145 sbmac_port_t sbdma_config0; /* DMA config register 0 */
146 sbmac_port_t sbdma_config1; /* DMA config register 1 */
147 sbmac_port_t sbdma_dscrbase; /* Descriptor base address */
148 sbmac_port_t sbdma_dscrcnt; /* Descriptor count register */
149 sbmac_port_t sbdma_curdscr; /* current descriptor address */
150
151 /*
152 * This stuff is for maintenance of the ring
153 */
154 sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */
155 struct mbuf **sbdma_ctxtable; /* context table, one per descr */
156 unsigned int sbdma_dscr_mask; /* sbdma_maxdescr - 1 */
157 paddr_t sbdma_dscrtable_phys; /* and also the phys addr */
158 unsigned int sbdma_add_index; /* next dscr for sw to add */
159 unsigned int sbdma_rem_index; /* next dscr for sw to remove */
160 } sbmacdma_t;
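
/*
 * Ring bookkeeping: the ring is empty when sbdma_add_index equals
 * sbdma_rem_index and full when SBDMA_NEXTBUF(d, sbdma_add_index) equals
 * sbdma_rem_index, so at most sbdma_maxdescr - 1 descriptors are ever
 * outstanding.
 */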
161
162
163 /* Ethernet softc structure */
164
165 struct sbmac_softc {
166
167 /*
168 * NetBSD-specific things
169 */
170 struct ethercom sc_ethercom; /* Ethernet common part */
171 struct mii_data sc_mii;
172 struct callout sc_tick_ch;
173
174 device_t sc_dev; /* device */
175 int sbm_if_flags;
176 void *sbm_intrhand;
177
178 /*
179 * Controller-specific things
180 */
181
182 sbmac_port_t sbm_base; /* MAC's base address */
183 sbmac_state_t sbm_state; /* current state */
184
185 sbmac_port_t sbm_macenable; /* MAC Enable Register */
186 sbmac_port_t sbm_maccfg; /* MAC Configuration Register */
187 sbmac_port_t sbm_fifocfg; /* FIFO configuration register */
188 sbmac_port_t sbm_framecfg; /* Frame configuration register */
189 sbmac_port_t sbm_rxfilter; /* receive filter register */
190 sbmac_port_t sbm_isr; /* Interrupt status register */
191 sbmac_port_t sbm_imr; /* Interrupt mask register */
192
193 sbmac_speed_t sbm_speed; /* current speed */
194 sbmac_duplex_t sbm_duplex; /* current duplex */
195 sbmac_fc_t sbm_fc; /* current flow control setting */
196 int sbm_rxflags; /* received packet flags */
197
198 u_char sbm_hwaddr[ETHER_ADDR_LEN];
199
200 sbmacdma_t sbm_txdma; /* for now, only use channel 0 */
201 sbmacdma_t sbm_rxdma;
202
203 int sbm_pass3_dma; /* chip has pass3 SOC DMA features */
204
205 #ifdef SBMAC_EVENT_COUNTERS
206 struct evcnt sbm_ev_rxintr; /* Rx interrupts */
207 struct evcnt sbm_ev_txintr; /* Tx interrupts */
	struct evcnt sbm_ev_txdrop;	/* Tx dropped: mbuf allocation failed */
209 struct evcnt sbm_ev_txstall; /* Tx stalled due to no descriptors free */
210
211 struct evcnt sbm_ev_txsplit; /* pass3 Tx split mbuf */
212 struct evcnt sbm_ev_txkeep; /* pass3 Tx didn't split mbuf */
213 #endif
214 };
215
216
217 #ifdef SBMAC_EVENT_COUNTERS
218 #define SBMAC_EVCNT_INCR(ev) (ev).ev_count++
219 #else
220 #define SBMAC_EVCNT_INCR(ev) do { /* nothing */ } while (0)
221 #endif
222
223 /* Externs */
224
225 extern paddr_t kvtophys(vaddr_t);
226
227 /* Prototypes */
228
229 static void sbdma_initctx(sbmacdma_t *, struct sbmac_softc *, int, int, int);
230 static void sbdma_channel_start(sbmacdma_t *);
231 static int sbdma_add_rcvbuffer(sbmacdma_t *, struct mbuf *);
232 static int sbdma_add_txbuffer(sbmacdma_t *, struct mbuf *);
233 static void sbdma_emptyring(sbmacdma_t *);
234 static void sbdma_fillring(sbmacdma_t *);
235 static void sbdma_rx_process(struct sbmac_softc *, sbmacdma_t *);
236 static void sbdma_tx_process(struct sbmac_softc *, sbmacdma_t *);
237 static void sbmac_initctx(struct sbmac_softc *);
238 static void sbmac_channel_start(struct sbmac_softc *);
239 static void sbmac_channel_stop(struct sbmac_softc *);
240 static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,
241 sbmac_state_t);
242 static void sbmac_promiscuous_mode(struct sbmac_softc *, bool);
243 static void sbmac_init_and_start(struct sbmac_softc *);
244 static uint64_t sbmac_addr2reg(u_char *);
245 static void sbmac_intr(void *, uint32_t, vaddr_t);
246 static void sbmac_start(struct ifnet *);
247 static void sbmac_setmulti(struct sbmac_softc *);
248 static int sbmac_ether_ioctl(struct ifnet *, u_long, void *);
249 static int sbmac_ioctl(struct ifnet *, u_long, void *);
250 static void sbmac_watchdog(struct ifnet *);
251 static int sbmac_match(device_t, cfdata_t, void *);
252 static void sbmac_attach(device_t, device_t, void *);
253 static bool sbmac_set_speed(struct sbmac_softc *, sbmac_speed_t);
254 static bool sbmac_set_duplex(struct sbmac_softc *, sbmac_duplex_t, sbmac_fc_t);
255 static void sbmac_tick(void *);
256
257
258 /* Globals */
259
260 CFATTACH_DECL_NEW(sbmac, sizeof(struct sbmac_softc),
261 sbmac_match, sbmac_attach, NULL, NULL);
262
263 static uint32_t sbmac_mii_bitbang_read(device_t self);
264 static void sbmac_mii_bitbang_write(device_t self, uint32_t val);
265
266 static const struct mii_bitbang_ops sbmac_mii_bitbang_ops = {
267 sbmac_mii_bitbang_read,
268 sbmac_mii_bitbang_write,
269 {
270 (uint32_t)M_MAC_MDIO_OUT, /* MII_BIT_MDO */
271 (uint32_t)M_MAC_MDIO_IN, /* MII_BIT_MDI */
272 (uint32_t)M_MAC_MDC, /* MII_BIT_MDC */
273 0, /* MII_BIT_DIR_HOST_PHY */
274 (uint32_t)M_MAC_MDIO_DIR /* MII_BIT_DIR_PHY_HOST */
275 }
276 };
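
/*
 * The MII is bit-banged: mii_bitbang_readreg()/mii_bitbang_writereg()
 * clock each management frame through R_MAC_MDIO one bit at a time
 * using the two accessors and the bit masks above.
 */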
277
278 static uint32_t
279 sbmac_mii_bitbang_read(device_t self)
280 {
281 struct sbmac_softc *sc = device_private(self);
282 sbmac_port_t reg;
283
284 reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
285 return (uint32_t) SBMAC_READCSR(reg);
286 }
287
288 static void
289 sbmac_mii_bitbang_write(device_t self, uint32_t val)
290 {
291 struct sbmac_softc *sc = device_private(self);
292 sbmac_port_t reg;
293
294 reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
295
296 SBMAC_WRITECSR(reg, (val &
297 (M_MAC_MDC | M_MAC_MDIO_DIR | M_MAC_MDIO_OUT | M_MAC_MDIO_IN)));
298 }
299
300 /*
 * Read a PHY register through the MII.
302 */
303 static int
304 sbmac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
305 {
306
307 return mii_bitbang_readreg(self, &sbmac_mii_bitbang_ops, phy, reg,
308 val);
309 }
310
311 /*
312 * Write to a PHY register through the MII.
313 */
314 static int
315 sbmac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
316 {
317
318 return mii_bitbang_writereg(self, &sbmac_mii_bitbang_ops, phy, reg,
319 val);
320 }
321
322 static void
323 sbmac_mii_statchg(struct ifnet *ifp)
324 {
325 struct sbmac_softc *sc = ifp->if_softc;
326 sbmac_state_t oldstate;
327
328 /* Stop the MAC in preparation for changing all of the parameters. */
329 oldstate = sbmac_set_channel_state(sc, sbmac_state_off);
330
331 switch (sc->sc_ethercom.ec_if.if_baudrate) {
332 default: /* if autonegotiation fails, assume 10Mbit */
333 case IF_Mbps(10):
334 sbmac_set_speed(sc, sbmac_speed_10);
335 break;
336
337 case IF_Mbps(100):
338 sbmac_set_speed(sc, sbmac_speed_100);
339 break;
340
341 case IF_Mbps(1000):
342 sbmac_set_speed(sc, sbmac_speed_1000);
343 break;
344 }
345
346 if (sc->sc_mii.mii_media_active & IFM_FDX) {
347 /* Configure for full-duplex */
348 /* XXX: is flow control right for 10, 100? */
349 sbmac_set_duplex(sc, sbmac_duplex_full, sbmac_fc_frame);
350 } else {
351 /* Configure for half-duplex */
352 /* XXX: is flow control right? */
353 sbmac_set_duplex(sc, sbmac_duplex_half, sbmac_fc_disabled);
354 }
355
356 /* And put it back into its former state. */
357 sbmac_set_channel_state(sc, oldstate);
358 }
359
360 /*
361 * SBDMA_INITCTX(d, sc, chan, txrx, maxdescr)
362 *
363 * Initialize a DMA channel context. Since there are potentially
364 * eight DMA channels per MAC, it's nice to do this in a standard
365 * way.
366 *
367 * Input parameters:
368 * d - sbmacdma_t structure (DMA channel context)
369 * sc - sbmac_softc structure (pointer to a MAC)
370 * chan - channel number (0..1 right now)
371 * txrx - Identifies DMA_TX or DMA_RX for channel direction
372 * maxdescr - number of descriptors
373 *
374 * Return value:
375 * nothing
376 */
377
378 static void
379 sbdma_initctx(sbmacdma_t *d, struct sbmac_softc *sc, int chan, int txrx,
380 int maxdescr)
381 {
382 uintptr_t ptr;
383
384 /*
385 * Save away interesting stuff in the structure
386 */
387
388 d->sbdma_eth = sc;
389 d->sbdma_channel = chan;
390 d->sbdma_txdir = txrx;
391
392 /*
393 * initialize register pointers
394 */
395
396 d->sbdma_config0 = PKSEG1(sc->sbm_base +
397 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG0));
398 d->sbdma_config1 = PKSEG1(sc->sbm_base +
399 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG1));
400 d->sbdma_dscrbase = PKSEG1(sc->sbm_base +
401 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_BASE));
402 d->sbdma_dscrcnt = PKSEG1(sc->sbm_base +
403 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_CNT));
404 d->sbdma_curdscr = PKSEG1(sc->sbm_base +
405 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CUR_DSCRADDR));
406
407 /*
408 * Allocate memory for the ring. This must be aligned to a
409 * 32-byte cache line boundary on pass1 or pass2 silicon.
410 */
411
412 d->sbdma_maxdescr = maxdescr;
413 d->sbdma_dscr_mask = d->sbdma_maxdescr - 1;
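	/*
	 * With the default 256-entry rings this is a 4 KB (256 * 16 byte)
	 * table, plus CACHELINESIZE - 1 bytes of slack so roundup2() can
	 * find an aligned starting address within the allocation.
	 */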
414 ptr = (uintptr_t)kmem_zalloc(d->sbdma_maxdescr * sizeof(sbdmadscr_t) +
415 CACHELINESIZE - 1, KM_SLEEP);
416 d->sbdma_dscrtable = (sbdmadscr_t *)roundup2(ptr, CACHELINESIZE);
417
418 d->sbdma_dscrtable_phys = KVTOPHYS(d->sbdma_dscrtable);
419
420 /*
421 * And context table
422 */
423
424 d->sbdma_ctxtable = (struct mbuf **)
425 kmem_zalloc(d->sbdma_maxdescr * sizeof(struct mbuf *), KM_SLEEP);
426 }
427
428 /*
429 * SBDMA_CHANNEL_START(d)
430 *
431 * Initialize the hardware registers for a DMA channel.
432 *
433 * Input parameters:
 *	   d - DMA channel to init (context must be previously init'd)
435 *
436 * Return value:
437 * nothing
438 */
439
440 static void
441 sbdma_channel_start(sbmacdma_t *d)
442 {
443 /*
444 * Turn on the DMA channel
445 */
446
447 SBMAC_WRITECSR(d->sbdma_config1, 0);
448
449 SBMAC_WRITECSR(d->sbdma_dscrbase, d->sbdma_dscrtable_phys);
450
451 SBMAC_WRITECSR(d->sbdma_config0, V_DMA_RINGSZ(d->sbdma_maxdescr) | 0);
452
453 /*
454 * Initialize ring pointers
455 */
456
457 d->sbdma_add_index = 0;
458 d->sbdma_rem_index = 0;
459 }
460
461 /*
462 * SBDMA_ADD_RCVBUFFER(d, m)
463 *
464 * Add a buffer to the specified DMA channel. For receive channels,
465 * this queues a buffer for inbound packets.
466 *
467 * Input parameters:
468 * d - DMA channel descriptor
469 * m - mbuf to add, or NULL if we should allocate one.
470 *
471 * Return value:
 *	   0 if buffer added successfully
 *	   ENOSPC if the ring is full
 *	   ENOBUFS if an mbuf or mbuf cluster could not be allocated
474 */
475
476 static int
477 sbdma_add_rcvbuffer(sbmacdma_t *d, struct mbuf *m)
478 {
479 unsigned int dsc, nextdsc;
480 struct mbuf *m_new = NULL;
481
482 /* get pointer to our current place in the ring */
483
484 dsc = d->sbdma_add_index;
485 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
486
487 /*
488 * figure out if the ring is full - if the next descriptor
489 * is the same as the one that we're going to remove from
490 * the ring, the ring is full
491 */
492
493 if (nextdsc == d->sbdma_rem_index)
494 return ENOSPC;
495
496 /*
497 * Allocate an mbuf if we don't already have one.
498 * If we do have an mbuf, reset it so that it's empty.
499 */
500
501 if (m == NULL) {
502 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
503 if (m_new == NULL) {
504 aprint_error_dev(d->sbdma_eth->sc_dev,
505 "mbuf allocation failed\n");
506 return ENOBUFS;
507 }
508
509 MCLGET(m_new, M_DONTWAIT);
510 if (!(m_new->m_flags & M_EXT)) {
511 aprint_error_dev(d->sbdma_eth->sc_dev,
512 "mbuf cluster allocation failed\n");
513 m_freem(m_new);
514 return ENOBUFS;
515 }
516
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
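		/*
		 * ETHER_ALIGN (2 bytes) offsets the payload so that the IP
		 * header following the 14-byte Ethernet header ends up
		 * 4-byte aligned.
		 */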
518 m_adj(m_new, ETHER_ALIGN);
519 } else {
520 m_new = m;
521 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
522 m_new->m_data = m_new->m_ext.ext_buf;
523 m_adj(m_new, ETHER_ALIGN);
524 }
525
526 /*
527 * fill in the descriptor
528 */
529
530 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new, void *)) |
531 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(ETHER_ALIGN + m_new->m_len)) |
532 M_DMA_DSCRA_INTERRUPT;
533
534 /* receiving: no options */
535 d->sbdma_dscrtable[dsc].dscr_b = 0;
536
537 /*
538 * fill in the context
539 */
540
541 d->sbdma_ctxtable[dsc] = m_new;
542
543 /*
544 * point at next packet
545 */
546
547 d->sbdma_add_index = nextdsc;
548
549 /*
550 * Give the buffer to the DMA engine.
551 */
552
553 SBMAC_WRITECSR(d->sbdma_dscrcnt, 1);
554
555 return 0; /* we did it */
556 }
557
558 /*
559 * SBDMA_ADD_TXBUFFER(d, m)
560 *
561 * Add a transmit buffer to the specified DMA channel, causing a
562 * transmit to start.
563 *
564 * Input parameters:
565 * d - DMA channel descriptor
566 * m - mbuf to add
567 *
568 * Return value:
569 * 0 transmit queued successfully
570 * otherwise error code
571 */
572
573 static int
574 sbdma_add_txbuffer(sbmacdma_t *d, struct mbuf *m)
575 {
576 unsigned int dsc, nextdsc, prevdsc, origdesc;
577 int length;
578 int num_mbufs = 0;
579 struct sbmac_softc *sc = d->sbdma_eth;
580
581 /* get pointer to our current place in the ring */
582
583 dsc = d->sbdma_add_index;
584 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
585
586 /*
587 * figure out if the ring is full - if the next descriptor
588 * is the same as the one that we're going to remove from
589 * the ring, the ring is full
590 */
591
592 if (nextdsc == d->sbdma_rem_index) {
593 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
594 return ENOSPC;
595 }
596
597 /*
	 * Pass 3 parts do not have the buffer alignment restriction,
	 * so there is no need to copy/coalesce into a new mbuf.  They
	 * also use a different descriptor format.
601 */
602 if (sc->sbm_pass3_dma) {
603 struct mbuf *m_temp = NULL;
604
605 /*
606 * Loop thru this mbuf record.
607 * The head mbuf will have SOP set.
608 */
609 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m, void *)) |
610 M_DMA_ETHTX_SOP;
611
612 /*
		 * transmitting: set outbound options, buffer A size (+ low 5
		 * bits of start addr), and packet length.
615 */
616 d->sbdma_dscrtable[dsc].dscr_b =
617 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
618 V_DMA_DSCRB_A_SIZE((m->m_len +
619 (mtod(m, uintptr_t) & 0x0000001F))) |
620 V_DMA_DSCRB_PKT_SIZE_MSB((m->m_pkthdr.len & 0xc000) >> 14) |
621 V_DMA_DSCRB_PKT_SIZE(m->m_pkthdr.len & 0x3fff);
622
623 d->sbdma_add_index = nextdsc;
624 origdesc = prevdsc = dsc;
625 dsc = d->sbdma_add_index;
626 num_mbufs++;
627
628 /* Start with first non-head mbuf */
		for (m_temp = m->m_next; m_temp != NULL; m_temp = m_temp->m_next) {
630 int len, next_len;
631 uint64_t addr;
632
633 if (m_temp->m_len == 0)
634 continue; /* Skip 0-length mbufs */
635
636 len = m_temp->m_len;
637 addr = KVTOPHYS(mtod(m_temp, void *));
638
639 /*
640 * Check to see if the mbuf spans a page boundary. If
641 * it does, and the physical pages behind the virtual
642 * pages are not contiguous, split it so that each
643 * virtual page uses its own Tx descriptor.
644 */
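			/*
			 * Example: an mbuf whose data starts 200 bytes
			 * before a page boundary and is 1500 bytes long is
			 * handled as a 200-byte piece followed by a
			 * 1300-byte piece (next_len == 1300), unless the
			 * two physical pages happen to be contiguous.
			 */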
645 if (trunc_page(addr) != trunc_page(addr + len - 1)) {
646 next_len = (addr + len) - trunc_page(addr + len);
647
648 len -= next_len;
649
650 if (addr + len ==
651 KVTOPHYS(mtod(m_temp, char *) + len)) {
652 SBMAC_EVCNT_INCR(sc->sbm_ev_txkeep);
653 len += next_len;
654 next_len = 0;
655 } else {
656 SBMAC_EVCNT_INCR(sc->sbm_ev_txsplit);
657 }
658 } else {
659 next_len = 0;
660 }
661
662 again:
663 /*
664 * fill in the descriptor
665 */
666 d->sbdma_dscrtable[dsc].dscr_a = addr;
667
668 /*
			 * transmitting: set outbound options, buffer A
			 * size (+ low 5 bits of start addr)
671 */
			d->sbdma_dscrtable[dsc].dscr_b =
			    V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_NOTSOP) |
			    V_DMA_DSCRB_A_SIZE((len + (addr & 0x0000001F)));
674
675 d->sbdma_ctxtable[dsc] = NULL;
676
677 /*
678 * point at next descriptor
679 */
680 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
681 if (nextdsc == d->sbdma_rem_index) {
682 d->sbdma_add_index = origdesc;
683 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
684 return ENOSPC;
685 }
686 d->sbdma_add_index = nextdsc;
687
688 prevdsc = dsc;
689 dsc = d->sbdma_add_index;
690 num_mbufs++;
691
692 if (next_len != 0) {
693 addr = KVTOPHYS(mtod(m_temp, char *) + len);
694 len = next_len;
695
696 next_len = 0;
697 goto again;
698 }
699
700 }
701 /* Set head mbuf to last context index */
702 d->sbdma_ctxtable[prevdsc] = m;
703
704 /* Interrupt on last dscr of packet. */
705 d->sbdma_dscrtable[prevdsc].dscr_a |= M_DMA_DSCRA_INTERRUPT;
706 } else {
707 struct mbuf *m_new = NULL;
708 /*
709 * [BEGIN XXX]
710 * XXX Copy/coalesce the mbufs into a single mbuf cluster (we
711 * assume it will fit). This is a temporary hack to get us
712 * going.
713 */
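		/*
		 * An ETHERMTU-sized frame always fits in a single MCLBYTES
		 * cluster here, and the SIOCSIFMTU handler in sbmac_ioctl()
		 * refuses MTUs larger than ETHERMTU.
		 */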
714
715 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
716 if (m_new == NULL) {
717 aprint_error_dev(d->sbdma_eth->sc_dev,
718 "mbuf allocation failed\n");
719 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
720 return ENOBUFS;
721 }
722
723 MCLGET(m_new, M_DONTWAIT);
724 if (!(m_new->m_flags & M_EXT)) {
725 aprint_error_dev(d->sbdma_eth->sc_dev,
726 "mbuf cluster allocation failed\n");
727 m_freem(m_new);
728 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
729 return ENOBUFS;
730 }
731
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
733 /*m_adj(m_new, ETHER_ALIGN);*/
734
735 /*
736 * XXX Don't forget to include the offset portion in the
737 * XXX cache block calculation when this code is rewritten!
738 */
739
740 /*
741 * Copy data
742 */
743
744 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
745 m_new->m_len = m_new->m_pkthdr.len = m->m_pkthdr.len;
746
747 /* Free old mbuf 'm', actual mbuf is now 'm_new' */
748
749 // XXX: CALLERS WILL FREE, they might have to bpf_mtap() if this
750 // XXX: function succeeds.
751 // m_freem(m);
752 length = m_new->m_len;
753
754 /* [END XXX] */
755 /*
756 * fill in the descriptor
757 */
758
759 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new,void *)) |
760 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(m_new->m_len)) |
761 M_DMA_DSCRA_INTERRUPT |
762 M_DMA_ETHTX_SOP;
763
764 /* transmitting: set outbound options and length */
765 d->sbdma_dscrtable[dsc].dscr_b =
766 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
767 V_DMA_DSCRB_PKT_SIZE(length);
768
769 num_mbufs++;
770
771 /*
772 * fill in the context
773 */
774
775 d->sbdma_ctxtable[dsc] = m_new;
776
777 /*
778 * point at next packet
779 */
780 d->sbdma_add_index = nextdsc;
781 }
782
783 /*
784 * Give the buffer to the DMA engine.
785 */
786
787 SBMAC_WRITECSR(d->sbdma_dscrcnt, num_mbufs);
788
789 return 0; /* we did it */
790 }
791
792 /*
793 * SBDMA_EMPTYRING(d)
794 *
 * Free all allocated mbufs on the specified DMA channel.
796 *
797 * Input parameters:
798 * d - DMA channel
799 *
800 * Return value:
801 * nothing
802 */
803
804 static void
805 sbdma_emptyring(sbmacdma_t *d)
806 {
807 int idx;
808 struct mbuf *m;
809
810 for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
811 m = d->sbdma_ctxtable[idx];
812 if (m) {
813 m_freem(m);
814 d->sbdma_ctxtable[idx] = NULL;
815 }
816 }
817 }
818
819 /*
820 * SBDMA_FILLRING(d)
821 *
822 * Fill the specified DMA channel (must be receive channel)
823 * with mbufs
824 *
825 * Input parameters:
826 * d - DMA channel
827 *
828 * Return value:
829 * nothing
830 */
831
832 static void
833 sbdma_fillring(sbmacdma_t *d)
834 {
835 int idx;
836
837 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++)
838 if (sbdma_add_rcvbuffer(d, NULL) != 0)
839 break;
840 }
841
842 /*
843 * SBDMA_RX_PROCESS(sc, d)
844 *
845 * Process "completed" receive buffers on the specified DMA channel.
846 * Note that this isn't really ideal for priority channels, since
847 * it processes all of the packets on a given channel before
848 * returning.
849 *
850 * Input parameters:
851 * sc - softc structure
852 * d - DMA channel context
853 *
854 * Return value:
855 * nothing
856 */
857
858 static void
859 sbdma_rx_process(struct sbmac_softc *sc, sbmacdma_t *d)
860 {
861 int curidx;
862 int hwidx;
863 sbdmadscr_t *dscp;
864 struct mbuf *m;
865 int len;
866
867 struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
868
869 for (;;) {
870 /*
871 * figure out where we are (as an index) and where
872 * the hardware is (also as an index)
873 *
874 * This could be done faster if (for example) the
875 * descriptor table was page-aligned and contiguous in
876 * both virtual and physical memory -- you could then
877 * just compare the low-order bits of the virtual address
878 * (sbdma_rem_index) and the physical address
879 * (sbdma_curdscr CSR).
880 */
881
882 curidx = d->sbdma_rem_index;
883 hwidx = (int)
884 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
885 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
886
887 /*
888 * If they're the same, that means we've processed all
889 * of the descriptors up to (but not including) the one that
890 * the hardware is working on right now.
891 */
892
893 if (curidx == hwidx)
894 break;
895
896 /*
897 * Otherwise, get the packet's mbuf ptr back
898 */
899
900 dscp = &(d->sbdma_dscrtable[curidx]);
901 m = d->sbdma_ctxtable[curidx];
902 d->sbdma_ctxtable[curidx] = NULL;
903
904 len = (int)G_DMA_DSCRB_PKT_SIZE(dscp->dscr_b) - 4;
905
906 /*
907 * Check packet status. If good, process it.
908 * If not, silently drop it and put it back on the
909 * receive ring.
910 */
911
912 if (! (dscp->dscr_a & M_DMA_ETHRX_BAD)) {
913
914 /*
915 * Set length into the packet
916 * XXX do we remove the CRC here?
917 */
918 m->m_pkthdr.len = m->m_len = len;
919
920 m_set_rcvif(m, ifp);
921
922
923 /*
924 * Add a new buffer to replace the old one.
925 */
926 sbdma_add_rcvbuffer(d, NULL);
927
928 /*
929 * Handle BPF listeners. Let the BPF user see the
930 * packet, but don't pass it up to the ether_input()
931 * layer unless it's a broadcast packet, multicast
932 * packet, matches our ethernet address or the
933 * interface is in promiscuous mode.
934 */
935
936 /*
937 * Pass the buffer to the kernel
938 */
939 if_percpuq_enqueue(ifp->if_percpuq, m);
940 } else {
941 /*
942 * Packet was mangled somehow. Just drop it and
943 * put it back on the receive ring.
944 */
945 sbdma_add_rcvbuffer(d, m);
946 }
947
948 /*
949 * .. and advance to the next buffer.
950 */
951
952 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
953 }
954 }
955
956 /*
957 * SBDMA_TX_PROCESS(sc, d)
958 *
959 * Process "completed" transmit buffers on the specified DMA channel.
960 * This is normally called within the interrupt service routine.
961 * Note that this isn't really ideal for priority channels, since
962 * it processes all of the packets on a given channel before
963 * returning.
964 *
965 * Input parameters:
966 * sc - softc structure
967 * d - DMA channel context
968 *
969 * Return value:
970 * nothing
971 */
972
973 static void
974 sbdma_tx_process(struct sbmac_softc *sc, sbmacdma_t *d)
975 {
976 int curidx;
977 int hwidx;
978 struct mbuf *m;
979
980 struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
981
982 for (;;) {
983 /*
984 * figure out where we are (as an index) and where
985 * the hardware is (also as an index)
986 *
987 * This could be done faster if (for example) the
988 * descriptor table was page-aligned and contiguous in
989 * both virtual and physical memory -- you could then
990 * just compare the low-order bits of the virtual address
991 * (sbdma_rem_index) and the physical address
992 * (sbdma_curdscr CSR).
993 */
994
995 curidx = d->sbdma_rem_index;
996 hwidx = (int)
997 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
998 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
999
1000 /*
1001 * If they're the same, that means we've processed all
1002 * of the descriptors up to (but not including) the one that
1003 * the hardware is working on right now.
1004 */
1005
1006 if (curidx == hwidx)
1007 break;
1008
1009 /*
1010 * Otherwise, get the packet's mbuf ptr back
1011 */
1012
1013 m = d->sbdma_ctxtable[curidx];
1014 d->sbdma_ctxtable[curidx] = NULL;
1015
1016 /*
1017 * for transmits we just free buffers and count packets.
1018 */
1019 ifp->if_opackets++;
1020 m_freem(m);
1021
1022 /*
1023 * .. and advance to the next buffer.
1024 */
1025
1026 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
1027 }
1028
1029 /*
1030 * Decide what to set the IFF_OACTIVE bit in the interface to.
1031 * It's supposed to reflect if the interface is actively
1032 * transmitting, but that's really hard to do quickly.
1033 */
1034
1035 ifp->if_flags &= ~IFF_OACTIVE;
1036 }
1037
1038 /*
1039 * SBMAC_INITCTX(s)
1040 *
1041 * Initialize an Ethernet context structure - this is called
1042 * once per MAC on the 1250. Memory is allocated here, so don't
1043 * call it again from inside the ioctl routines that bring the
 * interface up/down.
1045 *
1046 * Input parameters:
1047 * sc - sbmac context structure
1048 *
1049 * Return value:
 *	   nothing
1051 */
1052
1053 static void
1054 sbmac_initctx(struct sbmac_softc *sc)
1055 {
1056 uint64_t sysrev;
1057
1058 /*
1059 * figure out the addresses of some ports
1060 */
1061
1062 sc->sbm_macenable = PKSEG1(sc->sbm_base + R_MAC_ENABLE);
1063 sc->sbm_maccfg = PKSEG1(sc->sbm_base + R_MAC_CFG);
1064 sc->sbm_fifocfg = PKSEG1(sc->sbm_base + R_MAC_THRSH_CFG);
1065 sc->sbm_framecfg = PKSEG1(sc->sbm_base + R_MAC_FRAMECFG);
1066 sc->sbm_rxfilter = PKSEG1(sc->sbm_base + R_MAC_ADFILTER_CFG);
1067 sc->sbm_isr = PKSEG1(sc->sbm_base + R_MAC_STATUS);
1068 sc->sbm_imr = PKSEG1(sc->sbm_base + R_MAC_INT_MASK);
1069
1070 /*
1071 * Initialize the DMA channels. Right now, only one per MAC is used
1072 * Note: Only do this _once_, as it allocates memory from the kernel!
1073 */
1074
1075 sbdma_initctx(&(sc->sbm_txdma), sc, 0, DMA_TX, SBMAC_MAX_TXDESCR);
1076 sbdma_initctx(&(sc->sbm_rxdma), sc, 0, DMA_RX, SBMAC_MAX_RXDESCR);
1077
1078 /*
1079 * initial state is OFF
1080 */
1081
1082 sc->sbm_state = sbmac_state_off;
1083
1084 /*
1085 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
1086 */
1087
1088 sc->sbm_speed = sbmac_speed_10;
1089 sc->sbm_duplex = sbmac_duplex_half;
1090 sc->sbm_fc = sbmac_fc_disabled;
1091
1092 /*
1093 * Determine SOC type. 112x has Pass3 SOC features.
1094 */
	sysrev = SBMAC_READCSR(PKSEG1(A_SCD_SYSTEM_REVISION));
1096 sc->sbm_pass3_dma = (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1120 ||
1097 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125 ||
1098 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125H ||
1099 (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1250 &&
1100 G_SYS_REVISION(sysrev) >= K_SYS_REVISION_BCM1250_PASS3));
1101 #ifdef SBMAC_EVENT_COUNTERS
1102 const char * const xname = device_xname(sc->sc_dev);
1103 evcnt_attach_dynamic(&sc->sbm_ev_rxintr, EVCNT_TYPE_INTR,
1104 NULL, xname, "rxintr");
1105 evcnt_attach_dynamic(&sc->sbm_ev_txintr, EVCNT_TYPE_INTR,
1106 NULL, xname, "txintr");
1107 evcnt_attach_dynamic(&sc->sbm_ev_txdrop, EVCNT_TYPE_MISC,
1108 NULL, xname, "txdrop");
1109 evcnt_attach_dynamic(&sc->sbm_ev_txstall, EVCNT_TYPE_MISC,
1110 NULL, xname, "txstall");
1111 if (sc->sbm_pass3_dma) {
1112 evcnt_attach_dynamic(&sc->sbm_ev_txsplit, EVCNT_TYPE_MISC,
1113 NULL, xname, "pass3tx-split");
1114 evcnt_attach_dynamic(&sc->sbm_ev_txkeep, EVCNT_TYPE_MISC,
1115 NULL, xname, "pass3tx-keep");
1116 }
1117 #endif
1118 }
1119
1120 /*
1121 * SBMAC_CHANNEL_START(s)
1122 *
1123 * Start packet processing on this MAC.
1124 *
1125 * Input parameters:
1126 * sc - sbmac structure
1127 *
1128 * Return value:
1129 * nothing
1130 */
1131
1132 static void
1133 sbmac_channel_start(struct sbmac_softc *sc)
1134 {
1135 uint64_t reg;
1136 sbmac_port_t port;
1137 uint64_t cfg, fifo, framecfg;
1138 int idx;
1139 uint64_t dma_cfg0, fifo_cfg;
1140 sbmacdma_t *txdma;
1141
1142 /*
1143 * Don't do this if running
1144 */
1145
1146 if (sc->sbm_state == sbmac_state_on)
1147 return;
1148
1149 /*
1150 * Bring the controller out of reset, but leave it off.
1151 */
1152
1153 SBMAC_WRITECSR(sc->sbm_macenable, 0);
1154
1155 /*
1156 * Ignore all received packets
1157 */
1158
1159 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1160
1161 /*
1162 * Calculate values for various control registers.
1163 */
1164
1165 cfg = M_MAC_RETRY_EN |
1166 M_MAC_TX_HOLD_SOP_EN |
1167 V_MAC_TX_PAUSE_CNT_16K |
1168 M_MAC_AP_STAT_EN |
1169 M_MAC_SS_EN |
1170 0;
1171
1172 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
1173 V_MAC_TX_RD_THRSH(4) |
1174 V_MAC_TX_RL_THRSH(4) |
1175 V_MAC_RX_PL_THRSH(4) |
1176 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
1177 V_MAC_RX_PL_THRSH(4) |
1178 V_MAC_RX_RL_THRSH(8) |
1179 0;
1180
1181 framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
1182 V_MAC_MAX_FRAMESZ_DEFAULT |
1183 V_MAC_BACKOFF_SEL(1);
1184
1185 /*
1186 * Clear out the hash address map
1187 */
1188
1189 port = PKSEG1(sc->sbm_base + R_MAC_HASH_BASE);
1190 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1191 SBMAC_WRITECSR(port, 0);
1192 port += sizeof(uint64_t);
1193 }
1194
1195 /*
1196 * Clear out the exact-match table
1197 */
1198
1199 port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE);
1200 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
1201 SBMAC_WRITECSR(port, 0);
1202 port += sizeof(uint64_t);
1203 }
1204
1205 /*
1206 * Clear out the DMA Channel mapping table registers
1207 */
1208
1209 port = PKSEG1(sc->sbm_base + R_MAC_CHUP0_BASE);
1210 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1211 SBMAC_WRITECSR(port, 0);
1212 port += sizeof(uint64_t);
1213 }
1214
1215 port = PKSEG1(sc->sbm_base + R_MAC_CHLO0_BASE);
1216 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1217 SBMAC_WRITECSR(port, 0);
1218 port += sizeof(uint64_t);
1219 }
1220
1221 /*
1222 * Program the hardware address. It goes into the hardware-address
1223 * register as well as the first filter register.
1224 */
1225
1226 reg = sbmac_addr2reg(sc->sbm_hwaddr);
1227
1228 port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE);
1229 SBMAC_WRITECSR(port, reg);
1230 port = PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR);
	SBMAC_WRITECSR(port, 0);	/* pass1 workaround */
1232
1233 /*
1234 * Set the receive filter for no packets, and write values
1235 * to the various config registers
1236 */
1237
1238 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1239 SBMAC_WRITECSR(sc->sbm_imr, 0);
1240 SBMAC_WRITECSR(sc->sbm_framecfg, framecfg);
1241 SBMAC_WRITECSR(sc->sbm_fifocfg, fifo);
1242 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1243
1244 /*
1245 * Initialize DMA channels (rings should be ok now)
1246 */
1247
1248 sbdma_channel_start(&(sc->sbm_rxdma));
1249 sbdma_channel_start(&(sc->sbm_txdma));
1250
1251 /*
1252 * Configure the speed, duplex, and flow control
1253 */
1254
1255 sbmac_set_speed(sc, sc->sbm_speed);
1256 sbmac_set_duplex(sc, sc->sbm_duplex, sc->sbm_fc);
1257
1258 /*
1259 * Fill the receive ring
1260 */
1261
1262 sbdma_fillring(&(sc->sbm_rxdma));
1263
1264 /*
1265 * Turn on the rest of the bits in the enable register
1266 */
1267
1268 SBMAC_WRITECSR(sc->sbm_macenable, M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0 |
1269 M_MAC_RX_ENABLE | M_MAC_TX_ENABLE);
1270
1271
1272 /*
1273 * Accept any kind of interrupt on TX and RX DMA channel 0
1274 */
1275 SBMAC_WRITECSR(sc->sbm_imr,
1276 (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1277 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0));
1278
1279 /*
1280 * Enable receiving unicasts and broadcasts
1281 */
1282
1283 SBMAC_WRITECSR(sc->sbm_rxfilter, M_MAC_UCAST_EN | M_MAC_BCAST_EN);
1284
1285 /*
1286 * On chips which support unaligned DMA features, set the descriptor
1287 * ring for transmit channels to use the unaligned buffer format.
1288 */
1289 txdma = &(sc->sbm_txdma);
1290
1291 if (sc->sbm_pass3_dma) {
1292 dma_cfg0 = SBMAC_READCSR(txdma->sbdma_config0);
1293 dma_cfg0 |= V_DMA_DESC_TYPE(K_DMA_DESC_TYPE_RING_UAL_RMW) |
1294 M_DMA_TBX_EN | M_DMA_TDX_EN;
1295 SBMAC_WRITECSR(txdma->sbdma_config0, dma_cfg0);
1296
1297 fifo_cfg = SBMAC_READCSR(sc->sbm_fifocfg);
1298 fifo_cfg |= V_MAC_TX_WR_THRSH(8) |
1299 V_MAC_TX_RD_THRSH(8) | V_MAC_TX_RL_THRSH(8);
1300 SBMAC_WRITECSR(sc->sbm_fifocfg, fifo_cfg);
1301 }
1302
1303 /*
1304 * we're running now.
1305 */
1306
1307 sc->sbm_state = sbmac_state_on;
1308 sc->sc_ethercom.ec_if.if_flags |= IFF_RUNNING;
1309
1310 /*
1311 * Program multicast addresses
1312 */
1313
1314 sbmac_setmulti(sc);
1315
1316 /*
1317 * If channel was in promiscuous mode before, turn that on
1318 */
1319
1320 if (sc->sc_ethercom.ec_if.if_flags & IFF_PROMISC)
1321 sbmac_promiscuous_mode(sc, true);
1322
1323 /*
1324 * Turn on the once-per-second timer
1325 */
1326
1327 callout_reset(&(sc->sc_tick_ch), hz, sbmac_tick, sc);
1328 }
1329
1330 /*
1331 * SBMAC_CHANNEL_STOP(s)
1332 *
1333 * Stop packet processing on this MAC.
1334 *
1335 * Input parameters:
1336 * sc - sbmac structure
1337 *
1338 * Return value:
1339 * nothing
1340 */
1341
1342 static void
1343 sbmac_channel_stop(struct sbmac_softc *sc)
1344 {
1345 uint64_t ctl;
1346
1347 /* don't do this if already stopped */
1348
1349 if (sc->sbm_state == sbmac_state_off)
1350 return;
1351
1352 /* don't accept any packets, disable all interrupts */
1353
1354 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1355 SBMAC_WRITECSR(sc->sbm_imr, 0);
1356
1357 /* Turn off ticker */
1358
1359 callout_stop(&(sc->sc_tick_ch));
1360
1361 /* turn off receiver and transmitter */
1362
1363 ctl = SBMAC_READCSR(sc->sbm_macenable);
1364 ctl &= ~(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0);
1365 SBMAC_WRITECSR(sc->sbm_macenable, ctl);
1366
1367 /* We're stopped now. */
1368
1369 sc->sbm_state = sbmac_state_off;
1370 sc->sc_ethercom.ec_if.if_flags &= ~IFF_RUNNING;
1371
1372 /* Empty the receive and transmit rings */
1373
1374 sbdma_emptyring(&(sc->sbm_rxdma));
1375 sbdma_emptyring(&(sc->sbm_txdma));
1376 }
1377
1378 /*
1379 * SBMAC_SET_CHANNEL_STATE(state)
1380 *
1381 * Set the channel's state ON or OFF
1382 *
1383 * Input parameters:
1384 * state - new state
1385 *
1386 * Return value:
1387 * old state
1388 */
1389
1390 static sbmac_state_t
1391 sbmac_set_channel_state(struct sbmac_softc *sc, sbmac_state_t state)
1392 {
1393 sbmac_state_t oldstate = sc->sbm_state;
1394
1395 /*
1396 * If same as previous state, return
1397 */
1398
1399 if (state == oldstate)
1400 return oldstate;
1401
1402 /*
1403 * If new state is ON, turn channel on
1404 */
1405
1406 if (state == sbmac_state_on)
1407 sbmac_channel_start(sc);
1408 else
1409 sbmac_channel_stop(sc);
1410
1411 /*
1412 * Return previous state
1413 */
1414
1415 return oldstate;
1416 }
1417
1418 /*
1419 * SBMAC_PROMISCUOUS_MODE(sc, enabled)
1420 *
1421 * Turn on or off promiscuous mode
1422 *
1423 * Input parameters:
1424 * sc - softc
1425 * enabled - true to turn on, false to turn off
1426 *
1427 * Return value:
1428 * nothing
1429 */
1430
1431 static void
1432 sbmac_promiscuous_mode(struct sbmac_softc *sc, bool enabled)
1433 {
1434 uint64_t reg;
1435
1436 if (sc->sbm_state != sbmac_state_on)
1437 return;
1438
1439 if (enabled) {
1440 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1441 reg |= M_MAC_ALLPKT_EN;
1442 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1443 } else {
1444 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1445 reg &= ~M_MAC_ALLPKT_EN;
1446 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1447 }
1448 }
1449
1450 /*
1451 * SBMAC_INIT_AND_START(sc)
1452 *
1453 * Stop the channel and restart it. This is generally used
1454 * when we have to do something to the channel that requires
1455 * a swift kick.
1456 *
1457 * Input parameters:
1458 * sc - softc
1459 */
1460
1461 static void
1462 sbmac_init_and_start(struct sbmac_softc *sc)
1463 {
1464 int s;
1465
1466 s = splnet();
1467
1468 mii_pollstat(&sc->sc_mii); /* poll phy for current speed */
1469 sbmac_mii_statchg(&sc->sc_ethercom.ec_if); /* set state to new speed */
1470 sbmac_set_channel_state(sc, sbmac_state_on);
1471
1472 splx(s);
1473 }
1474
1475 /*
1476 * SBMAC_ADDR2REG(ptr)
1477 *
1478 * Convert six bytes into the 64-bit register value that
1479 * we typically write into the SBMAC's address/mcast registers
1480 *
1481 * Input parameters:
1482 * ptr - pointer to 6 bytes
1483 *
1484 * Return value:
1485 * register value
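 *
 *	   The first octet of the address ends up in the least-significant
 *	   byte of the result, e.g. 02:00:00:11:22:33 yields
 *	   0x0000332211000002.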
1486 */
1487
1488 static uint64_t
1489 sbmac_addr2reg(u_char *ptr)
1490 {
1491 uint64_t reg = 0;
1492
1493 ptr += 6;
1494
1495 reg |= (uint64_t) *(--ptr);
1496 reg <<= 8;
1497 reg |= (uint64_t) *(--ptr);
1498 reg <<= 8;
1499 reg |= (uint64_t) *(--ptr);
1500 reg <<= 8;
1501 reg |= (uint64_t) *(--ptr);
1502 reg <<= 8;
1503 reg |= (uint64_t) *(--ptr);
1504 reg <<= 8;
1505 reg |= (uint64_t) *(--ptr);
1506
1507 return reg;
1508 }
1509
1510 /*
1511 * SBMAC_SET_SPEED(sc, speed)
1512 *
1513 * Configure LAN speed for the specified MAC.
1514 * Warning: must be called when MAC is off!
1515 *
1516 * Input parameters:
1517 * sc - sbmac structure
1518 * speed - speed to set MAC to (see sbmac_speed_t enum)
1519 *
1520 * Return value:
1521 * true if successful
1522 * false indicates invalid parameters
1523 */
1524
1525 static bool
1526 sbmac_set_speed(struct sbmac_softc *sc, sbmac_speed_t speed)
1527 {
1528 uint64_t cfg;
1529 uint64_t framecfg;
1530
1531 /*
1532 * Save new current values
1533 */
1534
1535 sc->sbm_speed = speed;
1536
1537 if (sc->sbm_state != sbmac_state_off)
1538 panic("sbmac_set_speed while MAC not off");
1539
1540 /*
1541 * Read current register values
1542 */
1543
1544 cfg = SBMAC_READCSR(sc->sbm_maccfg);
1545 framecfg = SBMAC_READCSR(sc->sbm_framecfg);
1546
1547 /*
1548 * Mask out the stuff we want to change
1549 */
1550
1551 cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
1552 framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
1553 M_MAC_SLOT_SIZE);
1554
1555 /*
1556 * Now add in the new bits
1557 */
1558
1559 switch (speed) {
1560 case sbmac_speed_10:
1561 framecfg |= V_MAC_IFG_RX_10 |
1562 V_MAC_IFG_TX_10 |
1563 K_MAC_IFG_THRSH_10 |
1564 V_MAC_SLOT_SIZE_10;
1565 cfg |= V_MAC_SPEED_SEL_10MBPS;
1566 break;
1567
1568 case sbmac_speed_100:
1569 framecfg |= V_MAC_IFG_RX_100 |
1570 V_MAC_IFG_TX_100 |
1571 V_MAC_IFG_THRSH_100 |
1572 V_MAC_SLOT_SIZE_100;
1573 cfg |= V_MAC_SPEED_SEL_100MBPS ;
1574 break;
1575
1576 case sbmac_speed_1000:
1577 framecfg |= V_MAC_IFG_RX_1000 |
1578 V_MAC_IFG_TX_1000 |
1579 V_MAC_IFG_THRSH_1000 |
1580 V_MAC_SLOT_SIZE_1000;
1581 cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
1582 break;
1583
1584 case sbmac_speed_auto: /* XXX not implemented */
1585 /* fall through */
1586 default:
1587 return false;
1588 }
1589
1590 /*
1591 * Send the bits back to the hardware
1592 */
1593
1594 SBMAC_WRITECSR(sc->sbm_framecfg, framecfg);
1595 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1596
1597 return true;
1598 }
1599
1600 /*
1601 * SBMAC_SET_DUPLEX(sc, duplex, fc)
1602 *
1603 * Set Ethernet duplex and flow control options for this MAC
1604 * Warning: must be called when MAC is off!
1605 *
1606 * Input parameters:
1607 * sc - sbmac structure
1608 * duplex - duplex setting (see sbmac_duplex_t)
1609 * fc - flow control setting (see sbmac_fc_t)
1610 *
1611 * Return value:
1612 * true if ok
1613 * false if an invalid parameter combination was specified
1614 */
1615
1616 static bool
1617 sbmac_set_duplex(struct sbmac_softc *sc, sbmac_duplex_t duplex, sbmac_fc_t fc)
1618 {
1619 uint64_t cfg;
1620
1621 /*
1622 * Save new current values
1623 */
1624
1625 sc->sbm_duplex = duplex;
1626 sc->sbm_fc = fc;
1627
1628 if (sc->sbm_state != sbmac_state_off)
1629 panic("sbmac_set_duplex while MAC not off");
1630
1631 /*
1632 * Read current register values
1633 */
1634
1635 cfg = SBMAC_READCSR(sc->sbm_maccfg);
1636
1637 /*
1638 * Mask off the stuff we're about to change
1639 */
1640
1641 cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
1642
1643 switch (duplex) {
1644 case sbmac_duplex_half:
1645 switch (fc) {
1646 case sbmac_fc_disabled:
1647 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
1648 break;
1649
1650 case sbmac_fc_collision:
1651 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
1652 break;
1653
1654 case sbmac_fc_carrier:
1655 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
1656 break;
1657
1658 case sbmac_fc_auto: /* XXX not implemented */
1659 /* fall through */
1660 case sbmac_fc_frame: /* not valid in half duplex */
1661 default: /* invalid selection */
1662 panic("%s: invalid half duplex fc selection %d",
1663 device_xname(sc->sc_dev), fc);
1664 return false;
1665 }
1666 break;
1667
1668 case sbmac_duplex_full:
1669 switch (fc) {
1670 case sbmac_fc_disabled:
1671 cfg |= V_MAC_FC_CMD_DISABLED;
1672 break;
1673
1674 case sbmac_fc_frame:
1675 cfg |= V_MAC_FC_CMD_ENABLED;
1676 break;
1677
1678 case sbmac_fc_collision: /* not valid in full duplex */
1679 case sbmac_fc_carrier: /* not valid in full duplex */
1680 case sbmac_fc_auto: /* XXX not implemented */
1681 /* fall through */
1682 default:
1683 panic("%s: invalid full duplex fc selection %d",
1684 device_xname(sc->sc_dev), fc);
1685 return false;
1686 }
1687 break;
1688
1689 default:
1690 /* fall through */
1691 case sbmac_duplex_auto:
1692 panic("%s: bad duplex %d", device_xname(sc->sc_dev), duplex);
1693 /* XXX not implemented */
1694 break;
1695 }
1696
1697 /*
1698 * Send the bits back to the hardware
1699 */
1700
1701 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1702
1703 return true;
1704 }
1705
1706 /*
1707 * SBMAC_INTR()
1708 *
1709 * Interrupt handler for MAC interrupts
1710 *
1711 * Input parameters:
1712 * MAC structure
1713 *
1714 * Return value:
1715 * nothing
1716 */
1717
1718 /* ARGSUSED */
1719 static void
1720 sbmac_intr(void *xsc, uint32_t status, vaddr_t pc)
1721 {
1722 struct sbmac_softc *sc = xsc;
1723 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1724 uint64_t isr;
1725
1726 for (;;) {
1727
1728 /*
1729 * Read the ISR (this clears the bits in the real register)
1730 */
1731
1732 isr = SBMAC_READCSR(sc->sbm_isr);
1733
1734 if (isr == 0)
1735 break;
1736
1737 /*
1738 * Transmits on channel 0
1739 */
1740
1741 if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
1742 sbdma_tx_process(sc, &(sc->sbm_txdma));
1743 SBMAC_EVCNT_INCR(sc->sbm_ev_txintr);
1744 }
1745
1746 /*
1747 * Receives on channel 0
1748 */
1749
1750 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
1751 sbdma_rx_process(sc, &(sc->sbm_rxdma));
1752 SBMAC_EVCNT_INCR(sc->sbm_ev_rxintr);
1753 }
1754 }
1755
1756 /* try to get more packets going */
1757 if_schedule_deferred_start(ifp);
1758 }
1759
1760
1761 /*
1762 * SBMAC_START(ifp)
1763 *
1764 * Start output on the specified interface. Basically, we
1765 * queue as many buffers as we can until the ring fills up, or
1766 * we run off the end of the queue, whichever comes first.
1767 *
1768 * Input parameters:
1769 * ifp - interface
1770 *
1771 * Return value:
1772 * nothing
1773 */
1774
1775 static void
1776 sbmac_start(struct ifnet *ifp)
1777 {
1778 struct sbmac_softc *sc;
1779 struct mbuf *m_head = NULL;
1780 int rv;
1781
1782 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1783 return;
1784
1785 sc = ifp->if_softc;
1786
1787 for (;;) {
1788
1789 IF_DEQUEUE(&ifp->if_snd, m_head);
1790 if (m_head == NULL)
1791 break;
1792
1793 /*
1794 * Put the buffer on the transmit ring. If we
1795 * don't have room, set the OACTIVE flag and wait
1796 * for the NIC to drain the ring.
1797 */
1798
1799 rv = sbdma_add_txbuffer(&(sc->sbm_txdma), m_head);
1800
1801 if (rv == 0) {
1802 /*
1803 * If there's a BPF listener, bounce a copy of this
1804 * frame to it.
1805 */
1806 bpf_mtap(ifp, m_head, BPF_D_OUT);
1807 if (!sc->sbm_pass3_dma) {
1808 /*
1809 * Don't free mbuf if we're not copying to new
1810 * mbuf in sbdma_add_txbuffer. It will be
1811 * freed in sbdma_tx_process.
1812 */
1813 m_freem(m_head);
1814 }
1815 } else {
1816 IF_PREPEND(&ifp->if_snd, m_head);
1817 ifp->if_flags |= IFF_OACTIVE;
1818 break;
1819 }
1820 }
1821 }
1822
1823 /*
1824 * SBMAC_SETMULTI(sc)
1825 *
1826 * Reprogram the multicast table into the hardware, given
1827 * the list of multicasts associated with the interface
1828 * structure.
1829 *
1830 * Input parameters:
1831 * sc - softc
1832 *
1833 * Return value:
1834 * nothing
1835 */
1836
1837 static void
1838 sbmac_setmulti(struct sbmac_softc *sc)
1839 {
1840 struct ethercom *ec = &sc->sc_ethercom;
1841 struct ifnet *ifp = &ec->ec_if;
1842 uint64_t reg;
1843 sbmac_port_t port;
1844 int idx;
1845 struct ether_multi *enm;
1846 struct ether_multistep step;
1847
1848 /*
1849 * Clear out entire multicast table. We do this by nuking
1850 * the entire hash table and all the direct matches except
1851 * the first one, which is used for our station address
1852 */
1853
1854 for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
1855 port = PKSEG1(sc->sbm_base +
1856 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1857 SBMAC_WRITECSR(port, 0);
1858 }
1859
1860 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1861 port = PKSEG1(sc->sbm_base +
1862 R_MAC_HASH_BASE+(idx*sizeof(uint64_t)));
1863 SBMAC_WRITECSR(port, 0);
1864 }
1865
1866 /*
1867 * Clear the filter to say we don't want any multicasts.
1868 */
1869
1870 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1871 reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1872 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1873
1874 if (ifp->if_flags & IFF_ALLMULTI) {
1875 /*
1876 * Enable ALL multicasts. Do this by inverting the
1877 * multicast enable bit.
1878 */
1879 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1880 reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1881 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1882 return;
1883 }
1884
1885 /*
	 * Program new multicast entries.  For now, only use the
1887 * perfect filter. In the future we'll need to use the
1888 * hash filter if the perfect filter overflows
1889 */
1890
1891 /*
1892 * XXX only using perfect filter for now, need to use hash
1893 * XXX if the table overflows
1894 */
1895
1896 idx = 1; /* skip station address */
1897 ETHER_LOCK(ec);
1898 ETHER_FIRST_MULTI(step, ec, enm);
1899 while ((enm != NULL) && (idx < MAC_ADDR_COUNT)) {
1900 reg = sbmac_addr2reg(enm->enm_addrlo);
1901 port = PKSEG1(sc->sbm_base +
1902 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1903 SBMAC_WRITECSR(port, reg);
1904 idx++;
1905 ETHER_NEXT_MULTI(step, enm);
1906 }
1907 ETHER_UNLOCK(ec);
1908
1909 /*
1910 * Enable the "accept multicast bits" if we programmed at least one
1911 * multicast.
1912 */
1913
1914 if (idx > 1) {
1915 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1916 reg |= M_MAC_MCAST_EN;
1917 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1918 }
1919 }
1920
1921 /*
1922 * SBMAC_ETHER_IOCTL(ifp, cmd, data)
1923 *
1924 * Generic IOCTL requests for this interface. The basic
1925 * stuff is handled here for bringing the interface up,
1926 * handling multicasts, etc.
1927 *
1928 * Input parameters:
1929 * ifp - interface structure
1930 * cmd - command code
1931 * data - pointer to data
1932 *
1933 * Return value:
1934 * return value (0 is success)
1935 */
1936
1937 static int
1938 sbmac_ether_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1939 {
1940 struct ifaddr *ifa = (struct ifaddr *) data;
1941 struct sbmac_softc *sc = ifp->if_softc;
1942
1943 switch (cmd) {
1944 case SIOCINITIFADDR:
1945 ifp->if_flags |= IFF_UP;
1946
1947 switch (ifa->ifa_addr->sa_family) {
1948 #ifdef INET
1949 case AF_INET:
1950 sbmac_init_and_start(sc);
1951 arp_ifinit(ifp, ifa);
1952 break;
1953 #endif
1954 default:
1955 sbmac_init_and_start(sc);
1956 break;
1957 }
1958 break;
1959
1960 default:
1961 return ENOTTY;
1962 }
1963
1964 return 0;
1965 }
1966
1967 /*
1968 * SBMAC_IOCTL(ifp, cmd, data)
1969 *
1970 * Main IOCTL handler - dispatches to other IOCTLs for various
1971 * types of requests.
1972 *
1973 * Input parameters:
1974 * ifp - interface pointer
1975 * cmd - command code
1976 * data - pointer to argument data
1977 *
1978 * Return value:
1979 * 0 if ok
1980 * else error code
1981 */
1982
1983 static int
1984 sbmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1985 {
1986 struct sbmac_softc *sc = ifp->if_softc;
1987 struct ifreq *ifr = (struct ifreq *) data;
1988 int s, error = 0;
1989
1990 s = splnet();
1991
1992 switch (cmd) {
1993 case SIOCINITIFADDR:
1994 error = sbmac_ether_ioctl(ifp, cmd, data);
1995 break;
1996 case SIOCSIFMTU:
1997 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
1998 error = EINVAL;
1999 else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
2000 /* XXX Program new MTU here */
2001 error = 0;
2002 break;
2003 case SIOCSIFFLAGS:
2004 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2005 break;
2006 if (ifp->if_flags & IFF_UP) {
2007 /*
2008 * If only the state of the PROMISC flag changed,
2009 * just tweak the hardware registers.
2010 */
2011 if ((ifp->if_flags & IFF_RUNNING) &&
2012 (ifp->if_flags & IFF_PROMISC)) {
2013 /* turn on promiscuous mode */
2014 sbmac_promiscuous_mode(sc, true);
2015 } else if (ifp->if_flags & IFF_RUNNING &&
2016 !(ifp->if_flags & IFF_PROMISC)) {
2017 /* turn off promiscuous mode */
2018 sbmac_promiscuous_mode(sc, false);
2019 } else
2020 sbmac_set_channel_state(sc, sbmac_state_on);
2021 } else {
2022 if (ifp->if_flags & IFF_RUNNING)
2023 sbmac_set_channel_state(sc, sbmac_state_off);
2024 }
2025
2026 sc->sbm_if_flags = ifp->if_flags;
2027 error = 0;
2028 break;
2029
2030 default:
2031 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
2032 error = 0;
2033 if (ifp->if_flags & IFF_RUNNING)
2034 sbmac_setmulti(sc);
2035 }
2036 break;
2037 }
2038
2039 (void)splx(s);
2040
2041 return error;
2042 }
2043
2044 /*
2045 * SBMAC_IFMEDIA_UPD(ifp)
2046 *
2047 * Configure an appropriate media type for this interface,
2048 * given the data in the interface structure
2049 *
2050 * Input parameters:
2051 * ifp - interface
2052 *
2053 * Return value:
2054 * 0 if ok
2055 * else error code
2056 */
2057
2058 /*
2059 * SBMAC_IFMEDIA_STS(ifp, ifmr)
2060 *
2061 * Report current media status (used by ifconfig, for example)
2062 *
2063 * Input parameters:
2064 * ifp - interface structure
2065 * ifmr - media request structure
2066 *
2067 * Return value:
2068 * nothing
2069 */
2070
2071 /*
2072 * SBMAC_WATCHDOG(ifp)
2073 *
2074 * Called periodically to make sure we're still happy.
2075 *
2076 * Input parameters:
2077 * ifp - interface structure
2078 *
2079 * Return value:
2080 * nothing
2081 */
2082
2083 static void
2084 sbmac_watchdog(struct ifnet *ifp)
2085 {
2086
2087 /* XXX do something */
2088 }
2089
2090 /*
2091 * One second timer, used to tick MII.
2092 */
2093 static void
2094 sbmac_tick(void *arg)
2095 {
2096 struct sbmac_softc *sc = arg;
2097 int s;
2098
2099 s = splnet();
2100 mii_tick(&sc->sc_mii);
2101 splx(s);
2102
2103 callout_reset(&sc->sc_tick_ch, hz, sbmac_tick, sc);
2104 }
2105
2106
2107 /*
2108 * SBMAC_MATCH(parent, match, aux)
2109 *
2110 * Part of the config process - see if this device matches the
2111 * info about what we expect to find on the bus.
2112 *
2113 * Input parameters:
2114 * parent - parent bus structure
 *	   match - autoconfiguration match (cfdata) structure
2116 * aux - bus-specific args
2117 *
2118 * Return value:
2119 * 1 if we match
2120 * 0 if we don't match
2121 */
2122
2123 static int
2124 sbmac_match(device_t parent, cfdata_t match, void *aux)
2125 {
2126 struct sbobio_attach_args *sa = aux;
2127
2128 /*
2129 * Make sure it's a MAC
2130 */
2131 if (sa->sa_locs.sa_type != SBOBIO_DEVTYPE_MAC)
2132 return 0;
2133
2134 /*
2135 * Yup, it is.
2136 */
2137
2138 return 1;
2139 }
2140
2141 /*
2142 * SBMAC_PARSE_XDIGIT(str)
2143 *
2144 * Parse a hex digit, returning its value
2145 *
2146 * Input parameters:
2147 * str - character
2148 *
2149 * Return value:
2150 * hex value, or -1 if invalid
2151 */
2152
2153 static int
2154 sbmac_parse_xdigit(char str)
2155 {
2156 int digit;
2157
2158 if ((str >= '0') && (str <= '9'))
2159 digit = str - '0';
2160 else if ((str >= 'a') && (str <= 'f'))
2161 digit = str - 'a' + 10;
2162 else if ((str >= 'A') && (str <= 'F'))
2163 digit = str - 'A' + 10;
2164 else
2165 digit = -1;
2166
2167 return digit;
2168 }
2169
2170 /*
2171 * SBMAC_PARSE_HWADDR(str, hwaddr)
2172 *
2173 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2174 * Ethernet address.
2175 *
2176 * Input parameters:
2177 * str - string
2178 * hwaddr - pointer to hardware address
2179 *
2180 * Return value:
2181 * 0 if ok, else -1
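 *
 *	   Either ':' or '-' may separate the bytes, and single-digit
 *	   groups (e.g. "0:1:2:3:4:5") are accepted.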
2182 */
2183
2184 static int
2185 sbmac_parse_hwaddr(const char *str, u_char *hwaddr)
2186 {
2187 int digit1, digit2;
2188 int idx = 6;
2189
2190 while (*str && (idx > 0)) {
2191 digit1 = sbmac_parse_xdigit(*str);
2192 if (digit1 < 0)
2193 return -1;
2194 str++;
2195 if (!*str)
2196 return -1;
2197
2198 if ((*str == ':') || (*str == '-')) {
2199 digit2 = digit1;
2200 digit1 = 0;
2201 } else {
2202 digit2 = sbmac_parse_xdigit(*str);
2203 if (digit2 < 0)
2204 return -1;
2205 str++;
2206 }
2207
2208 *hwaddr++ = (digit1 << 4) | digit2;
2209 idx--;
2210
2211 if (*str == '-')
2212 str++;
2213 if (*str == ':')
2214 str++;
2215 }
2216 return 0;
2217 }
2218
2219 /*
2220 * SBMAC_ATTACH(parent, self, aux)
2221 *
2222 * Attach routine - init hardware and hook ourselves into NetBSD.
2223 *
2224 * Input parameters:
2225 * parent - parent bus device
2226 * self - our softc
2227 * aux - attach data
2228 *
2229 * Return value:
2230 * nothing
2231 */
2232
2233 static void
2234 sbmac_attach(device_t parent, device_t self, void *aux)
2235 {
2236 struct sbmac_softc * const sc = device_private(self);
2237 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
2238 struct mii_data * const mii = &sc->sc_mii;
2239 struct sbobio_attach_args * const sa = aux;
2240 u_char *eaddr;
2241 static int unit = 0; /* XXX */
2242 uint64_t ea_reg;
2243 int idx;
2244
2245 sc->sc_dev = self;
2246
2247 /* Determine controller base address */
2248
2249 sc->sbm_base = sa->sa_base + sa->sa_locs.sa_offset;
2250
2251 eaddr = sc->sbm_hwaddr;
2252
2253 /*
2254 * Initialize context (get pointers to registers and stuff), then
2255 * allocate the memory for the descriptor tables.
2256 */
2257
2258 sbmac_initctx(sc);
2259
2260 callout_init(&(sc->sc_tick_ch), 0);
2261
2262 /*
 * Read the ethernet address.  The firmware left this programmed
2264 * for us in the ethernet address register for each mac.
2265 */
2266
2267 ea_reg = SBMAC_READCSR(PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR));
2268 for (idx = 0; idx < 6; idx++) {
2269 eaddr[idx] = (uint8_t) (ea_reg & 0xFF);
2270 ea_reg >>= 8;
2271 }
2272
2273 #define SBMAC_DEFAULT_HWADDR "40:00:00:00:01:00"
2274 if (eaddr[0] == 0 && eaddr[1] == 0 && eaddr[2] == 0 &&
2275 eaddr[3] == 0 && eaddr[4] == 0 && eaddr[5] == 0) {
2276 sbmac_parse_hwaddr(SBMAC_DEFAULT_HWADDR, eaddr);
2277 eaddr[5] = unit;
2278 }
2279
2280 #ifdef SBMAC_ETH0_HWADDR
2281 if (unit == 0)
2282 sbmac_parse_hwaddr(SBMAC_ETH0_HWADDR, eaddr);
2283 #endif
2284 #ifdef SBMAC_ETH1_HWADDR
2285 if (unit == 1)
2286 sbmac_parse_hwaddr(SBMAC_ETH1_HWADDR, eaddr);
2287 #endif
2288 #ifdef SBMAC_ETH2_HWADDR
2289 if (unit == 2)
2290 sbmac_parse_hwaddr(SBMAC_ETH2_HWADDR, eaddr);
2291 #endif
2292 unit++;
2293
2294 /*
2295 * Display Ethernet address (this is called during the config process
2296 * so we need to finish off the config message that was being displayed)
2297 */
2298 aprint_normal(": Ethernet%s\n",
2299 sc->sbm_pass3_dma ? ", using unaligned tx DMA" : "");
2300 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
2301
2302
2303 /*
2304 * Set up ifnet structure
2305 */
2306
2307 ifp->if_softc = sc;
2308 memcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
2309 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2310 ifp->if_ioctl = sbmac_ioctl;
2311 ifp->if_start = sbmac_start;
2312 ifp->if_watchdog = sbmac_watchdog;
2313 ifp->if_snd.ifq_maxlen = SBMAC_MAX_TXDESCR - 1;
2314
2315 /*
2316 * Set up ifmedia support.
2317 */
2318
2319 /*
2320 * Initialize MII/media info.
2321 */
2322 mii->mii_ifp = ifp;
2323 mii->mii_readreg = sbmac_mii_readreg;
2324 mii->mii_writereg = sbmac_mii_writereg;
2325 mii->mii_statchg = sbmac_mii_statchg;
2326 sc->sc_ethercom.ec_mii = mii;
2327 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
2328 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
2329 MII_OFFSET_ANY, 0);
2330
2331 if (LIST_FIRST(&mii->mii_phys) == NULL) {
2332 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
2333 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
2334 } else
2335 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
2336
2337 /*
2338 * map/route interrupt
2339 */
2340
2341 sc->sbm_intrhand = cpu_intr_establish(sa->sa_locs.sa_intr[0], IPL_NET,
2342 sbmac_intr, sc);
2343
2344 /*
2345 * Call MI attach routines.
2346 */
2347 if_attach(ifp);
2348 if_deferred_start_init(ifp, NULL);
2349 ether_ifattach(ifp, eaddr);
2350 }
2351