1 /* $NetBSD: sbmac.c,v 1.52 2019/01/22 03:42:26 msaitoh Exp $ */
2
3 /*
4 * Copyright 2000, 2001, 2004
5 * Broadcom Corporation. All rights reserved.
6 *
7 * This software is furnished under license and may be used and copied only
8 * in accordance with the following terms and conditions. Subject to these
9 * conditions, you may download, copy, install, use, modify and distribute
10 * modified or unmodified copies of this software in source and/or binary
11 * form. No title or ownership is transferred hereby.
12 *
13 * 1) Any source code used, modified or distributed must reproduce and
14 * retain this copyright notice and list of conditions as they appear in
15 * the source file.
16 *
17 * 2) No right is granted to use any trade name, trademark, or logo of
18 * Broadcom Corporation. The "Broadcom Corporation" name may not be
19 * used to endorse or promote products derived from this software
20 * without the prior written permission of Broadcom Corporation.
21 *
22 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
25 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
26 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
27 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: sbmac.c,v 1.52 2019/01/22 03:42:26 msaitoh Exp $");
37
38 #include "opt_inet.h"
39 #include "opt_ns.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/queue.h>
49 #include <sys/device.h>
50
51 #include <net/if.h>
52 #include <net/if_arp.h>
53 #include <net/if_ether.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56
57 #include <net/bpf.h>
58
59 #ifdef INET
60 #include <netinet/in.h>
61 #include <netinet/if_inarp.h>
62 #endif
63
64 #include <mips/locore.h>
65
66 #include "sbobiovar.h"
67
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 #include <dev/mii/mii_bitbang.h>
71
72 #include <mips/sibyte/include/sb1250_defs.h>
73 #include <mips/sibyte/include/sb1250_regs.h>
74 #include <mips/sibyte/include/sb1250_mac.h>
75 #include <mips/sibyte/include/sb1250_dma.h>
76 #include <mips/sibyte/include/sb1250_scd.h>
77
78 #include <evbmips/sbmips/systemsw.h>
79
80 /* Simple types */
81
82 typedef u_long sbmac_port_t;
83 typedef uint64_t sbmac_physaddr_t;
84 typedef uint64_t sbmac_enetaddr_t;
85
86 typedef enum { sbmac_speed_auto, sbmac_speed_10,
87 sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
88
89 typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
90 sbmac_duplex_full } sbmac_duplex_t;
91
92 typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
93 sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
94
95 typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
96 sbmac_state_broken } sbmac_state_t;
97
98
99 /* Macros */
100
101 #define SBMAC_EVENT_COUNTERS /* Include counters for various events */
102
103 #define SBDMA_NEXTBUF(d, f) (((f) + 1) & (d)->sbdma_dscr_mask)
104
105 #define CACHELINESIZE 32
106 #define NUMCACHEBLKS(x) (((x)+CACHELINESIZE-1)/CACHELINESIZE)
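/*
 * For example, with CACHELINESIZE of 32, NUMCACHEBLKS(100) is 4, since a
 * 100-byte buffer touches four 32-byte cache lines.  SBDMA_NEXTBUF assumes
 * the descriptor count is a power of two: with 256 descriptors the mask is
 * 0xff, so index 255 wraps back around to index 0.
 */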
107 #define KMALLOC(x) malloc((x), M_DEVBUF, M_DONTWAIT)
108 #define KVTOPHYS(x) kvtophys((vaddr_t)(x))
109
110 #ifdef SBMACDEBUG
111 #define dprintf(x) printf x
112 #else
113 #define dprintf(x)
114 #endif
115
116 #define SBMAC_READCSR(t) mips3_ld((register_t)(t))
117 #define SBMAC_WRITECSR(t, v) mips3_sd((register_t)(t), (v))
118
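/*
 * PKSEG1 turns a physical CSR address into an uncached KSEG1 virtual
 * address so the registers can be read and written directly.
 */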
119 #define PKSEG1(x) ((sbmac_port_t) MIPS_PHYS_TO_KSEG1(x))
120
121 /* These are limited to fit within one virtual page, and must be 2**N. */
122 #define SBMAC_MAX_TXDESCR 256 /* should be 1024 */
123 #define SBMAC_MAX_RXDESCR 256 /* should be 512 */
124
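/*
 * Receive buffers are offset by two bytes so that the 14-byte Ethernet
 * header leaves the IP header that follows it aligned on a 32-bit boundary.
 */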
125 #define ETHER_ALIGN 2
126
127 /* DMA Descriptor structure */
128
129 typedef struct sbdmadscr_s {
130 uint64_t dscr_a;
131 uint64_t dscr_b;
132 } sbdmadscr_t;
133
134
135 /* DMA Controller structure */
136
137 typedef struct sbmacdma_s {
138
139 /*
140 * This stuff is used to identify the channel and the registers
141 * associated with it.
142 */
143
144 struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */
145 int sbdma_channel; /* channel number */
146 int sbdma_txdir; /* direction (1=transmit) */
147 int sbdma_maxdescr; /* total # of descriptors in ring */
148 sbmac_port_t sbdma_config0; /* DMA config register 0 */
149 sbmac_port_t sbdma_config1; /* DMA config register 1 */
150 sbmac_port_t sbdma_dscrbase; /* Descriptor base address */
151 sbmac_port_t sbdma_dscrcnt; /* Descriptor count register */
152 sbmac_port_t sbdma_curdscr; /* current descriptor address */
153
154 /*
155 * This stuff is for maintenance of the ring
156 */
157 sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */
158 struct mbuf **sbdma_ctxtable; /* context table, one per descr */
159 unsigned int sbdma_dscr_mask; /* sbdma_maxdescr - 1 */
160 paddr_t sbdma_dscrtable_phys; /* and also the phys addr */
161 unsigned int sbdma_add_index; /* next dscr for sw to add */
162 unsigned int sbdma_rem_index; /* next dscr for sw to remove */
163 } sbmacdma_t;
164
165
166 /* Ethernet softc structure */
167
168 struct sbmac_softc {
169
170 /*
171 * NetBSD-specific things
172 */
173 struct ethercom sc_ethercom; /* Ethernet common part */
174 struct mii_data sc_mii;
175 struct callout sc_tick_ch;
176
177 device_t sc_dev; /* device */
178 int sbm_if_flags;
179 void *sbm_intrhand;
180
181 /*
182 * Controller-specific things
183 */
184
185 sbmac_port_t sbm_base; /* MAC's base address */
186 sbmac_state_t sbm_state; /* current state */
187
188 sbmac_port_t sbm_macenable; /* MAC Enable Register */
189 sbmac_port_t sbm_maccfg; /* MAC Configuration Register */
190 sbmac_port_t sbm_fifocfg; /* FIFO configuration register */
191 sbmac_port_t sbm_framecfg; /* Frame configuration register */
192 sbmac_port_t sbm_rxfilter; /* receive filter register */
193 sbmac_port_t sbm_isr; /* Interrupt status register */
194 sbmac_port_t sbm_imr; /* Interrupt mask register */
195
196 sbmac_speed_t sbm_speed; /* current speed */
197 sbmac_duplex_t sbm_duplex; /* current duplex */
198 sbmac_fc_t sbm_fc; /* current flow control setting */
199 int sbm_rxflags; /* received packet flags */
200
201 u_char sbm_hwaddr[ETHER_ADDR_LEN];
202
203 sbmacdma_t sbm_txdma; /* for now, only use channel 0 */
204 sbmacdma_t sbm_rxdma;
205
206 int sbm_pass3_dma; /* chip has pass3 SOC DMA features */
207
208 #ifdef SBMAC_EVENT_COUNTERS
209 struct evcnt sbm_ev_rxintr; /* Rx interrupts */
210 struct evcnt sbm_ev_txintr; /* Tx interrupts */
211 struct evcnt sbm_ev_txdrop; /* Tx packets dropped: mbuf allocation failed */
212 struct evcnt sbm_ev_txstall; /* Tx stalled due to no descriptors free */
213
214 struct evcnt sbm_ev_txsplit; /* pass3 Tx split mbuf */
215 struct evcnt sbm_ev_txkeep; /* pass3 Tx didn't split mbuf */
216 #endif
217 };
218
219
220 #ifdef SBMAC_EVENT_COUNTERS
221 #define SBMAC_EVCNT_INCR(ev) (ev).ev_count++
222 #else
223 #define SBMAC_EVCNT_INCR(ev) do { /* nothing */ } while (0)
224 #endif
225
226 /* Externs */
227
228 extern paddr_t kvtophys(vaddr_t);
229
230 /* Prototypes */
231
232 static void sbdma_initctx(sbmacdma_t *, struct sbmac_softc *, int, int, int);
233 static void sbdma_channel_start(sbmacdma_t *);
234 static int sbdma_add_rcvbuffer(sbmacdma_t *, struct mbuf *);
235 static int sbdma_add_txbuffer(sbmacdma_t *, struct mbuf *);
236 static void sbdma_emptyring(sbmacdma_t *);
237 static void sbdma_fillring(sbmacdma_t *);
238 static void sbdma_rx_process(struct sbmac_softc *, sbmacdma_t *);
239 static void sbdma_tx_process(struct sbmac_softc *, sbmacdma_t *);
240 static void sbmac_initctx(struct sbmac_softc *);
241 static void sbmac_channel_start(struct sbmac_softc *);
242 static void sbmac_channel_stop(struct sbmac_softc *);
243 static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,
244 sbmac_state_t);
245 static void sbmac_promiscuous_mode(struct sbmac_softc *, bool);
246 static void sbmac_init_and_start(struct sbmac_softc *);
247 static uint64_t sbmac_addr2reg(u_char *);
248 static void sbmac_intr(void *, uint32_t, vaddr_t);
249 static void sbmac_start(struct ifnet *);
250 static void sbmac_setmulti(struct sbmac_softc *);
251 static int sbmac_ether_ioctl(struct ifnet *, u_long, void *);
252 static int sbmac_ioctl(struct ifnet *, u_long, void *);
253 static void sbmac_watchdog(struct ifnet *);
254 static int sbmac_match(device_t, cfdata_t, void *);
255 static void sbmac_attach(device_t, device_t, void *);
256 static bool sbmac_set_speed(struct sbmac_softc *, sbmac_speed_t);
257 static bool sbmac_set_duplex(struct sbmac_softc *, sbmac_duplex_t, sbmac_fc_t);
258 static void sbmac_tick(void *);
259
260
261 /* Globals */
262
263 CFATTACH_DECL_NEW(sbmac, sizeof(struct sbmac_softc),
264 sbmac_match, sbmac_attach, NULL, NULL);
265
266 static uint32_t sbmac_mii_bitbang_read(device_t self);
267 static void sbmac_mii_bitbang_write(device_t self, uint32_t val);
268
269 static const struct mii_bitbang_ops sbmac_mii_bitbang_ops = {
270 sbmac_mii_bitbang_read,
271 sbmac_mii_bitbang_write,
272 {
273 (uint32_t)M_MAC_MDIO_OUT, /* MII_BIT_MDO */
274 (uint32_t)M_MAC_MDIO_IN, /* MII_BIT_MDI */
275 (uint32_t)M_MAC_MDC, /* MII_BIT_MDC */
276 0, /* MII_BIT_DIR_HOST_PHY */
277 (uint32_t)M_MAC_MDIO_DIR /* MII_BIT_DIR_PHY_HOST */
278 }
279 };
280
281 static uint32_t
282 sbmac_mii_bitbang_read(device_t self)
283 {
284 struct sbmac_softc *sc = device_private(self);
285 sbmac_port_t reg;
286
287 reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
288 return (uint32_t) SBMAC_READCSR(reg);
289 }
290
291 static void
292 sbmac_mii_bitbang_write(device_t self, uint32_t val)
293 {
294 struct sbmac_softc *sc = device_private(self);
295 sbmac_port_t reg;
296
297 reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
298
299 SBMAC_WRITECSR(reg, (val &
300 (M_MAC_MDC|M_MAC_MDIO_DIR|M_MAC_MDIO_OUT|M_MAC_MDIO_IN)));
301 }
302
303 /*
304 * Read a PHY register through the MII.
305 */
306 static int
307 sbmac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
308 {
309
310 return mii_bitbang_readreg(self, &sbmac_mii_bitbang_ops, phy, reg,
311 val);
312 }
313
314 /*
315 * Write to a PHY register through the MII.
316 */
317 static int
318 sbmac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
319 {
320
321 return mii_bitbang_writereg(self, &sbmac_mii_bitbang_ops, phy, reg,
322 val);
323 }
324
325 static void
326 sbmac_mii_statchg(struct ifnet *ifp)
327 {
328 struct sbmac_softc *sc = ifp->if_softc;
329 sbmac_state_t oldstate;
330
331 /* Stop the MAC in preparation for changing all of the parameters. */
332 oldstate = sbmac_set_channel_state(sc, sbmac_state_off);
333
334 switch (sc->sc_ethercom.ec_if.if_baudrate) {
335 default: /* if autonegotiation fails, assume 10Mbit */
336 case IF_Mbps(10):
337 sbmac_set_speed(sc, sbmac_speed_10);
338 break;
339
340 case IF_Mbps(100):
341 sbmac_set_speed(sc, sbmac_speed_100);
342 break;
343
344 case IF_Mbps(1000):
345 sbmac_set_speed(sc, sbmac_speed_1000);
346 break;
347 }
348
349 if (sc->sc_mii.mii_media_active & IFM_FDX) {
350 /* Configure for full-duplex */
351 /* XXX: is flow control right for 10, 100? */
352 sbmac_set_duplex(sc, sbmac_duplex_full, sbmac_fc_frame);
353 } else {
354 /* Configure for half-duplex */
355 /* XXX: is flow control right? */
356 sbmac_set_duplex(sc, sbmac_duplex_half, sbmac_fc_disabled);
357 }
358
359 /* And put it back into its former state. */
360 sbmac_set_channel_state(sc, oldstate);
361 }
362
363 /*
364 * SBDMA_INITCTX(d, sc, chan, txrx, maxdescr)
365 *
366 * Initialize a DMA channel context. Since there are potentially
367 * eight DMA channels per MAC, it's nice to do this in a standard
368 * way.
369 *
370 * Input parameters:
371 * d - sbmacdma_t structure (DMA channel context)
372 * sc - sbmac_softc structure (pointer to a MAC)
373 * chan - channel number (0..1 right now)
374 * txrx - Identifies DMA_TX or DMA_RX for channel direction
375 * maxdescr - number of descriptors
376 *
377 * Return value:
378 * nothing
379 */
380
381 static void
382 sbdma_initctx(sbmacdma_t *d, struct sbmac_softc *sc, int chan, int txrx,
383 int maxdescr)
384 {
385 /*
386 * Save away interesting stuff in the structure
387 */
388
389 d->sbdma_eth = sc;
390 d->sbdma_channel = chan;
391 d->sbdma_txdir = txrx;
392
393 /*
394 * initialize register pointers
395 */
396
397 d->sbdma_config0 = PKSEG1(sc->sbm_base +
398 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG0));
399 d->sbdma_config1 = PKSEG1(sc->sbm_base +
400 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG1));
401 d->sbdma_dscrbase = PKSEG1(sc->sbm_base +
402 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_BASE));
403 d->sbdma_dscrcnt = PKSEG1(sc->sbm_base +
404 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_CNT));
405 d->sbdma_curdscr = PKSEG1(sc->sbm_base +
406 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CUR_DSCRADDR));
407
408 /*
409 * Allocate memory for the ring
410 */
411
412 d->sbdma_maxdescr = maxdescr;
413 d->sbdma_dscr_mask = d->sbdma_maxdescr - 1;
414
415 d->sbdma_dscrtable = (sbdmadscr_t *)
416 KMALLOC(d->sbdma_maxdescr * sizeof(sbdmadscr_t));
417
418 memset(d->sbdma_dscrtable, 0, d->sbdma_maxdescr*sizeof(sbdmadscr_t));
419
420 d->sbdma_dscrtable_phys = KVTOPHYS(d->sbdma_dscrtable);
421
422 /*
423 * And context table
424 */
425
426 d->sbdma_ctxtable = (struct mbuf **)
427 KMALLOC(d->sbdma_maxdescr*sizeof(struct mbuf *));
428
429 memset(d->sbdma_ctxtable, 0, d->sbdma_maxdescr*sizeof(struct mbuf *));
430 }
431
432 /*
433 * SBDMA_CHANNEL_START(d)
434 *
435 * Initialize the hardware registers for a DMA channel.
436 *
437 * Input parameters:
438 * d - DMA channel to init (context must be previously init'd)
439 *
440 * Return value:
441 * nothing
442 */
443
444 static void
445 sbdma_channel_start(sbmacdma_t *d)
446 {
447 /*
448 * Turn on the DMA channel
449 */
450
451 SBMAC_WRITECSR(d->sbdma_config1, 0);
452
453 SBMAC_WRITECSR(d->sbdma_dscrbase, d->sbdma_dscrtable_phys);
454
455 SBMAC_WRITECSR(d->sbdma_config0, V_DMA_RINGSZ(d->sbdma_maxdescr) | 0);
456
457 /*
458 * Initialize ring pointers
459 */
460
461 d->sbdma_add_index = 0;
462 d->sbdma_rem_index = 0;
463 }
464
465 /*
466 * SBDMA_ADD_RCVBUFFER(d, m)
467 *
468 * Add a buffer to the specified DMA channel. For receive channels,
469 * this queues a buffer for inbound packets.
470 *
471 * Input parameters:
472 * d - DMA channel descriptor
473 * m - mbuf to add, or NULL if we should allocate one.
474 *
475 * Return value:
476 * 0 if buffer added successfully
477 * ENOSPC if the ring is full, ENOBUFS if an mbuf could not be allocated
478 */
479
480 static int
481 sbdma_add_rcvbuffer(sbmacdma_t *d, struct mbuf *m)
482 {
483 unsigned int dsc, nextdsc;
484 struct mbuf *m_new = NULL;
485
486 /* get pointer to our current place in the ring */
487
488 dsc = d->sbdma_add_index;
489 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
490
491 /*
492 * figure out if the ring is full - if the next descriptor
493 * is the same as the one that we're going to remove from
494 * the ring, the ring is full
495 */
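/*
 * (One slot is always left unused so a completely full ring can be
 * distinguished from an empty one; with 256 receive descriptors at
 * most 255 buffers are ever outstanding.)
 */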
496
497 if (nextdsc == d->sbdma_rem_index)
498 return ENOSPC;
499
500 /*
501 * Allocate an mbuf if we don't already have one.
502 * If we do have an mbuf, reset it so that it's empty.
503 */
504
505 if (m == NULL) {
506 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
507 if (m_new == NULL) {
508 aprint_error_dev(d->sbdma_eth->sc_dev,
509 "mbuf allocation failed\n");
510 return ENOBUFS;
511 }
512
513 MCLGET(m_new, M_DONTWAIT);
514 if (!(m_new->m_flags & M_EXT)) {
515 aprint_error_dev(d->sbdma_eth->sc_dev,
516 "mbuf cluster allocation failed\n");
517 m_freem(m_new);
518 return ENOBUFS;
519 }
520
521 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
522 m_adj(m_new, ETHER_ALIGN);
523 } else {
524 m_new = m;
525 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
526 m_new->m_data = m_new->m_ext.ext_buf;
527 m_adj(m_new, ETHER_ALIGN);
528 }
529
530 /*
531 * fill in the descriptor
532 */
533
534 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new, void *)) |
535 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(ETHER_ALIGN + m_new->m_len)) |
536 M_DMA_DSCRA_INTERRUPT;
537
538 /* receiving: no options */
539 d->sbdma_dscrtable[dsc].dscr_b = 0;
540
541 /*
542 * fill in the context
543 */
544
545 d->sbdma_ctxtable[dsc] = m_new;
546
547 /*
548 * point at next packet
549 */
550
551 d->sbdma_add_index = nextdsc;
552
553 /*
554 * Give the buffer to the DMA engine.
555 */
556
557 SBMAC_WRITECSR(d->sbdma_dscrcnt, 1);
558
559 return 0; /* we did it */
560 }
561
562 /*
563 * SBDMA_ADD_TXBUFFER(d, m)
564 *
565 * Add a transmit buffer to the specified DMA channel, causing a
566 * transmit to start.
567 *
568 * Input parameters:
569 * d - DMA channel descriptor
570 * m - mbuf to add
571 *
572 * Return value:
573 * 0 transmit queued successfully
574 * otherwise error code
575 */
576
577 static int
578 sbdma_add_txbuffer(sbmacdma_t *d, struct mbuf *m)
579 {
580 unsigned int dsc, nextdsc, prevdsc, origdesc;
581 int length;
582 int num_mbufs = 0;
583 struct sbmac_softc *sc = d->sbdma_eth;
584
585 /* get pointer to our current place in the ring */
586
587 dsc = d->sbdma_add_index;
588 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
589
590 /*
591 * figure out if the ring is full - if the next descriptor
592 * is the same as the one that we're going to remove from
593 * the ring, the ring is full
594 */
595
596 if (nextdsc == d->sbdma_rem_index) {
597 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
598 return ENOSPC;
599 }
600
601 /*
602 * PASS3 parts do not have buffer alignment restriction.
603 * No need to copy/coalesce to new mbuf. Also has different
604 * descriptor format
605 */
606 if (sc->sbm_pass3_dma) {
607 struct mbuf *m_temp = NULL;
608
609 /*
610 * Loop thru this mbuf record.
611 * The head mbuf will have SOP set.
612 */
613 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m, void *)) |
614 M_DMA_ETHTX_SOP;
615
616 /*
617 * transmitting: set outbound options, buffer A size (+ low 5
618 * bits of start addr), and packet length.
619 */
620 d->sbdma_dscrtable[dsc].dscr_b =
621 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
622 V_DMA_DSCRB_A_SIZE((m->m_len +
623 (mtod(m, uintptr_t) & 0x0000001F))) |
624 V_DMA_DSCRB_PKT_SIZE_MSB((m->m_pkthdr.len & 0xc000) >> 14) |
625 V_DMA_DSCRB_PKT_SIZE(m->m_pkthdr.len & 0x3fff);
626
627 d->sbdma_add_index = nextdsc;
628 origdesc = prevdsc = dsc;
629 dsc = d->sbdma_add_index;
630 num_mbufs++;
631
632 /* Start with first non-head mbuf */
633 for (m_temp = m->m_next; m_temp != NULL; m_temp = m_temp->m_next) {
634 int len, next_len;
635 uint64_t addr;
636
637 if (m_temp->m_len == 0)
638 continue; /* Skip 0-length mbufs */
639
640 len = m_temp->m_len;
641 addr = KVTOPHYS(mtod(m_temp, void *));
642
643 /*
644 * Check to see if the mbuf spans a page boundary. If
645 * it does, and the physical pages behind the virtual
646 * pages are not contiguous, split it so that each
647 * virtual page uses its own Tx descriptor.
648 */
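/*
 * For example, a 200-byte mbuf whose last 80 bytes fall in the next
 * virtual page is described with a 120-byte and an 80-byte descriptor,
 * unless the two pages turn out to be physically contiguous.
 */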
649 if (trunc_page(addr) != trunc_page(addr + len - 1)) {
650 next_len = (addr + len) - trunc_page(addr + len);
651
652 len -= next_len;
653
654 if (addr + len ==
655 KVTOPHYS(mtod(m_temp, char *) + len)) {
656 SBMAC_EVCNT_INCR(sc->sbm_ev_txkeep);
657 len += next_len;
658 next_len = 0;
659 } else {
660 SBMAC_EVCNT_INCR(sc->sbm_ev_txsplit);
661 }
662 } else {
663 next_len = 0;
664 }
665
666 again:
667 /*
668 * fill in the descriptor
669 */
670 d->sbdma_dscrtable[dsc].dscr_a = addr;
671
672 /*
673 * transmitting: set outbound options, buffer A
674 * size (+ low 5 bits of start addr)
675 */
676 d->sbdma_dscrtable[dsc].dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_NOTSOP) |
677 V_DMA_DSCRB_A_SIZE((len + (addr & 0x0000001F)));
678
679 d->sbdma_ctxtable[dsc] = NULL;
680
681 /*
682 * point at next descriptor
683 */
684 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
685 if (nextdsc == d->sbdma_rem_index) {
686 d->sbdma_add_index = origdesc;
687 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
688 return ENOSPC;
689 }
690 d->sbdma_add_index = nextdsc;
691
692 prevdsc = dsc;
693 dsc = d->sbdma_add_index;
694 num_mbufs++;
695
696 if (next_len != 0) {
697 addr = KVTOPHYS(mtod(m_temp, char *) + len);
698 len = next_len;
699
700 next_len = 0;
701 goto again;
702 }
703
704 }
705 /* Set head mbuf to last context index */
706 d->sbdma_ctxtable[prevdsc] = m;
707
708 /* Interrupt on last dscr of packet. */
709 d->sbdma_dscrtable[prevdsc].dscr_a |= M_DMA_DSCRA_INTERRUPT;
710 } else {
711 struct mbuf *m_new = NULL;
712 /*
713 * [BEGIN XXX]
714 * XXX Copy/coalesce the mbufs into a single mbuf cluster (we
715 * assume it will fit). This is a temporary hack to get us
716 * going.
717 */
718
719 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
720 if (m_new == NULL) {
721 aprint_error_dev(d->sbdma_eth->sc_dev,
722 "mbuf allocation failed\n");
723 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
724 return ENOBUFS;
725 }
726
727 MCLGET(m_new, M_DONTWAIT);
728 if (!(m_new->m_flags & M_EXT)) {
729 aprint_error_dev(d->sbdma_eth->sc_dev,
730 "mbuf cluster allocation failed\n");
731 m_freem(m_new);
732 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
733 return ENOBUFS;
734 }
735
736 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
737 /*m_adj(m_new,ETHER_ALIGN);*/
738
739 /*
740 * XXX Don't forget to include the offset portion in the
741 * XXX cache block calculation when this code is rewritten!
742 */
743
744 /*
745 * Copy data
746 */
747
748 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
749 m_new->m_len = m_new->m_pkthdr.len = m->m_pkthdr.len;
750
751 /* Free old mbuf 'm', actual mbuf is now 'm_new' */
752
753 // XXX: CALLERS WILL FREE, they might have to bpf_mtap() if this
754 // XXX: function succeeds.
755 // m_freem(m);
756 length = m_new->m_len;
757
758 /* [END XXX] */
759 /*
760 * fill in the descriptor
761 */
762
763 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new, void *)) |
764 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(m_new->m_len)) |
765 M_DMA_DSCRA_INTERRUPT |
766 M_DMA_ETHTX_SOP;
767
768 /* transmitting: set outbound options and length */
769 d->sbdma_dscrtable[dsc].dscr_b =
770 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
771 V_DMA_DSCRB_PKT_SIZE(length);
772
773 num_mbufs++;
774
775 /*
776 * fill in the context
777 */
778
779 d->sbdma_ctxtable[dsc] = m_new;
780
781 /*
782 * point at next packet
783 */
784 d->sbdma_add_index = nextdsc;
785 }
786
787 /*
788 * Give the buffer to the DMA engine.
789 */
790
791 SBMAC_WRITECSR(d->sbdma_dscrcnt, num_mbufs);
792
793 return 0; /* we did it */
794 }
795
796 /*
797 * SBDMA_EMPTYRING(d)
798 *
799 * Free all allocated mbufs on the specified DMA channel.
800 *
801 * Input parameters:
802 * d - DMA channel
803 *
804 * Return value:
805 * nothing
806 */
807
808 static void
809 sbdma_emptyring(sbmacdma_t *d)
810 {
811 int idx;
812 struct mbuf *m;
813
814 for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
815 m = d->sbdma_ctxtable[idx];
816 if (m) {
817 m_freem(m);
818 d->sbdma_ctxtable[idx] = NULL;
819 }
820 }
821 }
822
823 /*
824 * SBDMA_FILLRING(d)
825 *
826 * Fill the specified DMA channel (must be receive channel)
827 * with mbufs
828 *
829 * Input parameters:
830 * d - DMA channel
831 *
832 * Return value:
833 * nothing
834 */
835
836 static void
837 sbdma_fillring(sbmacdma_t *d)
838 {
839 int idx;
840
841 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++)
842 if (sbdma_add_rcvbuffer(d, NULL) != 0)
843 break;
844 }
845
846 /*
847 * SBDMA_RX_PROCESS(sc, d)
848 *
849 * Process "completed" receive buffers on the specified DMA channel.
850 * Note that this isn't really ideal for priority channels, since
851 * it processes all of the packets on a given channel before
852 * returning.
853 *
854 * Input parameters:
855 * sc - softc structure
856 * d - DMA channel context
857 *
858 * Return value:
859 * nothing
860 */
861
862 static void
863 sbdma_rx_process(struct sbmac_softc *sc, sbmacdma_t *d)
864 {
865 int curidx;
866 int hwidx;
867 sbdmadscr_t *dscp;
868 struct mbuf *m;
869 int len;
870
871 struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
872
873 for (;;) {
874 /*
875 * figure out where we are (as an index) and where
876 * the hardware is (also as an index)
877 *
878 * This could be done faster if (for example) the
879 * descriptor table was page-aligned and contiguous in
880 * both virtual and physical memory -- you could then
881 * just compare the low-order bits of the virtual address
882 * (sbdma_rem_index) and the physical address
883 * (sbdma_curdscr CSR).
884 */
885
886 curidx = d->sbdma_rem_index;
887 hwidx = (int)
888 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
889 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
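/*
 * Each descriptor is 16 bytes (two 64-bit words), so the division
 * above converts the hardware's current descriptor physical address
 * into an index into sbdma_dscrtable.
 */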
890
891 /*
892 * If they're the same, that means we've processed all
893 * of the descriptors up to (but not including) the one that
894 * the hardware is working on right now.
895 */
896
897 if (curidx == hwidx)
898 break;
899
900 /*
901 * Otherwise, get the packet's mbuf ptr back
902 */
903
904 dscp = &(d->sbdma_dscrtable[curidx]);
905 m = d->sbdma_ctxtable[curidx];
906 d->sbdma_ctxtable[curidx] = NULL;
907
908 len = (int)G_DMA_DSCRB_PKT_SIZE(dscp->dscr_b) - 4;
909
910 /*
911 * Check packet status. If good, process it.
912 * If not, silently drop it and put it back on the
913 * receive ring.
914 */
915
916 if (! (dscp->dscr_a & M_DMA_ETHRX_BAD)) {
917
918 /*
919 * Set length into the packet
920 * XXX do we remove the CRC here?
921 */
922 m->m_pkthdr.len = m->m_len = len;
923
924 m_set_rcvif(m, ifp);
925
926
927 /*
928 * Add a new buffer to replace the old one.
929 */
930 sbdma_add_rcvbuffer(d, NULL);
931
932 /*
933 * Handle BPF listeners. Let the BPF user see the
934 * packet, but don't pass it up to the ether_input()
935 * layer unless it's a broadcast packet, multicast
936 * packet, matches our ethernet address or the
937 * interface is in promiscuous mode.
938 */
939
940 /*
941 * Pass the buffer to the kernel
942 */
943 if_percpuq_enqueue(ifp->if_percpuq, m);
944 } else {
945 /*
946 * Packet was mangled somehow. Just drop it and
947 * put it back on the receive ring.
948 */
949 sbdma_add_rcvbuffer(d, m);
950 }
951
952 /*
953 * .. and advance to the next buffer.
954 */
955
956 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
957 }
958 }
959
960 /*
961 * SBDMA_TX_PROCESS(sc, d)
962 *
963 * Process "completed" transmit buffers on the specified DMA channel.
964 * This is normally called within the interrupt service routine.
965 * Note that this isn't really ideal for priority channels, since
966 * it processes all of the packets on a given channel before
967 * returning.
968 *
969 * Input parameters:
970 * sc - softc structure
971 * d - DMA channel context
972 *
973 * Return value:
974 * nothing
975 */
976
977 static void
978 sbdma_tx_process(struct sbmac_softc *sc, sbmacdma_t *d)
979 {
980 int curidx;
981 int hwidx;
982 struct mbuf *m;
983
984 struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
985
986 for (;;) {
987 /*
988 * figure out where we are (as an index) and where
989 * the hardware is (also as an index)
990 *
991 * This could be done faster if (for example) the
992 * descriptor table was page-aligned and contiguous in
993 * both virtual and physical memory -- you could then
994 * just compare the low-order bits of the virtual address
995 * (sbdma_rem_index) and the physical address
996 * (sbdma_curdscr CSR).
997 */
998
999 curidx = d->sbdma_rem_index;
1000 hwidx = (int)
1001 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1002 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1003
1004 /*
1005 * If they're the same, that means we've processed all
1006 * of the descriptors up to (but not including) the one that
1007 * the hardware is working on right now.
1008 */
1009
1010 if (curidx == hwidx)
1011 break;
1012
1013 /*
1014 * Otherwise, get the packet's mbuf ptr back
1015 */
1016
1017 m = d->sbdma_ctxtable[curidx];
1018 d->sbdma_ctxtable[curidx] = NULL;
1019
1020 /*
1021 * for transmits we just free buffers and count packets.
1022 */
1023 ifp->if_opackets++;
1024 m_freem(m);
1025
1026 /*
1027 * .. and advance to the next buffer.
1028 */
1029
1030 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
1031 }
1032
1033 /*
1034 * Decide whether to clear the IFF_OACTIVE bit on the interface.
1035 * The flag is supposed to reflect whether the interface is actively
1036 * transmitting, but that is hard to determine quickly, so just clear it.
1037 */
1038
1039 ifp->if_flags &= ~IFF_OACTIVE;
1040 }
1041
1042 /*
1043 * SBMAC_INITCTX(s)
1044 *
1045 * Initialize an Ethernet context structure - this is called
1046 * once per MAC on the 1250. Memory is allocated here, so don't
1047 * call it again from inside the ioctl routines that bring the
1048 * interface up/down
1049 *
1050 * Input parameters:
1051 * sc - sbmac context structure
1052 *
1053 * Return value:
1054 * nothing
1055 */
1056
1057 static void
1058 sbmac_initctx(struct sbmac_softc *sc)
1059 {
1060 uint64_t sysrev;
1061
1062 /*
1063 * figure out the addresses of some ports
1064 */
1065
1066 sc->sbm_macenable = PKSEG1(sc->sbm_base + R_MAC_ENABLE);
1067 sc->sbm_maccfg = PKSEG1(sc->sbm_base + R_MAC_CFG);
1068 sc->sbm_fifocfg = PKSEG1(sc->sbm_base + R_MAC_THRSH_CFG);
1069 sc->sbm_framecfg = PKSEG1(sc->sbm_base + R_MAC_FRAMECFG);
1070 sc->sbm_rxfilter = PKSEG1(sc->sbm_base + R_MAC_ADFILTER_CFG);
1071 sc->sbm_isr = PKSEG1(sc->sbm_base + R_MAC_STATUS);
1072 sc->sbm_imr = PKSEG1(sc->sbm_base + R_MAC_INT_MASK);
1073
1074 /*
1075 * Initialize the DMA channels. Right now, only one per MAC is used
1076 * Note: Only do this _once_, as it allocates memory from the kernel!
1077 */
1078
1079 sbdma_initctx(&(sc->sbm_txdma), sc, 0, DMA_TX, SBMAC_MAX_TXDESCR);
1080 sbdma_initctx(&(sc->sbm_rxdma), sc, 0, DMA_RX, SBMAC_MAX_RXDESCR);
1081
1082 /*
1083 * initial state is OFF
1084 */
1085
1086 sc->sbm_state = sbmac_state_off;
1087
1088 /*
1089 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
1090 */
1091
1092 sc->sbm_speed = sbmac_speed_10;
1093 sc->sbm_duplex = sbmac_duplex_half;
1094 sc->sbm_fc = sbmac_fc_disabled;
1095
1096 /*
1097 * Determine SOC type. 112x has Pass3 SOC features.
1098 */
1099 sysrev = SBMAC_READCSR( PKSEG1(A_SCD_SYSTEM_REVISION) );
1100 sc->sbm_pass3_dma = (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1120 ||
1101 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125 ||
1102 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125H ||
1103 (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1250 &&
1104 G_SYS_REVISION(sysrev) >= K_SYS_REVISION_BCM1250_PASS3));
1105 #ifdef SBMAC_EVENT_COUNTERS
1106 const char * const xname = device_xname(sc->sc_dev);
1107 evcnt_attach_dynamic(&sc->sbm_ev_rxintr, EVCNT_TYPE_INTR,
1108 NULL, xname, "rxintr");
1109 evcnt_attach_dynamic(&sc->sbm_ev_txintr, EVCNT_TYPE_INTR,
1110 NULL, xname, "txintr");
1111 evcnt_attach_dynamic(&sc->sbm_ev_txdrop, EVCNT_TYPE_MISC,
1112 NULL, xname, "txdrop");
1113 evcnt_attach_dynamic(&sc->sbm_ev_txstall, EVCNT_TYPE_MISC,
1114 NULL, xname, "txstall");
1115 if (sc->sbm_pass3_dma) {
1116 evcnt_attach_dynamic(&sc->sbm_ev_txsplit, EVCNT_TYPE_MISC,
1117 NULL, xname, "pass3tx-split");
1118 evcnt_attach_dynamic(&sc->sbm_ev_txkeep, EVCNT_TYPE_MISC,
1119 NULL, xname, "pass3tx-keep");
1120 }
1121 #endif
1122 }
1123
1124 /*
1125 * SBMAC_CHANNEL_START(s)
1126 *
1127 * Start packet processing on this MAC.
1128 *
1129 * Input parameters:
1130 * sc - sbmac structure
1131 *
1132 * Return value:
1133 * nothing
1134 */
1135
1136 static void
1137 sbmac_channel_start(struct sbmac_softc *sc)
1138 {
1139 uint64_t reg;
1140 sbmac_port_t port;
1141 uint64_t cfg, fifo, framecfg;
1142 int idx;
1143 uint64_t dma_cfg0, fifo_cfg;
1144 sbmacdma_t *txdma;
1145
1146 /*
1147 * Don't do this if running
1148 */
1149
1150 if (sc->sbm_state == sbmac_state_on)
1151 return;
1152
1153 /*
1154 * Bring the controller out of reset, but leave it off.
1155 */
1156
1157 SBMAC_WRITECSR(sc->sbm_macenable, 0);
1158
1159 /*
1160 * Ignore all received packets
1161 */
1162
1163 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1164
1165 /*
1166 * Calculate values for various control registers.
1167 */
1168
1169 cfg = M_MAC_RETRY_EN |
1170 M_MAC_TX_HOLD_SOP_EN |
1171 V_MAC_TX_PAUSE_CNT_16K |
1172 M_MAC_AP_STAT_EN |
1173 M_MAC_SS_EN |
1174 0;
1175
1176 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
1177 V_MAC_TX_RD_THRSH(4) |
1178 V_MAC_TX_RL_THRSH(4) |
1179 V_MAC_RX_PL_THRSH(4) |
1180 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
1181 V_MAC_RX_PL_THRSH(4) |
1182 V_MAC_RX_RL_THRSH(8) |
1183 0;
1184
1185 framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
1186 V_MAC_MAX_FRAMESZ_DEFAULT |
1187 V_MAC_BACKOFF_SEL(1);
1188
1189 /*
1190 * Clear out the hash address map
1191 */
1192
1193 port = PKSEG1(sc->sbm_base + R_MAC_HASH_BASE);
1194 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1195 SBMAC_WRITECSR(port, 0);
1196 port += sizeof(uint64_t);
1197 }
1198
1199 /*
1200 * Clear out the exact-match table
1201 */
1202
1203 port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE);
1204 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
1205 SBMAC_WRITECSR(port, 0);
1206 port += sizeof(uint64_t);
1207 }
1208
1209 /*
1210 * Clear out the DMA Channel mapping table registers
1211 */
1212
1213 port = PKSEG1(sc->sbm_base + R_MAC_CHUP0_BASE);
1214 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1215 SBMAC_WRITECSR(port, 0);
1216 port += sizeof(uint64_t);
1217 }
1218
1219 port = PKSEG1(sc->sbm_base + R_MAC_CHLO0_BASE);
1220 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1221 SBMAC_WRITECSR(port, 0);
1222 port += sizeof(uint64_t);
1223 }
1224
1225 /*
1226 * Program the hardware address. It goes into the hardware-address
1227 * register as well as the first filter register.
1228 */
1229
1230 reg = sbmac_addr2reg(sc->sbm_hwaddr);
1231
1232 port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE);
1233 SBMAC_WRITECSR(port, reg);
1234 port = PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR);
1235 SBMAC_WRITECSR(port, 0); // pass1 workaround
1236
1237 /*
1238 * Set the receive filter for no packets, and write values
1239 * to the various config registers
1240 */
1241
1242 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1243 SBMAC_WRITECSR(sc->sbm_imr, 0);
1244 SBMAC_WRITECSR(sc->sbm_framecfg, framecfg);
1245 SBMAC_WRITECSR(sc->sbm_fifocfg, fifo);
1246 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1247
1248 /*
1249 * Initialize DMA channels (rings should be ok now)
1250 */
1251
1252 sbdma_channel_start(&(sc->sbm_rxdma));
1253 sbdma_channel_start(&(sc->sbm_txdma));
1254
1255 /*
1256 * Configure the speed, duplex, and flow control
1257 */
1258
1259 sbmac_set_speed(sc, sc->sbm_speed);
1260 sbmac_set_duplex(sc, sc->sbm_duplex, sc->sbm_fc);
1261
1262 /*
1263 * Fill the receive ring
1264 */
1265
1266 sbdma_fillring(&(sc->sbm_rxdma));
1267
1268 /*
1269 * Turn on the rest of the bits in the enable register
1270 */
1271
1272 SBMAC_WRITECSR(sc->sbm_macenable, M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0 |
1273 M_MAC_RX_ENABLE | M_MAC_TX_ENABLE);
1274
1275
1276 /*
1277 * Accept any kind of interrupt on TX and RX DMA channel 0
1278 */
1279 SBMAC_WRITECSR(sc->sbm_imr,
1280 (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1281 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0));
1282
1283 /*
1284 * Enable receiving unicasts and broadcasts
1285 */
1286
1287 SBMAC_WRITECSR(sc->sbm_rxfilter, M_MAC_UCAST_EN | M_MAC_BCAST_EN);
1288
1289 /*
1290 * On chips which support unaligned DMA features, set the descriptor
1291 * ring for transmit channels to use the unaligned buffer format.
1292 */
1293 txdma = &(sc->sbm_txdma);
1294
1295 if (sc->sbm_pass3_dma) {
1296 dma_cfg0 = SBMAC_READCSR(txdma->sbdma_config0);
1297 dma_cfg0 |= V_DMA_DESC_TYPE(K_DMA_DESC_TYPE_RING_UAL_RMW) |
1298 M_DMA_TBX_EN | M_DMA_TDX_EN;
1299 SBMAC_WRITECSR(txdma->sbdma_config0, dma_cfg0);
1300
1301 fifo_cfg = SBMAC_READCSR(sc->sbm_fifocfg);
1302 fifo_cfg |= V_MAC_TX_WR_THRSH(8) |
1303 V_MAC_TX_RD_THRSH(8) | V_MAC_TX_RL_THRSH(8);
1304 SBMAC_WRITECSR(sc->sbm_fifocfg, fifo_cfg);
1305 }
1306
1307 /*
1308 * we're running now.
1309 */
1310
1311 sc->sbm_state = sbmac_state_on;
1312 sc->sc_ethercom.ec_if.if_flags |= IFF_RUNNING;
1313
1314 /*
1315 * Program multicast addresses
1316 */
1317
1318 sbmac_setmulti(sc);
1319
1320 /*
1321 * If channel was in promiscuous mode before, turn that on
1322 */
1323
1324 if (sc->sc_ethercom.ec_if.if_flags & IFF_PROMISC)
1325 sbmac_promiscuous_mode(sc, true);
1326
1327 /*
1328 * Turn on the once-per-second timer
1329 */
1330
1331 callout_reset(&(sc->sc_tick_ch), hz, sbmac_tick, sc);
1332 }
1333
1334 /*
1335 * SBMAC_CHANNEL_STOP(s)
1336 *
1337 * Stop packet processing on this MAC.
1338 *
1339 * Input parameters:
1340 * sc - sbmac structure
1341 *
1342 * Return value:
1343 * nothing
1344 */
1345
1346 static void
1347 sbmac_channel_stop(struct sbmac_softc *sc)
1348 {
1349 uint64_t ctl;
1350
1351 /* don't do this if already stopped */
1352
1353 if (sc->sbm_state == sbmac_state_off)
1354 return;
1355
1356 /* don't accept any packets, disable all interrupts */
1357
1358 SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1359 SBMAC_WRITECSR(sc->sbm_imr, 0);
1360
1361 /* Turn off ticker */
1362
1363 callout_stop(&(sc->sc_tick_ch));
1364
1365 /* turn off receiver and transmitter */
1366
1367 ctl = SBMAC_READCSR(sc->sbm_macenable);
1368 ctl &= ~(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0);
1369 SBMAC_WRITECSR(sc->sbm_macenable, ctl);
1370
1371 /* We're stopped now. */
1372
1373 sc->sbm_state = sbmac_state_off;
1374 sc->sc_ethercom.ec_if.if_flags &= ~IFF_RUNNING;
1375
1376 /* Empty the receive and transmit rings */
1377
1378 sbdma_emptyring(&(sc->sbm_rxdma));
1379 sbdma_emptyring(&(sc->sbm_txdma));
1380 }
1381
1382 /*
1383 * SBMAC_SET_CHANNEL_STATE(state)
1384 *
1385 * Set the channel's state ON or OFF
1386 *
1387 * Input parameters:
1388 * state - new state
1389 *
1390 * Return value:
1391 * old state
1392 */
1393
1394 static sbmac_state_t
1395 sbmac_set_channel_state(struct sbmac_softc *sc, sbmac_state_t state)
1396 {
1397 sbmac_state_t oldstate = sc->sbm_state;
1398
1399 /*
1400 * If same as previous state, return
1401 */
1402
1403 if (state == oldstate)
1404 return oldstate;
1405
1406 /*
1407 * If new state is ON, turn channel on
1408 */
1409
1410 if (state == sbmac_state_on)
1411 sbmac_channel_start(sc);
1412 else
1413 sbmac_channel_stop(sc);
1414
1415 /*
1416 * Return previous state
1417 */
1418
1419 return oldstate;
1420 }
1421
1422 /*
1423 * SBMAC_PROMISCUOUS_MODE(sc, enabled)
1424 *
1425 * Turn on or off promiscuous mode
1426 *
1427 * Input parameters:
1428 * sc - softc
1429 * enabled - true to turn on, false to turn off
1430 *
1431 * Return value:
1432 * nothing
1433 */
1434
1435 static void
1436 sbmac_promiscuous_mode(struct sbmac_softc *sc, bool enabled)
1437 {
1438 uint64_t reg;
1439
1440 if (sc->sbm_state != sbmac_state_on)
1441 return;
1442
1443 if (enabled) {
1444 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1445 reg |= M_MAC_ALLPKT_EN;
1446 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1447 } else {
1448 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1449 reg &= ~M_MAC_ALLPKT_EN;
1450 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1451 }
1452 }
1453
1454 /*
1455 * SBMAC_INIT_AND_START(sc)
1456 *
1457 * Stop the channel and restart it. This is generally used
1458 * when we have to do something to the channel that requires
1459 * a swift kick.
1460 *
1461 * Input parameters:
1462 * sc - softc
1463 */
1464
1465 static void
1466 sbmac_init_and_start(struct sbmac_softc *sc)
1467 {
1468 int s;
1469
1470 s = splnet();
1471
1472 mii_pollstat(&sc->sc_mii); /* poll phy for current speed */
1473 sbmac_mii_statchg(&sc->sc_ethercom.ec_if); /* set state to new speed */
1474 sbmac_set_channel_state(sc, sbmac_state_on);
1475
1476 splx(s);
1477 }
1478
1479 /*
1480 * SBMAC_ADDR2REG(ptr)
1481 *
1482 * Convert six bytes into the 64-bit register value that
1483 * we typically write into the SBMAC's address/mcast registers
1484 *
1485 * Input parameters:
1486 * ptr - pointer to 6 bytes
1487 *
1488 * Return value:
1489 * register value
1490 */
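/*
 * For example, the address 00:11:22:33:44:55 is packed with its first
 * octet in the least-significant byte, giving 0x0000554433221100.
 */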
1491
1492 static uint64_t
1493 sbmac_addr2reg(u_char *ptr)
1494 {
1495 uint64_t reg = 0;
1496
1497 ptr += 6;
1498
1499 reg |= (uint64_t) *(--ptr);
1500 reg <<= 8;
1501 reg |= (uint64_t) *(--ptr);
1502 reg <<= 8;
1503 reg |= (uint64_t) *(--ptr);
1504 reg <<= 8;
1505 reg |= (uint64_t) *(--ptr);
1506 reg <<= 8;
1507 reg |= (uint64_t) *(--ptr);
1508 reg <<= 8;
1509 reg |= (uint64_t) *(--ptr);
1510
1511 return reg;
1512 }
1513
1514 /*
1515 * SBMAC_SET_SPEED(sc, speed)
1516 *
1517 * Configure LAN speed for the specified MAC.
1518 * Warning: must be called when MAC is off!
1519 *
1520 * Input parameters:
1521 * sc - sbmac structure
1522 * speed - speed to set MAC to (see sbmac_speed_t enum)
1523 *
1524 * Return value:
1525 * true if successful
1526 * false indicates invalid parameters
1527 */
1528
1529 static bool
1530 sbmac_set_speed(struct sbmac_softc *sc, sbmac_speed_t speed)
1531 {
1532 uint64_t cfg;
1533 uint64_t framecfg;
1534
1535 /*
1536 * Save new current values
1537 */
1538
1539 sc->sbm_speed = speed;
1540
1541 if (sc->sbm_state != sbmac_state_off)
1542 panic("sbmac_set_speed while MAC not off");
1543
1544 /*
1545 * Read current register values
1546 */
1547
1548 cfg = SBMAC_READCSR(sc->sbm_maccfg);
1549 framecfg = SBMAC_READCSR(sc->sbm_framecfg);
1550
1551 /*
1552 * Mask out the stuff we want to change
1553 */
1554
1555 cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
1556 framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
1557 M_MAC_SLOT_SIZE);
1558
1559 /*
1560 * Now add in the new bits
1561 */
1562
1563 switch (speed) {
1564 case sbmac_speed_10:
1565 framecfg |= V_MAC_IFG_RX_10 |
1566 V_MAC_IFG_TX_10 |
1567 K_MAC_IFG_THRSH_10 |
1568 V_MAC_SLOT_SIZE_10;
1569 cfg |= V_MAC_SPEED_SEL_10MBPS;
1570 break;
1571
1572 case sbmac_speed_100:
1573 framecfg |= V_MAC_IFG_RX_100 |
1574 V_MAC_IFG_TX_100 |
1575 V_MAC_IFG_THRSH_100 |
1576 V_MAC_SLOT_SIZE_100;
1577 cfg |= V_MAC_SPEED_SEL_100MBPS;
1578 break;
1579
1580 case sbmac_speed_1000:
1581 framecfg |= V_MAC_IFG_RX_1000 |
1582 V_MAC_IFG_TX_1000 |
1583 V_MAC_IFG_THRSH_1000 |
1584 V_MAC_SLOT_SIZE_1000;
1585 cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
1586 break;
1587
1588 case sbmac_speed_auto: /* XXX not implemented */
1589 /* fall through */
1590 default:
1591 return false;
1592 }
1593
1594 /*
1595 * Send the bits back to the hardware
1596 */
1597
1598 SBMAC_WRITECSR(sc->sbm_framecfg, framecfg);
1599 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1600
1601 return true;
1602 }
1603
1604 /*
1605 * SBMAC_SET_DUPLEX(sc, duplex, fc)
1606 *
1607 * Set Ethernet duplex and flow control options for this MAC
1608 * Warning: must be called when MAC is off!
1609 *
1610 * Input parameters:
1611 * sc - sbmac structure
1612 * duplex - duplex setting (see sbmac_duplex_t)
1613 * fc - flow control setting (see sbmac_fc_t)
1614 *
1615 * Return value:
1616 * true if ok
1617 * false if an invalid parameter combination was specified
1618 */
1619
1620 static bool
1621 sbmac_set_duplex(struct sbmac_softc *sc, sbmac_duplex_t duplex, sbmac_fc_t fc)
1622 {
1623 uint64_t cfg;
1624
1625 /*
1626 * Save new current values
1627 */
1628
1629 sc->sbm_duplex = duplex;
1630 sc->sbm_fc = fc;
1631
1632 if (sc->sbm_state != sbmac_state_off)
1633 panic("sbmac_set_duplex while MAC not off");
1634
1635 /*
1636 * Read current register values
1637 */
1638
1639 cfg = SBMAC_READCSR(sc->sbm_maccfg);
1640
1641 /*
1642 * Mask off the stuff we're about to change
1643 */
1644
1645 cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
1646
1647 switch (duplex) {
1648 case sbmac_duplex_half:
1649 switch (fc) {
1650 case sbmac_fc_disabled:
1651 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
1652 break;
1653
1654 case sbmac_fc_collision:
1655 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
1656 break;
1657
1658 case sbmac_fc_carrier:
1659 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
1660 break;
1661
1662 case sbmac_fc_auto: /* XXX not implemented */
1663 /* fall through */
1664 case sbmac_fc_frame: /* not valid in half duplex */
1665 default: /* invalid selection */
1666 panic("%s: invalid half duplex fc selection %d",
1667 device_xname(sc->sc_dev), fc);
1668 return false;
1669 }
1670 break;
1671
1672 case sbmac_duplex_full:
1673 switch (fc) {
1674 case sbmac_fc_disabled:
1675 cfg |= V_MAC_FC_CMD_DISABLED;
1676 break;
1677
1678 case sbmac_fc_frame:
1679 cfg |= V_MAC_FC_CMD_ENABLED;
1680 break;
1681
1682 case sbmac_fc_collision: /* not valid in full duplex */
1683 case sbmac_fc_carrier: /* not valid in full duplex */
1684 case sbmac_fc_auto: /* XXX not implemented */
1685 /* fall through */
1686 default:
1687 panic("%s: invalid full duplex fc selection %d",
1688 device_xname(sc->sc_dev), fc);
1689 return false;
1690 }
1691 break;
1692
1693 default:
1694 /* fall through */
1695 case sbmac_duplex_auto:
1696 panic("%s: bad duplex %d", device_xname(sc->sc_dev), duplex);
1697 /* XXX not implemented */
1698 break;
1699 }
1700
1701 /*
1702 * Send the bits back to the hardware
1703 */
1704
1705 SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1706
1707 return true;
1708 }
1709
1710 /*
1711 * SBMAC_INTR()
1712 *
1713 * Interrupt handler for MAC interrupts
1714 *
1715 * Input parameters:
1716 * MAC structure
1717 *
1718 * Return value:
1719 * nothing
1720 */
1721
1722 /* ARGSUSED */
1723 static void
1724 sbmac_intr(void *xsc, uint32_t status, vaddr_t pc)
1725 {
1726 struct sbmac_softc *sc = xsc;
1727 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1728 uint64_t isr;
1729
1730 for (;;) {
1731
1732 /*
1733 * Read the ISR (this clears the bits in the real register)
1734 */
1735
1736 isr = SBMAC_READCSR(sc->sbm_isr);
1737
1738 if (isr == 0)
1739 break;
1740
1741 /*
1742 * Transmits on channel 0
1743 */
1744
1745 if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
1746 sbdma_tx_process(sc, &(sc->sbm_txdma));
1747 SBMAC_EVCNT_INCR(sc->sbm_ev_txintr);
1748 }
1749
1750 /*
1751 * Receives on channel 0
1752 */
1753
1754 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
1755 sbdma_rx_process(sc, &(sc->sbm_rxdma));
1756 SBMAC_EVCNT_INCR(sc->sbm_ev_rxintr);
1757 }
1758 }
1759
1760 /* try to get more packets going */
1761 if_schedule_deferred_start(ifp);
1762 }
1763
1764
1765 /*
1766 * SBMAC_START(ifp)
1767 *
1768 * Start output on the specified interface. Basically, we
1769 * queue as many buffers as we can until the ring fills up, or
1770 * we run off the end of the queue, whichever comes first.
1771 *
1772 * Input parameters:
1773 * ifp - interface
1774 *
1775 * Return value:
1776 * nothing
1777 */
1778
1779 static void
1780 sbmac_start(struct ifnet *ifp)
1781 {
1782 struct sbmac_softc *sc;
1783 struct mbuf *m_head = NULL;
1784 int rv;
1785
1786 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1787 return;
1788
1789 sc = ifp->if_softc;
1790
1791 for (;;) {
1792
1793 IF_DEQUEUE(&ifp->if_snd, m_head);
1794 if (m_head == NULL)
1795 break;
1796
1797 /*
1798 * Put the buffer on the transmit ring. If we
1799 * don't have room, set the OACTIVE flag and wait
1800 * for the NIC to drain the ring.
1801 */
1802
1803 rv = sbdma_add_txbuffer(&(sc->sbm_txdma), m_head);
1804
1805 if (rv == 0) {
1806 /*
1807 * If there's a BPF listener, bounce a copy of this
1808 * frame to it.
1809 */
1810 bpf_mtap(ifp, m_head, BPF_D_OUT);
1811 if (!sc->sbm_pass3_dma) {
1812 /*
1813 * Don't free mbuf if we're not copying to new
1814 * mbuf in sbdma_add_txbuffer. It will be
1815 * freed in sbdma_tx_process.
1816 */
1817 m_freem(m_head);
1818 }
1819 } else {
1820 IF_PREPEND(&ifp->if_snd, m_head);
1821 ifp->if_flags |= IFF_OACTIVE;
1822 break;
1823 }
1824 }
1825 }
1826
1827 /*
1828 * SBMAC_SETMULTI(sc)
1829 *
1830 * Reprogram the multicast table into the hardware, given
1831 * the list of multicasts associated with the interface
1832 * structure.
1833 *
1834 * Input parameters:
1835 * sc - softc
1836 *
1837 * Return value:
1838 * nothing
1839 */
1840
1841 static void
1842 sbmac_setmulti(struct sbmac_softc *sc)
1843 {
1844 struct ifnet *ifp;
1845 uint64_t reg;
1846 sbmac_port_t port;
1847 int idx;
1848 struct ether_multi *enm;
1849 struct ether_multistep step;
1850
1851 ifp = &sc->sc_ethercom.ec_if;
1852
1853 /*
1854 * Clear out entire multicast table. We do this by nuking
1855 * the entire hash table and all the direct matches except
1856 * the first one, which is used for our station address
1857 */
1858
1859 for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
1860 port = PKSEG1(sc->sbm_base +
1861 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1862 SBMAC_WRITECSR(port, 0);
1863 }
1864
1865 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1866 port = PKSEG1(sc->sbm_base +
1867 R_MAC_HASH_BASE+(idx*sizeof(uint64_t)));
1868 SBMAC_WRITECSR(port, 0);
1869 }
1870
1871 /*
1872 * Clear the filter to say we don't want any multicasts.
1873 */
1874
1875 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1876 reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1877 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1878
1879 if (ifp->if_flags & IFF_ALLMULTI) {
1880 /*
1881 * Enable ALL multicasts. Do this by inverting the
1882 * multicast enable bit.
1883 */
1884 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1885 reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1886 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1887 return;
1888 }
1889
1890 /*
1891 * Program new multicast entries. For now, only use the
1892 * perfect filter. In the future we'll need to use the
1893 * hash filter if the perfect filter overflows
1894 */
1895
1896 /*
1897 * XXX only using perfect filter for now, need to use hash
1898 * XXX if the table overflows
1899 */
1900
1901 idx = 1; /* skip station address */
1902 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
1903 while ((enm != NULL) && (idx < MAC_ADDR_COUNT)) {
1904 reg = sbmac_addr2reg(enm->enm_addrlo);
1905 port = PKSEG1(sc->sbm_base +
1906 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1907 SBMAC_WRITECSR(port, reg);
1908 idx++;
1909 ETHER_NEXT_MULTI(step, enm);
1910 }
1911
1912 /*
1913 * Enable the "accept multicast bits" if we programmed at least one
1914 * multicast.
1915 */
1916
1917 if (idx > 1) {
1918 reg = SBMAC_READCSR(sc->sbm_rxfilter);
1919 reg |= M_MAC_MCAST_EN;
1920 SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1921 }
1922 }
1923
1924 /*
1925 * SBMAC_ETHER_IOCTL(ifp, cmd, data)
1926 *
1927 * Generic IOCTL requests for this interface. The basic
1928 * stuff is handled here for bringing the interface up,
1929 * handling multicasts, etc.
1930 *
1931 * Input parameters:
1932 * ifp - interface structure
1933 * cmd - command code
1934 * data - pointer to data
1935 *
1936 * Return value:
1937 * return value (0 is success)
1938 */
1939
1940 static int
1941 sbmac_ether_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1942 {
1943 struct ifaddr *ifa = (struct ifaddr *) data;
1944 struct sbmac_softc *sc = ifp->if_softc;
1945
1946 switch (cmd) {
1947 case SIOCINITIFADDR:
1948 ifp->if_flags |= IFF_UP;
1949
1950 switch (ifa->ifa_addr->sa_family) {
1951 #ifdef INET
1952 case AF_INET:
1953 sbmac_init_and_start(sc);
1954 arp_ifinit(ifp, ifa);
1955 break;
1956 #endif
1957 default:
1958 sbmac_init_and_start(sc);
1959 break;
1960 }
1961 break;
1962
1963 default:
1964 return ENOTTY;
1965 }
1966
1967 return (0);
1968 }
1969
1970 /*
1971 * SBMAC_IOCTL(ifp, cmd, data)
1972 *
1973 * Main IOCTL handler - dispatches to other IOCTLs for various
1974 * types of requests.
1975 *
1976 * Input parameters:
1977 * ifp - interface pointer
1978 * cmd - command code
1979 * data - pointer to argument data
1980 *
1981 * Return value:
1982 * 0 if ok
1983 * else error code
1984 */
1985
1986 static int
1987 sbmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1988 {
1989 struct sbmac_softc *sc = ifp->if_softc;
1990 struct ifreq *ifr = (struct ifreq *) data;
1991 int s, error = 0;
1992
1993 s = splnet();
1994
1995 switch (cmd) {
1996 case SIOCINITIFADDR:
1997 error = sbmac_ether_ioctl(ifp, cmd, data);
1998 break;
1999 case SIOCSIFMTU:
2000 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
2001 error = EINVAL;
2002 else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
2003 /* XXX Program new MTU here */
2004 error = 0;
2005 break;
2006 case SIOCSIFFLAGS:
2007 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2008 break;
2009 if (ifp->if_flags & IFF_UP) {
2010 /*
2011 * If only the state of the PROMISC flag changed,
2012 * just tweak the hardware registers.
2013 */
2014 if ((ifp->if_flags & IFF_RUNNING) &&
2015 (ifp->if_flags & IFF_PROMISC)) {
2016 /* turn on promiscuous mode */
2017 sbmac_promiscuous_mode(sc, true);
2018 } else if (ifp->if_flags & IFF_RUNNING &&
2019 !(ifp->if_flags & IFF_PROMISC)) {
2020 /* turn off promiscuous mode */
2021 sbmac_promiscuous_mode(sc, false);
2022 } else
2023 sbmac_set_channel_state(sc, sbmac_state_on);
2024 } else {
2025 if (ifp->if_flags & IFF_RUNNING)
2026 sbmac_set_channel_state(sc, sbmac_state_off);
2027 }
2028
2029 sc->sbm_if_flags = ifp->if_flags;
2030 error = 0;
2031 break;
2032
2033 case SIOCADDMULTI:
2034 case SIOCDELMULTI:
2035 case SIOCSIFMEDIA:
2036 case SIOCGIFMEDIA:
2037 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
2038 error = 0;
2039 if (ifp->if_flags & IFF_RUNNING)
2040 sbmac_setmulti(sc);
2041 }
2042 break;
2043 default:
2044 error = ether_ioctl(ifp, cmd, data);
2045 break;
2046 }
2047
2048 (void)splx(s);
2049
2050 return error;
2051 }
2052
2053 /*
2054 * SBMAC_IFMEDIA_UPD(ifp)
2055 *
2056 * Configure an appropriate media type for this interface,
2057 * given the data in the interface structure
2058 *
2059 * Input parameters:
2060 * ifp - interface
2061 *
2062 * Return value:
2063 * 0 if ok
2064 * else error code
2065 */
2066
2067 /*
2068 * SBMAC_IFMEDIA_STS(ifp, ifmr)
2069 *
2070 * Report current media status (used by ifconfig, for example)
2071 *
2072 * Input parameters:
2073 * ifp - interface structure
2074 * ifmr - media request structure
2075 *
2076 * Return value:
2077 * nothing
2078 */
2079
2080 /*
2081 * SBMAC_WATCHDOG(ifp)
2082 *
2083 * Called periodically to make sure we're still happy.
2084 *
2085 * Input parameters:
2086 * ifp - interface structure
2087 *
2088 * Return value:
2089 * nothing
2090 */
2091
2092 static void
2093 sbmac_watchdog(struct ifnet *ifp)
2094 {
2095
2096 /* XXX do something */
2097 }
2098
2099 /*
2100 * One second timer, used to tick MII.
2101 */
2102 static void
2103 sbmac_tick(void *arg)
2104 {
2105 struct sbmac_softc *sc = arg;
2106 int s;
2107
2108 s = splnet();
2109 mii_tick(&sc->sc_mii);
2110 splx(s);
2111
2112 callout_reset(&sc->sc_tick_ch, hz, sbmac_tick, sc);
2113 }
2114
2115
2116 /*
2117 * SBMAC_MATCH(parent, match, aux)
2118 *
2119 * Part of the config process - see if this device matches the
2120 * info about what we expect to find on the bus.
2121 *
2122 * Input parameters:
2123 * parent - parent bus structure
2124 * match - cfdata (driver configuration data)
2125 * aux - bus-specific args
2126 *
2127 * Return value:
2128 * 1 if we match
2129 * 0 if we don't match
2130 */
2131
2132 static int
2133 sbmac_match(device_t parent, cfdata_t match, void *aux)
2134 {
2135 struct sbobio_attach_args *sa = aux;
2136
2137 /*
2138 * Make sure it's a MAC
2139 */
2140 if (sa->sa_locs.sa_type != SBOBIO_DEVTYPE_MAC)
2141 return 0;
2142
2143 /*
2144 * Yup, it is.
2145 */
2146
2147 return 1;
2148 }
2149
2150 /*
2151 * SBMAC_PARSE_XDIGIT(str)
2152 *
2153 * Parse a hex digit, returning its value
2154 *
2155 * Input parameters:
2156 * str - character
2157 *
2158 * Return value:
2159 * hex value, or -1 if invalid
2160 */
2161
2162 static int
2163 sbmac_parse_xdigit(char str)
2164 {
2165 int digit;
2166
2167 if ((str >= '0') && (str <= '9'))
2168 digit = str - '0';
2169 else if ((str >= 'a') && (str <= 'f'))
2170 digit = str - 'a' + 10;
2171 else if ((str >= 'A') && (str <= 'F'))
2172 digit = str - 'A' + 10;
2173 else
2174 digit = -1;
2175
2176 return digit;
2177 }
2178
2179 /*
2180 * SBMAC_PARSE_HWADDR(str, hwaddr)
2181 *
2182 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2183 * Ethernet address.
2184 *
2185 * Input parameters:
2186 * str - string
2187 * hwaddr - pointer to hardware address
2188 *
2189 * Return value:
2190 * 0 if ok, else -1
2191 */
2192
2193 static int
2194 sbmac_parse_hwaddr(const char *str, u_char *hwaddr)
2195 {
2196 int digit1, digit2;
2197 int idx = 6;
2198
2199 while (*str && (idx > 0)) {
2200 digit1 = sbmac_parse_xdigit(*str);
2201 if (digit1 < 0)
2202 return -1;
2203 str++;
2204 if (!*str)
2205 return -1;
2206
2207 if ((*str == ':') || (*str == '-')) {
2208 digit2 = digit1;
2209 digit1 = 0;
2210 } else {
2211 digit2 = sbmac_parse_xdigit(*str);
2212 if (digit2 < 0)
2213 return -1;
2214 str++;
2215 }
2216
2217 *hwaddr++ = (digit1 << 4) | digit2;
2218 idx--;
2219
2220 if (*str == '-')
2221 str++;
2222 if (*str == ':')
2223 str++;
2224 }
2225 return 0;
2226 }
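/*
 * For example, parsing "40:00:00:00:01:00" (the default used below when
 * the firmware left the address register all-zero) yields the bytes
 * 40 00 00 00 01 00.
 */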
2227
2228 /*
2229 * SBMAC_ATTACH(parent, self, aux)
2230 *
2231 * Attach routine - init hardware and hook ourselves into NetBSD.
2232 *
2233 * Input parameters:
2234 * parent - parent bus device
2235 * self - our softc
2236 * aux - attach data
2237 *
2238 * Return value:
2239 * nothing
2240 */
2241
2242 static void
2243 sbmac_attach(device_t parent, device_t self, void *aux)
2244 {
2245 struct sbmac_softc * const sc = device_private(self);
2246 struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
2247 struct sbobio_attach_args * const sa = aux;
2248 u_char *eaddr;
2249 static int unit = 0; /* XXX */
2250 uint64_t ea_reg;
2251 int idx;
2252
2253 sc->sc_dev = self;
2254
2255 /* Determine controller base address */
2256
2257 sc->sbm_base = sa->sa_base + sa->sa_locs.sa_offset;
2258
2259 eaddr = sc->sbm_hwaddr;
2260
2261 /*
2262 * Initialize context (get pointers to registers and stuff), then
2263 * allocate the memory for the descriptor tables.
2264 */
2265
2266 sbmac_initctx(sc);
2267
2268 callout_init(&(sc->sc_tick_ch), 0);
2269
2270 /*
2271 * Read the ethernet address. The firmware left this programmed
2272 * for us in the ethernet address register for each mac.
2273 */
2274
2275 ea_reg = SBMAC_READCSR(PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR));
2276 for (idx = 0; idx < 6; idx++) {
2277 eaddr[idx] = (uint8_t) (ea_reg & 0xFF);
2278 ea_reg >>= 8;
2279 }
2280
2281 #define SBMAC_DEFAULT_HWADDR "40:00:00:00:01:00"
2282 if (eaddr[0] == 0 && eaddr[1] == 0 && eaddr[2] == 0 &&
2283 eaddr[3] == 0 && eaddr[4] == 0 && eaddr[5] == 0) {
2284 sbmac_parse_hwaddr(SBMAC_DEFAULT_HWADDR, eaddr);
2285 eaddr[5] = unit;
2286 }
2287
2288 #ifdef SBMAC_ETH0_HWADDR
2289 if (unit == 0)
2290 sbmac_parse_hwaddr(SBMAC_ETH0_HWADDR, eaddr);
2291 #endif
2292 #ifdef SBMAC_ETH1_HWADDR
2293 if (unit == 1)
2294 sbmac_parse_hwaddr(SBMAC_ETH1_HWADDR, eaddr);
2295 #endif
2296 #ifdef SBMAC_ETH2_HWADDR
2297 if (unit == 2)
2298 sbmac_parse_hwaddr(SBMAC_ETH2_HWADDR, eaddr);
2299 #endif
2300 unit++;
2301
2302 /*
2303 * Display Ethernet address (this is called during the config process
2304 * so we need to finish off the config message that was being displayed)
2305 */
2306 aprint_normal(": Ethernet%s\n",
2307 sc->sbm_pass3_dma ? ", using unaligned tx DMA" : "");
2308 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
2309
2310
2311 /*
2312 * Set up ifnet structure
2313 */
2314
2315 ifp->if_softc = sc;
2316 memcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
2317 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
2318 IFF_NOTRAILERS;
2319 ifp->if_ioctl = sbmac_ioctl;
2320 ifp->if_start = sbmac_start;
2321 ifp->if_watchdog = sbmac_watchdog;
2322 ifp->if_snd.ifq_maxlen = SBMAC_MAX_TXDESCR - 1;
2323
2324 /*
2325 * Set up ifmedia support.
2326 */
2327
2328 /*
2329 * Initialize MII/media info.
2330 */
2331 sc->sc_mii.mii_ifp = ifp;
2332 sc->sc_mii.mii_readreg = sbmac_mii_readreg;
2333 sc->sc_mii.mii_writereg = sbmac_mii_writereg;
2334 sc->sc_mii.mii_statchg = sbmac_mii_statchg;
2335 sc->sc_ethercom.ec_mii = &sc->sc_mii;
2336 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
2337 ether_mediastatus);
2338 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2339 MII_OFFSET_ANY, 0);
2340
2341 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2342 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2343 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2344 } else {
2345 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2346 }
2347
2348
2349 /*
2350 * map/route interrupt
2351 */
2352
2353 sc->sbm_intrhand = cpu_intr_establish(sa->sa_locs.sa_intr[0], IPL_NET,
2354 sbmac_intr, sc);
2355
2356 /*
2357 * Call MI attach routines.
2358 */
2359 if_attach(ifp);
2360 if_deferred_start_init(ifp, NULL);
2361 ether_ifattach(ifp, eaddr);
2362 }
2363