1 /*	$NetBSD: if_cpsw.c,v 1.13 2021/01/15 23:19:33 jmcneill Exp $	*/
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*-
30 * Copyright (c) 2012 Damjan Marion <dmarion (at) Freebsd.org>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54
55 #include <sys/cdefs.h>
56 __KERNEL_RCSID(0, "$NetBSD: if_cpsw.c,v 1.13 2021/01/15 23:19:33 jmcneill Exp $");
57
58 #include <sys/param.h>
59 #include <sys/bus.h>
60 #include <sys/device.h>
61 #include <sys/ioctl.h>
62 #include <sys/intr.h>
63 #include <sys/kmem.h>
64 #include <sys/mutex.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67
68 #include <net/if.h>
69 #include <net/if_ether.h>
70 #include <net/if_media.h>
71 #include <net/bpf.h>
72
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75
76 #include <dev/fdt/fdtvar.h>
77
78 #include <arm/ti/if_cpswreg.h>
79
80 #define FDT_INTR_FLAGS 0
81
82 #define CPSW_TXFRAGS 16
83
84 #define CPSW_CPPI_RAM_SIZE (0x2000)
85 #define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
86 #define CPSW_CPPI_RAM_RXDESCS_SIZE \
87 (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
88 #define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
89 #define CPSW_CPPI_RAM_RXDESCS_BASE \
90 (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)
91
92 #define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
93 #define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
94
95 CTASSERT(powerof2(CPSW_NTXDESCS));
96 CTASSERT(powerof2(CPSW_NRXDESCS));
97
98 #define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
99
100 #define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
101 #define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)
102
103 #define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
104 #define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
105
106 struct cpsw_ring_data {
107 bus_dmamap_t tx_dm[CPSW_NTXDESCS];
108 struct mbuf *tx_mb[CPSW_NTXDESCS];
109 bus_dmamap_t rx_dm[CPSW_NRXDESCS];
110 struct mbuf *rx_mb[CPSW_NRXDESCS];
111 };
112
113 struct cpsw_softc {
114 device_t sc_dev;
115 bus_space_tag_t sc_bst;
116 bus_space_handle_t sc_bsh;
117 bus_size_t sc_bss;
118 bus_dma_tag_t sc_bdt;
119 bus_space_handle_t sc_bsh_txdescs;
120 bus_space_handle_t sc_bsh_rxdescs;
121 bus_addr_t sc_txdescs_pa;
122 bus_addr_t sc_rxdescs_pa;
123 struct ethercom sc_ec;
124 struct mii_data sc_mii;
125 bool sc_phy_has_1000t;
126 bool sc_attached;
127 callout_t sc_tick_ch;
128 void *sc_ih;
129 struct cpsw_ring_data *sc_rdp;
130 volatile u_int sc_txnext;
131 volatile u_int sc_txhead;
132 volatile u_int sc_rxhead;
133 void *sc_rxthih;
134 void *sc_rxih;
135 void *sc_txih;
136 void *sc_miscih;
137 void *sc_txpad;
138 bus_dmamap_t sc_txpad_dm;
139 #define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr
140 uint8_t sc_enaddr[ETHER_ADDR_LEN];
141 volatile bool sc_txrun;
142 volatile bool sc_rxrun;
143 volatile bool sc_txeoq;
144 volatile bool sc_rxeoq;
145 };
146
147 static int cpsw_match(device_t, cfdata_t, void *);
148 static void cpsw_attach(device_t, device_t, void *);
149 static int cpsw_detach(device_t, int);
150
151 static void cpsw_start(struct ifnet *);
152 static int cpsw_ioctl(struct ifnet *, u_long, void *);
153 static void cpsw_watchdog(struct ifnet *);
154 static int cpsw_init(struct ifnet *);
155 static void cpsw_stop(struct ifnet *, int);
156
157 static int cpsw_mii_readreg(device_t, int, int, uint16_t *);
158 static int cpsw_mii_writereg(device_t, int, int, uint16_t);
159 static void cpsw_mii_statchg(struct ifnet *);
160
161 static int cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
162 static void cpsw_tick(void *);
163
164 static int cpsw_rxthintr(void *);
165 static int cpsw_rxintr(void *);
166 static int cpsw_txintr(void *);
167 static int cpsw_miscintr(void *);
168
169 /* ALE support */
170 #define CPSW_MAX_ALE_ENTRIES 1024
171
172 static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
173
174 CFATTACH_DECL_NEW(cpsw, sizeof(struct cpsw_softc),
175 cpsw_match, cpsw_attach, cpsw_detach, NULL);
176
177 #include <sys/kernhist.h>
178 KERNHIST_DEFINE(cpswhist);
179
180 #define CPSWHIST_CALLARGS(A,B,C,D) do { \
181 KERNHIST_CALLARGS(cpswhist, "%jx %jx %jx %jx", \
182 (uintptr_t)(A), (uintptr_t)(B), (uintptr_t)(C), (uintptr_t)(D));\
183 } while (0)
184
185
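/*
 * Ring index helpers.  CPSW_NTXDESCS and CPSW_NRXDESCS are powers of
 * two (see the CTASSERTs above), so wrapping an index around the ring
 * only needs a mask rather than a modulo.
 */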
186 static inline u_int
187 cpsw_txdesc_adjust(u_int x, int y)
188 {
189 return (((x) + y) & (CPSW_NTXDESCS - 1));
190 }
191
192 static inline u_int
193 cpsw_rxdesc_adjust(u_int x, int y)
194 {
195 return (((x) + y) & (CPSW_NRXDESCS - 1));
196 }
197
198 static inline uint32_t
199 cpsw_read_4(struct cpsw_softc * const sc, bus_size_t const offset)
200 {
201 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, offset);
202 }
203
204 static inline void
205 cpsw_write_4(struct cpsw_softc * const sc, bus_size_t const offset,
206 uint32_t const value)
207 {
208 bus_space_write_4(sc->sc_bst, sc->sc_bsh, offset, value);
209 }
210
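/*
 * CPDMA buffer descriptors live in the switch's on-chip CPPI RAM and
 * are accessed with bus_space operations rather than plain memory
 * loads and stores.  Word 0 of each descriptor is the hardware "next
 * descriptor" pointer, which is what the two helpers below update.
 */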
211 static inline void
212 cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
213 {
214 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
215
216 KERNHIST_FUNC(__func__);
217 CPSWHIST_CALLARGS(sc, i, n, 0);
218
219 bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
220 }
221
222 static inline void
223 cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
224 {
225 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
226
227 KERNHIST_FUNC(__func__);
228 CPSWHIST_CALLARGS(sc, i, n, 0);
229
230 bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
231 }
232
233 static inline void
234 cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
235 struct cpsw_cpdma_bd * const bdp)
236 {
237 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
238 uint32_t * const dp = bdp->word;
239 const bus_size_t c = __arraycount(bdp->word);
240
241 KERNHIST_FUNC(__func__);
242 CPSWHIST_CALLARGS(sc, i, bdp, 0);
243
244 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
245 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
246 dp[0], dp[1], dp[2], dp[3]);
247 }
248
249 static inline void
250 cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
251 struct cpsw_cpdma_bd * const bdp)
252 {
253 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
254 uint32_t * const dp = bdp->word;
255 const bus_size_t c = __arraycount(bdp->word);
256
257 KERNHIST_FUNC(__func__);
258 CPSWHIST_CALLARGS(sc, i, bdp, 0);
259 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
260 dp[0], dp[1], dp[2], dp[3]);
261
262 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
263 }
264
265 static inline void
266 cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
267 struct cpsw_cpdma_bd * const bdp)
268 {
269 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
270 uint32_t * const dp = bdp->word;
271 const bus_size_t c = __arraycount(bdp->word);
272
273 KERNHIST_FUNC(__func__);
274 CPSWHIST_CALLARGS(sc, i, bdp, 0);
275
276 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
277
278 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
279 dp[0], dp[1], dp[2], dp[3]);
280 }
281
282 static inline void
283 cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
284 struct cpsw_cpdma_bd * const bdp)
285 {
286 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
287 uint32_t * const dp = bdp->word;
288 const bus_size_t c = __arraycount(bdp->word);
289
290 KERNHIST_FUNC(__func__);
291 CPSWHIST_CALLARGS(sc, i, bdp, 0);
292 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
293 dp[0], dp[1], dp[2], dp[3]);
294
295 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
296 }
297
298 static inline bus_addr_t
299 cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
300 {
301 KASSERT(x < CPSW_NTXDESCS);
302 return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
303 }
304
305 static inline bus_addr_t
306 cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
307 {
308 KASSERT(x < CPSW_NRXDESCS);
309 return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
310 }
311
312
313 static int
314 cpsw_match(device_t parent, cfdata_t cf, void *aux)
315 {
316 struct fdt_attach_args * const faa = aux;
317
318 static const char * const compatible[] = {
319 "ti,am335x-cpsw",
320 "ti,cpsw",
321 NULL
322 };
323
324 return of_match_compatible(faa->faa_phandle, compatible);
325 }
326
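/*
 * Check whether any attached PHY advertises 1000BASE-T; cpsw_init()
 * uses the result to decide whether to set the GIG bit in MACCONTROL.
 */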
327 static bool
328 cpsw_phy_has_1000t(struct cpsw_softc * const sc)
329 {
330 struct ifmedia_entry *ifm;
331
332 TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) {
333 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T)
334 return true;
335 }
336 return false;
337 }
338
339 static int
340 cpsw_detach(device_t self, int flags)
341 {
342 struct cpsw_softc * const sc = device_private(self);
343 struct ifnet *ifp = &sc->sc_ec.ec_if;
344 u_int i;
345
346 /* Succeed now if there's no work to do. */
347 if (!sc->sc_attached)
348 return 0;
349
350 sc->sc_attached = false;
351
352 /* Stop the interface. Callouts are stopped in it. */
353 cpsw_stop(ifp, 1);
354
355 /* Destroy our callout. */
356 callout_destroy(&sc->sc_tick_ch);
357
358 /* Let go of the interrupts */
359 intr_disestablish(sc->sc_rxthih);
360 intr_disestablish(sc->sc_rxih);
361 intr_disestablish(sc->sc_txih);
362 intr_disestablish(sc->sc_miscih);
363
364 ether_ifdetach(ifp);
365 if_detach(ifp);
366
367 /* Delete all media. */
368 ifmedia_fini(&sc->sc_mii.mii_media);
369
370 /* Free the packet padding buffer */
371 kmem_free(sc->sc_txpad, ETHER_MIN_LEN);
372 bus_dmamap_destroy(sc->sc_bdt, sc->sc_txpad_dm);
373
374 /* Destroy all the descriptors */
375 for (i = 0; i < CPSW_NTXDESCS; i++)
376 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->tx_dm[i]);
377 for (i = 0; i < CPSW_NRXDESCS; i++)
378 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->rx_dm[i]);
379 kmem_free(sc->sc_rdp, sizeof(*sc->sc_rdp));
380
381 /* Unmap */
382 bus_space_unmap(sc->sc_bst, sc->sc_bsh, sc->sc_bss);
383
384
385 return 0;
386 }
387
388 static void
389 cpsw_attach(device_t parent, device_t self, void *aux)
390 {
391 struct fdt_attach_args * const faa = aux;
392 struct cpsw_softc * const sc = device_private(self);
393 struct ethercom * const ec = &sc->sc_ec;
394 struct ifnet * const ifp = &ec->ec_if;
395 struct mii_data * const mii = &sc->sc_mii;
396 const int phandle = faa->faa_phandle;
397 const uint8_t *macaddr;
398 bus_addr_t addr;
399 bus_size_t size;
400 int error, slave, len;
401 char xname[16];
402 u_int i;
403
404 KERNHIST_INIT(cpswhist, 4096);
405
406 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
407 aprint_error(": couldn't get registers\n");
408 return;
409 }
410
411 sc->sc_dev = self;
412
413 aprint_normal(": TI Layer 2 3-Port Switch\n");
414 aprint_naive("\n");
415
416 callout_init(&sc->sc_tick_ch, 0);
417 callout_setfunc(&sc->sc_tick_ch, cpsw_tick, sc);
418
419 macaddr = NULL;
420 slave = of_find_firstchild_byname(phandle, "slave");
421 if (slave > 0) {
422 macaddr = fdtbus_get_prop(slave, "mac-address", &len);
423 if (len != ETHER_ADDR_LEN)
424 macaddr = NULL;
425 }
426 if (macaddr == NULL) {
427 #if 0
428 /* grab mac_id0 from AM335x control module */
429 uint32_t reg_lo, reg_hi;
430
431 		if (sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO, &reg_lo) == 0 &&
432 		    sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI, &reg_hi) == 0) {
433 sc->sc_enaddr[0] = (reg_hi >> 0) & 0xff;
434 sc->sc_enaddr[1] = (reg_hi >> 8) & 0xff;
435 sc->sc_enaddr[2] = (reg_hi >> 16) & 0xff;
436 sc->sc_enaddr[3] = (reg_hi >> 24) & 0xff;
437 sc->sc_enaddr[4] = (reg_lo >> 0) & 0xff;
438 sc->sc_enaddr[5] = (reg_lo >> 8) & 0xff;
439 } else
440 #endif
441 {
442 aprint_error_dev(sc->sc_dev,
443 "using fake station address\n");
444 /* 'N' happens to have the Local bit set */
445 #if 0
446 sc->sc_enaddr[0] = 'N';
447 sc->sc_enaddr[1] = 'e';
448 sc->sc_enaddr[2] = 't';
449 sc->sc_enaddr[3] = 'B';
450 sc->sc_enaddr[4] = 'S';
451 sc->sc_enaddr[5] = 'D';
452 #else
453 /* XXX Glor */
454 sc->sc_enaddr[0] = 0xd4;
455 sc->sc_enaddr[1] = 0x94;
456 sc->sc_enaddr[2] = 0xa1;
457 sc->sc_enaddr[3] = 0x97;
458 sc->sc_enaddr[4] = 0x03;
459 sc->sc_enaddr[5] = 0x94;
460 #endif
461 }
462 } else {
463 memcpy(sc->sc_enaddr, macaddr, ETHER_ADDR_LEN);
464 }
465
466 snprintf(xname, sizeof(xname), "%s rxth", device_xname(self));
467 sc->sc_rxthih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_RXTH,
468 IPL_VM, FDT_INTR_FLAGS, cpsw_rxthintr, sc, xname);
469
470 snprintf(xname, sizeof(xname), "%s rx", device_xname(self));
471 sc->sc_rxih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_RX,
472 IPL_VM, FDT_INTR_FLAGS, cpsw_rxintr, sc, xname);
473
474 snprintf(xname, sizeof(xname), "%s tx", device_xname(self));
475 sc->sc_txih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_TX,
476 IPL_VM, FDT_INTR_FLAGS, cpsw_txintr, sc, xname);
477
478 snprintf(xname, sizeof(xname), "%s misc", device_xname(self));
479 sc->sc_miscih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_MISC,
480 IPL_VM, FDT_INTR_FLAGS, cpsw_miscintr, sc, xname);
481
482 sc->sc_bst = faa->faa_bst;
483 sc->sc_bss = size;
484 sc->sc_bdt = faa->faa_dmat;
485
486 error = bus_space_map(sc->sc_bst, addr, size, 0,
487 &sc->sc_bsh);
488 if (error) {
489 aprint_error_dev(sc->sc_dev,
490 "can't map registers: %d\n", error);
491 return;
492 }
493
494 sc->sc_txdescs_pa = addr + CPSW_CPPI_RAM_TXDESCS_BASE;
495 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
496 CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
497 &sc->sc_bsh_txdescs);
498 if (error) {
499 aprint_error_dev(sc->sc_dev,
500 "can't subregion tx ring SRAM: %d\n", error);
501 return;
502 }
503 aprint_debug_dev(sc->sc_dev, "txdescs at %p\n",
504 (void *)sc->sc_bsh_txdescs);
505
506 sc->sc_rxdescs_pa = addr + CPSW_CPPI_RAM_RXDESCS_BASE;
507 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
508 CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
509 &sc->sc_bsh_rxdescs);
510 if (error) {
511 aprint_error_dev(sc->sc_dev,
512 "can't subregion rx ring SRAM: %d\n", error);
513 return;
514 }
515 aprint_debug_dev(sc->sc_dev, "rxdescs at %p\n",
516 (void *)sc->sc_bsh_rxdescs);
517
518 sc->sc_rdp = kmem_alloc(sizeof(*sc->sc_rdp), KM_SLEEP);
519
520 for (i = 0; i < CPSW_NTXDESCS; i++) {
521 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
522 CPSW_TXFRAGS, MCLBYTES, 0, 0,
523 &sc->sc_rdp->tx_dm[i])) != 0) {
524 aprint_error_dev(sc->sc_dev,
525 "unable to create tx DMA map: %d\n", error);
526 }
527 sc->sc_rdp->tx_mb[i] = NULL;
528 }
529
530 for (i = 0; i < CPSW_NRXDESCS; i++) {
531 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
532 MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
533 aprint_error_dev(sc->sc_dev,
534 "unable to create rx DMA map: %d\n", error);
535 }
536 sc->sc_rdp->rx_mb[i] = NULL;
537 }
538
539 sc->sc_txpad = kmem_zalloc(ETHER_MIN_LEN, KM_SLEEP);
540 bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
541 BUS_DMA_WAITOK, &sc->sc_txpad_dm);
542 bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
543 ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
544 bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
545 BUS_DMASYNC_PREWRITE);
546
547 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
548 ether_sprintf(sc->sc_enaddr));
549
550 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
551 ifp->if_softc = sc;
552 ifp->if_capabilities = 0;
553 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
554 ifp->if_start = cpsw_start;
555 ifp->if_ioctl = cpsw_ioctl;
556 ifp->if_init = cpsw_init;
557 ifp->if_stop = cpsw_stop;
558 ifp->if_watchdog = cpsw_watchdog;
559 IFQ_SET_READY(&ifp->if_snd);
560
561 cpsw_stop(ifp, 0);
562
563 mii->mii_ifp = ifp;
564 mii->mii_readreg = cpsw_mii_readreg;
565 mii->mii_writereg = cpsw_mii_writereg;
566 mii->mii_statchg = cpsw_mii_statchg;
567
568 sc->sc_ec.ec_mii = mii;
569 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
570
571 /* Initialize MDIO */
572 cpsw_write_4(sc, MDIOCONTROL,
573 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
574 /* Clear ALE */
575 cpsw_write_4(sc, CPSW_ALE_CONTROL, ALECTL_CLEAR_TABLE);
576
577 	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
578 if (LIST_FIRST(&mii->mii_phys) == NULL) {
579 aprint_error_dev(self, "no PHY found!\n");
580 sc->sc_phy_has_1000t = false;
581 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
582 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
583 } else {
584 sc->sc_phy_has_1000t = cpsw_phy_has_1000t(sc);
585
586 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
587 }
588
589 if_attach(ifp);
590 if_deferred_start_init(ifp, NULL);
591 ether_ifattach(ifp, sc->sc_enaddr);
592
593 /* The attach is successful. */
594 sc->sc_attached = true;
595
596 return;
597 }
598
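/*
 * Transmit: for each mbuf, load it into a DMA map and emit one
 * descriptor per segment.  Frames shorter than CPSW_PAD_LEN get an
 * extra descriptor pointing at the zero-filled sc_txpad buffer.  The
 * new chain is then linked after the previous tail, and if the DMA
 * engine had reached end-of-queue it is restarted by writing the
 * head descriptor pointer.
 */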
599 static void
600 cpsw_start(struct ifnet *ifp)
601 {
602 struct cpsw_softc * const sc = ifp->if_softc;
603 struct cpsw_ring_data * const rdp = sc->sc_rdp;
604 struct cpsw_cpdma_bd bd;
605 uint32_t * const dw = bd.word;
606 struct mbuf *m;
607 bus_dmamap_t dm;
608 u_int eopi __diagused = ~0;
609 u_int seg;
610 u_int txfree;
611 int txstart = -1;
612 int error;
613 bool pad;
614 u_int mlen;
615
616 KERNHIST_FUNC(__func__);
617 CPSWHIST_CALLARGS(sc, 0, 0, 0);
618
619 if (__predict_false((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
620 IFF_RUNNING)) {
621 return;
622 }
623
624 if (sc->sc_txnext >= sc->sc_txhead)
625 txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
626 else
627 txfree = sc->sc_txhead - sc->sc_txnext - 1;
628
629 KERNHIST_LOG(cpswhist, "start txf %x txh %x txn %x txr %x\n",
630 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
631
632 while (txfree > 0) {
633 IFQ_POLL(&ifp->if_snd, m);
634 if (m == NULL)
635 break;
636
637 dm = rdp->tx_dm[sc->sc_txnext];
638
639 error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
640 if (error == EFBIG) {
641 device_printf(sc->sc_dev, "won't fit\n");
642 IFQ_DEQUEUE(&ifp->if_snd, m);
643 m_freem(m);
644 if_statinc(ifp, if_oerrors);
645 continue;
646 } else if (error != 0) {
647 device_printf(sc->sc_dev, "error\n");
648 break;
649 }
650
651 if (dm->dm_nsegs + 1 >= txfree) {
652 ifp->if_flags |= IFF_OACTIVE;
653 bus_dmamap_unload(sc->sc_bdt, dm);
654 break;
655 }
656
657 mlen = m_length(m);
658 pad = mlen < CPSW_PAD_LEN;
659
660 KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
661 rdp->tx_mb[sc->sc_txnext] = m;
662 IFQ_DEQUEUE(&ifp->if_snd, m);
663
664 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
665 BUS_DMASYNC_PREWRITE);
666
667 if (txstart == -1)
668 txstart = sc->sc_txnext;
669 eopi = sc->sc_txnext;
670 for (seg = 0; seg < dm->dm_nsegs; seg++) {
671 dw[0] = cpsw_txdesc_paddr(sc,
672 TXDESC_NEXT(sc->sc_txnext));
673 dw[1] = dm->dm_segs[seg].ds_addr;
674 dw[2] = dm->dm_segs[seg].ds_len;
675 dw[3] = 0;
676
677 if (seg == 0)
678 dw[3] |= CPDMA_BD_SOP | CPDMA_BD_OWNER |
679 MAX(mlen, CPSW_PAD_LEN);
680
681 if ((seg == dm->dm_nsegs - 1) && !pad)
682 dw[3] |= CPDMA_BD_EOP;
683
684 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
685 txfree--;
686 eopi = sc->sc_txnext;
687 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
688 }
689 if (pad) {
690 dw[0] = cpsw_txdesc_paddr(sc,
691 TXDESC_NEXT(sc->sc_txnext));
692 dw[1] = sc->sc_txpad_pa;
693 dw[2] = CPSW_PAD_LEN - mlen;
694 dw[3] = CPDMA_BD_EOP;
695
696 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
697 txfree--;
698 eopi = sc->sc_txnext;
699 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
700 }
701
702 bpf_mtap(ifp, m, BPF_D_OUT);
703 }
704
705 if (txstart >= 0) {
706 ifp->if_timer = 5;
707 /* terminate the new chain */
708 KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
709 cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);
710 KERNHIST_LOG(cpswhist, "CP %x HDP %x s %x e %x\n",
711 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
712 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), txstart, eopi);
713 /* link the new chain on */
714 cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
715 cpsw_txdesc_paddr(sc, txstart));
716 if (sc->sc_txeoq) {
717 /* kick the dma engine */
718 sc->sc_txeoq = false;
719 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
720 cpsw_txdesc_paddr(sc, txstart));
721 }
722 }
723 KERNHIST_LOG(cpswhist, "end txf %x txh %x txn %x txr %x\n",
724 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
725 }
726
727 static int
728 cpsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
729 {
730 const int s = splnet();
731 int error = 0;
732
733 switch (cmd) {
734 default:
735 error = ether_ioctl(ifp, cmd, data);
736 if (error == ENETRESET) {
737 error = 0;
738 }
739 break;
740 }
741
742 splx(s);
743
744 return error;
745 }
746
747 static void
748 cpsw_watchdog(struct ifnet *ifp)
749 {
750 struct cpsw_softc *sc = ifp->if_softc;
751
752 device_printf(sc->sc_dev, "device timeout\n");
753
754 if_statinc(ifp, if_oerrors);
755 cpsw_init(ifp);
756 cpsw_start(ifp);
757 }
758
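/*
 * MDIO access goes through MDIOUSERACCESS0: bit 31 (GO) stays set
 * while a transaction is in flight, and bit 29 (ACK) reports whether
 * the addressed PHY responded.
 */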
759 static int
760 cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
761 {
762 u_int tries;
763
764 for (tries = 0; tries < 1000; tries++) {
765 if ((cpsw_read_4(sc, reg) & __BIT(31)) == 0)
766 return 0;
767 delay(1);
768 }
769 return ETIMEDOUT;
770 }
771
772 static int
773 cpsw_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
774 {
775 struct cpsw_softc * const sc = device_private(dev);
776 uint32_t v;
777
778 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
779 return -1;
780
781 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) |
782 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
783
784 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
785 return -1;
786
787 v = cpsw_read_4(sc, MDIOUSERACCESS0);
788 if (v & __BIT(29)) {
789 *val = v & 0xffff;
790 return 0;
791 }
792
793 return -1;
794 }
795
796 static int
797 cpsw_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
798 {
799 struct cpsw_softc * const sc = device_private(dev);
800 uint32_t v;
801
802 KASSERT((val & 0xffff0000UL) == 0);
803
804 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
805 goto out;
806
807 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) | (1 << 30) |
808 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);
809
810 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
811 goto out;
812
813 v = cpsw_read_4(sc, MDIOUSERACCESS0);
814 if ((v & __BIT(29)) == 0) {
815 out:
816 device_printf(sc->sc_dev, "%s error\n", __func__);
817 return -1;
818 }
819
820 return 0;
821 }
822
823 static void
824 cpsw_mii_statchg(struct ifnet *ifp)
825 {
826 return;
827 }
828
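/*
 * Provide a fresh mbuf cluster for RX slot i.  If allocation fails the
 * old buffer is reused (and the received packet dropped); either way
 * the descriptor is rewritten and linked after its predecessor so the
 * hardware can keep filling the ring.
 */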
829 static int
830 cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
831 {
832 struct cpsw_ring_data * const rdp = sc->sc_rdp;
833 const u_int h = RXDESC_PREV(i);
834 struct cpsw_cpdma_bd bd;
835 uint32_t * const dw = bd.word;
836 struct mbuf *m;
837 int error = ENOBUFS;
838
839 MGETHDR(m, M_DONTWAIT, MT_DATA);
840 if (m == NULL) {
841 goto reuse;
842 }
843
844 MCLGET(m, M_DONTWAIT);
845 if ((m->m_flags & M_EXT) == 0) {
846 m_freem(m);
847 goto reuse;
848 }
849
850 /* We have a new buffer, prepare it for the ring. */
851
852 if (rdp->rx_mb[i] != NULL)
853 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
854
855 m->m_len = m->m_pkthdr.len = MCLBYTES;
856
857 rdp->rx_mb[i] = m;
858
859 error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
860 BUS_DMA_READ | BUS_DMA_NOWAIT);
861 if (error) {
862 device_printf(sc->sc_dev, "can't load rx DMA map %d: %d\n",
863 i, error);
864 }
865
866 bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
867 0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
868
869 error = 0;
870
871 reuse:
872 /* (re-)setup the descriptor */
873 dw[0] = 0;
874 dw[1] = rdp->rx_dm[i]->dm_segs[0].ds_addr;
875 dw[2] = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
876 dw[3] = CPDMA_BD_OWNER;
877
878 cpsw_set_rxdesc(sc, i, &bd);
879 /* and link onto ring */
880 cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));
881
882 return error;
883 }
884
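/*
 * Bring the interface up: reset the wrapper, switch subsystem, sliver
 * ports and CPDMA, program the ALE and per-port MAC addresses, zero
 * and repopulate the descriptor rings, then unmask the TX/RX/misc
 * interrupts and start RX channel 0.
 */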
885 static int
886 cpsw_init(struct ifnet *ifp)
887 {
888 struct cpsw_softc * const sc = ifp->if_softc;
889 struct mii_data * const mii = &sc->sc_mii;
890 int i;
891
892 cpsw_stop(ifp, 0);
893
894 sc->sc_txnext = 0;
895 sc->sc_txhead = 0;
896
897 /* Reset wrapper */
898 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
899 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
900 ;
901
902 /* Reset SS */
903 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
904 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
905 ;
906
907 /* Clear table and enable ALE */
908 cpsw_write_4(sc, CPSW_ALE_CONTROL,
909 ALECTL_ENABLE_ALE | ALECTL_CLEAR_TABLE);
910
911 /* Reset and init Sliver port 1 and 2 */
912 for (i = 0; i < CPSW_ETH_PORTS; i++) {
913 uint32_t macctl;
914
915 /* Reset */
916 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
917 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
918 ;
919 /* Set Slave Mapping */
920 cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
921 cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
922 cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
923 /* Set MAC Address */
924 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i+1),
925 sc->sc_enaddr[0] | (sc->sc_enaddr[1] << 8) |
926 (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[3] << 24));
927 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i+1),
928 sc->sc_enaddr[4] | (sc->sc_enaddr[5] << 8));
929
930 /* Set MACCONTROL for ports 0,1 */
931 macctl = SLMACCTL_FULLDUPLEX | SLMACCTL_GMII_EN |
932 SLMACCTL_IFCTL_A;
933 if (sc->sc_phy_has_1000t)
934 macctl |= SLMACCTL_GIG;
935 cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), macctl);
936
937 /* Set ALE port to forwarding(3) */
938 cpsw_write_4(sc, CPSW_ALE_PORTCTL(i+1), 3);
939 }
940
941 /* Set Host Port Mapping */
942 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
943 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
944
945 /* Set ALE port to forwarding(3) */
946 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);
947
948 /* Initialize addrs */
949 cpsw_ale_update_addresses(sc, 1);
950
951 cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
952 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
953
954 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
955 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
956 ;
957
958 for (i = 0; i < 8; i++) {
959 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
960 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
961 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
962 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
963 }
964
965 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
966 CPSW_CPPI_RAM_TXDESCS_SIZE/4);
967
968 sc->sc_txhead = 0;
969 sc->sc_txnext = 0;
970
971 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
972
973 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
974 CPSW_CPPI_RAM_RXDESCS_SIZE/4);
975 /* Initialize RX Buffer Descriptors */
976 cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
977 for (i = 0; i < CPSW_NRXDESCS; i++) {
978 cpsw_new_rxbuf(sc, i);
979 }
980 sc->sc_rxhead = 0;
981
982 /* turn off flow control */
983 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
984
985 /* align layer 3 header to 32-bit */
986 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);
987
988 /* Clear all interrupt Masks */
989 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
990 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
991
992 /* Enable TX & RX DMA */
993 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
994 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
995
996 /* Enable TX and RX interrupt receive for core 0 */
997 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 1);
998 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 1);
999 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);
1000
1001 /* Enable host Error Interrupt */
1002 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 2);
1003
1004 /* Enable interrupts for TX and RX Channel 0 */
1005 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
1006 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);
1007
1008 /* Ack stalled irqs */
1009 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1010 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1011 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1012 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1013
1014 /* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
1015 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
1016 cpsw_write_4(sc, MDIOCONTROL,
1017 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
1018
1019 mii_mediachg(mii);
1020
1021 /* Write channel 0 RX HDP */
1022 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
1023 sc->sc_rxrun = true;
1024 sc->sc_rxeoq = false;
1025
1026 sc->sc_txrun = true;
1027 sc->sc_txeoq = true;
1028 callout_schedule(&sc->sc_tick_ch, hz);
1029 ifp->if_flags |= IFF_RUNNING;
1030 ifp->if_flags &= ~IFF_OACTIVE;
1031
1032 return 0;
1033 }
1034
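/*
 * Stop the interface: mask interrupts, request TX and RX teardown and
 * poll the interrupt handlers until both channels acknowledge, then
 * reset the hardware blocks and release any queued mbufs.
 */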
1035 static void
1036 cpsw_stop(struct ifnet *ifp, int disable)
1037 {
1038 struct cpsw_softc * const sc = ifp->if_softc;
1039 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1040 u_int i;
1041
1042 aprint_debug_dev(sc->sc_dev, "%s: ifp %p disable %d\n", __func__,
1043 ifp, disable);
1044
1045 if ((ifp->if_flags & IFF_RUNNING) == 0)
1046 return;
1047
1048 callout_stop(&sc->sc_tick_ch);
1049 mii_down(&sc->sc_mii);
1050
1051 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
1052 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
1053 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0x0);
1054 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0x0);
1055 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x0);
1056
1057 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
1058 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
1059 i = 0;
1060 while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
1061 delay(10);
1062 if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
1063 sc->sc_txrun = false;
1064 if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
1065 sc->sc_rxrun = false;
1066 i++;
1067 }
1068 //printf("%s toredown complete in %u\n", __func__, i);
1069
1070 /* Reset wrapper */
1071 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
1072 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
1073 ;
1074
1075 /* Reset SS */
1076 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
1077 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
1078 ;
1079
1080 for (i = 0; i < CPSW_ETH_PORTS; i++) {
1081 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
1082 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
1083 ;
1084 }
1085
1086 /* Reset CPDMA */
1087 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
1088 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
1089 ;
1090
1091 /* Release any queued transmit buffers. */
1092 for (i = 0; i < CPSW_NTXDESCS; i++) {
1093 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
1094 m_freem(rdp->tx_mb[i]);
1095 rdp->tx_mb[i] = NULL;
1096 }
1097
1098 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1099 ifp->if_timer = 0;
1100
1101 if (!disable)
1102 return;
1103
1104 for (i = 0; i < CPSW_NRXDESCS; i++) {
1105 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
1106 m_freem(rdp->rx_mb[i]);
1107 rdp->rx_mb[i] = NULL;
1108 }
1109 }
1110
1111 static void
1112 cpsw_tick(void *arg)
1113 {
1114 struct cpsw_softc * const sc = arg;
1115 struct mii_data * const mii = &sc->sc_mii;
1116 const int s = splnet();
1117
1118 mii_tick(mii);
1119
1120 splx(s);
1121
1122 callout_schedule(&sc->sc_tick_ch, hz);
1123 }
1124
1125 static int
1126 cpsw_rxthintr(void *arg)
1127 {
1128 struct cpsw_softc * const sc = arg;
1129
1130 /* this won't deassert the interrupt though */
1131 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1132
1133 return 1;
1134 }
1135
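/*
 * RX interrupt: walk the ring from sc_rxhead while descriptors have
 * been released by the hardware (OWNER clear), queue a replacement
 * buffer for each completed slot, hand the received packet to the
 * stack and acknowledge the completion pointer as we go.
 */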
1136 static int
1137 cpsw_rxintr(void *arg)
1138 {
1139 struct cpsw_softc * const sc = arg;
1140 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1141 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1142 struct cpsw_cpdma_bd bd;
1143 const uint32_t * const dw = bd.word;
1144 bus_dmamap_t dm;
1145 struct mbuf *m;
1146 u_int i;
1147 u_int len, off;
1148
1149 KERNHIST_FUNC(__func__);
1150 CPSWHIST_CALLARGS(sc, 0, 0, 0);
1151
1152 for (;;) {
1153 KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);
1154
1155 i = sc->sc_rxhead;
1156 KERNHIST_LOG(cpswhist, "rxhead %x CP %x\n", i,
1157 cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0)), 0, 0);
1158 dm = rdp->rx_dm[i];
1159 m = rdp->rx_mb[i];
1160
1161 KASSERT(dm != NULL);
1162 KASSERT(m != NULL);
1163
1164 cpsw_get_rxdesc(sc, i, &bd);
1165
1166 if (ISSET(dw[3], CPDMA_BD_OWNER))
1167 break;
1168
1169 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1170 sc->sc_rxrun = false;
1171 return 1;
1172 }
1173
1174 if ((dw[3] & (CPDMA_BD_SOP | CPDMA_BD_EOP)) !=
1175 (CPDMA_BD_SOP | CPDMA_BD_EOP)) {
1176 //Debugger();
1177 }
1178
1179 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
1180 BUS_DMASYNC_POSTREAD);
1181
1182 if (cpsw_new_rxbuf(sc, i) != 0) {
1183 /* drop current packet, reuse buffer for new */
1184 if_statinc(ifp, if_ierrors);
1185 goto next;
1186 }
1187
1188 off = __SHIFTOUT(dw[2], (uint32_t)__BITS(26, 16));
1189 len = __SHIFTOUT(dw[3], (uint32_t)__BITS(10, 0));
1190
1191 if (ISSET(dw[3], CPDMA_BD_PASSCRC))
1192 len -= ETHER_CRC_LEN;
1193
1194 m_set_rcvif(m, ifp);
1195 m->m_pkthdr.len = m->m_len = len;
1196 m->m_data += off;
1197
1198 if_percpuq_enqueue(ifp->if_percpuq, m);
1199
1200 next:
1201 sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
1202 if (ISSET(dw[3], CPDMA_BD_EOQ)) {
1203 sc->sc_rxeoq = true;
1204 break;
1205 } else {
1206 sc->sc_rxeoq = false;
1207 }
1208 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(0),
1209 cpsw_rxdesc_paddr(sc, i));
1210 }
1211
1212 if (sc->sc_rxeoq) {
1213 device_printf(sc->sc_dev, "rxeoq\n");
1214 //Debugger();
1215 }
1216
1217 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1218
1219 return 1;
1220 }
1221
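/*
 * TX interrupt: reclaim completed descriptors from sc_txhead up to
 * the hardware completion pointer, freeing the associated mbufs, and
 * restart the channel if it reached end-of-queue while more
 * descriptors were still pending.
 */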
1222 static int
1223 cpsw_txintr(void *arg)
1224 {
1225 struct cpsw_softc * const sc = arg;
1226 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1227 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1228 struct cpsw_cpdma_bd bd;
1229 const uint32_t * const dw = bd.word;
1230 bool handled = false;
1231 uint32_t tx0_cp;
1232 u_int cpi;
1233
1234 KERNHIST_FUNC(__func__);
1235 CPSWHIST_CALLARGS(sc, 0, 0, 0);
1236
1237 KASSERT(sc->sc_txrun);
1238
1239 KERNHIST_LOG(cpswhist, "before txnext %x txhead %x txrun %x\n",
1240 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1241
1242 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1243
1244 if (tx0_cp == 0xfffffffc) {
1245 /* Teardown, ack it */
1246 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
1247 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0), 0);
1248 sc->sc_txrun = false;
1249 return 0;
1250 }
1251
1252 for (;;) {
1253 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1254 cpi = (tx0_cp - sc->sc_txdescs_pa) / sizeof(struct cpsw_cpdma_bd);
1255 KASSERT(sc->sc_txhead < CPSW_NTXDESCS);
1256
1257 KERNHIST_LOG(cpswhist, "txnext %x txhead %x txrun %x cpi %x\n",
1258 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, cpi);
1259
1260 cpsw_get_txdesc(sc, sc->sc_txhead, &bd);
1261
1262 if (dw[2] == 0) {
1263 //Debugger();
1264 }
1265
1266 if (ISSET(dw[3], CPDMA_BD_SOP) == 0)
1267 goto next;
1268
1269 if (ISSET(dw[3], CPDMA_BD_OWNER)) {
1270 printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
1271 sc->sc_txnext);
1272 break;
1273 }
1274
1275 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1276 sc->sc_txrun = false;
1277 return 1;
1278 }
1279
1280 bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
1281 0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
1282 BUS_DMASYNC_POSTWRITE);
1283 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);
1284
1285 m_freem(rdp->tx_mb[sc->sc_txhead]);
1286 rdp->tx_mb[sc->sc_txhead] = NULL;
1287
1288 if_statinc(ifp, if_opackets);
1289
1290 handled = true;
1291
1292 ifp->if_flags &= ~IFF_OACTIVE;
1293
1294 next:
1295 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1296 sc->sc_txeoq = true;
1297 }
1298 if (sc->sc_txhead == cpi) {
1299 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0),
1300 cpsw_txdesc_paddr(sc, cpi));
1301 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1302 break;
1303 }
1304 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1305 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1306 sc->sc_txeoq = true;
1307 break;
1308 }
1309 }
1310
1311 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1312
1313 if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
1314 if (cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)) == 0) {
1315 sc->sc_txeoq = false;
1316 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
1317 cpsw_txdesc_paddr(sc, sc->sc_txhead));
1318 }
1319 }
1320
1321 KERNHIST_LOG(cpswhist, "after txnext %x txhead %x txrun %x\n",
1322 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1323 KERNHIST_LOG(cpswhist, "CP %x HDP %x\n",
1324 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
1325 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), 0, 0);
1326
1327 if (handled && sc->sc_txnext == sc->sc_txhead)
1328 ifp->if_timer = 0;
1329
1330 if (handled)
1331 if_schedule_deferred_start(ifp);
1332
1333 return handled;
1334 }
1335
1336 static int
1337 cpsw_miscintr(void *arg)
1338 {
1339 struct cpsw_softc * const sc = arg;
1340 uint32_t miscstat;
1341 uint32_t dmastat;
1342 uint32_t stat;
1343
1344 miscstat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
1345 device_printf(sc->sc_dev, "%s %x FIRE\n", __func__, miscstat);
1346
1347 #define CPSW_MISC_HOST_PEND __BIT32(2)
1348 #define CPSW_MISC_STAT_PEND __BIT32(3)
1349
1350 if (ISSET(miscstat, CPSW_MISC_HOST_PEND)) {
1351 /* Host Error */
1352 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1353 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1354
1355 printf("rxhead %02x\n", sc->sc_rxhead);
1356
1357 stat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
1358 printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
1359 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0));
1360 printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
1361 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1362 printf("CPSW_CPDMA_TX0_CP %x\n", stat);
1363 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(0));
1364 printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
1365 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0));
1366 printf("CPSW_CPDMA_RX0_CP %x\n", stat);
1367
1368 //Debugger();
1369
1370 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
1371 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1372 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1373 }
1374
1375 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1376
1377 return 1;
1378 }
1379
1380 /*
1381 *
1382 * ALE support routines.
1383 *
1384 */
1385
1386 static void
1387 cpsw_ale_entry_init(uint32_t *ale_entry)
1388 {
1389 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1390 }
1391
1392 static void
1393 cpsw_ale_entry_set_mac(uint32_t *ale_entry, const uint8_t *mac)
1394 {
1395 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1396 ale_entry[1] = mac[0] << 8 | mac[1];
1397 }
1398
1399 static void
1400 cpsw_ale_entry_set_bcast_mac(uint32_t *ale_entry)
1401 {
1402 ale_entry[0] = 0xffffffff;
1403 ale_entry[1] = 0x0000ffff;
1404 }
1405
1406 static void
1407 cpsw_ale_entry_set(uint32_t *ale_entry, ale_entry_field_t field, uint32_t val)
1408 {
1409 /* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1410 switch (field) {
1411 case ALE_ENTRY_TYPE:
1412 /* [61:60] */
1413 ale_entry[1] |= (val & 0x3) << 28;
1414 break;
1415 case ALE_MCAST_FWD_STATE:
1416 /* [63:62] */
1417 ale_entry[1] |= (val & 0x3) << 30;
1418 break;
1419 case ALE_PORT_MASK:
1420 /* [68:66] */
1421 ale_entry[2] |= (val & 0x7) << 2;
1422 break;
1423 case ALE_PORT_NUMBER:
1424 /* [67:66] */
1425 ale_entry[2] |= (val & 0x3) << 2;
1426 break;
1427 default:
1428 panic("Invalid ALE entry field: %d\n", field);
1429 }
1430
1431 return;
1432 }
1433
1434 static bool
1435 cpsw_ale_entry_mac_match(const uint32_t *ale_entry, const uint8_t *mac)
1436 {
1437 return (((ale_entry[1] >> 8) & 0xff) == mac[0]) &&
1438 (((ale_entry[1] >> 0) & 0xff) == mac[1]) &&
1439 (((ale_entry[0] >>24) & 0xff) == mac[2]) &&
1440 (((ale_entry[0] >>16) & 0xff) == mac[3]) &&
1441 (((ale_entry[0] >> 8) & 0xff) == mac[4]) &&
1442 (((ale_entry[0] >> 0) & 0xff) == mac[5]);
1443 }
1444
1445 static void
1446 cpsw_ale_set_outgoing_mac(struct cpsw_softc *sc, int port, const uint8_t *mac)
1447 {
1448 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(port),
1449 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
1450 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(port),
1451 mac[5] << 8 | mac[4]);
1452 }
1453
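/*
 * The ALE table is accessed indirectly: writing an index to TBLCTL
 * loads that entry into the TBLW0..TBLW2 window registers, and
 * setting bit 31 of TBLCTL writes the window contents back to the
 * table.
 */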
1454 static void
1455 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1456 {
1457 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
1458 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
1459 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
1460 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
1461 }
1462
1463 static void
1464 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx,
1465 const uint32_t *ale_entry)
1466 {
1467 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
1468 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
1469 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
1470 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
1471 }
1472
1473 static int
1474 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
1475 {
1476 int i;
1477 uint32_t ale_entry[3];
1478
1479 /* First two entries are link address and broadcast. */
1480 for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
1481 cpsw_ale_read_entry(sc, i, ale_entry);
1482 if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
1483 ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */
1484 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1485 cpsw_ale_write_entry(sc, i, ale_entry);
1486 }
1487 }
1488 return CPSW_MAX_ALE_ENTRIES;
1489 }
1490
1491 static int
1492 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmask, uint8_t *mac)
1493 {
1494 int free_index = -1, matching_index = -1, i;
1495 uint32_t ale_entry[3];
1496
1497 /* Find a matching entry or a free entry. */
1498 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1499 cpsw_ale_read_entry(sc, i, ale_entry);
1500
1501 /* Entry Type[61:60] is 0 for free entry */
1502 if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
1503 free_index = i;
1504 }
1505
1506 if (cpsw_ale_entry_mac_match(ale_entry, mac)) {
1507 matching_index = i;
1508 break;
1509 }
1510 }
1511
1512 if (matching_index < 0) {
1513 if (free_index < 0)
1514 return ENOMEM;
1515 i = free_index;
1516 }
1517
1518 cpsw_ale_entry_init(ale_entry);
1519
1520 cpsw_ale_entry_set_mac(ale_entry, mac);
1521 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1522 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1523 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, portmask);
1524
1525 cpsw_ale_write_entry(sc, i, ale_entry);
1526
1527 return 0;
1528 }
1529
1530 static int
1531 cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
1532 {
1533 uint8_t *mac = sc->sc_enaddr;
1534 uint32_t ale_entry[3];
1535 int i;
1536 struct ethercom * const ec = &sc->sc_ec;
1537 struct ether_multi *ifma;
1538
1539 cpsw_ale_entry_init(ale_entry);
1540 /* Route incoming packets for our MAC address to Port 0 (host). */
1541 /* For simplicity, keep this entry at table index 0 in the ALE. */
1542 cpsw_ale_entry_set_mac(ale_entry, mac);
1543 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1544 cpsw_ale_entry_set(ale_entry, ALE_PORT_NUMBER, 0);
1545 cpsw_ale_write_entry(sc, 0, ale_entry);
1546
1547 /* Set outgoing MAC Address for Ports 1 and 2. */
1548 for (i = CPSW_CPPI_PORTS; i < (CPSW_ETH_PORTS + CPSW_CPPI_PORTS); ++i)
1549 cpsw_ale_set_outgoing_mac(sc, i, mac);
1550
1551 /* Keep the broadcast address at table entry 1. */
1552 cpsw_ale_entry_init(ale_entry);
1553 cpsw_ale_entry_set_bcast_mac(ale_entry);
1554 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1555 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1556 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, ALE_PORT_MASK_ALL);
1557 cpsw_ale_write_entry(sc, 1, ale_entry);
1558
1559 /* SIOCDELMULTI doesn't specify the particular address
1560 being removed, so we have to remove all and rebuild. */
1561 if (purge)
1562 cpsw_ale_remove_all_mc_entries(sc);
1563
1564 /* Set other multicast addrs desired. */
1565 ETHER_LOCK(ec);
1566 LIST_FOREACH(ifma, &ec->ec_multiaddrs, enm_list) {
1567 cpsw_ale_mc_entry_set(sc, ALE_PORT_MASK_ALL, ifma->enm_addrlo);
1568 }
1569 ETHER_UNLOCK(ec);
1570
1571 return 0;
1572 }
1573