1 /* $NetBSD: if_cpsw.c,v 1.14 2021/01/27 03:10:20 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*-
30 * Copyright (c) 2012 Damjan Marion <dmarion (at) Freebsd.org>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54
55 #include <sys/cdefs.h>
56 __KERNEL_RCSID(0, "$NetBSD: if_cpsw.c,v 1.14 2021/01/27 03:10:20 thorpej Exp $");
57
58 #include <sys/param.h>
59 #include <sys/bus.h>
60 #include <sys/device.h>
61 #include <sys/ioctl.h>
62 #include <sys/intr.h>
63 #include <sys/kmem.h>
64 #include <sys/mutex.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67
68 #include <net/if.h>
69 #include <net/if_ether.h>
70 #include <net/if_media.h>
71 #include <net/bpf.h>
72
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75
76 #include <dev/fdt/fdtvar.h>
77
78 #include <arm/ti/if_cpswreg.h>
79
80 #define FDT_INTR_FLAGS 0
81
82 #define CPSW_TXFRAGS 16
83
84 #define CPSW_CPPI_RAM_SIZE (0x2000)
85 #define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
86 #define CPSW_CPPI_RAM_RXDESCS_SIZE \
87 (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
88 #define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
89 #define CPSW_CPPI_RAM_RXDESCS_BASE \
90 (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)
91
92 #define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
93 #define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
94
95 CTASSERT(powerof2(CPSW_NTXDESCS));
96 CTASSERT(powerof2(CPSW_NRXDESCS));
97
98 #define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
99
100 #define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
101 #define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)
102
103 #define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
104 #define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
105
106 struct cpsw_ring_data {
107 bus_dmamap_t tx_dm[CPSW_NTXDESCS];
108 struct mbuf *tx_mb[CPSW_NTXDESCS];
109 bus_dmamap_t rx_dm[CPSW_NRXDESCS];
110 struct mbuf *rx_mb[CPSW_NRXDESCS];
111 };
112
113 struct cpsw_softc {
114 device_t sc_dev;
115 bus_space_tag_t sc_bst;
116 bus_space_handle_t sc_bsh;
117 bus_size_t sc_bss;
118 bus_dma_tag_t sc_bdt;
119 bus_space_handle_t sc_bsh_txdescs;
120 bus_space_handle_t sc_bsh_rxdescs;
121 bus_addr_t sc_txdescs_pa;
122 bus_addr_t sc_rxdescs_pa;
123 struct ethercom sc_ec;
124 struct mii_data sc_mii;
125 bool sc_phy_has_1000t;
126 bool sc_attached;
127 callout_t sc_tick_ch;
128 void *sc_ih;
129 struct cpsw_ring_data *sc_rdp;
130 volatile u_int sc_txnext;
131 volatile u_int sc_txhead;
132 volatile u_int sc_rxhead;
133 void *sc_rxthih;
134 void *sc_rxih;
135 void *sc_txih;
136 void *sc_miscih;
137 void *sc_txpad;
138 bus_dmamap_t sc_txpad_dm;
139 #define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr
140 uint8_t sc_enaddr[ETHER_ADDR_LEN];
141 volatile bool sc_txrun;
142 volatile bool sc_rxrun;
143 volatile bool sc_txeoq;
144 volatile bool sc_rxeoq;
145 };
146
147 static int cpsw_match(device_t, cfdata_t, void *);
148 static void cpsw_attach(device_t, device_t, void *);
149 static int cpsw_detach(device_t, int);
150
151 static void cpsw_start(struct ifnet *);
152 static int cpsw_ioctl(struct ifnet *, u_long, void *);
153 static void cpsw_watchdog(struct ifnet *);
154 static int cpsw_init(struct ifnet *);
155 static void cpsw_stop(struct ifnet *, int);
156
157 static int cpsw_mii_readreg(device_t, int, int, uint16_t *);
158 static int cpsw_mii_writereg(device_t, int, int, uint16_t);
159 static void cpsw_mii_statchg(struct ifnet *);
160
161 static int cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
162 static void cpsw_tick(void *);
163
164 static int cpsw_rxthintr(void *);
165 static int cpsw_rxintr(void *);
166 static int cpsw_txintr(void *);
167 static int cpsw_miscintr(void *);
168
169 /* ALE support */
170 #define CPSW_MAX_ALE_ENTRIES 1024
171
172 static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
173
174 CFATTACH_DECL_NEW(cpsw, sizeof(struct cpsw_softc),
175 cpsw_match, cpsw_attach, cpsw_detach, NULL);
176
177 #include <sys/kernhist.h>
178 KERNHIST_DEFINE(cpswhist);
179
180 #define CPSWHIST_CALLARGS(A,B,C,D) do { \
181 KERNHIST_CALLARGS(cpswhist, "%jx %jx %jx %jx", \
182 (uintptr_t)(A), (uintptr_t)(B), (uintptr_t)(C), (uintptr_t)(D));\
183 } while (0)
184
185
186 static inline u_int
187 cpsw_txdesc_adjust(u_int x, int y)
188 {
189 return (((x) + y) & (CPSW_NTXDESCS - 1));
190 }
191
192 static inline u_int
193 cpsw_rxdesc_adjust(u_int x, int y)
194 {
195 return (((x) + y) & (CPSW_NRXDESCS - 1));
196 }
197
198 static inline uint32_t
199 cpsw_read_4(struct cpsw_softc * const sc, bus_size_t const offset)
200 {
201 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, offset);
202 }
203
204 static inline void
205 cpsw_write_4(struct cpsw_softc * const sc, bus_size_t const offset,
206 uint32_t const value)
207 {
208 bus_space_write_4(sc->sc_bst, sc->sc_bsh, offset, value);
209 }
210
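/*
 * Descriptor accessors.  The CPDMA buffer descriptors live in the
 * switch's CPPI RAM rather than in host memory, so they are accessed
 * with bus_space region operations, staging each descriptor through a
 * local struct cpsw_cpdma_bd.  The "next" word (word 0) can also be
 * written on its own to link or terminate a chain.
 */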
211 static inline void
212 cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
213 {
214 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
215
216 KERNHIST_FUNC(__func__);
217 CPSWHIST_CALLARGS(sc, i, n, 0);
218
219 bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
220 }
221
222 static inline void
223 cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
224 {
225 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
226
227 KERNHIST_FUNC(__func__);
228 CPSWHIST_CALLARGS(sc, i, n, 0);
229
230 bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
231 }
232
233 static inline void
234 cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
235 struct cpsw_cpdma_bd * const bdp)
236 {
237 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
238 uint32_t * const dp = bdp->word;
239 const bus_size_t c = __arraycount(bdp->word);
240
241 KERNHIST_FUNC(__func__);
242 CPSWHIST_CALLARGS(sc, i, bdp, 0);
243
244 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
245 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
246 dp[0], dp[1], dp[2], dp[3]);
247 }
248
249 static inline void
250 cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
251 struct cpsw_cpdma_bd * const bdp)
252 {
253 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
254 uint32_t * const dp = bdp->word;
255 const bus_size_t c = __arraycount(bdp->word);
256
257 KERNHIST_FUNC(__func__);
258 CPSWHIST_CALLARGS(sc, i, bdp, 0);
259 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
260 dp[0], dp[1], dp[2], dp[3]);
261
262 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
263 }
264
265 static inline void
266 cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
267 struct cpsw_cpdma_bd * const bdp)
268 {
269 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
270 uint32_t * const dp = bdp->word;
271 const bus_size_t c = __arraycount(bdp->word);
272
273 KERNHIST_FUNC(__func__);
274 CPSWHIST_CALLARGS(sc, i, bdp, 0);
275
276 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
277
278 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
279 dp[0], dp[1], dp[2], dp[3]);
280 }
281
282 static inline void
283 cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
284 struct cpsw_cpdma_bd * const bdp)
285 {
286 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
287 uint32_t * const dp = bdp->word;
288 const bus_size_t c = __arraycount(bdp->word);
289
290 KERNHIST_FUNC(__func__);
291 CPSWHIST_CALLARGS(sc, i, bdp, 0);
292 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
293 dp[0], dp[1], dp[2], dp[3]);
294
295 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
296 }
297
298 static inline bus_addr_t
299 cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
300 {
301 KASSERT(x < CPSW_NTXDESCS);
302 return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
303 }
304
305 static inline bus_addr_t
306 cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
307 {
308 KASSERT(x < CPSW_NRXDESCS);
309 return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
310 }
311
312 static const struct device_compatible_entry compat_data[] = {
313 { .compat = "ti,am335x-cpsw" },
314 { .compat = "ti,cpsw" },
315 DEVICE_COMPAT_EOL
316 };
317
318 static int
319 cpsw_match(device_t parent, cfdata_t cf, void *aux)
320 {
321 struct fdt_attach_args * const faa = aux;
322
323 return of_compatible_match(faa->faa_phandle, compat_data);
324 }
325
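/*
 * cpsw_phy_has_1000t: true if the attached PHY offers a 1000BASE-T
 * media entry; used later to decide whether to set the GIG bit in
 * MACCONTROL.
 */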
326 static bool
327 cpsw_phy_has_1000t(struct cpsw_softc * const sc)
328 {
329 struct ifmedia_entry *ifm;
330
331 TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) {
332 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T)
333 return true;
334 }
335 return false;
336 }
337
338 static int
339 cpsw_detach(device_t self, int flags)
340 {
341 struct cpsw_softc * const sc = device_private(self);
342 struct ifnet *ifp = &sc->sc_ec.ec_if;
343 u_int i;
344
345 /* Succeed now if there's no work to do. */
346 if (!sc->sc_attached)
347 return 0;
348
349 sc->sc_attached = false;
350
351 /* Stop the interface. Callouts are stopped in it. */
352 cpsw_stop(ifp, 1);
353
354 /* Destroy our callout. */
355 callout_destroy(&sc->sc_tick_ch);
356
357 /* Let go of the interrupts */
358 intr_disestablish(sc->sc_rxthih);
359 intr_disestablish(sc->sc_rxih);
360 intr_disestablish(sc->sc_txih);
361 intr_disestablish(sc->sc_miscih);
362
363 ether_ifdetach(ifp);
364 if_detach(ifp);
365
366 /* Delete all media. */
367 ifmedia_fini(&sc->sc_mii.mii_media);
368
369 /* Free the packet padding buffer */
370 kmem_free(sc->sc_txpad, ETHER_MIN_LEN);
371 bus_dmamap_destroy(sc->sc_bdt, sc->sc_txpad_dm);
372
373 /* Destroy all the descriptors */
374 for (i = 0; i < CPSW_NTXDESCS; i++)
375 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->tx_dm[i]);
376 for (i = 0; i < CPSW_NRXDESCS; i++)
377 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->rx_dm[i]);
378 kmem_free(sc->sc_rdp, sizeof(*sc->sc_rdp));
379
380 /* Unmap */
381 bus_space_unmap(sc->sc_bst, sc->sc_bsh, sc->sc_bss);
382
383
384 return 0;
385 }
386
387 static void
388 cpsw_attach(device_t parent, device_t self, void *aux)
389 {
390 struct fdt_attach_args * const faa = aux;
391 struct cpsw_softc * const sc = device_private(self);
392 struct ethercom * const ec = &sc->sc_ec;
393 struct ifnet * const ifp = &ec->ec_if;
394 struct mii_data * const mii = &sc->sc_mii;
395 const int phandle = faa->faa_phandle;
396 const uint8_t *macaddr;
397 bus_addr_t addr;
398 bus_size_t size;
399 int error, slave, len;
400 char xname[16];
401 u_int i;
402
403 KERNHIST_INIT(cpswhist, 4096);
404
405 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
406 aprint_error(": couldn't get registers\n");
407 return;
408 }
409
410 sc->sc_dev = self;
411
412 aprint_normal(": TI Layer 2 3-Port Switch\n");
413 aprint_naive("\n");
414
415 callout_init(&sc->sc_tick_ch, 0);
416 callout_setfunc(&sc->sc_tick_ch, cpsw_tick, sc);
417
418 macaddr = NULL;
419 slave = of_find_firstchild_byname(phandle, "slave");
420 if (slave > 0) {
421 macaddr = fdtbus_get_prop(slave, "mac-address", &len);
422 if (len != ETHER_ADDR_LEN)
423 macaddr = NULL;
424 }
425 if (macaddr == NULL) {
426 #if 0
427 /* grab mac_id0 from AM335x control module */
428 uint32_t reg_lo, reg_hi;
429
430 	if (sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO, &reg_lo) == 0 &&
431 	    sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI, &reg_hi) == 0) {
432 sc->sc_enaddr[0] = (reg_hi >> 0) & 0xff;
433 sc->sc_enaddr[1] = (reg_hi >> 8) & 0xff;
434 sc->sc_enaddr[2] = (reg_hi >> 16) & 0xff;
435 sc->sc_enaddr[3] = (reg_hi >> 24) & 0xff;
436 sc->sc_enaddr[4] = (reg_lo >> 0) & 0xff;
437 sc->sc_enaddr[5] = (reg_lo >> 8) & 0xff;
438 } else
439 #endif
440 {
441 aprint_error_dev(sc->sc_dev,
442 "using fake station address\n");
443 /* 'N' happens to have the Local bit set */
444 #if 0
445 sc->sc_enaddr[0] = 'N';
446 sc->sc_enaddr[1] = 'e';
447 sc->sc_enaddr[2] = 't';
448 sc->sc_enaddr[3] = 'B';
449 sc->sc_enaddr[4] = 'S';
450 sc->sc_enaddr[5] = 'D';
451 #else
452 /* XXX Glor */
453 sc->sc_enaddr[0] = 0xd4;
454 sc->sc_enaddr[1] = 0x94;
455 sc->sc_enaddr[2] = 0xa1;
456 sc->sc_enaddr[3] = 0x97;
457 sc->sc_enaddr[4] = 0x03;
458 sc->sc_enaddr[5] = 0x94;
459 #endif
460 }
461 } else {
462 memcpy(sc->sc_enaddr, macaddr, ETHER_ADDR_LEN);
463 }
464
465 snprintf(xname, sizeof(xname), "%s rxth", device_xname(self));
466 sc->sc_rxthih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_RXTH,
467 IPL_VM, FDT_INTR_FLAGS, cpsw_rxthintr, sc, xname);
468
469 snprintf(xname, sizeof(xname), "%s rx", device_xname(self));
470 sc->sc_rxih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_RX,
471 IPL_VM, FDT_INTR_FLAGS, cpsw_rxintr, sc, xname);
472
473 snprintf(xname, sizeof(xname), "%s tx", device_xname(self));
474 sc->sc_txih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_TX,
475 IPL_VM, FDT_INTR_FLAGS, cpsw_txintr, sc, xname);
476
477 snprintf(xname, sizeof(xname), "%s misc", device_xname(self));
478 sc->sc_miscih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_MISC,
479 IPL_VM, FDT_INTR_FLAGS, cpsw_miscintr, sc, xname);
480
481 sc->sc_bst = faa->faa_bst;
482 sc->sc_bss = size;
483 sc->sc_bdt = faa->faa_dmat;
484
485 error = bus_space_map(sc->sc_bst, addr, size, 0,
486 &sc->sc_bsh);
487 if (error) {
488 aprint_error_dev(sc->sc_dev,
489 "can't map registers: %d\n", error);
490 return;
491 }
492
493 sc->sc_txdescs_pa = addr + CPSW_CPPI_RAM_TXDESCS_BASE;
494 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
495 CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
496 &sc->sc_bsh_txdescs);
497 if (error) {
498 aprint_error_dev(sc->sc_dev,
499 "can't subregion tx ring SRAM: %d\n", error);
500 return;
501 }
502 aprint_debug_dev(sc->sc_dev, "txdescs at %p\n",
503 (void *)sc->sc_bsh_txdescs);
504
505 sc->sc_rxdescs_pa = addr + CPSW_CPPI_RAM_RXDESCS_BASE;
506 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
507 CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
508 &sc->sc_bsh_rxdescs);
509 if (error) {
510 aprint_error_dev(sc->sc_dev,
511 "can't subregion rx ring SRAM: %d\n", error);
512 return;
513 }
514 aprint_debug_dev(sc->sc_dev, "rxdescs at %p\n",
515 (void *)sc->sc_bsh_rxdescs);
516
517 sc->sc_rdp = kmem_alloc(sizeof(*sc->sc_rdp), KM_SLEEP);
518
519 for (i = 0; i < CPSW_NTXDESCS; i++) {
520 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
521 CPSW_TXFRAGS, MCLBYTES, 0, 0,
522 &sc->sc_rdp->tx_dm[i])) != 0) {
523 aprint_error_dev(sc->sc_dev,
524 "unable to create tx DMA map: %d\n", error);
525 }
526 sc->sc_rdp->tx_mb[i] = NULL;
527 }
528
529 for (i = 0; i < CPSW_NRXDESCS; i++) {
530 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
531 MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
532 aprint_error_dev(sc->sc_dev,
533 "unable to create rx DMA map: %d\n", error);
534 }
535 sc->sc_rdp->rx_mb[i] = NULL;
536 }
537
538 sc->sc_txpad = kmem_zalloc(ETHER_MIN_LEN, KM_SLEEP);
539 bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
540 BUS_DMA_WAITOK, &sc->sc_txpad_dm);
541 bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
542 ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
543 bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
544 BUS_DMASYNC_PREWRITE);
545
546 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
547 ether_sprintf(sc->sc_enaddr));
548
549 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
550 ifp->if_softc = sc;
551 ifp->if_capabilities = 0;
552 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
553 ifp->if_start = cpsw_start;
554 ifp->if_ioctl = cpsw_ioctl;
555 ifp->if_init = cpsw_init;
556 ifp->if_stop = cpsw_stop;
557 ifp->if_watchdog = cpsw_watchdog;
558 IFQ_SET_READY(&ifp->if_snd);
559
560 cpsw_stop(ifp, 0);
561
562 mii->mii_ifp = ifp;
563 mii->mii_readreg = cpsw_mii_readreg;
564 mii->mii_writereg = cpsw_mii_writereg;
565 mii->mii_statchg = cpsw_mii_statchg;
566
567 sc->sc_ec.ec_mii = mii;
568 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
569
570 /* Initialize MDIO */
571 cpsw_write_4(sc, MDIOCONTROL,
572 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
573 /* Clear ALE */
574 cpsw_write_4(sc, CPSW_ALE_CONTROL, ALECTL_CLEAR_TABLE);
575
576 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, 0, 0);
577 if (LIST_FIRST(&mii->mii_phys) == NULL) {
578 aprint_error_dev(self, "no PHY found!\n");
579 sc->sc_phy_has_1000t = false;
580 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
581 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
582 } else {
583 sc->sc_phy_has_1000t = cpsw_phy_has_1000t(sc);
584
585 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
586 }
587
588 if_attach(ifp);
589 if_deferred_start_init(ifp, NULL);
590 ether_ifattach(ifp, sc->sc_enaddr);
591
592 /* The attach is successful. */
593 sc->sc_attached = true;
594
595 return;
596 }
597
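/*
 * cpsw_start: if_start routine.  For each mbuf on the send queue, load
 * it into the slot's DMA map and emit one buffer descriptor per DMA
 * segment; frames shorter than CPSW_PAD_LEN get an extra descriptor
 * pointing at the shared zero pad buffer.  The new chain is terminated,
 * linked onto the previous tail, and the TX head descriptor pointer is
 * rewritten if the channel had reached end of queue.
 */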
598 static void
599 cpsw_start(struct ifnet *ifp)
600 {
601 struct cpsw_softc * const sc = ifp->if_softc;
602 struct cpsw_ring_data * const rdp = sc->sc_rdp;
603 struct cpsw_cpdma_bd bd;
604 uint32_t * const dw = bd.word;
605 struct mbuf *m;
606 bus_dmamap_t dm;
607 u_int eopi __diagused = ~0;
608 u_int seg;
609 u_int txfree;
610 int txstart = -1;
611 int error;
612 bool pad;
613 u_int mlen;
614
615 KERNHIST_FUNC(__func__);
616 CPSWHIST_CALLARGS(sc, 0, 0, 0);
617
618 if (__predict_false((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
619 IFF_RUNNING)) {
620 return;
621 }
622
623 if (sc->sc_txnext >= sc->sc_txhead)
624 txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
625 else
626 txfree = sc->sc_txhead - sc->sc_txnext - 1;
627
628 KERNHIST_LOG(cpswhist, "start txf %x txh %x txn %x txr %x\n",
629 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
630
631 while (txfree > 0) {
632 IFQ_POLL(&ifp->if_snd, m);
633 if (m == NULL)
634 break;
635
636 dm = rdp->tx_dm[sc->sc_txnext];
637
638 error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
639 if (error == EFBIG) {
640 device_printf(sc->sc_dev, "won't fit\n");
641 IFQ_DEQUEUE(&ifp->if_snd, m);
642 m_freem(m);
643 if_statinc(ifp, if_oerrors);
644 continue;
645 } else if (error != 0) {
646 device_printf(sc->sc_dev, "error\n");
647 break;
648 }
649
650 if (dm->dm_nsegs + 1 >= txfree) {
651 ifp->if_flags |= IFF_OACTIVE;
652 bus_dmamap_unload(sc->sc_bdt, dm);
653 break;
654 }
655
656 mlen = m_length(m);
657 pad = mlen < CPSW_PAD_LEN;
658
659 KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
660 rdp->tx_mb[sc->sc_txnext] = m;
661 IFQ_DEQUEUE(&ifp->if_snd, m);
662
663 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
664 BUS_DMASYNC_PREWRITE);
665
666 if (txstart == -1)
667 txstart = sc->sc_txnext;
668 eopi = sc->sc_txnext;
669 for (seg = 0; seg < dm->dm_nsegs; seg++) {
670 dw[0] = cpsw_txdesc_paddr(sc,
671 TXDESC_NEXT(sc->sc_txnext));
672 dw[1] = dm->dm_segs[seg].ds_addr;
673 dw[2] = dm->dm_segs[seg].ds_len;
674 dw[3] = 0;
675
676 if (seg == 0)
677 dw[3] |= CPDMA_BD_SOP | CPDMA_BD_OWNER |
678 MAX(mlen, CPSW_PAD_LEN);
679
680 if ((seg == dm->dm_nsegs - 1) && !pad)
681 dw[3] |= CPDMA_BD_EOP;
682
683 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
684 txfree--;
685 eopi = sc->sc_txnext;
686 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
687 }
688 if (pad) {
689 dw[0] = cpsw_txdesc_paddr(sc,
690 TXDESC_NEXT(sc->sc_txnext));
691 dw[1] = sc->sc_txpad_pa;
692 dw[2] = CPSW_PAD_LEN - mlen;
693 dw[3] = CPDMA_BD_EOP;
694
695 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
696 txfree--;
697 eopi = sc->sc_txnext;
698 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
699 }
700
701 bpf_mtap(ifp, m, BPF_D_OUT);
702 }
703
704 if (txstart >= 0) {
705 ifp->if_timer = 5;
706 /* terminate the new chain */
707 KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
708 cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);
709 KERNHIST_LOG(cpswhist, "CP %x HDP %x s %x e %x\n",
710 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
711 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), txstart, eopi);
712 /* link the new chain on */
713 cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
714 cpsw_txdesc_paddr(sc, txstart));
715 if (sc->sc_txeoq) {
716 /* kick the dma engine */
717 sc->sc_txeoq = false;
718 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
719 cpsw_txdesc_paddr(sc, txstart));
720 }
721 }
722 KERNHIST_LOG(cpswhist, "end txf %x txh %x txn %x txr %x\n",
723 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
724 }
725
726 static int
727 cpsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
728 {
729 const int s = splnet();
730 int error = 0;
731
732 switch (cmd) {
733 default:
734 error = ether_ioctl(ifp, cmd, data);
735 if (error == ENETRESET) {
736 error = 0;
737 }
738 break;
739 }
740
741 splx(s);
742
743 return error;
744 }
745
746 static void
747 cpsw_watchdog(struct ifnet *ifp)
748 {
749 struct cpsw_softc *sc = ifp->if_softc;
750
751 device_printf(sc->sc_dev, "device timeout\n");
752
753 if_statinc(ifp, if_oerrors);
754 cpsw_init(ifp);
755 cpsw_start(ifp);
756 }
757
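/*
 * cpsw_mii_wait: wait for bit 31 of the given MDIO register to clear,
 * i.e. for the previous user access to complete; gives up after
 * roughly 1ms.
 */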
758 static int
759 cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
760 {
761 u_int tries;
762
763 for (tries = 0; tries < 1000; tries++) {
764 if ((cpsw_read_4(sc, reg) & __BIT(31)) == 0)
765 return 0;
766 delay(1);
767 }
768 return ETIMEDOUT;
769 }
770
771 static int
772 cpsw_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
773 {
774 struct cpsw_softc * const sc = device_private(dev);
775 uint32_t v;
776
777 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
778 return -1;
779
780 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) |
781 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
782
783 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
784 return -1;
785
786 v = cpsw_read_4(sc, MDIOUSERACCESS0);
787 if (v & __BIT(29)) {
788 *val = v & 0xffff;
789 return 0;
790 }
791
792 return -1;
793 }
794
795 static int
796 cpsw_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
797 {
798 struct cpsw_softc * const sc = device_private(dev);
799 uint32_t v;
800
801 KASSERT((val & 0xffff0000UL) == 0);
802
803 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
804 goto out;
805
806 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) | (1 << 30) |
807 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);
808
809 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
810 goto out;
811
812 v = cpsw_read_4(sc, MDIOUSERACCESS0);
813 if ((v & __BIT(29)) == 0) {
814 out:
815 device_printf(sc->sc_dev, "%s error\n", __func__);
816 return -1;
817 }
818
819 return 0;
820 }
821
822 static void
823 cpsw_mii_statchg(struct ifnet *ifp)
824 {
825 return;
826 }
827
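/*
 * cpsw_new_rxbuf: attach a fresh mbuf cluster to RX slot i and rewrite
 * its buffer descriptor.  If allocation fails the old buffer is left
 * loaded and handed back to the hardware instead, so the ring never
 * loses a slot.  Either way the descriptor is returned to the DMA
 * engine and relinked from its predecessor.
 */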
828 static int
829 cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
830 {
831 struct cpsw_ring_data * const rdp = sc->sc_rdp;
832 const u_int h = RXDESC_PREV(i);
833 struct cpsw_cpdma_bd bd;
834 uint32_t * const dw = bd.word;
835 struct mbuf *m;
836 int error = ENOBUFS;
837
838 MGETHDR(m, M_DONTWAIT, MT_DATA);
839 if (m == NULL) {
840 goto reuse;
841 }
842
843 MCLGET(m, M_DONTWAIT);
844 if ((m->m_flags & M_EXT) == 0) {
845 m_freem(m);
846 goto reuse;
847 }
848
849 /* We have a new buffer, prepare it for the ring. */
850
851 if (rdp->rx_mb[i] != NULL)
852 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
853
854 m->m_len = m->m_pkthdr.len = MCLBYTES;
855
856 rdp->rx_mb[i] = m;
857
858 error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
859 BUS_DMA_READ | BUS_DMA_NOWAIT);
860 if (error) {
861 device_printf(sc->sc_dev, "can't load rx DMA map %d: %d\n",
862 i, error);
863 }
864
865 bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
866 0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
867
868 error = 0;
869
870 reuse:
871 /* (re-)setup the descriptor */
872 dw[0] = 0;
873 dw[1] = rdp->rx_dm[i]->dm_segs[0].ds_addr;
874 dw[2] = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
875 dw[3] = CPDMA_BD_OWNER;
876
877 cpsw_set_rxdesc(sc, i, &bd);
878 /* and link onto ring */
879 cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));
880
881 return error;
882 }
883
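/*
 * cpsw_init: if_init routine.  Resets the wrapper, subsystem, sliver
 * and CPDMA submodules, programs the per-port MAC addresses and the
 * ALE table, clears both descriptor rings in CPPI RAM, refills the RX
 * ring, unmasks the channel 0 interrupts and starts RX DMA before
 * kicking the MII and marking the interface running.
 */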
884 static int
885 cpsw_init(struct ifnet *ifp)
886 {
887 struct cpsw_softc * const sc = ifp->if_softc;
888 struct mii_data * const mii = &sc->sc_mii;
889 int i;
890
891 cpsw_stop(ifp, 0);
892
893 sc->sc_txnext = 0;
894 sc->sc_txhead = 0;
895
896 /* Reset wrapper */
897 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
898 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
899 ;
900
901 /* Reset SS */
902 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
903 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
904 ;
905
906 /* Clear table and enable ALE */
907 cpsw_write_4(sc, CPSW_ALE_CONTROL,
908 ALECTL_ENABLE_ALE | ALECTL_CLEAR_TABLE);
909
910 /* Reset and init Sliver port 1 and 2 */
911 for (i = 0; i < CPSW_ETH_PORTS; i++) {
912 uint32_t macctl;
913
914 /* Reset */
915 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
916 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
917 ;
918 /* Set Slave Mapping */
919 cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
920 cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
921 cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
922 /* Set MAC Address */
923 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i+1),
924 sc->sc_enaddr[0] | (sc->sc_enaddr[1] << 8) |
925 (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[3] << 24));
926 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i+1),
927 sc->sc_enaddr[4] | (sc->sc_enaddr[5] << 8));
928
929 /* Set MACCONTROL for ports 0,1 */
930 macctl = SLMACCTL_FULLDUPLEX | SLMACCTL_GMII_EN |
931 SLMACCTL_IFCTL_A;
932 if (sc->sc_phy_has_1000t)
933 macctl |= SLMACCTL_GIG;
934 cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), macctl);
935
936 /* Set ALE port to forwarding(3) */
937 cpsw_write_4(sc, CPSW_ALE_PORTCTL(i+1), 3);
938 }
939
940 /* Set Host Port Mapping */
941 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
942 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
943
944 /* Set ALE port to forwarding(3) */
945 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);
946
947 /* Initialize addrs */
948 cpsw_ale_update_addresses(sc, 1);
949
950 cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
951 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
952
953 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
954 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
955 ;
956
957 for (i = 0; i < 8; i++) {
958 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
959 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
960 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
961 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
962 }
963
964 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
965 CPSW_CPPI_RAM_TXDESCS_SIZE/4);
966
967 sc->sc_txhead = 0;
968 sc->sc_txnext = 0;
969
970 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
971
972 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
973 CPSW_CPPI_RAM_RXDESCS_SIZE/4);
974 /* Initialize RX Buffer Descriptors */
975 cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
976 for (i = 0; i < CPSW_NRXDESCS; i++) {
977 cpsw_new_rxbuf(sc, i);
978 }
979 sc->sc_rxhead = 0;
980
981 /* turn off flow control */
982 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
983
984 /* align layer 3 header to 32-bit */
985 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);
986
987 /* Clear all interrupt Masks */
988 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
989 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
990
991 /* Enable TX & RX DMA */
992 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
993 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
994
995 /* Enable TX and RX interrupt receive for core 0 */
996 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 1);
997 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 1);
998 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);
999
1000 /* Enable host Error Interrupt */
1001 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 2);
1002
1003 /* Enable interrupts for TX and RX Channel 0 */
1004 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
1005 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);
1006
1007 /* Ack stalled irqs */
1008 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1009 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1010 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1011 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1012
1013 /* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
1014 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
1015 cpsw_write_4(sc, MDIOCONTROL,
1016 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
1017
1018 mii_mediachg(mii);
1019
1020 /* Write channel 0 RX HDP */
1021 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
1022 sc->sc_rxrun = true;
1023 sc->sc_rxeoq = false;
1024
1025 sc->sc_txrun = true;
1026 sc->sc_txeoq = true;
1027 callout_schedule(&sc->sc_tick_ch, hz);
1028 ifp->if_flags |= IFF_RUNNING;
1029 ifp->if_flags &= ~IFF_OACTIVE;
1030
1031 return 0;
1032 }
1033
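/*
 * cpsw_stop: if_stop routine.  Masks the channel 0 interrupts,
 * requests TX and RX teardown and polls the interrupt handlers until
 * both channels acknowledge the teardown, then resets the hardware
 * and frees any mbufs still queued for transmit.  RX buffers are only
 * released when the interface is being disabled.
 */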
1034 static void
1035 cpsw_stop(struct ifnet *ifp, int disable)
1036 {
1037 struct cpsw_softc * const sc = ifp->if_softc;
1038 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1039 u_int i;
1040
1041 aprint_debug_dev(sc->sc_dev, "%s: ifp %p disable %d\n", __func__,
1042 ifp, disable);
1043
1044 if ((ifp->if_flags & IFF_RUNNING) == 0)
1045 return;
1046
1047 callout_stop(&sc->sc_tick_ch);
1048 mii_down(&sc->sc_mii);
1049
1050 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
1051 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
1052 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0x0);
1053 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0x0);
1054 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x0);
1055
1056 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
1057 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
1058 i = 0;
1059 while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
1060 delay(10);
1061 if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
1062 sc->sc_txrun = false;
1063 if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
1064 sc->sc_rxrun = false;
1065 i++;
1066 }
1067 //printf("%s toredown complete in %u\n", __func__, i);
1068
1069 /* Reset wrapper */
1070 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
1071 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
1072 ;
1073
1074 /* Reset SS */
1075 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
1076 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
1077 ;
1078
1079 for (i = 0; i < CPSW_ETH_PORTS; i++) {
1080 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
1081 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
1082 ;
1083 }
1084
1085 /* Reset CPDMA */
1086 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
1087 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
1088 ;
1089
1090 /* Release any queued transmit buffers. */
1091 for (i = 0; i < CPSW_NTXDESCS; i++) {
1092 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
1093 m_freem(rdp->tx_mb[i]);
1094 rdp->tx_mb[i] = NULL;
1095 }
1096
1097 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1098 ifp->if_timer = 0;
1099
1100 if (!disable)
1101 return;
1102
1103 for (i = 0; i < CPSW_NRXDESCS; i++) {
1104 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
1105 m_freem(rdp->rx_mb[i]);
1106 rdp->rx_mb[i] = NULL;
1107 }
1108 }
1109
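/*
 * cpsw_tick: once-a-second callout that drives the MII state machine.
 */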
1110 static void
1111 cpsw_tick(void *arg)
1112 {
1113 struct cpsw_softc * const sc = arg;
1114 struct mii_data * const mii = &sc->sc_mii;
1115 const int s = splnet();
1116
1117 mii_tick(mii);
1118
1119 splx(s);
1120
1121 callout_schedule(&sc->sc_tick_ch, hz);
1122 }
1123
1124 static int
1125 cpsw_rxthintr(void *arg)
1126 {
1127 struct cpsw_softc * const sc = arg;
1128
1129 /* this won't deassert the interrupt though */
1130 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1131
1132 return 1;
1133 }
1134
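/*
 * cpsw_rxintr: RX channel 0 interrupt.  Walk the ring from sc_rxhead,
 * stopping at the first descriptor still owned by the DMA engine.
 * Each completed buffer is replaced via cpsw_new_rxbuf() and the old
 * mbuf is passed up the stack (or the packet is dropped if no
 * replacement could be allocated).  The completion pointer is advanced
 * as descriptors are consumed; the walk also ends on end-of-queue or
 * teardown-complete.
 */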
1135 static int
1136 cpsw_rxintr(void *arg)
1137 {
1138 struct cpsw_softc * const sc = arg;
1139 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1140 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1141 struct cpsw_cpdma_bd bd;
1142 const uint32_t * const dw = bd.word;
1143 bus_dmamap_t dm;
1144 struct mbuf *m;
1145 u_int i;
1146 u_int len, off;
1147
1148 KERNHIST_FUNC(__func__);
1149 CPSWHIST_CALLARGS(sc, 0, 0, 0);
1150
1151 for (;;) {
1152 KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);
1153
1154 i = sc->sc_rxhead;
1155 KERNHIST_LOG(cpswhist, "rxhead %x CP %x\n", i,
1156 cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0)), 0, 0);
1157 dm = rdp->rx_dm[i];
1158 m = rdp->rx_mb[i];
1159
1160 KASSERT(dm != NULL);
1161 KASSERT(m != NULL);
1162
1163 cpsw_get_rxdesc(sc, i, &bd);
1164
1165 if (ISSET(dw[3], CPDMA_BD_OWNER))
1166 break;
1167
1168 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1169 sc->sc_rxrun = false;
1170 return 1;
1171 }
1172
1173 if ((dw[3] & (CPDMA_BD_SOP | CPDMA_BD_EOP)) !=
1174 (CPDMA_BD_SOP | CPDMA_BD_EOP)) {
1175 //Debugger();
1176 }
1177
1178 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
1179 BUS_DMASYNC_POSTREAD);
1180
1181 if (cpsw_new_rxbuf(sc, i) != 0) {
1182 /* drop current packet, reuse buffer for new */
1183 if_statinc(ifp, if_ierrors);
1184 goto next;
1185 }
1186
1187 off = __SHIFTOUT(dw[2], (uint32_t)__BITS(26, 16));
1188 len = __SHIFTOUT(dw[3], (uint32_t)__BITS(10, 0));
1189
1190 if (ISSET(dw[3], CPDMA_BD_PASSCRC))
1191 len -= ETHER_CRC_LEN;
1192
1193 m_set_rcvif(m, ifp);
1194 m->m_pkthdr.len = m->m_len = len;
1195 m->m_data += off;
1196
1197 if_percpuq_enqueue(ifp->if_percpuq, m);
1198
1199 next:
1200 sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
1201 if (ISSET(dw[3], CPDMA_BD_EOQ)) {
1202 sc->sc_rxeoq = true;
1203 break;
1204 } else {
1205 sc->sc_rxeoq = false;
1206 }
1207 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(0),
1208 cpsw_rxdesc_paddr(sc, i));
1209 }
1210
1211 if (sc->sc_rxeoq) {
1212 device_printf(sc->sc_dev, "rxeoq\n");
1213 //Debugger();
1214 }
1215
1216 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1217
1218 return 1;
1219 }
1220
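/*
 * cpsw_txintr: TX channel 0 interrupt.  Acknowledges a pending
 * teardown, otherwise reclaims descriptors from sc_txhead up to the
 * hardware completion pointer, unloading the DMA maps and freeing the
 * transmitted mbufs.  If descriptors are still outstanding after an
 * end-of-queue the channel is restarted from sc_txhead.
 */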
1221 static int
1222 cpsw_txintr(void *arg)
1223 {
1224 struct cpsw_softc * const sc = arg;
1225 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1226 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1227 struct cpsw_cpdma_bd bd;
1228 const uint32_t * const dw = bd.word;
1229 bool handled = false;
1230 uint32_t tx0_cp;
1231 u_int cpi;
1232
1233 KERNHIST_FUNC(__func__);
1234 CPSWHIST_CALLARGS(sc, 0, 0, 0);
1235
1236 KASSERT(sc->sc_txrun);
1237
1238 KERNHIST_LOG(cpswhist, "before txnext %x txhead %x txrun %x\n",
1239 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1240
1241 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1242
1243 if (tx0_cp == 0xfffffffc) {
1244 /* Teardown, ack it */
1245 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
1246 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0), 0);
1247 sc->sc_txrun = false;
1248 return 0;
1249 }
1250
1251 for (;;) {
1252 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1253 cpi = (tx0_cp - sc->sc_txdescs_pa) / sizeof(struct cpsw_cpdma_bd);
1254 KASSERT(sc->sc_txhead < CPSW_NTXDESCS);
1255
1256 KERNHIST_LOG(cpswhist, "txnext %x txhead %x txrun %x cpi %x\n",
1257 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, cpi);
1258
1259 cpsw_get_txdesc(sc, sc->sc_txhead, &bd);
1260
1261 if (dw[2] == 0) {
1262 //Debugger();
1263 }
1264
1265 if (ISSET(dw[3], CPDMA_BD_SOP) == 0)
1266 goto next;
1267
1268 if (ISSET(dw[3], CPDMA_BD_OWNER)) {
1269 printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
1270 sc->sc_txnext);
1271 break;
1272 }
1273
1274 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1275 sc->sc_txrun = false;
1276 return 1;
1277 }
1278
1279 bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
1280 0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
1281 BUS_DMASYNC_POSTWRITE);
1282 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);
1283
1284 m_freem(rdp->tx_mb[sc->sc_txhead]);
1285 rdp->tx_mb[sc->sc_txhead] = NULL;
1286
1287 if_statinc(ifp, if_opackets);
1288
1289 handled = true;
1290
1291 ifp->if_flags &= ~IFF_OACTIVE;
1292
1293 next:
1294 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1295 sc->sc_txeoq = true;
1296 }
1297 if (sc->sc_txhead == cpi) {
1298 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0),
1299 cpsw_txdesc_paddr(sc, cpi));
1300 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1301 break;
1302 }
1303 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1304 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1305 sc->sc_txeoq = true;
1306 break;
1307 }
1308 }
1309
1310 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1311
1312 if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
1313 if (cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)) == 0) {
1314 sc->sc_txeoq = false;
1315 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
1316 cpsw_txdesc_paddr(sc, sc->sc_txhead));
1317 }
1318 }
1319
1320 KERNHIST_LOG(cpswhist, "after txnext %x txhead %x txrun %x\n",
1321 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1322 KERNHIST_LOG(cpswhist, "CP %x HDP %x\n",
1323 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
1324 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), 0, 0);
1325
1326 if (handled && sc->sc_txnext == sc->sc_txhead)
1327 ifp->if_timer = 0;
1328
1329 if (handled)
1330 if_schedule_deferred_start(ifp);
1331
1332 return handled;
1333 }
1334
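/*
 * cpsw_miscintr: miscellaneous interrupt.  Only the host error cause
 * is examined: the CPDMA state registers are dumped to the console and
 * the offending interrupt source is masked.  The line is then EOI'd.
 */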
1335 static int
1336 cpsw_miscintr(void *arg)
1337 {
1338 struct cpsw_softc * const sc = arg;
1339 uint32_t miscstat;
1340 uint32_t dmastat;
1341 uint32_t stat;
1342
1343 miscstat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
1344 device_printf(sc->sc_dev, "%s %x FIRE\n", __func__, miscstat);
1345
1346 #define CPSW_MISC_HOST_PEND __BIT32(2)
1347 #define CPSW_MISC_STAT_PEND __BIT32(3)
1348
1349 if (ISSET(miscstat, CPSW_MISC_HOST_PEND)) {
1350 /* Host Error */
1351 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1352 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1353
1354 printf("rxhead %02x\n", sc->sc_rxhead);
1355
1356 stat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
1357 printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
1358 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0));
1359 printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
1360 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1361 printf("CPSW_CPDMA_TX0_CP %x\n", stat);
1362 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(0));
1363 printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
1364 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0));
1365 printf("CPSW_CPDMA_RX0_CP %x\n", stat);
1366
1367 //Debugger();
1368
1369 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
1370 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1371 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1372 }
1373
1374 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1375
1376 return 1;
1377 }
1378
1379 /*
1380 *
1381 * ALE support routines.
1382 *
1383 */
1384
1385 static void
1386 cpsw_ale_entry_init(uint32_t *ale_entry)
1387 {
1388 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1389 }
1390
1391 static void
1392 cpsw_ale_entry_set_mac(uint32_t *ale_entry, const uint8_t *mac)
1393 {
1394 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1395 ale_entry[1] = mac[0] << 8 | mac[1];
1396 }
1397
1398 static void
1399 cpsw_ale_entry_set_bcast_mac(uint32_t *ale_entry)
1400 {
1401 ale_entry[0] = 0xffffffff;
1402 ale_entry[1] = 0x0000ffff;
1403 }
1404
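/*
 * cpsw_ale_entry_set: pack a field into a 96-bit ALE table entry kept
 * as three 32-bit words (ale_entry[0] holds bits 31:0, [1] bits 63:32,
 * [2] bits 95:64).  The bit ranges for each field are noted in the
 * switch cases below.
 */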
1405 static void
1406 cpsw_ale_entry_set(uint32_t *ale_entry, ale_entry_field_t field, uint32_t val)
1407 {
1408 /* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1409 switch (field) {
1410 case ALE_ENTRY_TYPE:
1411 /* [61:60] */
1412 ale_entry[1] |= (val & 0x3) << 28;
1413 break;
1414 case ALE_MCAST_FWD_STATE:
1415 /* [63:62] */
1416 ale_entry[1] |= (val & 0x3) << 30;
1417 break;
1418 case ALE_PORT_MASK:
1419 /* [68:66] */
1420 ale_entry[2] |= (val & 0x7) << 2;
1421 break;
1422 case ALE_PORT_NUMBER:
1423 /* [67:66] */
1424 ale_entry[2] |= (val & 0x3) << 2;
1425 break;
1426 default:
1427 panic("Invalid ALE entry field: %d\n", field);
1428 }
1429
1430 return;
1431 }
1432
1433 static bool
1434 cpsw_ale_entry_mac_match(const uint32_t *ale_entry, const uint8_t *mac)
1435 {
1436 return (((ale_entry[1] >> 8) & 0xff) == mac[0]) &&
1437 (((ale_entry[1] >> 0) & 0xff) == mac[1]) &&
1438 (((ale_entry[0] >>24) & 0xff) == mac[2]) &&
1439 (((ale_entry[0] >>16) & 0xff) == mac[3]) &&
1440 (((ale_entry[0] >> 8) & 0xff) == mac[4]) &&
1441 (((ale_entry[0] >> 0) & 0xff) == mac[5]);
1442 }
1443
1444 static void
1445 cpsw_ale_set_outgoing_mac(struct cpsw_softc *sc, int port, const uint8_t *mac)
1446 {
1447 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(port),
1448 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
1449 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(port),
1450 mac[5] << 8 | mac[4]);
1451 }
1452
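/*
 * ALE table access: writing an index to TBLCTL loads that entry into
 * the three table word registers; writing the words and then TBLCTL
 * with bit 31 set stores them back into the table.
 */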
1453 static void
1454 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1455 {
1456 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
1457 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
1458 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
1459 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
1460 }
1461
1462 static void
1463 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx,
1464 const uint32_t *ale_entry)
1465 {
1466 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
1467 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
1468 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
1469 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
1470 }
1471
1472 static int
1473 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
1474 {
1475 int i;
1476 uint32_t ale_entry[3];
1477
1478 /* First two entries are link address and broadcast. */
1479 for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
1480 cpsw_ale_read_entry(sc, i, ale_entry);
1481 if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
1482 ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */
1483 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1484 cpsw_ale_write_entry(sc, i, ale_entry);
1485 }
1486 }
1487 return CPSW_MAX_ALE_ENTRIES;
1488 }
1489
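/*
 * cpsw_ale_mc_entry_set: install a multicast address entry forwarding
 * to the ports in portmask, reusing an existing entry for the same MAC
 * if present, otherwise the first free slot.  Returns ENOMEM when the
 * table is full.
 */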
1490 static int
1491 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmask, uint8_t *mac)
1492 {
1493 int free_index = -1, matching_index = -1, i;
1494 uint32_t ale_entry[3];
1495
1496 /* Find a matching entry or a free entry. */
1497 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1498 cpsw_ale_read_entry(sc, i, ale_entry);
1499
1500 /* Entry Type[61:60] is 0 for free entry */
1501 if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
1502 free_index = i;
1503 }
1504
1505 if (cpsw_ale_entry_mac_match(ale_entry, mac)) {
1506 matching_index = i;
1507 break;
1508 }
1509 }
1510
1511 if (matching_index < 0) {
1512 if (free_index < 0)
1513 return ENOMEM;
1514 i = free_index;
1515 }
1516
1517 cpsw_ale_entry_init(ale_entry);
1518
1519 cpsw_ale_entry_set_mac(ale_entry, mac);
1520 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1521 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1522 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, portmask);
1523
1524 cpsw_ale_write_entry(sc, i, ale_entry);
1525
1526 return 0;
1527 }
1528
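/*
 * cpsw_ale_update_addresses: rebuild the address table: the host
 * unicast entry at index 0, the broadcast entry at index 1, the
 * outgoing MAC address registers for both ethernet ports, and one
 * entry per joined multicast group.  When purge is set, all existing
 * multicast entries are removed first, since SIOCDELMULTI does not
 * say which address went away.
 */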
1529 static int
1530 cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
1531 {
1532 uint8_t *mac = sc->sc_enaddr;
1533 uint32_t ale_entry[3];
1534 int i;
1535 struct ethercom * const ec = &sc->sc_ec;
1536 struct ether_multi *ifma;
1537
1538 cpsw_ale_entry_init(ale_entry);
1539 /* Route incoming packets for our MAC address to Port 0 (host). */
1540 /* For simplicity, keep this entry at table index 0 in the ALE. */
1541 cpsw_ale_entry_set_mac(ale_entry, mac);
1542 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1543 cpsw_ale_entry_set(ale_entry, ALE_PORT_NUMBER, 0);
1544 cpsw_ale_write_entry(sc, 0, ale_entry);
1545
1546 /* Set outgoing MAC Address for Ports 1 and 2. */
1547 for (i = CPSW_CPPI_PORTS; i < (CPSW_ETH_PORTS + CPSW_CPPI_PORTS); ++i)
1548 cpsw_ale_set_outgoing_mac(sc, i, mac);
1549
1550 /* Keep the broadcast address at table entry 1. */
1551 cpsw_ale_entry_init(ale_entry);
1552 cpsw_ale_entry_set_bcast_mac(ale_entry);
1553 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1554 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1555 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, ALE_PORT_MASK_ALL);
1556 cpsw_ale_write_entry(sc, 1, ale_entry);
1557
1558 /* SIOCDELMULTI doesn't specify the particular address
1559 being removed, so we have to remove all and rebuild. */
1560 if (purge)
1561 cpsw_ale_remove_all_mc_entries(sc);
1562
1563 /* Set other multicast addrs desired. */
1564 ETHER_LOCK(ec);
1565 LIST_FOREACH(ifma, &ec->ec_multiaddrs, enm_list) {
1566 cpsw_ale_mc_entry_set(sc, ALE_PORT_MASK_ALL, ifma->enm_addrlo);
1567 }
1568 ETHER_UNLOCK(ec);
1569
1570 return 0;
1571 }
1572