if_cpsw.c revision 1.16 1 /* $NetBSD: if_cpsw.c,v 1.16 2022/09/18 15:47:09 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*-
30 * Copyright (c) 2012 Damjan Marion <dmarion (at) Freebsd.org>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54
55 #include <sys/cdefs.h>
56 __KERNEL_RCSID(1, "$NetBSD: if_cpsw.c,v 1.16 2022/09/18 15:47:09 thorpej Exp $");
57
58 #include <sys/param.h>
59 #include <sys/bus.h>
60 #include <sys/device.h>
61 #include <sys/ioctl.h>
62 #include <sys/intr.h>
63 #include <sys/kmem.h>
64 #include <sys/mutex.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67
68 #include <net/if.h>
69 #include <net/if_ether.h>
70 #include <net/if_media.h>
71 #include <net/bpf.h>
72
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75
76 #include <dev/fdt/fdtvar.h>
77
78 #include <arm/ti/if_cpswreg.h>
79
80 #define FDT_INTR_FLAGS 0
81
82 #define CPSW_TXFRAGS 16
83
84 #define CPSW_CPPI_RAM_SIZE (0x2000)
85 #define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
86 #define CPSW_CPPI_RAM_RXDESCS_SIZE \
87 (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
88 #define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
89 #define CPSW_CPPI_RAM_RXDESCS_BASE \
90 (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)
91
92 #define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
93 #define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
94
95 CTASSERT(powerof2(CPSW_NTXDESCS));
96 CTASSERT(powerof2(CPSW_NRXDESCS));
97
98 #define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
99
100 #define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
101 #define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)
102
103 #define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
104 #define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
105
/*
 * Software state shadowing the hardware descriptor rings: one DMA map
 * and one mbuf pointer per TX and per RX buffer descriptor.
 */
struct cpsw_ring_data {
	bus_dmamap_t tx_dm[CPSW_NTXDESCS];	/* TX segment DMA maps */
	struct mbuf *tx_mb[CPSW_NTXDESCS];	/* mbufs queued at SOP slots */
	bus_dmamap_t rx_dm[CPSW_NRXDESCS];	/* RX buffer DMA maps */
	struct mbuf *rx_mb[CPSW_NRXDESCS];	/* mbufs loaned to the RX ring */
};
112
/* Per-device software state. */
struct cpsw_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;			/* register space tag */
	bus_space_handle_t sc_bsh;		/* whole register window */
	bus_size_t sc_bss;			/* size of mapped window */
	bus_dma_tag_t sc_bdt;			/* DMA tag for rings/buffers */
	bus_space_handle_t sc_bsh_txdescs;	/* CPPI RAM: TX descriptors */
	bus_space_handle_t sc_bsh_rxdescs;	/* CPPI RAM: RX descriptors */
	bus_addr_t sc_txdescs_pa;		/* physical base of TX descs */
	bus_addr_t sc_rxdescs_pa;		/* physical base of RX descs */
	struct ethercom sc_ec;
	struct mii_data sc_mii;
	bool sc_phy_has_1000t;			/* PHY offers 1000BASE-T media */
	bool sc_attached;			/* attach completed */
	callout_t sc_tick_ch;			/* periodic mii_tick() callout */
	void *sc_ih;
	struct cpsw_ring_data *sc_rdp;		/* per-descriptor soft state */
	volatile u_int sc_txnext;		/* next TX descriptor to fill */
	volatile u_int sc_txhead;		/* oldest in-flight TX desc */
	volatile u_int sc_rxhead;		/* next RX descriptor to reap */
	bool sc_txbusy;				/* TX ring currently full */
	void *sc_rxthih;			/* RX threshold intr handle */
	void *sc_rxih;				/* RX intr handle */
	void *sc_txih;				/* TX intr handle */
	void *sc_miscih;			/* misc intr handle */
	void *sc_txpad;				/* zero buffer for runt padding */
	bus_dmamap_t sc_txpad_dm;		/* DMA map for sc_txpad */
#define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr
	uint8_t sc_enaddr[ETHER_ADDR_LEN];	/* station address */
	volatile bool sc_txrun;			/* TX DMA engine running */
	volatile bool sc_rxrun;			/* RX DMA engine running */
	volatile bool sc_txeoq;			/* TX hit end-of-queue */
	volatile bool sc_rxeoq;			/* RX hit end-of-queue */
};
147
148 static int cpsw_match(device_t, cfdata_t, void *);
149 static void cpsw_attach(device_t, device_t, void *);
150 static int cpsw_detach(device_t, int);
151
152 static void cpsw_start(struct ifnet *);
153 static int cpsw_ioctl(struct ifnet *, u_long, void *);
154 static void cpsw_watchdog(struct ifnet *);
155 static int cpsw_init(struct ifnet *);
156 static void cpsw_stop(struct ifnet *, int);
157
158 static int cpsw_mii_readreg(device_t, int, int, uint16_t *);
159 static int cpsw_mii_writereg(device_t, int, int, uint16_t);
160 static void cpsw_mii_statchg(struct ifnet *);
161
162 static int cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
163 static void cpsw_tick(void *);
164
165 static int cpsw_rxthintr(void *);
166 static int cpsw_rxintr(void *);
167 static int cpsw_txintr(void *);
168 static int cpsw_miscintr(void *);
169
170 /* ALE support */
171 #define CPSW_MAX_ALE_ENTRIES 1024
172
173 static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
174
175 CFATTACH_DECL_NEW(cpsw, sizeof(struct cpsw_softc),
176 cpsw_match, cpsw_attach, cpsw_detach, NULL);
177
178 #include <sys/kernhist.h>
179 KERNHIST_DEFINE(cpswhist);
180
181 #define CPSWHIST_CALLARGS(A,B,C,D) do { \
182 KERNHIST_CALLARGS(cpswhist, "%jx %jx %jx %jx", \
183 (uintptr_t)(A), (uintptr_t)(B), (uintptr_t)(C), (uintptr_t)(D));\
184 } while (0)
185
186
187 static inline u_int
188 cpsw_txdesc_adjust(u_int x, int y)
189 {
190 return (((x) + y) & (CPSW_NTXDESCS - 1));
191 }
192
193 static inline u_int
194 cpsw_rxdesc_adjust(u_int x, int y)
195 {
196 return (((x) + y) & (CPSW_NRXDESCS - 1));
197 }
198
199 static inline uint32_t
200 cpsw_read_4(struct cpsw_softc * const sc, bus_size_t const offset)
201 {
202 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, offset);
203 }
204
/* Write 'value' to the 32-bit CPSW register at byte offset 'offset'. */
static inline void
cpsw_write_4(struct cpsw_softc * const sc, bus_size_t const offset,
    uint32_t const value)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, offset, value);
}
211
212 static inline void
213 cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
214 {
215 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
216
217 KERNHIST_FUNC(__func__);
218 CPSWHIST_CALLARGS(sc, i, n, 0);
219
220 bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
221 }
222
223 static inline void
224 cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
225 {
226 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
227
228 KERNHIST_FUNC(__func__);
229 CPSWHIST_CALLARGS(sc, i, n, 0);
230
231 bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
232 }
233
/* Copy TX descriptor 'i' out of descriptor SRAM into *bdp. */
static inline void
cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	uint32_t * const dp = bdp->word;
	const bus_size_t c = __arraycount(bdp->word);

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, i, bdp, 0);

	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
	KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
	    dp[0], dp[1], dp[2], dp[3]);
}
249
/* Copy *bdp into TX descriptor slot 'i' in descriptor SRAM. */
static inline void
cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	uint32_t * const dp = bdp->word;
	const bus_size_t c = __arraycount(bdp->word);

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, i, bdp, 0);
	KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
	    dp[0], dp[1], dp[2], dp[3]);

	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
}
265
/* Copy RX descriptor 'i' out of descriptor SRAM into *bdp. */
static inline void
cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	uint32_t * const dp = bdp->word;
	const bus_size_t c = __arraycount(bdp->word);

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, i, bdp, 0);

	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);

	KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
	    dp[0], dp[1], dp[2], dp[3]);
}
282
/* Copy *bdp into RX descriptor slot 'i' in descriptor SRAM. */
static inline void
cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	uint32_t * const dp = bdp->word;
	const bus_size_t c = __arraycount(bdp->word);

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, i, bdp, 0);
	KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
	    dp[0], dp[1], dp[2], dp[3]);

	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
}
298
299 static inline bus_addr_t
300 cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
301 {
302 KASSERT(x < CPSW_NTXDESCS);
303 return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
304 }
305
306 static inline bus_addr_t
307 cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
308 {
309 KASSERT(x < CPSW_NRXDESCS);
310 return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
311 }
312
/* Device-tree compatible strings accepted by this driver. */
static const struct device_compatible_entry compat_data[] = {
	{ .compat = "ti,am335x-cpsw-switch" },
	{ .compat = "ti,am335x-cpsw" },
	{ .compat = "ti,cpsw" },
	DEVICE_COMPAT_EOL
};
319
320 static int
321 cpsw_match(device_t parent, cfdata_t cf, void *aux)
322 {
323 struct fdt_attach_args * const faa = aux;
324
325 return of_compatible_match(faa->faa_phandle, compat_data);
326 }
327
328 static bool
329 cpsw_phy_has_1000t(struct cpsw_softc * const sc)
330 {
331 struct ifmedia_entry *ifm;
332
333 TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) {
334 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T)
335 return true;
336 }
337 return false;
338 }
339
/*
 * Detach: stop the interface, disestablish interrupt handlers, detach
 * from the network stack and release DMA/bus resources.  The order is
 * significant -- the interface (and its callout) is stopped before the
 * interrupts go away, and DMA maps are destroyed only after detach.
 */
static int
cpsw_detach(device_t self, int flags)
{
	struct cpsw_softc * const sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	u_int i;

	/* Succeed now if there's no work to do. */
	if (!sc->sc_attached)
		return 0;

	sc->sc_attached = false;

	/* Stop the interface. Callouts are stopped in it. */
	cpsw_stop(ifp, 1);

	/* Destroy our callout. */
	callout_destroy(&sc->sc_tick_ch);

	/* Let go of the interrupts */
	intr_disestablish(sc->sc_rxthih);
	intr_disestablish(sc->sc_rxih);
	intr_disestablish(sc->sc_txih);
	intr_disestablish(sc->sc_miscih);

	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Delete all media. */
	ifmedia_fini(&sc->sc_mii.mii_media);

	/* Free the packet padding buffer */
	kmem_free(sc->sc_txpad, ETHER_MIN_LEN);
	bus_dmamap_destroy(sc->sc_bdt, sc->sc_txpad_dm);

	/* Destroy all the descriptors */
	for (i = 0; i < CPSW_NTXDESCS; i++)
		bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->tx_dm[i]);
	for (i = 0; i < CPSW_NRXDESCS; i++)
		bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->rx_dm[i]);
	kmem_free(sc->sc_rdp, sizeof(*sc->sc_rdp));

	/* Unmap the register window. */
	bus_space_unmap(sc->sc_bst, sc->sc_bsh, sc->sc_bss);

	return 0;
}
388
389 static void
390 cpsw_attach(device_t parent, device_t self, void *aux)
391 {
392 struct fdt_attach_args * const faa = aux;
393 struct cpsw_softc * const sc = device_private(self);
394 struct ethercom * const ec = &sc->sc_ec;
395 struct ifnet * const ifp = &ec->ec_if;
396 struct mii_data * const mii = &sc->sc_mii;
397 const int phandle = faa->faa_phandle;
398 const uint8_t *macaddr;
399 bus_addr_t addr;
400 bus_size_t size;
401 int error, slave, len;
402 char xname[16];
403 u_int i;
404
405 KERNHIST_INIT(cpswhist, 4096);
406
407 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
408 aprint_error(": couldn't get registers\n");
409 return;
410 }
411
412 sc->sc_dev = self;
413
414 aprint_normal(": TI Layer 2 3-Port Switch\n");
415 aprint_naive("\n");
416
417 callout_init(&sc->sc_tick_ch, 0);
418 callout_setfunc(&sc->sc_tick_ch, cpsw_tick, sc);
419
420 macaddr = NULL;
421 slave = of_find_firstchild_byname(phandle, "slave");
422 if (slave == -1) {
423 slave = of_find_firstchild_byname(phandle, "ethernet-ports");
424 if (slave != -1) {
425 slave = of_find_firstchild_byname(slave, "port");
426 }
427 }
428 if (slave != -1) {
429 macaddr = fdtbus_get_prop(slave, "mac-address", &len);
430 if (len != ETHER_ADDR_LEN)
431 macaddr = NULL;
432 }
433 if (macaddr == NULL) {
434 #if 0
435 /* grab mac_id0 from AM335x control module */
436 uint32_t reg_lo, reg_hi;
437
438 if (sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO, ®_lo) == 0 &&
439 sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI, ®_hi) == 0) {
440 sc->sc_enaddr[0] = (reg_hi >> 0) & 0xff;
441 sc->sc_enaddr[1] = (reg_hi >> 8) & 0xff;
442 sc->sc_enaddr[2] = (reg_hi >> 16) & 0xff;
443 sc->sc_enaddr[3] = (reg_hi >> 24) & 0xff;
444 sc->sc_enaddr[4] = (reg_lo >> 0) & 0xff;
445 sc->sc_enaddr[5] = (reg_lo >> 8) & 0xff;
446 } else
447 #endif
448 {
449 aprint_error_dev(sc->sc_dev,
450 "using fake station address\n");
451 /* 'N' happens to have the Local bit set */
452 #if 0
453 sc->sc_enaddr[0] = 'N';
454 sc->sc_enaddr[1] = 'e';
455 sc->sc_enaddr[2] = 't';
456 sc->sc_enaddr[3] = 'B';
457 sc->sc_enaddr[4] = 'S';
458 sc->sc_enaddr[5] = 'D';
459 #else
460 /* XXX Glor */
461 sc->sc_enaddr[0] = 0xd4;
462 sc->sc_enaddr[1] = 0x94;
463 sc->sc_enaddr[2] = 0xa1;
464 sc->sc_enaddr[3] = 0x97;
465 sc->sc_enaddr[4] = 0x03;
466 sc->sc_enaddr[5] = 0x94;
467 #endif
468 }
469 } else {
470 memcpy(sc->sc_enaddr, macaddr, ETHER_ADDR_LEN);
471 }
472
473 snprintf(xname, sizeof(xname), "%s rxth", device_xname(self));
474 sc->sc_rxthih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_RXTH,
475 IPL_VM, FDT_INTR_FLAGS, cpsw_rxthintr, sc, xname);
476
477 snprintf(xname, sizeof(xname), "%s rx", device_xname(self));
478 sc->sc_rxih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_RX,
479 IPL_VM, FDT_INTR_FLAGS, cpsw_rxintr, sc, xname);
480
481 snprintf(xname, sizeof(xname), "%s tx", device_xname(self));
482 sc->sc_txih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_TX,
483 IPL_VM, FDT_INTR_FLAGS, cpsw_txintr, sc, xname);
484
485 snprintf(xname, sizeof(xname), "%s misc", device_xname(self));
486 sc->sc_miscih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_MISC,
487 IPL_VM, FDT_INTR_FLAGS, cpsw_miscintr, sc, xname);
488
489 sc->sc_bst = faa->faa_bst;
490 sc->sc_bss = size;
491 sc->sc_bdt = faa->faa_dmat;
492
493 error = bus_space_map(sc->sc_bst, addr, size, 0,
494 &sc->sc_bsh);
495 if (error) {
496 aprint_error_dev(sc->sc_dev,
497 "can't map registers: %d\n", error);
498 return;
499 }
500
501 sc->sc_txdescs_pa = addr + CPSW_CPPI_RAM_TXDESCS_BASE;
502 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
503 CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
504 &sc->sc_bsh_txdescs);
505 if (error) {
506 aprint_error_dev(sc->sc_dev,
507 "can't subregion tx ring SRAM: %d\n", error);
508 return;
509 }
510 aprint_debug_dev(sc->sc_dev, "txdescs at %p\n",
511 (void *)sc->sc_bsh_txdescs);
512
513 sc->sc_rxdescs_pa = addr + CPSW_CPPI_RAM_RXDESCS_BASE;
514 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
515 CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
516 &sc->sc_bsh_rxdescs);
517 if (error) {
518 aprint_error_dev(sc->sc_dev,
519 "can't subregion rx ring SRAM: %d\n", error);
520 return;
521 }
522 aprint_debug_dev(sc->sc_dev, "rxdescs at %p\n",
523 (void *)sc->sc_bsh_rxdescs);
524
525 sc->sc_rdp = kmem_alloc(sizeof(*sc->sc_rdp), KM_SLEEP);
526
527 for (i = 0; i < CPSW_NTXDESCS; i++) {
528 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
529 CPSW_TXFRAGS, MCLBYTES, 0, 0,
530 &sc->sc_rdp->tx_dm[i])) != 0) {
531 aprint_error_dev(sc->sc_dev,
532 "unable to create tx DMA map: %d\n", error);
533 }
534 sc->sc_rdp->tx_mb[i] = NULL;
535 }
536
537 for (i = 0; i < CPSW_NRXDESCS; i++) {
538 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
539 MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
540 aprint_error_dev(sc->sc_dev,
541 "unable to create rx DMA map: %d\n", error);
542 }
543 sc->sc_rdp->rx_mb[i] = NULL;
544 }
545
546 sc->sc_txpad = kmem_zalloc(ETHER_MIN_LEN, KM_SLEEP);
547 bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
548 BUS_DMA_WAITOK, &sc->sc_txpad_dm);
549 bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
550 ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
551 bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
552 BUS_DMASYNC_PREWRITE);
553
554 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
555 ether_sprintf(sc->sc_enaddr));
556
557 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
558 ifp->if_softc = sc;
559 ifp->if_capabilities = 0;
560 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
561 ifp->if_start = cpsw_start;
562 ifp->if_ioctl = cpsw_ioctl;
563 ifp->if_init = cpsw_init;
564 ifp->if_stop = cpsw_stop;
565 ifp->if_watchdog = cpsw_watchdog;
566 IFQ_SET_READY(&ifp->if_snd);
567
568 cpsw_stop(ifp, 0);
569
570 mii->mii_ifp = ifp;
571 mii->mii_readreg = cpsw_mii_readreg;
572 mii->mii_writereg = cpsw_mii_writereg;
573 mii->mii_statchg = cpsw_mii_statchg;
574
575 sc->sc_ec.ec_mii = mii;
576 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
577
578 /* Initialize MDIO */
579 cpsw_write_4(sc, MDIOCONTROL,
580 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
581 /* Clear ALE */
582 cpsw_write_4(sc, CPSW_ALE_CONTROL, ALECTL_CLEAR_TABLE);
583
584 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, 0, 0);
585 if (LIST_FIRST(&mii->mii_phys) == NULL) {
586 aprint_error_dev(self, "no PHY found!\n");
587 sc->sc_phy_has_1000t = false;
588 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
589 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
590 } else {
591 sc->sc_phy_has_1000t = cpsw_phy_has_1000t(sc);
592
593 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
594 }
595
596 if_attach(ifp);
597 if_deferred_start_init(ifp, NULL);
598 ether_ifattach(ifp, sc->sc_enaddr);
599
600 /* The attach is successful. */
601 sc->sc_attached = true;
602
603 return;
604 }
605
/*
 * if_start: drain the send queue into the TX descriptor ring.
 *
 * Each packet uses one descriptor per DMA segment, plus one extra
 * descriptor referencing the shared zero pad buffer when the frame is
 * shorter than CPSW_PAD_LEN.  The newly built chain is terminated,
 * then linked after the previous chain; if the DMA engine had reached
 * end-of-queue it is restarted by writing the TX head pointer.
 */
static void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	uint32_t * const dw = bd.word;
	struct mbuf *m;
	bus_dmamap_t dm;
	u_int eopi __diagused = ~0;	/* index of the last (EOP) desc */
	u_int seg;
	u_int txfree;			/* free descriptors available */
	int txstart = -1;		/* first descriptor of new chain */
	int error;
	bool pad;			/* frame needs a pad descriptor */
	u_int mlen;

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, 0, 0, 0);

	if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
		return;
	}
	if (__predict_false(sc->sc_txbusy)) {
		return;
	}

	/* Count free slots; one slot is always left unused. */
	if (sc->sc_txnext >= sc->sc_txhead)
		txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
	else
		txfree = sc->sc_txhead - sc->sc_txnext - 1;

	KERNHIST_LOG(cpswhist, "start txf %x txh %x txn %x txr %x\n",
	    txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);

	while (txfree > 0) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		dm = rdp->tx_dm[sc->sc_txnext];

		error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			/* More segments than the map allows: drop it. */
			device_printf(sc->sc_dev, "won't fit\n");
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			if_statinc(ifp, if_oerrors);
			continue;
		} else if (error != 0) {
			device_printf(sc->sc_dev, "error\n");
			break;
		}

		/* Need room for every segment plus a possible pad desc. */
		if (dm->dm_nsegs + 1 >= txfree) {
			sc->sc_txbusy = true;
			bus_dmamap_unload(sc->sc_bdt, dm);
			break;
		}

		mlen = m_length(m);
		pad = mlen < CPSW_PAD_LEN;

		KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
		rdp->tx_mb[sc->sc_txnext] = m;
		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (txstart == -1)
			txstart = sc->sc_txnext;
		eopi = sc->sc_txnext;
		for (seg = 0; seg < dm->dm_nsegs; seg++) {
			dw[0] = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			dw[1] = dm->dm_segs[seg].ds_addr;
			dw[2] = dm->dm_segs[seg].ds_len;
			dw[3] = 0;

			/* SOP carries ownership and total frame length. */
			if (seg == 0)
				dw[3] |= CPDMA_BD_SOP | CPDMA_BD_OWNER |
				    MAX(mlen, CPSW_PAD_LEN);

			if ((seg == dm->dm_nsegs - 1) && !pad)
				dw[3] |= CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
		if (pad) {
			/* Extra descriptor pointing at the zero buffer. */
			dw[0] = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			dw[1] = sc->sc_txpad_pa;
			dw[2] = CPSW_PAD_LEN - mlen;
			dw[3] = CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}

		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	if (txstart >= 0) {
		ifp->if_timer = 5;
		/* terminate the new chain */
		KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
		cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);
		KERNHIST_LOG(cpswhist, "CP %x HDP %x s %x e %x\n",
		    cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
		    cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), txstart, eopi);
		/* link the new chain on */
		cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
		    cpsw_txdesc_paddr(sc, txstart));
		if (sc->sc_txeoq) {
			/* kick the dma engine */
			sc->sc_txeoq = false;
			cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, txstart));
		}
	}
	KERNHIST_LOG(cpswhist, "end txf %x txh %x txn %x txr %x\n",
	    txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
}
735
736 static int
737 cpsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
738 {
739 const int s = splnet();
740 int error = 0;
741
742 switch (cmd) {
743 default:
744 error = ether_ioctl(ifp, cmd, data);
745 if (error == ENETRESET) {
746 error = 0;
747 }
748 break;
749 }
750
751 splx(s);
752
753 return error;
754 }
755
756 static void
757 cpsw_watchdog(struct ifnet *ifp)
758 {
759 struct cpsw_softc *sc = ifp->if_softc;
760
761 device_printf(sc->sc_dev, "device timeout\n");
762
763 if_statinc(ifp, if_oerrors);
764 cpsw_init(ifp);
765 cpsw_start(ifp);
766 }
767
768 static int
769 cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
770 {
771 u_int tries;
772
773 for (tries = 0; tries < 1000; tries++) {
774 if ((cpsw_read_4(sc, reg) & __BIT(31)) == 0)
775 return 0;
776 delay(1);
777 }
778 return ETIMEDOUT;
779 }
780
781 static int
782 cpsw_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
783 {
784 struct cpsw_softc * const sc = device_private(dev);
785 uint32_t v;
786
787 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
788 return -1;
789
790 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) |
791 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
792
793 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
794 return -1;
795
796 v = cpsw_read_4(sc, MDIOUSERACCESS0);
797 if (v & __BIT(29)) {
798 *val = v & 0xffff;
799 return 0;
800 }
801
802 return -1;
803 }
804
805 static int
806 cpsw_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
807 {
808 struct cpsw_softc * const sc = device_private(dev);
809 uint32_t v;
810
811 KASSERT((val & 0xffff0000UL) == 0);
812
813 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
814 goto out;
815
816 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) | (1 << 30) |
817 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);
818
819 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
820 goto out;
821
822 v = cpsw_read_4(sc, MDIOUSERACCESS0);
823 if ((v & __BIT(29)) == 0) {
824 out:
825 device_printf(sc->sc_dev, "%s error\n", __func__);
826 return -1;
827 }
828
829 return 0;
830 }
831
/* MII status-change callback: no action required by this driver. */
static void
cpsw_mii_statchg(struct ifnet *ifp)
{
}
837
/*
 * Attach a fresh mbuf cluster to RX ring slot 'i' and rewrite its
 * hardware descriptor.  If no mbuf/cluster can be allocated, the slot
 * keeps its existing buffer and ENOBUFS is returned.  Either way the
 * descriptor is handed back to the hardware and relinked after its
 * predecessor so the ring stays intact.
 */
static int
cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
{
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	const u_int h = RXDESC_PREV(i);	/* predecessor to relink from */
	struct cpsw_cpdma_bd bd;
	uint32_t * const dw = bd.word;
	struct mbuf *m;
	int error = ENOBUFS;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		goto reuse;
	}

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		goto reuse;
	}

	/* We have a new buffer, prepare it for the ring. */

	if (rdp->rx_mb[i] != NULL)
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);

	m->m_len = m->m_pkthdr.len = MCLBYTES;

	rdp->rx_mb[i] = m;

	error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		/*
		 * NOTE(review): a load failure is only logged; 'error'
		 * is reset to 0 below and the descriptor is still
		 * queued -- confirm this is intentional.
		 */
		device_printf(sc->sc_dev, "can't load rx DMA map %d: %d\n",
		    i, error);
	}

	bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
	    0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	error = 0;

reuse:
	/* (re-)setup the descriptor */
	dw[0] = 0;			/* next pointer; set when linked */
	dw[1] = rdp->rx_dm[i]->dm_segs[0].ds_addr;
	dw[2] = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len); /* clamp to 0x7ff */
	dw[3] = CPDMA_BD_OWNER;		/* hand descriptor to hardware */

	cpsw_set_rxdesc(sc, i, &bd);
	/* and link onto ring */
	cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));

	return error;
}
893
/*
 * if_init: reset and program the switch wrapper, subsystem, sliver
 * ports, ALE and CPDMA, rebuild both descriptor rings, enable
 * interrupts and start the RX DMA engine.  The register write order
 * is deliberate; do not reorder.
 */
static int
cpsw_init(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	int i;

	cpsw_stop(ifp, 0);

	sc->sc_txnext = 0;
	sc->sc_txhead = 0;

	/* Reset wrapper */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Reset SS */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Clear table and enable ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL,
	    ALECTL_ENABLE_ALE | ALECTL_CLEAR_TABLE);

	/* Reset and init Sliver port 1 and 2 */
	for (i = 0; i < CPSW_ETH_PORTS; i++) {
		uint32_t macctl;

		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
		/* Set Slave Mapping */
		cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
		cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
		cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
		/* Set MAC Address */
		cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i+1),
		    sc->sc_enaddr[0] | (sc->sc_enaddr[1] << 8) |
		    (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[3] << 24));
		cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i+1),
		    sc->sc_enaddr[4] | (sc->sc_enaddr[5] << 8));

		/* Set MACCONTROL for ports 0,1 */
		macctl = SLMACCTL_FULLDUPLEX | SLMACCTL_GMII_EN |
		    SLMACCTL_IFCTL_A;
		if (sc->sc_phy_has_1000t)
			macctl |= SLMACCTL_GIG;
		cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), macctl);

		/* Set ALE port to forwarding(3) */
		cpsw_write_4(sc, CPSW_ALE_PORTCTL(i+1), 3);
	}

	/* Set Host Port Mapping */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Set ALE port to forwarding(3) */
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);

	/* Initialize addrs */
	cpsw_ale_update_addresses(sc, 1);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Reset CPDMA */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Clear head/completion pointers for all 8 DMA channels. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Zero the TX descriptor SRAM. */
	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
	    CPSW_CPPI_RAM_TXDESCS_SIZE/4);

	sc->sc_txhead = 0;
	sc->sc_txnext = 0;

	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Zero the RX descriptor SRAM. */
	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
	    CPSW_CPPI_RAM_RXDESCS_SIZE/4);
	/* Initialize RX Buffer Descriptors */
	cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
	for (i = 0; i < CPSW_NRXDESCS; i++) {
		cpsw_new_rxbuf(sc, i);
	}
	sc->sc_rxhead = 0;

	/* turn off flow control */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* align layer 3 header to 32-bit */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);

	/* Clear all interrupt Masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable TX and RX interrupt receive for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 1);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 1);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 2);

	/* Enable interrupts for TX and RX Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Ack stalled irqs */
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL,
	    MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));

	mii_mediachg(mii);

	/* Write channel 0 RX HDP to start reception. */
	cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
	sc->sc_rxrun = true;
	sc->sc_rxeoq = false;

	sc->sc_txrun = true;
	sc->sc_txeoq = true;
	callout_schedule(&sc->sc_tick_ch, hz);
	ifp->if_flags |= IFF_RUNNING;
	sc->sc_txbusy = false;

	return 0;
}
1043
/*
 * if_stop: halt DMA, disable interrupt delivery, reset the whole
 * switch complex and release queued TX mbufs.  With 'disable' set the
 * RX buffers are freed too (full shutdown); otherwise they are kept
 * for the next cpsw_init().
 */
static void
cpsw_stop(struct ifnet *ifp, int disable)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	u_int i;

	aprint_debug_dev(sc->sc_dev, "%s: ifp %p disable %d\n", __func__,
	    ifp, disable);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	callout_stop(&sc->sc_tick_ch);
	mii_down(&sc->sc_mii);

	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0x0);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0x0);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x0);

	/*
	 * Request teardown, then poll the TX/RX handlers until both
	 * directions acknowledge it (bounded at 10000 * 10us = 100ms).
	 */
	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	i = 0;
	while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
		delay(10);
		if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
			sc->sc_txrun = false;
		if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
			sc->sc_rxrun = false;
		i++;
	}
	//printf("%s toredown complete in %u\n", __func__, i);

	/* Reset wrapper */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Reset SS */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset both sliver ports. */
	for (i = 0; i < CPSW_ETH_PORTS; i++) {
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset CPDMA */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Release any queued transmit buffers. */
	for (i = 0; i < CPSW_NTXDESCS; i++) {
		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
		m_freem(rdp->tx_mb[i]);
		rdp->tx_mb[i] = NULL;
	}

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
	sc->sc_txbusy = false;

	if (!disable)
		return;

	/* Full disable: drop the receive buffers too. */
	for (i = 0; i < CPSW_NRXDESCS; i++) {
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
		m_freem(rdp->rx_mb[i]);
		rdp->rx_mb[i] = NULL;
	}
}
1120
1121 static void
1122 cpsw_tick(void *arg)
1123 {
1124 struct cpsw_softc * const sc = arg;
1125 struct mii_data * const mii = &sc->sc_mii;
1126 const int s = splnet();
1127
1128 mii_tick(mii);
1129
1130 splx(s);
1131
1132 callout_schedule(&sc->sc_tick_ch, hz);
1133 }
1134
1135 static int
1136 cpsw_rxthintr(void *arg)
1137 {
1138 struct cpsw_softc * const sc = arg;
1139
1140 /* this won't deassert the interrupt though */
1141 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1142
1143 return 1;
1144 }
1145
/*
 * cpsw_rxintr: RX completion interrupt handler.  Walk the receive
 * descriptor ring from sc_rxhead, hand completed frames up the stack,
 * and reload each consumed slot with a fresh buffer.  Always returns 1
 * (interrupt claimed).
 */
static int
cpsw_rxintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	const uint32_t * const dw = bd.word;
	bus_dmamap_t dm;
	struct mbuf *m;
	u_int i;
	u_int len, off;

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, 0, 0, 0);

	for (;;) {
		KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);

		i = sc->sc_rxhead;
		KERNHIST_LOG(cpswhist, "rxhead %x CP %x\n", i,
		    cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0)), 0, 0);
		dm = rdp->rx_dm[i];
		m = rdp->rx_mb[i];

		KASSERT(dm != NULL);
		KASSERT(m != NULL);

		cpsw_get_rxdesc(sc, i, &bd);

		/* Descriptor still owned by the hardware: ring drained. */
		if (ISSET(dw[3], CPDMA_BD_OWNER))
			break;

		/* Teardown complete marker: receive DMA has stopped. */
		if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
			sc->sc_rxrun = false;
			return 1;
		}

		/*
		 * Each frame is expected to occupy exactly one
		 * descriptor (SOP and EOP both set); anything else is
		 * unexpected here.
		 */
		if ((dw[3] & (CPDMA_BD_SOP | CPDMA_BD_EOP)) !=
		    (CPDMA_BD_SOP | CPDMA_BD_EOP)) {
			//Debugger();
		}

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/* Load a fresh buffer into this slot before passing "m" up. */
		if (cpsw_new_rxbuf(sc, i) != 0) {
			/* drop current packet, reuse buffer for new */
			if_statinc(ifp, if_ierrors);
			goto next;
		}

		/* Buffer offset lives in word 2, packet length in word 3. */
		off = __SHIFTOUT(dw[2], (uint32_t)__BITS(26, 16));
		len = __SHIFTOUT(dw[3], (uint32_t)__BITS(10, 0));

		/* Hardware passed the CRC up with the frame; trim it. */
		if (ISSET(dw[3], CPDMA_BD_PASSCRC))
			len -= ETHER_CRC_LEN;

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;
		m->m_data += off;

		if_percpuq_enqueue(ifp->if_percpuq, m);

next:
		sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
		/* End-of-queue: the hardware stopped at this descriptor. */
		if (ISSET(dw[3], CPDMA_BD_EOQ)) {
			sc->sc_rxeoq = true;
			break;
		} else {
			sc->sc_rxeoq = false;
		}
		/* Advance the completion pointer past this descriptor. */
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(0),
		    cpsw_rxdesc_paddr(sc, i));
	}

	if (sc->sc_rxeoq) {
		device_printf(sc->sc_dev, "rxeoq\n");
		//Debugger();
	}

	/* Write EOI so further RX interrupts can be generated. */
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);

	return 1;
}
1231
/*
 * cpsw_txintr: TX completion interrupt handler.  Reap completed
 * transmit descriptors from sc_txhead up to the hardware completion
 * pointer, free their mbufs and DMA maps, and restart the channel if
 * the hardware hit end-of-queue while more descriptors were pending.
 * Returns nonzero iff any work was done.
 */
static int
cpsw_txintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	const uint32_t * const dw = bd.word;
	bool handled = false;
	uint32_t tx0_cp;
	u_int cpi;

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, 0, 0, 0);

	KASSERT(sc->sc_txrun);

	KERNHIST_LOG(cpswhist, "before txnext %x txhead %x txrun %x\n",
	    sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);

	tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));

	/* 0xfffffffc in the completion register signals channel teardown. */
	if (tx0_cp == 0xfffffffc) {
		/* Teardown, ack it */
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0), 0);
		sc->sc_txrun = false;
		return 0;
	}

	for (;;) {
		tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
		/* Index of the descriptor the hardware last completed. */
		cpi = (tx0_cp - sc->sc_txdescs_pa) / sizeof(struct cpsw_cpdma_bd);
		KASSERT(sc->sc_txhead < CPSW_NTXDESCS);

		KERNHIST_LOG(cpswhist, "txnext %x txhead %x txrun %x cpi %x\n",
		    sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, cpi);

		cpsw_get_txdesc(sc, sc->sc_txhead, &bd);

		if (dw[2] == 0) {
			//Debugger();
		}

		/* Only SOP descriptors carry an mbuf/map to reclaim. */
		if (ISSET(dw[3], CPDMA_BD_SOP) == 0)
			goto next;

		/* Still owned by the hardware: nothing more to reap. */
		if (ISSET(dw[3], CPDMA_BD_OWNER)) {
			printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
			    sc->sc_txnext);
			break;
		}

		/* Teardown complete marker: transmit DMA has stopped. */
		if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
			sc->sc_txrun = false;
			return 1;
		}

		bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
		    0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);

		m_freem(rdp->tx_mb[sc->sc_txhead]);
		rdp->tx_mb[sc->sc_txhead] = NULL;

		if_statinc(ifp, if_opackets);

		handled = true;

		sc->sc_txbusy = false;

next:
		if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
			sc->sc_txeoq = true;
		}
		/* Caught up with the completion pointer: ack it and stop. */
		if (sc->sc_txhead == cpi) {
			cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0),
			    cpsw_txdesc_paddr(sc, cpi));
			sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
			break;
		}
		sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
		if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
			sc->sc_txeoq = true;
			break;
		}
	}

	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);

	/*
	 * If the hardware reached end-of-queue but more descriptors are
	 * queued (txnext != txhead), restart it at the next descriptor.
	 */
	if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
		if (cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)) == 0) {
			sc->sc_txeoq = false;
			cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, sc->sc_txhead));
		}
	}

	KERNHIST_LOG(cpswhist, "after txnext %x txhead %x txrun %x\n",
	    sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
	KERNHIST_LOG(cpswhist, "CP %x HDP %x\n",
	    cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
	    cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), 0, 0);

	if (handled && sc->sc_txnext == sc->sc_txhead)
		ifp->if_timer = 0;

	if (handled)
		if_schedule_deferred_start(ifp);

	return handled;
}
1345
/*
 * cpsw_miscintr: miscellaneous interrupt handler.  Only the host-error
 * pend bit is acted upon: the relevant CPDMA state registers are
 * dumped for diagnosis and the offending interrupt sources are masked
 * so they cannot recur.  Always returns 1 (interrupt claimed).
 */
static int
cpsw_miscintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	uint32_t miscstat;
	uint32_t dmastat;
	uint32_t stat;

	miscstat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
	device_printf(sc->sc_dev, "%s %x FIRE\n", __func__, miscstat);

#define CPSW_MISC_HOST_PEND __BIT32(2)
#define CPSW_MISC_STAT_PEND __BIT32(3)

	if (ISSET(miscstat, CPSW_MISC_HOST_PEND)) {
		/* Host Error */
		dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);

		printf("rxhead %02x\n", sc->sc_rxhead);

		/* Dump DMA status and channel 0 head/completion pointers. */
		stat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
		printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0));
		printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
		printf("CPSW_CPDMA_TX0_CP %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(0));
		printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0));
		printf("CPSW_CPDMA_RX0_CP %x\n", stat);

		//Debugger();

		/* Mask off whatever sources were pending. */
		cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
		dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
	}

	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);

	return 1;
}
1389
1390 /*
1391 *
1392 * ALE support routines.
1393 *
1394 */
1395
/*
 * Zero out an ALE table entry image (three 32-bit words); an all-zero
 * image has entry type 0, i.e. a free entry.
 */
static void
cpsw_ale_entry_init(uint32_t *ale_entry)
{
	unsigned w;

	for (w = 0; w < 3; w++)
		ale_entry[w] = 0;
}
1401
/*
 * Fill in the station address portion of an ALE table entry image:
 * word0 holds the four low octets (mac[2..5]), the low 16 bits of
 * word1 hold the two high octets (mac[0..1]).
 */
static void
cpsw_ale_entry_set_mac(uint32_t *ale_entry, const uint8_t *mac)
{

	/*
	 * Widen each octet before shifting: a plain "mac[2] << 24"
	 * shifts a promoted (signed) int into the sign bit, which is
	 * undefined behaviour whenever the octet has its top bit set.
	 */
	ale_entry[0] = (uint32_t)mac[2] << 24 | (uint32_t)mac[3] << 16 |
	    (uint32_t)mac[4] << 8 | mac[5];
	ale_entry[1] = (uint32_t)mac[0] << 8 | mac[1];
}
1408
/*
 * Fill in the broadcast station address (ff:ff:ff:ff:ff:ff) in an ALE
 * table entry image; only the 48 address bits are written.
 */
static void
cpsw_ale_entry_set_bcast_mac(uint32_t *ale_entry)
{
	ale_entry[0] = ~0u;	/* octets 2..5 */
	ale_entry[1] = 0xffffu;	/* octets 0..1 */
}
1415
1416 static void
1417 cpsw_ale_entry_set(uint32_t *ale_entry, ale_entry_field_t field, uint32_t val)
1418 {
1419 /* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1420 switch (field) {
1421 case ALE_ENTRY_TYPE:
1422 /* [61:60] */
1423 ale_entry[1] |= (val & 0x3) << 28;
1424 break;
1425 case ALE_MCAST_FWD_STATE:
1426 /* [63:62] */
1427 ale_entry[1] |= (val & 0x3) << 30;
1428 break;
1429 case ALE_PORT_MASK:
1430 /* [68:66] */
1431 ale_entry[2] |= (val & 0x7) << 2;
1432 break;
1433 case ALE_PORT_NUMBER:
1434 /* [67:66] */
1435 ale_entry[2] |= (val & 0x3) << 2;
1436 break;
1437 default:
1438 panic("Invalid ALE entry field: %d\n", field);
1439 }
1440
1441 return;
1442 }
1443
/*
 * Compare the station address stored in an ALE table entry image
 * against "mac": the low 16 bits of word1 are octets 0-1, word0 is
 * octets 2-5.  Bits of word1 above the address are ignored.
 */
static bool
cpsw_ale_entry_mac_match(const uint32_t *ale_entry, const uint8_t *mac)
{
	uint64_t entry_addr, want;
	unsigned i;

	entry_addr = ((uint64_t)(ale_entry[1] & 0xffff) << 32) | ale_entry[0];

	want = 0;
	for (i = 0; i < 6; i++)
		want = (want << 8) | mac[i];

	return entry_addr == want;
}
1454
/*
 * Program the outgoing (source) station address registers of a port:
 * SA_HI takes octets 3..0, SA_LO octets 5..4.
 */
static void
cpsw_ale_set_outgoing_mac(struct cpsw_softc *sc, int port, const uint8_t *mac)
{

	/*
	 * Cast before shifting: "mac[3] << 24" would shift a promoted
	 * (signed) int into the sign bit, which is undefined behaviour
	 * for octets with the top bit set.
	 */
	cpsw_write_4(sc, CPSW_PORT_P_SA_HI(port),
	    (uint32_t)mac[3] << 24 | (uint32_t)mac[2] << 16 |
	    (uint32_t)mac[1] << 8 | mac[0]);
	cpsw_write_4(sc, CPSW_PORT_P_SA_LO(port),
	    (uint32_t)mac[5] << 8 | mac[4]);
}
1463
/*
 * Read ALE table entry "idx" into the three-word image "ale_entry".
 * Writing TBLCTL with just the index (bit 31 clear; contrast
 * cpsw_ale_write_entry) loads the entry into the TBLW registers.
 */
static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
}
1472
1473 static void
1474 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx,
1475 const uint32_t *ale_entry)
1476 {
1477 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
1478 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
1479 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
1480 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
1481 }
1482
1483 static int
1484 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
1485 {
1486 int i;
1487 uint32_t ale_entry[3];
1488
1489 /* First two entries are link address and broadcast. */
1490 for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
1491 cpsw_ale_read_entry(sc, i, ale_entry);
1492 if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
1493 ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */
1494 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1495 cpsw_ale_write_entry(sc, i, ale_entry);
1496 }
1497 }
1498 return CPSW_MAX_ALE_ENTRIES;
1499 }
1500
/*
 * Install (or refresh) a multicast address entry in the ALE table,
 * forwarding to the ports in "portmask".  Returns 0 on success, or
 * ENOMEM if no matching or free table slot exists.
 */
static int
cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmask, uint8_t *mac)
{
	int free_index = -1, matching_index = -1, i;
	uint32_t ale_entry[3];

	/* Find a matching entry or a free entry. */
	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);

		/* Entry Type[61:60] is 0 for free entry */
		if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
			free_index = i;
		}

		if (cpsw_ale_entry_mac_match(ale_entry, mac)) {
			matching_index = i;
			break;
		}
	}

	/* No match: fall back to the first free slot, if any. */
	if (matching_index < 0) {
		if (free_index < 0)
			return ENOMEM;
		i = free_index;
	}

	/*
	 * Build the entry from scratch; a pre-existing entry for this
	 * address is overwritten, not merged.
	 * NOTE(review): this replaces any previous port mask with
	 * "portmask" rather than OR-ing them together -- confirm that
	 * is intended for addresses shared between ports.
	 */
	cpsw_ale_entry_init(ale_entry);

	cpsw_ale_entry_set_mac(ale_entry, mac);
	cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
	cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
	cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, portmask);

	cpsw_ale_write_entry(sc, i, ale_entry);

	return 0;
}
1539
/*
 * cpsw_ale_update_addresses: (re)program the ALE address table and the
 * per-port outgoing MAC registers.  Table entry 0 routes our own
 * unicast address to the host port, entry 1 is broadcast; multicast
 * entries are rebuilt from the ethercom multicast list.  "purge"
 * forces all existing multicast entries to be dropped first.  Always
 * returns 0.
 */
static int
cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
{
	uint8_t *mac = sc->sc_enaddr;
	uint32_t ale_entry[3];
	int i;
	struct ethercom * const ec = &sc->sc_ec;
	struct ether_multi *ifma;

	cpsw_ale_entry_init(ale_entry);
	/* Route incoming packets for our MAC address to Port 0 (host). */
	/* For simplicity, keep this entry at table index 0 in the ALE. */
	cpsw_ale_entry_set_mac(ale_entry, mac);
	cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
	cpsw_ale_entry_set(ale_entry, ALE_PORT_NUMBER, 0);
	cpsw_ale_write_entry(sc, 0, ale_entry);

	/* Set outgoing MAC Address for Ports 1 and 2. */
	for (i = CPSW_CPPI_PORTS; i < (CPSW_ETH_PORTS + CPSW_CPPI_PORTS); ++i)
		cpsw_ale_set_outgoing_mac(sc, i, mac);

	/* Keep the broadcast address at table entry 1. */
	cpsw_ale_entry_init(ale_entry);
	cpsw_ale_entry_set_bcast_mac(ale_entry);
	cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
	cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
	cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, ALE_PORT_MASK_ALL);
	cpsw_ale_write_entry(sc, 1, ale_entry);

	/* SIOCDELMULTI doesn't specify the particular address
	   being removed, so we have to remove all and rebuild. */
	if (purge)
		cpsw_ale_remove_all_mc_entries(sc);

	/* Set other multicast addrs desired. */
	ETHER_LOCK(ec);
	LIST_FOREACH(ifma, &ec->ec_multiaddrs, enm_list) {
		cpsw_ale_mc_entry_set(sc, ALE_PORT_MASK_ALL, ifma->enm_addrlo);
	}
	ETHER_UNLOCK(ec);

	return 0;
}
1583