1 /* $NetBSD: if_cpsw.c,v 1.10.2.1 2020/02/29 20:18:20 ad Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*-
30 * Copyright (c) 2012 Damjan Marion <dmarion (at) Freebsd.org>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54
55 #include <sys/cdefs.h>
56 __KERNEL_RCSID(0, "$NetBSD: if_cpsw.c,v 1.10.2.1 2020/02/29 20:18:20 ad Exp $");
57
58 #include <sys/param.h>
59 #include <sys/bus.h>
60 #include <sys/device.h>
61 #include <sys/ioctl.h>
62 #include <sys/intr.h>
63 #include <sys/kmem.h>
64 #include <sys/mutex.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67
68 #include <net/if.h>
69 #include <net/if_ether.h>
70 #include <net/if_media.h>
71 #include <net/bpf.h>
72
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75
76 #include <dev/fdt/fdtvar.h>
77
78 #include <arm/ti/if_cpswreg.h>
79
80 #define FDT_INTR_FLAGS 0
81
82 #define CPSW_TXFRAGS 16
83
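/*
 * Buffer descriptors live in the switch's 8KB of on-chip CPPI RAM,
 * split evenly: the first half holds the tx descriptors, the second
 * half the rx descriptors.
 */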
84 #define CPSW_CPPI_RAM_SIZE (0x2000)
85 #define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
86 #define CPSW_CPPI_RAM_RXDESCS_SIZE \
87 (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
88 #define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
89 #define CPSW_CPPI_RAM_RXDESCS_BASE \
90 (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)
91
92 #define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
93 #define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
94
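/*
 * The descriptor counts must be powers of two so that the ring index
 * helpers (cpsw_txdesc_adjust()/cpsw_rxdesc_adjust()) can wrap with a
 * simple mask.
 */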
95 CTASSERT(powerof2(CPSW_NTXDESCS));
96 CTASSERT(powerof2(CPSW_NRXDESCS));
97
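/*
 * Frames shorter than this (60 bytes: minimum Ethernet frame less the
 * CRC) are padded out with an extra descriptor pointing at the
 * zero-filled sc_txpad buffer.
 */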
98 #define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
99
100 #define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
101 #define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)
102
103 #define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
104 #define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
105
106 struct cpsw_ring_data {
107 bus_dmamap_t tx_dm[CPSW_NTXDESCS];
108 struct mbuf *tx_mb[CPSW_NTXDESCS];
109 bus_dmamap_t rx_dm[CPSW_NRXDESCS];
110 struct mbuf *rx_mb[CPSW_NRXDESCS];
111 };
112
113 struct cpsw_softc {
114 device_t sc_dev;
115 bus_space_tag_t sc_bst;
116 bus_space_handle_t sc_bsh;
117 bus_size_t sc_bss;
118 bus_dma_tag_t sc_bdt;
119 bus_space_handle_t sc_bsh_txdescs;
120 bus_space_handle_t sc_bsh_rxdescs;
121 bus_addr_t sc_txdescs_pa;
122 bus_addr_t sc_rxdescs_pa;
123 struct ethercom sc_ec;
124 struct mii_data sc_mii;
125 bool sc_phy_has_1000t;
126 bool sc_attached;
127 callout_t sc_tick_ch;
128 void *sc_ih;
129 struct cpsw_ring_data *sc_rdp;
130 volatile u_int sc_txnext;
131 volatile u_int sc_txhead;
132 volatile u_int sc_rxhead;
133 void *sc_rxthih;
134 void *sc_rxih;
135 void *sc_txih;
136 void *sc_miscih;
137 void *sc_txpad;
138 bus_dmamap_t sc_txpad_dm;
139 #define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr
140 uint8_t sc_enaddr[ETHER_ADDR_LEN];
141 volatile bool sc_txrun;
142 volatile bool sc_rxrun;
143 volatile bool sc_txeoq;
144 volatile bool sc_rxeoq;
145 };
146
147 static int cpsw_match(device_t, cfdata_t, void *);
148 static void cpsw_attach(device_t, device_t, void *);
149 static int cpsw_detach(device_t, int);
150
151 static void cpsw_start(struct ifnet *);
152 static int cpsw_ioctl(struct ifnet *, u_long, void *);
153 static void cpsw_watchdog(struct ifnet *);
154 static int cpsw_init(struct ifnet *);
155 static void cpsw_stop(struct ifnet *, int);
156
157 static int cpsw_mii_readreg(device_t, int, int, uint16_t *);
158 static int cpsw_mii_writereg(device_t, int, int, uint16_t);
159 static void cpsw_mii_statchg(struct ifnet *);
160
161 static int cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
162 static void cpsw_tick(void *);
163
164 static int cpsw_rxthintr(void *);
165 static int cpsw_rxintr(void *);
166 static int cpsw_txintr(void *);
167 static int cpsw_miscintr(void *);
168
169 /* ALE support */
170 #define CPSW_MAX_ALE_ENTRIES 1024
171
172 static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
173
174 CFATTACH_DECL_NEW(cpsw, sizeof(struct cpsw_softc),
175 cpsw_match, cpsw_attach, cpsw_detach, NULL);
176
177 #include <sys/kernhist.h>
178 KERNHIST_DEFINE(cpswhist);
179
180 #define CPSWHIST_CALLARGS(A,B,C,D) do { \
181 KERNHIST_CALLARGS(cpswhist, "%jx %jx %jx %jx", \
182 (uintptr_t)(A), (uintptr_t)(B), (uintptr_t)(C), (uintptr_t)(D));\
183 } while (0)
184
185
186 static inline u_int
187 cpsw_txdesc_adjust(u_int x, int y)
188 {
189 return (((x) + y) & (CPSW_NTXDESCS - 1));
190 }
191
192 static inline u_int
193 cpsw_rxdesc_adjust(u_int x, int y)
194 {
195 return (((x) + y) & (CPSW_NRXDESCS - 1));
196 }
197
198 static inline uint32_t
199 cpsw_read_4(struct cpsw_softc * const sc, bus_size_t const offset)
200 {
201 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, offset);
202 }
203
204 static inline void
205 cpsw_write_4(struct cpsw_softc * const sc, bus_size_t const offset,
206 uint32_t const value)
207 {
208 bus_space_write_4(sc->sc_bst, sc->sc_bsh, offset, value);
209 }
210
211 static inline void
212 cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
213 {
214 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
215
216 KERNHIST_FUNC(__func__);
217 CPSWHIST_CALLARGS(sc, i, n, 0);
218
219 bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
220 }
221
222 static inline void
223 cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
224 {
225 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
226
227 KERNHIST_FUNC(__func__);
228 CPSWHIST_CALLARGS(sc, i, n, 0);
229
230 bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
231 }
232
233 static inline void
234 cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
235 struct cpsw_cpdma_bd * const bdp)
236 {
237 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
238 uint32_t * const dp = bdp->word;
239 const bus_size_t c = __arraycount(bdp->word);
240
241 KERNHIST_FUNC(__func__);
242 CPSWHIST_CALLARGS(sc, i, bdp, 0);
243
244 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
245 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
246 dp[0], dp[1], dp[2], dp[3]);
247 }
248
249 static inline void
250 cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
251 struct cpsw_cpdma_bd * const bdp)
252 {
253 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
254 uint32_t * const dp = bdp->word;
255 const bus_size_t c = __arraycount(bdp->word);
256
257 KERNHIST_FUNC(__func__);
258 CPSWHIST_CALLARGS(sc, i, bdp, 0);
259 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
260 dp[0], dp[1], dp[2], dp[3]);
261
262 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
263 }
264
265 static inline void
266 cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
267 struct cpsw_cpdma_bd * const bdp)
268 {
269 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
270 uint32_t * const dp = bdp->word;
271 const bus_size_t c = __arraycount(bdp->word);
272
273 KERNHIST_FUNC(__func__);
274 CPSWHIST_CALLARGS(sc, i, bdp, 0);
275
276 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
277
278 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
279 dp[0], dp[1], dp[2], dp[3]);
280 }
281
282 static inline void
283 cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
284 struct cpsw_cpdma_bd * const bdp)
285 {
286 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
287 uint32_t * const dp = bdp->word;
288 const bus_size_t c = __arraycount(bdp->word);
289
290 KERNHIST_FUNC(__func__);
291 CPSWHIST_CALLARGS(sc, i, bdp, 0);
292 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
293 dp[0], dp[1], dp[2], dp[3]);
294
295 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
296 }
297
298 static inline bus_addr_t
299 cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
300 {
301 KASSERT(x < CPSW_NTXDESCS);
302 return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
303 }
304
305 static inline bus_addr_t
306 cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
307 {
308 KASSERT(x < CPSW_NRXDESCS);
309 return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
310 }
311
312
313 static int
314 cpsw_match(device_t parent, cfdata_t cf, void *aux)
315 {
316 struct fdt_attach_args * const faa = aux;
317
318 static const char * const compatible[] = {
319 "ti,am335x-cpsw",
320 "ti,cpsw",
321 NULL
322 };
323
324 return of_match_compatible(faa->faa_phandle, compatible);
325 }
326
327 static bool
328 cpsw_phy_has_1000t(struct cpsw_softc * const sc)
329 {
330 struct ifmedia_entry *ifm;
331
332 TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) {
333 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T)
334 return true;
335 }
336 return false;
337 }
338
339 static int
340 cpsw_detach(device_t self, int flags)
341 {
342 struct cpsw_softc * const sc = device_private(self);
343 struct ifnet *ifp = &sc->sc_ec.ec_if;
344 u_int i;
345
346 /* Succeed now if there's no work to do. */
347 if (!sc->sc_attached)
348 return 0;
349
350 sc->sc_attached = false;
351
352 /* Stop the interface. Callouts are stopped in it. */
353 cpsw_stop(ifp, 1);
354
355 /* Destroy our callout. */
356 callout_destroy(&sc->sc_tick_ch);
357
358 /* Let go of the interrupts */
359 intr_disestablish(sc->sc_rxthih);
360 intr_disestablish(sc->sc_rxih);
361 intr_disestablish(sc->sc_txih);
362 intr_disestablish(sc->sc_miscih);
363
364 ether_ifdetach(ifp);
365 if_detach(ifp);
366
367 /* Delete all media. */
368 ifmedia_fini(&sc->sc_mii.mii_media);
369
370 /* Free the packet padding buffer */
371 kmem_free(sc->sc_txpad, ETHER_MIN_LEN);
372 bus_dmamap_destroy(sc->sc_bdt, sc->sc_txpad_dm);
373
374 /* Destroy all the descriptors */
375 for (i = 0; i < CPSW_NTXDESCS; i++)
376 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->tx_dm[i]);
377 for (i = 0; i < CPSW_NRXDESCS; i++)
378 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->rx_dm[i]);
379 kmem_free(sc->sc_rdp, sizeof(*sc->sc_rdp));
380
381 /* Unmap */
382 bus_space_unmap(sc->sc_bst, sc->sc_bsh, sc->sc_bss);
383
384
385 return 0;
386 }
387
388 static void
389 cpsw_attach(device_t parent, device_t self, void *aux)
390 {
391 struct fdt_attach_args * const faa = aux;
392 struct cpsw_softc * const sc = device_private(self);
393 struct ethercom * const ec = &sc->sc_ec;
394 struct ifnet * const ifp = &ec->ec_if;
395 struct mii_data * const mii = &sc->sc_mii;
396 const int phandle = faa->faa_phandle;
397 const uint8_t *macaddr;
398 bus_addr_t addr;
399 bus_size_t size;
400 int error, slave, len;
401 u_int i;
402
403 KERNHIST_INIT(cpswhist, 4096);
404
405 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
406 aprint_error(": couldn't get registers\n");
407 return;
408 }
409
410 sc->sc_dev = self;
411
412 aprint_normal(": TI Layer 2 3-Port Switch\n");
413 aprint_naive("\n");
414
415 callout_init(&sc->sc_tick_ch, 0);
416 callout_setfunc(&sc->sc_tick_ch, cpsw_tick, sc);
417
418 macaddr = NULL;
419 slave = of_find_firstchild_byname(phandle, "slave");
420 if (slave > 0) {
421 macaddr = fdtbus_get_prop(slave, "mac-address", &len);
422 if (len != ETHER_ADDR_LEN)
423 macaddr = NULL;
424 }
425 if (macaddr == NULL) {
426 #if 0
427 /* grab mac_id0 from AM335x control module */
428 uint32_t reg_lo, reg_hi;
429
430 if (sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO, &reg_lo) == 0 &&
431 sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI, &reg_hi) == 0) {
432 sc->sc_enaddr[0] = (reg_hi >> 0) & 0xff;
433 sc->sc_enaddr[1] = (reg_hi >> 8) & 0xff;
434 sc->sc_enaddr[2] = (reg_hi >> 16) & 0xff;
435 sc->sc_enaddr[3] = (reg_hi >> 24) & 0xff;
436 sc->sc_enaddr[4] = (reg_lo >> 0) & 0xff;
437 sc->sc_enaddr[5] = (reg_lo >> 8) & 0xff;
438 } else
439 #endif
440 {
441 aprint_error_dev(sc->sc_dev,
442 "using fake station address\n");
443 /* 'N' happens to have the Local bit set */
444 #if 0
445 sc->sc_enaddr[0] = 'N';
446 sc->sc_enaddr[1] = 'e';
447 sc->sc_enaddr[2] = 't';
448 sc->sc_enaddr[3] = 'B';
449 sc->sc_enaddr[4] = 'S';
450 sc->sc_enaddr[5] = 'D';
451 #else
452 /* XXX Glor */
453 sc->sc_enaddr[0] = 0xd4;
454 sc->sc_enaddr[1] = 0x94;
455 sc->sc_enaddr[2] = 0xa1;
456 sc->sc_enaddr[3] = 0x97;
457 sc->sc_enaddr[4] = 0x03;
458 sc->sc_enaddr[5] = 0x94;
459 #endif
460 }
461 } else {
462 memcpy(sc->sc_enaddr, macaddr, ETHER_ADDR_LEN);
463 }
464
465 sc->sc_rxthih = fdtbus_intr_establish(phandle, CPSW_INTROFF_RXTH, IPL_VM, FDT_INTR_FLAGS, cpsw_rxthintr, sc);
466 sc->sc_rxih = fdtbus_intr_establish(phandle, CPSW_INTROFF_RX, IPL_VM, FDT_INTR_FLAGS, cpsw_rxintr, sc);
467 sc->sc_txih = fdtbus_intr_establish(phandle, CPSW_INTROFF_TX, IPL_VM, FDT_INTR_FLAGS, cpsw_txintr, sc);
468 sc->sc_miscih = fdtbus_intr_establish(phandle, CPSW_INTROFF_MISC, IPL_VM, FDT_INTR_FLAGS, cpsw_miscintr, sc);
469
470 sc->sc_bst = faa->faa_bst;
471 sc->sc_bss = size;
472 sc->sc_bdt = faa->faa_dmat;
473
474 error = bus_space_map(sc->sc_bst, addr, size, 0,
475 &sc->sc_bsh);
476 if (error) {
477 aprint_error_dev(sc->sc_dev,
478 "can't map registers: %d\n", error);
479 return;
480 }
481
482 sc->sc_txdescs_pa = addr + CPSW_CPPI_RAM_TXDESCS_BASE;
483 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
484 CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
485 &sc->sc_bsh_txdescs);
486 if (error) {
487 aprint_error_dev(sc->sc_dev,
488 "can't subregion tx ring SRAM: %d\n", error);
489 return;
490 }
491 aprint_debug_dev(sc->sc_dev, "txdescs at %p\n",
492 (void *)sc->sc_bsh_txdescs);
493
494 sc->sc_rxdescs_pa = addr + CPSW_CPPI_RAM_RXDESCS_BASE;
495 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
496 CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
497 &sc->sc_bsh_rxdescs);
498 if (error) {
499 aprint_error_dev(sc->sc_dev,
500 "can't subregion rx ring SRAM: %d\n", error);
501 return;
502 }
503 aprint_debug_dev(sc->sc_dev, "rxdescs at %p\n",
504 (void *)sc->sc_bsh_rxdescs);
505
506 sc->sc_rdp = kmem_alloc(sizeof(*sc->sc_rdp), KM_SLEEP);
507
508 for (i = 0; i < CPSW_NTXDESCS; i++) {
509 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
510 CPSW_TXFRAGS, MCLBYTES, 0, 0,
511 &sc->sc_rdp->tx_dm[i])) != 0) {
512 aprint_error_dev(sc->sc_dev,
513 "unable to create tx DMA map: %d\n", error);
514 }
515 sc->sc_rdp->tx_mb[i] = NULL;
516 }
517
518 for (i = 0; i < CPSW_NRXDESCS; i++) {
519 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
520 MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
521 aprint_error_dev(sc->sc_dev,
522 "unable to create rx DMA map: %d\n", error);
523 }
524 sc->sc_rdp->rx_mb[i] = NULL;
525 }
526
527 sc->sc_txpad = kmem_zalloc(ETHER_MIN_LEN, KM_SLEEP);
528 bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
529 BUS_DMA_WAITOK, &sc->sc_txpad_dm);
530 bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
531 ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
532 bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
533 BUS_DMASYNC_PREWRITE);
534
535 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
536 ether_sprintf(sc->sc_enaddr));
537
538 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
539 ifp->if_softc = sc;
540 ifp->if_capabilities = 0;
541 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
542 ifp->if_start = cpsw_start;
543 ifp->if_ioctl = cpsw_ioctl;
544 ifp->if_init = cpsw_init;
545 ifp->if_stop = cpsw_stop;
546 ifp->if_watchdog = cpsw_watchdog;
547 IFQ_SET_READY(&ifp->if_snd);
548
549 cpsw_stop(ifp, 0);
550
551 mii->mii_ifp = ifp;
552 mii->mii_readreg = cpsw_mii_readreg;
553 mii->mii_writereg = cpsw_mii_writereg;
554 mii->mii_statchg = cpsw_mii_statchg;
555
556 sc->sc_ec.ec_mii = mii;
557 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
558
559 /* Initialize MDIO */
560 cpsw_write_4(sc, MDIOCONTROL,
561 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
562 /* Clear ALE */
563 cpsw_write_4(sc, CPSW_ALE_CONTROL, ALECTL_CLEAR_TABLE);
564
565 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, 0, 0);
566 if (LIST_FIRST(&mii->mii_phys) == NULL) {
567 aprint_error_dev(self, "no PHY found!\n");
568 sc->sc_phy_has_1000t = false;
569 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
570 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
571 } else {
572 sc->sc_phy_has_1000t = cpsw_phy_has_1000t(sc);
573
574 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
575 }
576
577 if_attach(ifp);
578 if_deferred_start_init(ifp, NULL);
579 ether_ifattach(ifp, sc->sc_enaddr);
580
581 /* The attach is successful. */
582 sc->sc_attached = true;
583
584 return;
585 }
586
587 static void
588 cpsw_start(struct ifnet *ifp)
589 {
590 struct cpsw_softc * const sc = ifp->if_softc;
591 struct cpsw_ring_data * const rdp = sc->sc_rdp;
592 struct cpsw_cpdma_bd bd;
593 uint32_t * const dw = bd.word;
594 struct mbuf *m;
595 bus_dmamap_t dm;
596 u_int eopi __diagused = ~0;
597 u_int seg;
598 u_int txfree;
599 int txstart = -1;
600 int error;
601 bool pad;
602 u_int mlen;
603
604 KERNHIST_FUNC(__func__);
605 CPSWHIST_CALLARGS(sc, 0, 0, 0);
606
607 if (__predict_false((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
608 IFF_RUNNING)) {
609 return;
610 }
611
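	/*
	 * Count free tx descriptors; one slot is always left unused so
	 * that a full ring can be told apart from an empty one
	 * (sc_txhead == sc_txnext means empty).
	 */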
612 if (sc->sc_txnext >= sc->sc_txhead)
613 txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
614 else
615 txfree = sc->sc_txhead - sc->sc_txnext - 1;
616
617 KERNHIST_LOG(cpswhist, "start txf %x txh %x txn %x txr %x\n",
618 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
619
620 while (txfree > 0) {
621 IFQ_POLL(&ifp->if_snd, m);
622 if (m == NULL)
623 break;
624
625 dm = rdp->tx_dm[sc->sc_txnext];
626
627 error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
628 if (error == EFBIG) {
629 device_printf(sc->sc_dev, "won't fit\n");
630 IFQ_DEQUEUE(&ifp->if_snd, m);
631 m_freem(m);
632 if_statinc(ifp, if_oerrors);
633 continue;
634 } else if (error != 0) {
635 device_printf(sc->sc_dev, "error\n");
636 break;
637 }
638
639 if (dm->dm_nsegs + 1 >= txfree) {
640 ifp->if_flags |= IFF_OACTIVE;
641 bus_dmamap_unload(sc->sc_bdt, dm);
642 break;
643 }
644
645 mlen = m_length(m);
646 pad = mlen < CPSW_PAD_LEN;
647
648 KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
649 rdp->tx_mb[sc->sc_txnext] = m;
650 IFQ_DEQUEUE(&ifp->if_snd, m);
651
652 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
653 BUS_DMASYNC_PREWRITE);
654
655 if (txstart == -1)
656 txstart = sc->sc_txnext;
657 eopi = sc->sc_txnext;
658 for (seg = 0; seg < dm->dm_nsegs; seg++) {
659 dw[0] = cpsw_txdesc_paddr(sc,
660 TXDESC_NEXT(sc->sc_txnext));
661 dw[1] = dm->dm_segs[seg].ds_addr;
662 dw[2] = dm->dm_segs[seg].ds_len;
663 dw[3] = 0;
664
665 if (seg == 0)
666 dw[3] |= CPDMA_BD_SOP | CPDMA_BD_OWNER |
667 MAX(mlen, CPSW_PAD_LEN);
668
669 if ((seg == dm->dm_nsegs - 1) && !pad)
670 dw[3] |= CPDMA_BD_EOP;
671
672 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
673 txfree--;
674 eopi = sc->sc_txnext;
675 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
676 }
677 if (pad) {
678 dw[0] = cpsw_txdesc_paddr(sc,
679 TXDESC_NEXT(sc->sc_txnext));
680 dw[1] = sc->sc_txpad_pa;
681 dw[2] = CPSW_PAD_LEN - mlen;
682 dw[3] = CPDMA_BD_EOP;
683
684 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
685 txfree--;
686 eopi = sc->sc_txnext;
687 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
688 }
689
690 bpf_mtap(ifp, m, BPF_D_OUT);
691 }
692
693 if (txstart >= 0) {
694 ifp->if_timer = 5;
695 /* terminate the new chain */
696 KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
697 cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);
698 KERNHIST_LOG(cpswhist, "CP %x HDP %x s %x e %x\n",
699 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
700 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), txstart, eopi);
701 /* link the new chain on */
702 cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
703 cpsw_txdesc_paddr(sc, txstart));
704 if (sc->sc_txeoq) {
705 /* kick the dma engine */
706 sc->sc_txeoq = false;
707 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
708 cpsw_txdesc_paddr(sc, txstart));
709 }
710 }
711 KERNHIST_LOG(cpswhist, "end txf %x txh %x txn %x txr %x\n",
712 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
713 }
714
715 static int
716 cpsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
717 {
718 const int s = splnet();
719 int error = 0;
720
721 switch (cmd) {
722 default:
723 error = ether_ioctl(ifp, cmd, data);
724 if (error == ENETRESET) {
725 error = 0;
726 }
727 break;
728 }
729
730 splx(s);
731
732 return error;
733 }
734
735 static void
736 cpsw_watchdog(struct ifnet *ifp)
737 {
738 struct cpsw_softc *sc = ifp->if_softc;
739
740 device_printf(sc->sc_dev, "device timeout\n");
741
742 if_statinc(ifp, if_oerrors);
743 cpsw_init(ifp);
744 cpsw_start(ifp);
745 }
746
747 static int
748 cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
749 {
750 u_int tries;
751
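	/* Poll bit 31 (the busy/GO flag) of the MDIO user access register
	 * until the hardware clears it, for at most 1ms. */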
752 for (tries = 0; tries < 1000; tries++) {
753 if ((cpsw_read_4(sc, reg) & __BIT(31)) == 0)
754 return 0;
755 delay(1);
756 }
757 return ETIMEDOUT;
758 }
759
760 static int
761 cpsw_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
762 {
763 struct cpsw_softc * const sc = device_private(dev);
764 uint32_t v;
765
766 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
767 return -1;
768
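	/* Start a read: bit 31 kicks off the transaction, bits 25:21 carry
	 * the register number and bits 20:16 the PHY address; bit 29 reads
	 * back as the ACK/valid flag, with the data in the low 16 bits. */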
769 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) |
770 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
771
772 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
773 return -1;
774
775 v = cpsw_read_4(sc, MDIOUSERACCESS0);
776 if (v & __BIT(29)) {
777 *val = v & 0xffff;
778 return 0;
779 }
780
781 return -1;
782 }
783
784 static int
785 cpsw_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
786 {
787 struct cpsw_softc * const sc = device_private(dev);
788 uint32_t v;
789
790 KASSERT((val & 0xffff0000UL) == 0);
791
792 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
793 goto out;
794
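	/* As for reads, but with bit 30 set to mark a write and the data
	 * value in the low 16 bits. */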
795 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) | (1 << 30) |
796 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);
797
798 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
799 goto out;
800
801 v = cpsw_read_4(sc, MDIOUSERACCESS0);
802 if ((v & __BIT(29)) == 0) {
803 out:
804 device_printf(sc->sc_dev, "%s error\n", __func__);
805 return -1;
806 }
807
808 return 0;
809 }
810
811 static void
812 cpsw_mii_statchg(struct ifnet *ifp)
813 {
814 return;
815 }
816
817 static int
818 cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
819 {
820 struct cpsw_ring_data * const rdp = sc->sc_rdp;
821 const u_int h = RXDESC_PREV(i);
822 struct cpsw_cpdma_bd bd;
823 uint32_t * const dw = bd.word;
824 struct mbuf *m;
825 int error = ENOBUFS;
826
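	/*
	 * If a fresh mbuf cluster cannot be allocated, fall through to
	 * "reuse": the descriptor is re-armed with the buffer already
	 * loaded in the DMA map and ENOBUFS is returned, so the caller
	 * drops the received packet instead.
	 */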
827 MGETHDR(m, M_DONTWAIT, MT_DATA);
828 if (m == NULL) {
829 goto reuse;
830 }
831
832 MCLGET(m, M_DONTWAIT);
833 if ((m->m_flags & M_EXT) == 0) {
834 m_freem(m);
835 goto reuse;
836 }
837
838 /* We have a new buffer, prepare it for the ring. */
839
840 if (rdp->rx_mb[i] != NULL)
841 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
842
843 m->m_len = m->m_pkthdr.len = MCLBYTES;
844
845 rdp->rx_mb[i] = m;
846
847 error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
848 BUS_DMA_READ | BUS_DMA_NOWAIT);
849 if (error) {
850 device_printf(sc->sc_dev, "can't load rx DMA map %d: %d\n",
851 i, error);
852 }
853
854 bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
855 0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
856
857 error = 0;
858
859 reuse:
860 /* (re-)setup the descriptor */
861 dw[0] = 0;
862 dw[1] = rdp->rx_dm[i]->dm_segs[0].ds_addr;
863 dw[2] = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
864 dw[3] = CPDMA_BD_OWNER;
865
866 cpsw_set_rxdesc(sc, i, &bd);
867 /* and link onto ring */
868 cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));
869
870 return error;
871 }
872
873 static int
874 cpsw_init(struct ifnet *ifp)
875 {
876 struct cpsw_softc * const sc = ifp->if_softc;
877 struct mii_data * const mii = &sc->sc_mii;
878 int i;
879
880 cpsw_stop(ifp, 0);
881
882 sc->sc_txnext = 0;
883 sc->sc_txhead = 0;
884
885 /* Reset wrapper */
886 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
887 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
888 ;
889
890 /* Reset SS */
891 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
892 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
893 ;
894
895 /* Clear table and enable ALE */
896 cpsw_write_4(sc, CPSW_ALE_CONTROL,
897 ALECTL_ENABLE_ALE | ALECTL_CLEAR_TABLE);
898
899 /* Reset and init Sliver port 1 and 2 */
900 for (i = 0; i < CPSW_ETH_PORTS; i++) {
901 uint32_t macctl;
902
903 /* Reset */
904 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
905 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
906 ;
907 /* Set Slave Mapping */
908 cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
909 cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
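		/* 0x5f2 = 1522 bytes, a maximum-size VLAN-tagged frame */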
910 cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
911 /* Set MAC Address */
912 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i+1),
913 sc->sc_enaddr[0] | (sc->sc_enaddr[1] << 8) |
914 (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[3] << 24));
915 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i+1),
916 sc->sc_enaddr[4] | (sc->sc_enaddr[5] << 8));
917
918 /* Set MACCONTROL for ports 0,1 */
919 macctl = SLMACCTL_FULLDUPLEX | SLMACCTL_GMII_EN |
920 SLMACCTL_IFCTL_A;
921 if (sc->sc_phy_has_1000t)
922 macctl |= SLMACCTL_GIG;
923 cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), macctl);
924
925 /* Set ALE port to forwarding(3) */
926 cpsw_write_4(sc, CPSW_ALE_PORTCTL(i+1), 3);
927 }
928
929 /* Set Host Port Mapping */
930 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
931 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
932
933 /* Set ALE port to forwarding(3) */
934 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);
935
936 /* Initialize addrs */
937 cpsw_ale_update_addresses(sc, 1);
938
939 cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
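	/* 7 presumably enables statistics collection on all three ports
	 * (one bit per port). */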
940 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
941
942 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
943 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
944 ;
945
946 for (i = 0; i < 8; i++) {
947 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
948 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
949 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
950 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
951 }
952
953 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
954 CPSW_CPPI_RAM_TXDESCS_SIZE/4);
955
956 sc->sc_txhead = 0;
957 sc->sc_txnext = 0;
958
959 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
960
961 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
962 CPSW_CPPI_RAM_RXDESCS_SIZE/4);
963 /* Initialize RX Buffer Descriptors */
964 cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
965 for (i = 0; i < CPSW_NRXDESCS; i++) {
966 cpsw_new_rxbuf(sc, i);
967 }
968 sc->sc_rxhead = 0;
969
970 /* turn off flow control */
971 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
972
973 /* align layer 3 header to 32-bit */
974 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);
975
976 /* Clear all interrupt Masks */
977 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
978 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
979
980 /* Enable TX & RX DMA */
981 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
982 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
983
984 /* Enable TX and RX interrupt receive for core 0 */
985 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 1);
986 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 1);
987 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);
988
989 /* Enable host Error Interrupt */
990 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 2);
991
992 /* Enable interrupts for TX and RX Channel 0 */
993 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
994 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);
995
996 /* Ack stalled irqs */
997 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
998 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
999 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1000 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1001
1002 /* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
1003 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
1004 cpsw_write_4(sc, MDIOCONTROL,
1005 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
1006
1007 mii_mediachg(mii);
1008
1009 /* Write channel 0 RX HDP */
1010 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
1011 sc->sc_rxrun = true;
1012 sc->sc_rxeoq = false;
1013
1014 sc->sc_txrun = true;
1015 sc->sc_txeoq = true;
1016 callout_schedule(&sc->sc_tick_ch, hz);
1017 ifp->if_flags |= IFF_RUNNING;
1018 ifp->if_flags &= ~IFF_OACTIVE;
1019
1020 return 0;
1021 }
1022
1023 static void
1024 cpsw_stop(struct ifnet *ifp, int disable)
1025 {
1026 struct cpsw_softc * const sc = ifp->if_softc;
1027 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1028 u_int i;
1029
1030 aprint_debug_dev(sc->sc_dev, "%s: ifp %p disable %d\n", __func__,
1031 ifp, disable);
1032
1033 if ((ifp->if_flags & IFF_RUNNING) == 0)
1034 return;
1035
1036 callout_stop(&sc->sc_tick_ch);
1037 mii_down(&sc->sc_mii);
1038
1039 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
1040 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
1041 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0x0);
1042 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0x0);
1043 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x0);
1044
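	/*
	 * Request teardown of tx and rx DMA channel 0, then poll via the
	 * interrupt handlers for up to ~100ms until both report teardown
	 * complete.
	 */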
1045 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
1046 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
1047 i = 0;
1048 while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
1049 delay(10);
1050 if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
1051 sc->sc_txrun = false;
1052 if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
1053 sc->sc_rxrun = false;
1054 i++;
1055 }
1056 //printf("%s teardown complete in %u\n", __func__, i);
1057
1058 /* Reset wrapper */
1059 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
1060 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
1061 ;
1062
1063 /* Reset SS */
1064 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
1065 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
1066 ;
1067
1068 for (i = 0; i < CPSW_ETH_PORTS; i++) {
1069 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
1070 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
1071 ;
1072 }
1073
1074 /* Reset CPDMA */
1075 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
1076 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
1077 ;
1078
1079 /* Release any queued transmit buffers. */
1080 for (i = 0; i < CPSW_NTXDESCS; i++) {
1081 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
1082 m_freem(rdp->tx_mb[i]);
1083 rdp->tx_mb[i] = NULL;
1084 }
1085
1086 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1087 ifp->if_timer = 0;
1088
1089 if (!disable)
1090 return;
1091
1092 for (i = 0; i < CPSW_NRXDESCS; i++) {
1093 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
1094 m_freem(rdp->rx_mb[i]);
1095 rdp->rx_mb[i] = NULL;
1096 }
1097 }
1098
1099 static void
1100 cpsw_tick(void *arg)
1101 {
1102 struct cpsw_softc * const sc = arg;
1103 struct mii_data * const mii = &sc->sc_mii;
1104 const int s = splnet();
1105
1106 mii_tick(mii);
1107
1108 splx(s);
1109
1110 callout_schedule(&sc->sc_tick_ch, hz);
1111 }
1112
1113 static int
1114 cpsw_rxthintr(void *arg)
1115 {
1116 struct cpsw_softc * const sc = arg;
1117
1118 /* this won't deassert the interrupt though */
1119 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1120
1121 return 1;
1122 }
1123
1124 static int
1125 cpsw_rxintr(void *arg)
1126 {
1127 struct cpsw_softc * const sc = arg;
1128 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1129 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1130 struct cpsw_cpdma_bd bd;
1131 const uint32_t * const dw = bd.word;
1132 bus_dmamap_t dm;
1133 struct mbuf *m;
1134 u_int i;
1135 u_int len, off;
1136
1137 KERNHIST_FUNC(__func__);
1138 CPSWHIST_CALLARGS(sc, 0, 0, 0);
1139
1140 for (;;) {
1141 KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);
1142
1143 i = sc->sc_rxhead;
1144 KERNHIST_LOG(cpswhist, "rxhead %x CP %x\n", i,
1145 cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0)), 0, 0);
1146 dm = rdp->rx_dm[i];
1147 m = rdp->rx_mb[i];
1148
1149 KASSERT(dm != NULL);
1150 KASSERT(m != NULL);
1151
1152 cpsw_get_rxdesc(sc, i, &bd);
1153
1154 if (ISSET(dw[3], CPDMA_BD_OWNER))
1155 break;
1156
1157 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1158 sc->sc_rxrun = false;
1159 return 1;
1160 }
1161
1162 if ((dw[3] & (CPDMA_BD_SOP | CPDMA_BD_EOP)) !=
1163 (CPDMA_BD_SOP | CPDMA_BD_EOP)) {
1164 //Debugger();
1165 }
1166
1167 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
1168 BUS_DMASYNC_POSTREAD);
1169
1170 if (cpsw_new_rxbuf(sc, i) != 0) {
1171 /* drop current packet, reuse buffer for new */
1172 if_statinc(ifp, if_ierrors);
1173 goto next;
1174 }
1175
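		/* Word 2 carries the buffer offset in bits 26:16; word 3
		 * carries the received length in bits 10:0 plus flags. */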
1176 off = __SHIFTOUT(dw[2], (uint32_t)__BITS(26, 16));
1177 len = __SHIFTOUT(dw[3], (uint32_t)__BITS(10, 0));
1178
1179 if (ISSET(dw[3], CPDMA_BD_PASSCRC))
1180 len -= ETHER_CRC_LEN;
1181
1182 m_set_rcvif(m, ifp);
1183 m->m_pkthdr.len = m->m_len = len;
1184 m->m_data += off;
1185
1186 if_percpuq_enqueue(ifp->if_percpuq, m);
1187
1188 next:
1189 sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
1190 if (ISSET(dw[3], CPDMA_BD_EOQ)) {
1191 sc->sc_rxeoq = true;
1192 break;
1193 } else {
1194 sc->sc_rxeoq = false;
1195 }
1196 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(0),
1197 cpsw_rxdesc_paddr(sc, i));
1198 }
1199
1200 if (sc->sc_rxeoq) {
1201 device_printf(sc->sc_dev, "rxeoq\n");
1202 //Debugger();
1203 }
1204
1205 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1206
1207 return 1;
1208 }
1209
1210 static int
1211 cpsw_txintr(void *arg)
1212 {
1213 struct cpsw_softc * const sc = arg;
1214 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1215 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1216 struct cpsw_cpdma_bd bd;
1217 const uint32_t * const dw = bd.word;
1218 bool handled = false;
1219 uint32_t tx0_cp;
1220 u_int cpi;
1221
1222 KERNHIST_FUNC(__func__);
1223 CPSWHIST_CALLARGS(sc, 0, 0, 0);
1224
1225 KASSERT(sc->sc_txrun);
1226
1227 KERNHIST_LOG(cpswhist, "before txnext %x txhead %x txrun %x\n",
1228 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1229
1230 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1231
1232 if (tx0_cp == 0xfffffffc) {
1233 /* Teardown, ack it */
1234 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
1235 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0), 0);
1236 sc->sc_txrun = false;
1237 return 0;
1238 }
1239
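	/*
	 * Walk descriptors from sc_txhead toward the completion pointer
	 * reported in TX_CP(0), unloading each completed packet's DMA map
	 * and freeing its mbuf.
	 */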
1240 for (;;) {
1241 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1242 cpi = (tx0_cp - sc->sc_txdescs_pa) / sizeof(struct cpsw_cpdma_bd);
1243 KASSERT(sc->sc_txhead < CPSW_NTXDESCS);
1244
1245 KERNHIST_LOG(cpswhist, "txnext %x txhead %x txrun %x cpi %x\n",
1246 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, cpi);
1247
1248 cpsw_get_txdesc(sc, sc->sc_txhead, &bd);
1249
1250 if (dw[2] == 0) {
1251 //Debugger();
1252 }
1253
1254 if (ISSET(dw[3], CPDMA_BD_SOP) == 0)
1255 goto next;
1256
1257 if (ISSET(dw[3], CPDMA_BD_OWNER)) {
1258 printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
1259 sc->sc_txnext);
1260 break;
1261 }
1262
1263 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1264 sc->sc_txrun = false;
1265 return 1;
1266 }
1267
1268 bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
1269 0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
1270 BUS_DMASYNC_POSTWRITE);
1271 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);
1272
1273 m_freem(rdp->tx_mb[sc->sc_txhead]);
1274 rdp->tx_mb[sc->sc_txhead] = NULL;
1275
1276 if_statinc(ifp, if_opackets);
1277
1278 handled = true;
1279
1280 ifp->if_flags &= ~IFF_OACTIVE;
1281
1282 next:
1283 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1284 sc->sc_txeoq = true;
1285 }
1286 if (sc->sc_txhead == cpi) {
1287 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0),
1288 cpsw_txdesc_paddr(sc, cpi));
1289 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1290 break;
1291 }
1292 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1293 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1294 sc->sc_txeoq = true;
1295 break;
1296 }
1297 }
1298
1299 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1300
1301 if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
1302 if (cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)) == 0) {
1303 sc->sc_txeoq = false;
1304 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
1305 cpsw_txdesc_paddr(sc, sc->sc_txhead));
1306 }
1307 }
1308
1309 KERNHIST_LOG(cpswhist, "after txnext %x txhead %x txrun %x\n",
1310 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1311 KERNHIST_LOG(cpswhist, "CP %x HDP %x\n",
1312 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
1313 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), 0, 0);
1314
1315 if (handled && sc->sc_txnext == sc->sc_txhead)
1316 ifp->if_timer = 0;
1317
1318 if (handled)
1319 if_schedule_deferred_start(ifp);
1320
1321 return handled;
1322 }
1323
1324 static int
1325 cpsw_miscintr(void *arg)
1326 {
1327 struct cpsw_softc * const sc = arg;
1328 uint32_t miscstat;
1329 uint32_t dmastat;
1330 uint32_t stat;
1331
1332 miscstat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
1333 device_printf(sc->sc_dev, "%s %x FIRE\n", __func__, miscstat);
1334
1335 #define CPSW_MISC_HOST_PEND __BIT32(2)
1336 #define CPSW_MISC_STAT_PEND __BIT32(3)
1337
1338 if (ISSET(miscstat, CPSW_MISC_HOST_PEND)) {
1339 /* Host Error */
1340 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1341 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1342
1343 printf("rxhead %02x\n", sc->sc_rxhead);
1344
1345 stat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
1346 printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
1347 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0));
1348 printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
1349 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1350 printf("CPSW_CPDMA_TX0_CP %x\n", stat);
1351 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(0));
1352 printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
1353 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0));
1354 printf("CPSW_CPDMA_RX0_CP %x\n", stat);
1355
1356 //Debugger();
1357
1358 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
1359 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1360 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1361 }
1362
1363 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1364
1365 return 1;
1366 }
1367
1368 /*
1369 *
1370 * ALE support routines.
1371 *
1372 */
1373
1374 static void
1375 cpsw_ale_entry_init(uint32_t *ale_entry)
1376 {
1377 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1378 }
1379
1380 static void
1381 cpsw_ale_entry_set_mac(uint32_t *ale_entry, const uint8_t *mac)
1382 {
1383 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1384 ale_entry[1] = mac[0] << 8 | mac[1];
1385 }
1386
1387 static void
1388 cpsw_ale_entry_set_bcast_mac(uint32_t *ale_entry)
1389 {
1390 ale_entry[0] = 0xffffffff;
1391 ale_entry[1] = 0x0000ffff;
1392 }
1393
1394 static void
1395 cpsw_ale_entry_set(uint32_t *ale_entry, ale_entry_field_t field, uint32_t val)
1396 {
1397 /* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1398 switch (field) {
1399 case ALE_ENTRY_TYPE:
1400 /* [61:60] */
1401 ale_entry[1] |= (val & 0x3) << 28;
1402 break;
1403 case ALE_MCAST_FWD_STATE:
1404 /* [63:62] */
1405 ale_entry[1] |= (val & 0x3) << 30;
1406 break;
1407 case ALE_PORT_MASK:
1408 /* [68:66] */
1409 ale_entry[2] |= (val & 0x7) << 2;
1410 break;
1411 case ALE_PORT_NUMBER:
1412 /* [67:66] */
1413 ale_entry[2] |= (val & 0x3) << 2;
1414 break;
1415 default:
1416 panic("Invalid ALE entry field: %d\n", field);
1417 }
1418
1419 return;
1420 }
1421
1422 static bool
1423 cpsw_ale_entry_mac_match(const uint32_t *ale_entry, const uint8_t *mac)
1424 {
1425 return (((ale_entry[1] >> 8) & 0xff) == mac[0]) &&
1426 (((ale_entry[1] >> 0) & 0xff) == mac[1]) &&
1427 (((ale_entry[0] >>24) & 0xff) == mac[2]) &&
1428 (((ale_entry[0] >>16) & 0xff) == mac[3]) &&
1429 (((ale_entry[0] >> 8) & 0xff) == mac[4]) &&
1430 (((ale_entry[0] >> 0) & 0xff) == mac[5]);
1431 }
1432
1433 static void
1434 cpsw_ale_set_outgoing_mac(struct cpsw_softc *sc, int port, const uint8_t *mac)
1435 {
1436 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(port),
1437 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
1438 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(port),
1439 mac[5] << 8 | mac[4]);
1440 }
1441
1442 static void
1443 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1444 {
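	/* Writing the index to TBLCTL (write bit clear) latches that entry
	 * into the TBLW0..TBLW2 word registers for reading. */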
1445 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
1446 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
1447 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
1448 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
1449 }
1450
1451 static void
1452 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx,
1453 const uint32_t *ale_entry)
1454 {
1455 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
1456 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
1457 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
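	/* Setting bit 31 of TBLCTL commits TBLW0..TBLW2 to entry "idx". */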
1458 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
1459 }
1460
1461 static int
1462 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
1463 {
1464 int i;
1465 uint32_t ale_entry[3];
1466
1467 /* First two entries are link address and broadcast. */
1468 for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
1469 cpsw_ale_read_entry(sc, i, ale_entry);
1470 if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
1471 ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */
1472 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1473 cpsw_ale_write_entry(sc, i, ale_entry);
1474 }
1475 }
1476 return CPSW_MAX_ALE_ENTRIES;
1477 }
1478
1479 static int
1480 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmask, uint8_t *mac)
1481 {
1482 int free_index = -1, matching_index = -1, i;
1483 uint32_t ale_entry[3];
1484
1485 /* Find a matching entry or a free entry. */
1486 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1487 cpsw_ale_read_entry(sc, i, ale_entry);
1488
1489 /* Entry Type[61:60] is 0 for free entry */
1490 if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
1491 free_index = i;
1492 }
1493
1494 if (cpsw_ale_entry_mac_match(ale_entry, mac)) {
1495 matching_index = i;
1496 break;
1497 }
1498 }
1499
1500 if (matching_index < 0) {
1501 if (free_index < 0)
1502 return ENOMEM;
1503 i = free_index;
1504 }
1505
1506 cpsw_ale_entry_init(ale_entry);
1507
1508 cpsw_ale_entry_set_mac(ale_entry, mac);
1509 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1510 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1511 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, portmask);
1512
1513 cpsw_ale_write_entry(sc, i, ale_entry);
1514
1515 return 0;
1516 }
1517
1518 static int
1519 cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
1520 {
1521 uint8_t *mac = sc->sc_enaddr;
1522 uint32_t ale_entry[3];
1523 int i;
1524 struct ethercom * const ec = &sc->sc_ec;
1525 struct ether_multi *ifma;
1526
1527 cpsw_ale_entry_init(ale_entry);
1528 /* Route incoming packets for our MAC address to Port 0 (host). */
1529 /* For simplicity, keep this entry at table index 0 in the ALE. */
1530 cpsw_ale_entry_set_mac(ale_entry, mac);
1531 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1532 cpsw_ale_entry_set(ale_entry, ALE_PORT_NUMBER, 0);
1533 cpsw_ale_write_entry(sc, 0, ale_entry);
1534
1535 /* Set outgoing MAC Address for Ports 1 and 2. */
1536 for (i = CPSW_CPPI_PORTS; i < (CPSW_ETH_PORTS + CPSW_CPPI_PORTS); ++i)
1537 cpsw_ale_set_outgoing_mac(sc, i, mac);
1538
1539 /* Keep the broadcast address at table entry 1. */
1540 cpsw_ale_entry_init(ale_entry);
1541 cpsw_ale_entry_set_bcast_mac(ale_entry);
1542 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1543 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1544 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, ALE_PORT_MASK_ALL);
1545 cpsw_ale_write_entry(sc, 1, ale_entry);
1546
1547 /* SIOCDELMULTI doesn't specify the particular address
1548 being removed, so we have to remove all and rebuild. */
1549 if (purge)
1550 cpsw_ale_remove_all_mc_entries(sc);
1551
1552 /* Set other multicast addrs desired. */
1553 ETHER_LOCK(ec);
1554 LIST_FOREACH(ifma, &ec->ec_multiaddrs, enm_list) {
1555 cpsw_ale_mc_entry_set(sc, ALE_PORT_MASK_ALL, ifma->enm_addrlo);
1556 }
1557 ETHER_UNLOCK(ec);
1558
1559 return 0;
1560 }
1561