/*	$NetBSD: if_cpsw.c,v 1.2.2.2 2020/04/08 14:07:31 martin Exp $	*/

/*
 * Copyright (c) 2013 Jonathan A. Kollasch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_cpsw.c,v 1.2.2.2 2020/04/08 14:07:31 martin Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#if 0
#include <arch/arm/omap/omap2_obiovar.h>
#else
#include <dev/fdt/fdtvar.h>
#endif
#include <arch/arm/omap/if_cpswreg.h>
#include <arch/arm/omap/sitara_cmreg.h>
#include <arch/arm/omap/sitara_cm.h>

#define CPSW_TXFRAGS	16

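/*
 * The switch's 8 KB of on-chip CPPI descriptor RAM is split evenly below:
 * the first half holds the TX buffer descriptors and the second half the
 * RX buffer descriptors, giving CPSW_NTXDESCS/CPSW_NRXDESCS entries each.
 */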
#define CPSW_CPPI_RAM_SIZE (0x2000)
#define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
#define CPSW_CPPI_RAM_RXDESCS_SIZE \
    (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
#define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
#define CPSW_CPPI_RAM_RXDESCS_BASE \
    (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)

#define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
#define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))

CTASSERT(powerof2(CPSW_NTXDESCS));
CTASSERT(powerof2(CPSW_NRXDESCS));

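/*
 * Frames shorter than CPSW_PAD_LEN (the minimum Ethernet frame size less
 * the CRC, which the MAC appends) are padded out in cpsw_start() with a
 * shared zero-filled buffer (sc_txpad).
 */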
#define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

#define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
#define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)

#define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
#define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)

struct cpsw_ring_data {
	bus_dmamap_t		tx_dm[CPSW_NTXDESCS];
	struct mbuf		*tx_mb[CPSW_NTXDESCS];
	bus_dmamap_t		rx_dm[CPSW_NRXDESCS];
	struct mbuf		*rx_mb[CPSW_NRXDESCS];
};

struct cpsw_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	bus_size_t		sc_bss;
	bus_dma_tag_t		sc_bdt;
	bus_space_handle_t	sc_bsh_txdescs;
	bus_space_handle_t	sc_bsh_rxdescs;
	bus_addr_t		sc_txdescs_pa;
	bus_addr_t		sc_rxdescs_pa;
	struct ethercom		sc_ec;
	struct mii_data		sc_mii;
	bool			sc_phy_has_1000t;
	bool			sc_attached;
	callout_t		sc_tick_ch;
	void			*sc_ih;
	struct cpsw_ring_data	*sc_rdp;
	volatile u_int		sc_txnext;
	volatile u_int		sc_txhead;
	volatile u_int		sc_rxhead;
	void			*sc_rxthih;
	void			*sc_rxih;
	void			*sc_txih;
	void			*sc_miscih;
	void			*sc_txpad;
	bus_dmamap_t		sc_txpad_dm;
#define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr
	uint8_t			sc_enaddr[ETHER_ADDR_LEN];
	volatile bool		sc_txrun;
	volatile bool		sc_rxrun;
	volatile bool		sc_txeoq;
	volatile bool		sc_rxeoq;
};

static int cpsw_match(device_t, cfdata_t, void *);
static void cpsw_attach(device_t, device_t, void *);
static int cpsw_detach(device_t, int);

static void cpsw_start(struct ifnet *);
static int cpsw_ioctl(struct ifnet *, u_long, void *);
static void cpsw_watchdog(struct ifnet *);
static int cpsw_init(struct ifnet *);
static void cpsw_stop(struct ifnet *, int);

static int cpsw_mii_readreg(device_t, int, int, uint16_t *);
static int cpsw_mii_writereg(device_t, int, int, uint16_t);
static void cpsw_mii_statchg(struct ifnet *);

static int cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
static void cpsw_tick(void *);

static int cpsw_rxthintr(void *);
static int cpsw_rxintr(void *);
static int cpsw_txintr(void *);
static int cpsw_miscintr(void *);

/* ALE support */
#define CPSW_MAX_ALE_ENTRIES	1024

static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);

CFATTACH_DECL_NEW(cpsw, sizeof(struct cpsw_softc),
    cpsw_match, cpsw_attach, cpsw_detach, NULL);

#include <sys/kernhist.h>
KERNHIST_DEFINE(cpswhist);

#define CPSWHIST_CALLARGS(A,B,C,D) do {					\
	    KERNHIST_CALLARGS(cpswhist, "%jx %jx %jx %jx",		\
		(uintptr_t)(A), (uintptr_t)(B), (uintptr_t)(C), (uintptr_t)(D));\
	} while (0)

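/*
 * Ring indices wrap by masking with (ring size - 1); this is why the
 * descriptor counts are asserted to be powers of two above.
 */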
static inline u_int
cpsw_txdesc_adjust(u_int x, int y)
{
	return (((x) + y) & (CPSW_NTXDESCS - 1));
}

static inline u_int
cpsw_rxdesc_adjust(u_int x, int y)
{
	return (((x) + y) & (CPSW_NRXDESCS - 1));
}

static inline uint32_t
cpsw_read_4(struct cpsw_softc * const sc, bus_size_t const offset)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, offset);
}

static inline void
cpsw_write_4(struct cpsw_softc * const sc, bus_size_t const offset,
    uint32_t const value)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, offset, value);
}

static inline void
cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, i, n, 0);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
}

static inline void
cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, i, n, 0);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
}

static inline void
cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	uint32_t * const dp = bdp->word;
	const bus_size_t c = __arraycount(bdp->word);

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, i, bdp, 0);

	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
	KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
	    dp[0], dp[1], dp[2], dp[3]);
}

static inline void
cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	uint32_t * const dp = bdp->word;
	const bus_size_t c = __arraycount(bdp->word);

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, i, bdp, 0);
	KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
	    dp[0], dp[1], dp[2], dp[3]);

	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
}

static inline void
cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	uint32_t * const dp = bdp->word;
	const bus_size_t c = __arraycount(bdp->word);

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, i, bdp, 0);

	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);

	KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
	    dp[0], dp[1], dp[2], dp[3]);
}

static inline void
cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	uint32_t * const dp = bdp->word;
	const bus_size_t c = __arraycount(bdp->word);

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, i, bdp, 0);
	KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
	    dp[0], dp[1], dp[2], dp[3]);

	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
}

static inline bus_addr_t
cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
{
	KASSERT(x < CPSW_NTXDESCS);
	return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
}

static inline bus_addr_t
cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
{
	KASSERT(x < CPSW_NRXDESCS);
	return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
}


static int
cpsw_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	static const char * const compatible[] = {
		"ti,am335x-cpsw",
		"ti,cpsw",
		NULL
	};

	return of_match_compatible(faa->faa_phandle, compatible);
}

static bool
cpsw_phy_has_1000t(struct cpsw_softc * const sc)
{
	struct ifmedia_entry *ifm;

	TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T)
			return true;
	}
	return false;
}

static int
cpsw_detach(device_t self, int flags)
{
	struct cpsw_softc * const sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	u_int i;

	/* Succeed now if there's no work to do. */
	if (!sc->sc_attached)
		return 0;

	sc->sc_attached = false;

	/* Stop the interface. Callouts are stopped in it. */
	cpsw_stop(ifp, 1);

	/* Destroy our callout. */
	callout_destroy(&sc->sc_tick_ch);

	/* Let go of the interrupts */
	intr_disestablish(sc->sc_rxthih);
	intr_disestablish(sc->sc_rxih);
	intr_disestablish(sc->sc_txih);
	intr_disestablish(sc->sc_miscih);

	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Delete all media. */
	ifmedia_fini(&sc->sc_mii.mii_media);

	/* Free the packet padding buffer */
	kmem_free(sc->sc_txpad, ETHER_MIN_LEN);
	bus_dmamap_destroy(sc->sc_bdt, sc->sc_txpad_dm);

	/* Destroy all the descriptors */
	for (i = 0; i < CPSW_NTXDESCS; i++)
		bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->tx_dm[i]);
	for (i = 0; i < CPSW_NRXDESCS; i++)
		bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->rx_dm[i]);
	kmem_free(sc->sc_rdp, sizeof(*sc->sc_rdp));

	/* Unmap */
	bus_space_unmap(sc->sc_bst, sc->sc_bsh, sc->sc_bss);


	return 0;
}

static void
cpsw_attach(device_t parent, device_t self, void *aux)
{
	struct fdt_attach_args * const faa = aux;
	struct cpsw_softc * const sc = device_private(self);
	prop_dictionary_t dict = device_properties(self);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	const int phandle = faa->faa_phandle;
	bus_addr_t addr;
	bus_size_t size;
	int error;
	u_int i;

	KERNHIST_INIT(cpswhist, 4096);

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	sc->sc_dev = self;

	aprint_normal(": TI Layer 2 3-Port Switch\n");
	aprint_naive("\n");

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, cpsw_tick, sc);

	prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
	if (eaprop == NULL) {
#if 0
		/* grab mac_id0 from AM335x control module */
		uint32_t reg_lo, reg_hi;

		if (sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO, &reg_lo) == 0 &&
		    sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI, &reg_hi) == 0) {
			sc->sc_enaddr[0] = (reg_hi >> 0) & 0xff;
			sc->sc_enaddr[1] = (reg_hi >> 8) & 0xff;
			sc->sc_enaddr[2] = (reg_hi >> 16) & 0xff;
			sc->sc_enaddr[3] = (reg_hi >> 24) & 0xff;
			sc->sc_enaddr[4] = (reg_lo >> 0) & 0xff;
			sc->sc_enaddr[5] = (reg_lo >> 8) & 0xff;
		} else
#endif
		{
			aprint_error_dev(sc->sc_dev,
			    "using fake station address\n");
			/* 'N' happens to have the Local bit set */
#if 0
			sc->sc_enaddr[0] = 'N';
			sc->sc_enaddr[1] = 'e';
			sc->sc_enaddr[2] = 't';
			sc->sc_enaddr[3] = 'B';
			sc->sc_enaddr[4] = 'S';
			sc->sc_enaddr[5] = 'D';
#else
			/* XXX Glor */
			sc->sc_enaddr[0] = 0xd4;
			sc->sc_enaddr[1] = 0x94;
			sc->sc_enaddr[2] = 0xa1;
			sc->sc_enaddr[3] = 0x97;
			sc->sc_enaddr[4] = 0x03;
			sc->sc_enaddr[5] = 0x94;
#endif
		}
	} else {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
		    ETHER_ADDR_LEN);
	}

#if 0
	sc->sc_rxthih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_RXTH,
	    IPL_VM, IST_LEVEL, cpsw_rxthintr, sc);
	sc->sc_rxih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_RX,
	    IPL_VM, IST_LEVEL, cpsw_rxintr, sc);
	sc->sc_txih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_TX,
	    IPL_VM, IST_LEVEL, cpsw_txintr, sc);
	sc->sc_miscih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_MISC,
	    IPL_VM, IST_LEVEL, cpsw_miscintr, sc);
#else
#define FDT_INTR_FLAGS 0
	sc->sc_rxthih = fdtbus_intr_establish(phandle, CPSW_INTROFF_RXTH,
	    IPL_VM, FDT_INTR_FLAGS, cpsw_rxthintr, sc);
	sc->sc_rxih = fdtbus_intr_establish(phandle, CPSW_INTROFF_RX,
	    IPL_VM, FDT_INTR_FLAGS, cpsw_rxintr, sc);
	sc->sc_txih = fdtbus_intr_establish(phandle, CPSW_INTROFF_TX,
	    IPL_VM, FDT_INTR_FLAGS, cpsw_txintr, sc);
	sc->sc_miscih = fdtbus_intr_establish(phandle, CPSW_INTROFF_MISC,
	    IPL_VM, FDT_INTR_FLAGS, cpsw_miscintr, sc);
#endif

	sc->sc_bst = faa->faa_bst;
	sc->sc_bss = size;
	sc->sc_bdt = faa->faa_dmat;

	error = bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't map registers: %d\n", error);
		return;
	}

	sc->sc_txdescs_pa = addr + CPSW_CPPI_RAM_TXDESCS_BASE;
	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
	    CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
	    &sc->sc_bsh_txdescs);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't subregion tx ring SRAM: %d\n", error);
		return;
	}
	aprint_debug_dev(sc->sc_dev, "txdescs at %p\n",
	    (void *)sc->sc_bsh_txdescs);

	sc->sc_rxdescs_pa = addr + CPSW_CPPI_RAM_RXDESCS_BASE;
	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
	    CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
	    &sc->sc_bsh_rxdescs);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't subregion rx ring SRAM: %d\n", error);
		return;
	}
	aprint_debug_dev(sc->sc_dev, "rxdescs at %p\n",
	    (void *)sc->sc_bsh_rxdescs);

	sc->sc_rdp = kmem_alloc(sizeof(*sc->sc_rdp), KM_SLEEP);

	for (i = 0; i < CPSW_NTXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
		    CPSW_TXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_rdp->tx_dm[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map: %d\n", error);
		}
		sc->sc_rdp->tx_mb[i] = NULL;
	}

	for (i = 0; i < CPSW_NRXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map: %d\n", error);
		}
		sc->sc_rdp->rx_mb[i] = NULL;
	}

	sc->sc_txpad = kmem_zalloc(ETHER_MIN_LEN, KM_SLEEP);
	bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
	    BUS_DMA_WAITOK, &sc->sc_txpad_dm);
	bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
	    ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
	bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
	    BUS_DMASYNC_PREWRITE);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_capabilities = 0;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cpsw_start;
	ifp->if_ioctl = cpsw_ioctl;
	ifp->if_init = cpsw_init;
	ifp->if_stop = cpsw_stop;
	ifp->if_watchdog = cpsw_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	cpsw_stop(ifp, 0);

	mii->mii_ifp = ifp;
	mii->mii_readreg = cpsw_mii_readreg;
	mii->mii_writereg = cpsw_mii_writereg;
	mii->mii_statchg = cpsw_mii_statchg;

	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);

	/* Initialize MDIO */
	cpsw_write_4(sc, MDIOCONTROL,
	    MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, ALECTL_CLEAR_TABLE);

	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, 0, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		sc->sc_phy_has_1000t = false;
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		sc->sc_phy_has_1000t = cpsw_phy_has_1000t(sc);
		if (sc->sc_phy_has_1000t) {
#if 0
			aprint_normal_dev(sc->sc_dev, "1000baseT PHY found. "
			    "Setting RGMII Mode\n");
			/*
			 * Select the Interface RGMII Mode in the Control
			 * Module
			 */
			sitara_cm_reg_write_4(CPSW_GMII_SEL,
			    GMIISEL_GMII2_SEL(RGMII_MODE) |
			    GMIISEL_GMII1_SEL(RGMII_MODE));
#endif
		}

		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);

	/* The attach is successful. */
	sc->sc_attached = true;

	return;
}

static void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	uint32_t * const dw = bd.word;
	struct mbuf *m;
	bus_dmamap_t dm;
	u_int eopi __diagused = ~0;
	u_int seg;
	u_int txfree;
	int txstart = -1;
	int error;
	bool pad;
	u_int mlen;

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, 0, 0, 0);

	if (__predict_false((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING)) {
		return;
	}

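	/*
	 * Compute the number of free TX descriptors.  One slot is always
	 * kept unused so a completely full ring can be told apart from a
	 * completely empty one.
	 */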
	if (sc->sc_txnext >= sc->sc_txhead)
		txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
	else
		txfree = sc->sc_txhead - sc->sc_txnext - 1;

	KERNHIST_LOG(cpswhist, "start txf %x txh %x txn %x txr %x\n",
	    txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);

	while (txfree > 0) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		dm = rdp->tx_dm[sc->sc_txnext];

		error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			device_printf(sc->sc_dev, "won't fit\n");
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			if_statinc(ifp, if_oerrors);
			continue;
		} else if (error != 0) {
			device_printf(sc->sc_dev, "error\n");
			break;
		}

		if (dm->dm_nsegs + 1 >= txfree) {
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_bdt, dm);
			break;
		}

		mlen = m_length(m);
		pad = mlen < CPSW_PAD_LEN;

		KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
		rdp->tx_mb[sc->sc_txnext] = m;
		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (txstart == -1)
			txstart = sc->sc_txnext;
		eopi = sc->sc_txnext;
		for (seg = 0; seg < dm->dm_nsegs; seg++) {
			dw[0] = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			dw[1] = dm->dm_segs[seg].ds_addr;
			dw[2] = dm->dm_segs[seg].ds_len;
			dw[3] = 0;

			if (seg == 0)
				dw[3] |= CPDMA_BD_SOP | CPDMA_BD_OWNER |
				    MAX(mlen, CPSW_PAD_LEN);

			if ((seg == dm->dm_nsegs - 1) && !pad)
				dw[3] |= CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
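		/*
		 * Short frames get one extra descriptor that points at the
		 * shared zero-filled pad buffer, bringing the frame up to
		 * the minimum Ethernet length.
		 */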
		if (pad) {
			dw[0] = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			dw[1] = sc->sc_txpad_pa;
			dw[2] = CPSW_PAD_LEN - mlen;
			dw[3] = CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}

		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	if (txstart >= 0) {
		ifp->if_timer = 5;
		/* terminate the new chain */
		KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
		cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);
		KERNHIST_LOG(cpswhist, "CP %x HDP %x s %x e %x\n",
		    cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
		    cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), txstart, eopi);
		/* link the new chain on */
		cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
		    cpsw_txdesc_paddr(sc, txstart));
		if (sc->sc_txeoq) {
			/* kick the dma engine */
			sc->sc_txeoq = false;
			cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, txstart));
		}
	}
	KERNHIST_LOG(cpswhist, "end txf %x txh %x txn %x txr %x\n",
	    txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
}

static int
cpsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	const int s = splnet();
	int error = 0;

	switch (cmd) {
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

static void
cpsw_watchdog(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	device_printf(sc->sc_dev, "device timeout\n");

	if_statinc(ifp, if_oerrors);
	cpsw_init(ifp);
	cpsw_start(ifp);
}

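/*
 * Wait for the busy bit (bit 31) of the given MDIO user access register
 * to clear; give up after roughly 1ms.
 */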
static int
cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
{
	u_int tries;

	for (tries = 0; tries < 1000; tries++) {
		if ((cpsw_read_4(sc, reg) & __BIT(31)) == 0)
			return 0;
		delay(1);
	}
	return ETIMEDOUT;
}

static int
cpsw_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct cpsw_softc * const sc = device_private(dev);
	uint32_t v;

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		return -1;

	cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) |
	    ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		return -1;

	v = cpsw_read_4(sc, MDIOUSERACCESS0);
	if (v & __BIT(29)) {
		*val = v & 0xffff;
		return 0;
	}

	return -1;
}

static int
cpsw_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct cpsw_softc * const sc = device_private(dev);
	uint32_t v;

	KASSERT((val & 0xffff0000UL) == 0);

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		goto out;

	cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) | (1 << 30) |
	    ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		goto out;

	v = cpsw_read_4(sc, MDIOUSERACCESS0);
	if ((v & __BIT(29)) == 0) {
out:
		device_printf(sc->sc_dev, "%s error\n", __func__);
		return -1;
	}

	return 0;
}

static void
cpsw_mii_statchg(struct ifnet *ifp)
{
	return;
}

static int
cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
{
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	const u_int h = RXDESC_PREV(i);
	struct cpsw_cpdma_bd bd;
	uint32_t * const dw = bd.word;
	struct mbuf *m;
	int error = ENOBUFS;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		goto reuse;
	}

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		goto reuse;
	}

	/* We have a new buffer, prepare it for the ring. */

	if (rdp->rx_mb[i] != NULL)
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);

	m->m_len = m->m_pkthdr.len = MCLBYTES;

	rdp->rx_mb[i] = m;

	error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d: %d\n",
		    i, error);
	}

	bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
	    0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	error = 0;

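	/*
	 * If mbuf or cluster allocation failed above, the old buffer and its
	 * DMA mapping are left in place and simply handed back to the
	 * hardware, so the RX ring never has an empty slot; ENOBUFS tells
	 * the caller to drop the packet that buffer held.
	 */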
reuse:
	/* (re-)setup the descriptor */
	dw[0] = 0;
	dw[1] = rdp->rx_dm[i]->dm_segs[0].ds_addr;
	dw[2] = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
	dw[3] = CPDMA_BD_OWNER;

	cpsw_set_rxdesc(sc, i, &bd);
	/* and link onto ring */
	cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));

	return error;
}

static int
cpsw_init(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	int i;

	cpsw_stop(ifp, 0);

	sc->sc_txnext = 0;
	sc->sc_txhead = 0;

	/* Reset wrapper */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Reset SS */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Clear table and enable ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL,
	    ALECTL_ENABLE_ALE | ALECTL_CLEAR_TABLE);

	/* Reset and init Sliver port 1 and 2 */
	for (i = 0; i < CPSW_ETH_PORTS; i++) {
		uint32_t macctl;

		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
		/* Set Slave Mapping */
		cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
		cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
		cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
		/* Set MAC Address */
		cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i+1),
		    sc->sc_enaddr[0] | (sc->sc_enaddr[1] << 8) |
		    (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[3] << 24));
		cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i+1),
		    sc->sc_enaddr[4] | (sc->sc_enaddr[5] << 8));

		/* Set MACCONTROL for ports 0,1 */
		macctl = SLMACCTL_FULLDUPLEX | SLMACCTL_GMII_EN |
		    SLMACCTL_IFCTL_A;
		if (sc->sc_phy_has_1000t)
			macctl |= SLMACCTL_GIG;
		cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), macctl);

		/* Set ALE port to forwarding(3) */
		cpsw_write_4(sc, CPSW_ALE_PORTCTL(i+1), 3);
	}

	/* Set Host Port Mapping */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Set ALE port to forwarding(3) */
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);

	/* Initialize addrs */
	cpsw_ale_update_addresses(sc, 1);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
	    CPSW_CPPI_RAM_TXDESCS_SIZE/4);

	sc->sc_txhead = 0;
	sc->sc_txnext = 0;

	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
	    CPSW_CPPI_RAM_RXDESCS_SIZE/4);
	/* Initialize RX Buffer Descriptors */
	cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
	for (i = 0; i < CPSW_NRXDESCS; i++) {
		cpsw_new_rxbuf(sc, i);
	}
	sc->sc_rxhead = 0;

	/* turn off flow control */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* align layer 3 header to 32-bit */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);

	/* Clear all interrupt Masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable TX and RX interrupt receive for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 1);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 1);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 2);

	/* Enable interrupts for TX and RX Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Ack stalled irqs */
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL,
	    MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));

	mii_mediachg(mii);

	/* Write channel 0 RX HDP */
	cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
	sc->sc_rxrun = true;
	sc->sc_rxeoq = false;

	sc->sc_txrun = true;
	sc->sc_txeoq = true;
	callout_schedule(&sc->sc_tick_ch, hz);
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
cpsw_stop(struct ifnet *ifp, int disable)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	u_int i;

	aprint_debug_dev(sc->sc_dev, "%s: ifp %p disable %d\n", __func__,
	    ifp, disable);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	callout_stop(&sc->sc_tick_ch);
	mii_down(&sc->sc_mii);

	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0x0);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0x0);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x0);

	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	i = 0;
	while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
		delay(10);
		if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
			sc->sc_txrun = false;
		if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
			sc->sc_rxrun = false;
		i++;
	}
	//printf("%s toredown complete in %u\n", __func__, i);

	/* Reset wrapper */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Reset SS */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	for (i = 0; i < CPSW_ETH_PORTS; i++) {
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset CPDMA */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Release any queued transmit buffers. */
	for (i = 0; i < CPSW_NTXDESCS; i++) {
		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
		m_freem(rdp->tx_mb[i]);
		rdp->tx_mb[i] = NULL;
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (!disable)
		return;

	for (i = 0; i < CPSW_NRXDESCS; i++) {
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
		m_freem(rdp->rx_mb[i]);
		rdp->rx_mb[i] = NULL;
	}
}

static void
cpsw_tick(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct mii_data * const mii = &sc->sc_mii;
	const int s = splnet();

	mii_tick(mii);

	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static int
cpsw_rxthintr(void *arg)
{
	struct cpsw_softc * const sc = arg;

	/* this won't deassert the interrupt though */
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);

	return 1;
}

static int
cpsw_rxintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	const uint32_t * const dw = bd.word;
	bus_dmamap_t dm;
	struct mbuf *m;
	u_int i;
	u_int len, off;

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, 0, 0, 0);

	for (;;) {
		KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);

		i = sc->sc_rxhead;
		KERNHIST_LOG(cpswhist, "rxhead %x CP %x\n", i,
		    cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0)), 0, 0);
		dm = rdp->rx_dm[i];
		m = rdp->rx_mb[i];

		KASSERT(dm != NULL);
		KASSERT(m != NULL);

		cpsw_get_rxdesc(sc, i, &bd);

		if (ISSET(dw[3], CPDMA_BD_OWNER))
			break;

		if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
			sc->sc_rxrun = false;
			return 1;
		}

		if ((dw[3] & (CPDMA_BD_SOP | CPDMA_BD_EOP)) !=
		    (CPDMA_BD_SOP | CPDMA_BD_EOP)) {
			//Debugger();
		}

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (cpsw_new_rxbuf(sc, i) != 0) {
			/* drop current packet, reuse buffer for new */
			if_statinc(ifp, if_ierrors);
			goto next;
		}

		off = __SHIFTOUT(dw[2], (uint32_t)__BITS(26, 16));
		len = __SHIFTOUT(dw[3], (uint32_t)__BITS(10, 0));

		if (ISSET(dw[3], CPDMA_BD_PASSCRC))
			len -= ETHER_CRC_LEN;

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;
		m->m_data += off;

		if_percpuq_enqueue(ifp->if_percpuq, m);

next:
		sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
		if (ISSET(dw[3], CPDMA_BD_EOQ)) {
			sc->sc_rxeoq = true;
			break;
		} else {
			sc->sc_rxeoq = false;
		}
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(0),
		    cpsw_rxdesc_paddr(sc, i));
	}

	if (sc->sc_rxeoq) {
		device_printf(sc->sc_dev, "rxeoq\n");
		//Debugger();
	}

	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);

	return 1;
}

static int
cpsw_txintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	const uint32_t * const dw = bd.word;
	bool handled = false;
	uint32_t tx0_cp;
	u_int cpi;

	KERNHIST_FUNC(__func__);
	CPSWHIST_CALLARGS(sc, 0, 0, 0);

	KASSERT(sc->sc_txrun);

	KERNHIST_LOG(cpswhist, "before txnext %x txhead %x txrun %x\n",
	    sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);

	tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));

	if (tx0_cp == 0xfffffffc) {
		/* Teardown, ack it */
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0), 0);
		sc->sc_txrun = false;
		return 0;
	}

	for (;;) {
		tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
		cpi = (tx0_cp - sc->sc_txdescs_pa) /
		    sizeof(struct cpsw_cpdma_bd);
		KASSERT(sc->sc_txhead < CPSW_NTXDESCS);

		KERNHIST_LOG(cpswhist, "txnext %x txhead %x txrun %x cpi %x\n",
		    sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, cpi);

		cpsw_get_txdesc(sc, sc->sc_txhead, &bd);

		if (dw[2] == 0) {
			//Debugger();
		}

		if (ISSET(dw[3], CPDMA_BD_SOP) == 0)
			goto next;

		if (ISSET(dw[3], CPDMA_BD_OWNER)) {
			printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
			    sc->sc_txnext);
			break;
		}

		if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
			sc->sc_txrun = false;
			return 1;
		}

		bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
		    0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);

		m_freem(rdp->tx_mb[sc->sc_txhead]);
		rdp->tx_mb[sc->sc_txhead] = NULL;

		if_statinc(ifp, if_opackets);

		handled = true;

		ifp->if_flags &= ~IFF_OACTIVE;

next:
		if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
			sc->sc_txeoq = true;
		}
		if (sc->sc_txhead == cpi) {
			cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0),
			    cpsw_txdesc_paddr(sc, cpi));
			sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
			break;
		}
		sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
		if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
			sc->sc_txeoq = true;
			break;
		}
	}

	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);

	if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
		if (cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)) == 0) {
			sc->sc_txeoq = false;
			cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, sc->sc_txhead));
		}
	}

	KERNHIST_LOG(cpswhist, "after txnext %x txhead %x txrun %x\n",
	    sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
	KERNHIST_LOG(cpswhist, "CP %x HDP %x\n",
	    cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
	    cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), 0, 0);

	if (handled && sc->sc_txnext == sc->sc_txhead)
		ifp->if_timer = 0;

	if (handled)
		if_schedule_deferred_start(ifp);

	return handled;
}

static int
cpsw_miscintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	uint32_t miscstat;
	uint32_t dmastat;
	uint32_t stat;

	miscstat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
	device_printf(sc->sc_dev, "%s %x FIRE\n", __func__, miscstat);

#define CPSW_MISC_HOST_PEND __BIT32(2)
#define CPSW_MISC_STAT_PEND __BIT32(3)

	if (ISSET(miscstat, CPSW_MISC_HOST_PEND)) {
		/* Host Error */
		dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);

		printf("rxhead %02x\n", sc->sc_rxhead);

		stat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
		printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0));
		printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
		printf("CPSW_CPDMA_TX0_CP %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(0));
		printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0));
		printf("CPSW_CPDMA_RX0_CP %x\n", stat);

		//Debugger();

		cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
		dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
	}

	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);

	return 1;
}

/*
 *
 * ALE support routines.
 *
 */

static void
cpsw_ale_entry_init(uint32_t *ale_entry)
{
	ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
}

static void
cpsw_ale_entry_set_mac(uint32_t *ale_entry, const uint8_t *mac)
{
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = mac[0] << 8 | mac[1];
}

static void
cpsw_ale_entry_set_bcast_mac(uint32_t *ale_entry)
{
	ale_entry[0] = 0xffffffff;
	ale_entry[1] = 0x0000ffff;
}

static void
cpsw_ale_entry_set(uint32_t *ale_entry, ale_entry_field_t field, uint32_t val)
{
	/* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
	switch (field) {
	case ALE_ENTRY_TYPE:
		/* [61:60] */
		ale_entry[1] |= (val & 0x3) << 28;
		break;
	case ALE_MCAST_FWD_STATE:
		/* [63:62] */
		ale_entry[1] |= (val & 0x3) << 30;
		break;
	case ALE_PORT_MASK:
		/* [68:66] */
		ale_entry[2] |= (val & 0x7) << 2;
		break;
	case ALE_PORT_NUMBER:
		/* [67:66] */
		ale_entry[2] |= (val & 0x3) << 2;
		break;
	default:
		panic("Invalid ALE entry field: %d\n", field);
	}

	return;
}

static bool
cpsw_ale_entry_mac_match(const uint32_t *ale_entry, const uint8_t *mac)
{
	return (((ale_entry[1] >> 8) & 0xff) == mac[0]) &&
	    (((ale_entry[1] >> 0) & 0xff) == mac[1]) &&
	    (((ale_entry[0] >> 24) & 0xff) == mac[2]) &&
	    (((ale_entry[0] >> 16) & 0xff) == mac[3]) &&
	    (((ale_entry[0] >> 8) & 0xff) == mac[4]) &&
	    (((ale_entry[0] >> 0) & 0xff) == mac[5]);
}

static void
cpsw_ale_set_outgoing_mac(struct cpsw_softc *sc, int port, const uint8_t *mac)
{
	cpsw_write_4(sc, CPSW_PORT_P_SA_HI(port),
	    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
	cpsw_write_4(sc, CPSW_PORT_P_SA_LO(port),
	    mac[5] << 8 | mac[4]);
}

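/*
 * ALE table entries are accessed indirectly: the entry index goes into
 * ALE_TBLCTL and the three 32-bit entry words are transferred through
 * ALE_TBLW0..ALE_TBLW2.  Setting bit 31 of ALE_TBLCTL commits a write;
 * leaving it clear performs a read.
 */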
static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
}

static void
cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx,
    const uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
}

static int
cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	/* First two entries are link address and broadcast. */
	for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (((ale_entry[1] >> 28) & 3) == 1 &&	/* Address entry */
		    ((ale_entry[1] >> 8) & 1) == 1) {	/* MCast link addr */
			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
			cpsw_ale_write_entry(sc, i, ale_entry);
		}
	}
	return CPSW_MAX_ALE_ENTRIES;
}

static int
cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmask, uint8_t *mac)
{
	int free_index = -1, matching_index = -1, i;
	uint32_t ale_entry[3];

	/* Find a matching entry or a free entry. */
	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);

		/* Entry Type[61:60] is 0 for free entry */
		if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
			free_index = i;
		}

		if (cpsw_ale_entry_mac_match(ale_entry, mac)) {
			matching_index = i;
			break;
		}
	}

	if (matching_index < 0) {
		if (free_index < 0)
			return ENOMEM;
		i = free_index;
	}

	cpsw_ale_entry_init(ale_entry);

	cpsw_ale_entry_set_mac(ale_entry, mac);
	cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
	cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
	cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, portmask);

	cpsw_ale_write_entry(sc, i, ale_entry);

	return 0;
}

static int
cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
{
	uint8_t *mac = sc->sc_enaddr;
	uint32_t ale_entry[3];
	int i;
	struct ethercom * const ec = &sc->sc_ec;
	struct ether_multi *ifma;

	cpsw_ale_entry_init(ale_entry);
	/* Route incoming packets for our MAC address to Port 0 (host). */
	/* For simplicity, keep this entry at table index 0 in the ALE. */
	cpsw_ale_entry_set_mac(ale_entry, mac);
	cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
	cpsw_ale_entry_set(ale_entry, ALE_PORT_NUMBER, 0);
	cpsw_ale_write_entry(sc, 0, ale_entry);

	/* Set outgoing MAC Address for Ports 1 and 2. */
	for (i = CPSW_CPPI_PORTS; i < (CPSW_ETH_PORTS + CPSW_CPPI_PORTS); ++i)
		cpsw_ale_set_outgoing_mac(sc, i, mac);

	/* Keep the broadcast address at table entry 1. */
	cpsw_ale_entry_init(ale_entry);
	cpsw_ale_entry_set_bcast_mac(ale_entry);
	cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
	cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
	cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, ALE_PORT_MASK_ALL);
	cpsw_ale_write_entry(sc, 1, ale_entry);

	/*
	 * SIOCDELMULTI doesn't specify the particular address
	 * being removed, so we have to remove all and rebuild.
	 */
	if (purge)
		cpsw_ale_remove_all_mc_entries(sc);

	/* Set other multicast addrs desired. */
	ETHER_LOCK(ec);
	LIST_FOREACH(ifma, &ec->ec_multiaddrs, enm_list) {
		cpsw_ale_mc_entry_set(sc, ALE_PORT_MASK_ALL, ifma->enm_addrlo);
	}
	ETHER_UNLOCK(ec);

	return 0;
}