1 /* $NetBSD: if_cpsw.c,v 1.2 2018/06/26 06:47:58 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*-
30 * Copyright (c) 2012 Damjan Marion <dmarion (at) Freebsd.org>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54
55 #include <sys/cdefs.h>
56 __KERNEL_RCSID(0, "$NetBSD: if_cpsw.c,v 1.2 2018/06/26 06:47:58 msaitoh Exp $");
57
58 #include <sys/param.h>
59 #include <sys/bus.h>
60 #include <sys/device.h>
61 #include <sys/ioctl.h>
62 #include <sys/intr.h>
63 #include <sys/kmem.h>
64 #include <sys/mutex.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67
68 #include <net/if.h>
69 #include <net/if_ether.h>
70 #include <net/if_media.h>
71 #include <net/bpf.h>
72
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75
76 #if 0
77 #include <arch/arm/omap/omap2_obiovar.h>
78 #else
79 #include <dev/fdt/fdtvar.h>
80 #endif
81 #include <arch/arm/omap/if_cpswreg.h>
82 #include <arch/arm/omap/sitara_cmreg.h>
83 #include <arch/arm/omap/sitara_cm.h>
84
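/*
 * Padding inserted ahead of a received frame so that the payload that
 * follows the 14-byte Ethernet header ends up 32-bit aligned; with the
 * definition below this works out to 2 bytes.
 */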
85 #define ETHER_ALIGN (roundup2(ETHER_HDR_LEN, sizeof(uint32_t)) - ETHER_HDR_LEN)
86
87 #define CPSW_TXFRAGS 16
88
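/*
 * The 8KB CPPI descriptor RAM is split evenly between TX and RX buffer
 * descriptors; assuming the four-word (16-byte) cpsw_cpdma_bd layout used
 * below, that yields 256 descriptors for each direction.
 */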
89 #define CPSW_CPPI_RAM_SIZE (0x2000)
90 #define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
91 #define CPSW_CPPI_RAM_RXDESCS_SIZE \
92 (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
93 #define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
94 #define CPSW_CPPI_RAM_RXDESCS_BASE \
95 (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)
96
97 #define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
98 #define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
99
100 CTASSERT(powerof2(CPSW_NTXDESCS));
101 CTASSERT(powerof2(CPSW_NRXDESCS));
102
103 #define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
104
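/*
 * Ring indices wrap by masking with the descriptor count, which is why the
 * counts are asserted to be powers of two above.
 */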
105 #define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
106 #define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)
107
108 #define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
109 #define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
110
111 struct cpsw_ring_data {
112 bus_dmamap_t tx_dm[CPSW_NTXDESCS];
113 struct mbuf *tx_mb[CPSW_NTXDESCS];
114 bus_dmamap_t rx_dm[CPSW_NRXDESCS];
115 struct mbuf *rx_mb[CPSW_NRXDESCS];
116 };
117
118 struct cpsw_softc {
119 device_t sc_dev;
120 bus_space_tag_t sc_bst;
121 bus_space_handle_t sc_bsh;
122 bus_size_t sc_bss;
123 bus_dma_tag_t sc_bdt;
124 bus_space_handle_t sc_bsh_txdescs;
125 bus_space_handle_t sc_bsh_rxdescs;
126 bus_addr_t sc_txdescs_pa;
127 bus_addr_t sc_rxdescs_pa;
128 struct ethercom sc_ec;
129 struct mii_data sc_mii;
130 bool sc_phy_has_1000t;
131 bool sc_attached;
132 callout_t sc_tick_ch;
133 void *sc_ih;
134 struct cpsw_ring_data *sc_rdp;
135 volatile u_int sc_txnext;
136 volatile u_int sc_txhead;
137 volatile u_int sc_rxhead;
138 void *sc_rxthih;
139 void *sc_rxih;
140 void *sc_txih;
141 void *sc_miscih;
142 void *sc_txpad;
143 bus_dmamap_t sc_txpad_dm;
144 #define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr
145 uint8_t sc_enaddr[ETHER_ADDR_LEN];
146 volatile bool sc_txrun;
147 volatile bool sc_rxrun;
148 volatile bool sc_txeoq;
149 volatile bool sc_rxeoq;
150 };
151
152 static int cpsw_match(device_t, cfdata_t, void *);
153 static void cpsw_attach(device_t, device_t, void *);
154 static int cpsw_detach(device_t, int);
155
156 static void cpsw_start(struct ifnet *);
157 static int cpsw_ioctl(struct ifnet *, u_long, void *);
158 static void cpsw_watchdog(struct ifnet *);
159 static int cpsw_init(struct ifnet *);
160 static void cpsw_stop(struct ifnet *, int);
161
162 static int cpsw_mii_readreg(device_t, int, int);
163 static void cpsw_mii_writereg(device_t, int, int, int);
164 static void cpsw_mii_statchg(struct ifnet *);
165
166 static int cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
167 static void cpsw_tick(void *);
168
169 static int cpsw_rxthintr(void *);
170 static int cpsw_rxintr(void *);
171 static int cpsw_txintr(void *);
172 static int cpsw_miscintr(void *);
173
174 /* ALE support */
175 #define CPSW_MAX_ALE_ENTRIES 1024
176
177 static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
178
179 CFATTACH_DECL_NEW(cpsw, sizeof(struct cpsw_softc),
180 cpsw_match, cpsw_attach, cpsw_detach, NULL);
181
182 #undef KERNHIST
183 #include <sys/kernhist.h>
184 KERNHIST_DEFINE(cpswhist);
185
186 #ifdef KERNHIST
187 #define KERNHIST_CALLED_5(NAME, i, j, k, l) \
188 do { \
189 _kernhist_call = atomic_inc_uint_nv(&_kernhist_cnt); \
190 KERNHIST_LOG(NAME, "called! %x %x %x %x", i, j, k, l); \
191 } while (/*CONSTCOND*/ 0)
192 #else
193 #define KERNHIST_CALLED_5(NAME, i, j, k, l)
194 #endif
195
196 static inline u_int
197 cpsw_txdesc_adjust(u_int x, int y)
198 {
199 return (((x) + y) & (CPSW_NTXDESCS - 1));
200 }
201
202 static inline u_int
203 cpsw_rxdesc_adjust(u_int x, int y)
204 {
205 return (((x) + y) & (CPSW_NRXDESCS - 1));
206 }
207
208 static inline uint32_t
209 cpsw_read_4(struct cpsw_softc * const sc, bus_size_t const offset)
210 {
211 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, offset);
212 }
213
214 static inline void
215 cpsw_write_4(struct cpsw_softc * const sc, bus_size_t const offset,
216 uint32_t const value)
217 {
218 bus_space_write_4(sc->sc_bst, sc->sc_bsh, offset, value);
219 }
220
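/*
 * Buffer descriptor accessors.  As used throughout this driver, each CPDMA
 * descriptor is four 32-bit words: word 0 is the next-descriptor pointer,
 * word 1 the buffer pointer, word 2 the buffer offset/length, and word 3
 * the flags and packet length.  The "next" helpers below therefore write
 * word 0 only.
 */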
221 static inline void
222 cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
223 {
224 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
225
226 KERNHIST_FUNC(__func__);
227 KERNHIST_CALLED_5(cpswhist, sc, i, n, 0);
228
229 bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
230 }
231
232 static inline void
233 cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
234 {
235 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
236
237 KERNHIST_FUNC(__func__);
238 KERNHIST_CALLED_5(cpswhist, sc, i, n, 0);
239
240 bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
241 }
242
243 static inline void
244 cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
245 struct cpsw_cpdma_bd * const bdp)
246 {
247 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
248 uint32_t * const dp = bdp->word;
249 const bus_size_t c = __arraycount(bdp->word);
250
251 KERNHIST_FUNC(__func__);
252 KERNHIST_CALLED_5(cpswhist, sc, i, bdp, 0);
253
254 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
255 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
256 dp[0], dp[1], dp[2], dp[3]);
257 }
258
259 static inline void
260 cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
261 struct cpsw_cpdma_bd * const bdp)
262 {
263 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
264 uint32_t * const dp = bdp->word;
265 const bus_size_t c = __arraycount(bdp->word);
266
267 KERNHIST_FUNC(__func__);
268 KERNHIST_CALLED_5(cpswhist, sc, i, bdp, 0);
269 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
270 dp[0], dp[1], dp[2], dp[3]);
271
272 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
273 }
274
275 static inline void
276 cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
277 struct cpsw_cpdma_bd * const bdp)
278 {
279 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
280 uint32_t * const dp = bdp->word;
281 const bus_size_t c = __arraycount(bdp->word);
282
283 KERNHIST_FUNC(__func__);
284 KERNHIST_CALLED_5(cpswhist, sc, i, bdp, 0);
285
286 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
287
288 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
289 dp[0], dp[1], dp[2], dp[3]);
290 }
291
292 static inline void
293 cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
294 struct cpsw_cpdma_bd * const bdp)
295 {
296 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
297 uint32_t * const dp = bdp->word;
298 const bus_size_t c = __arraycount(bdp->word);
299
300 KERNHIST_FUNC(__func__);
301 KERNHIST_CALLED_5(cpswhist, sc, i, bdp, 0);
302 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
303 dp[0], dp[1], dp[2], dp[3]);
304
305 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
306 }
307
308 static inline bus_addr_t
309 cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
310 {
311 KASSERT(x < CPSW_NTXDESCS);
312 return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
313 }
314
315 static inline bus_addr_t
316 cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
317 {
318 KASSERT(x < CPSW_NRXDESCS);
319 return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
320 }
321
322
323 static int
324 cpsw_match(device_t parent, cfdata_t cf, void *aux)
325 {
326 struct fdt_attach_args * const faa = aux;
327
328 static const char * const compatible[] = {
329 "ti,am335x-cpsw",
330 "ti,cpsw",
331 NULL
332 };
333
334 return of_match_compatible(faa->faa_phandle, compatible);
335 }
336
337 static bool
338 cpsw_phy_has_1000t(struct cpsw_softc * const sc)
339 {
340 struct ifmedia_entry *ifm;
341
342 TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) {
343 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T)
344 return true;
345 }
346 return false;
347 }
348
349 static int
350 cpsw_detach(device_t self, int flags)
351 {
352 struct cpsw_softc * const sc = device_private(self);
353 struct ifnet *ifp = &sc->sc_ec.ec_if;
354 u_int i;
355
356 /* Succeed now if there's no work to do. */
357 if (!sc->sc_attached)
358 return 0;
359
360 sc->sc_attached = false;
361
362 /* Stop the interface. Callouts are stopped in it. */
363 cpsw_stop(ifp, 1);
364
365 /* Destroy our callout. */
366 callout_destroy(&sc->sc_tick_ch);
367
368 /* Let go of the interrupts */
369 intr_disestablish(sc->sc_rxthih);
370 intr_disestablish(sc->sc_rxih);
371 intr_disestablish(sc->sc_txih);
372 intr_disestablish(sc->sc_miscih);
373
374 /* Delete all media. */
375 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
376
377 ether_ifdetach(ifp);
378 if_detach(ifp);
379
380 /* Free the packet padding buffer */
381 kmem_free(sc->sc_txpad, ETHER_MIN_LEN);
382 bus_dmamap_destroy(sc->sc_bdt, sc->sc_txpad_dm);
383
384 /* Destroy all the descriptors */
385 for (i = 0; i < CPSW_NTXDESCS; i++)
386 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->tx_dm[i]);
387 for (i = 0; i < CPSW_NRXDESCS; i++)
388 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->rx_dm[i]);
389 kmem_free(sc->sc_rdp, sizeof(*sc->sc_rdp));
390
391 /* Unmap */
392 bus_space_unmap(sc->sc_bst, sc->sc_bsh, sc->sc_bss);
393
394
395 return 0;
396 }
397
398 static void
399 cpsw_attach(device_t parent, device_t self, void *aux)
400 {
401 struct fdt_attach_args * const faa = aux;
402 struct cpsw_softc * const sc = device_private(self);
403 prop_dictionary_t dict = device_properties(self);
404 struct ethercom * const ec = &sc->sc_ec;
405 struct ifnet * const ifp = &ec->ec_if;
406 const int phandle = faa->faa_phandle;
407 bus_addr_t addr;
408 bus_size_t size;
409 int error;
410 u_int i;
411
412 KERNHIST_INIT(cpswhist, 4096);
413
414 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
415 aprint_error(": couldn't get registers\n");
416 return;
417 }
418
419 sc->sc_dev = self;
420
421 aprint_normal(": TI Layer 2 3-Port Switch\n");
422 aprint_naive("\n");
423
424 callout_init(&sc->sc_tick_ch, 0);
425 callout_setfunc(&sc->sc_tick_ch, cpsw_tick, sc);
426
427 prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
428 if (eaprop == NULL) {
429 #if 0
430 /* grab mac_id0 from AM335x control module */
431 uint32_t reg_lo, reg_hi;
432
433 if (sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO, &reg_lo) == 0 &&
434 sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI, &reg_hi) == 0) {
435 sc->sc_enaddr[0] = (reg_hi >> 0) & 0xff;
436 sc->sc_enaddr[1] = (reg_hi >> 8) & 0xff;
437 sc->sc_enaddr[2] = (reg_hi >> 16) & 0xff;
438 sc->sc_enaddr[3] = (reg_hi >> 24) & 0xff;
439 sc->sc_enaddr[4] = (reg_lo >> 0) & 0xff;
440 sc->sc_enaddr[5] = (reg_lo >> 8) & 0xff;
441 } else
442 #endif
443 {
444 aprint_error_dev(sc->sc_dev,
445 "using fake station address\n");
446 /* 'N' happens to have the Local bit set */
447 #if 0
448 sc->sc_enaddr[0] = 'N';
449 sc->sc_enaddr[1] = 'e';
450 sc->sc_enaddr[2] = 't';
451 sc->sc_enaddr[3] = 'B';
452 sc->sc_enaddr[4] = 'S';
453 sc->sc_enaddr[5] = 'D';
454 #else
455 /* XXX Glor */
456 sc->sc_enaddr[0] = 0xd4;
457 sc->sc_enaddr[1] = 0x94;
458 sc->sc_enaddr[2] = 0xa1;
459 sc->sc_enaddr[3] = 0x97;
460 sc->sc_enaddr[4] = 0x03;
461 sc->sc_enaddr[5] = 0x94;
462 #endif
463 }
464 } else {
465 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
466 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
467 memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
468 ETHER_ADDR_LEN);
469 }
470
471 #if 0
472 sc->sc_rxthih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_RXTH,
473 IPL_VM, IST_LEVEL, cpsw_rxthintr, sc);
474 sc->sc_rxih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_RX,
475 IPL_VM, IST_LEVEL, cpsw_rxintr, sc);
476 sc->sc_txih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_TX,
477 IPL_VM, IST_LEVEL, cpsw_txintr, sc);
478 sc->sc_miscih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_MISC,
479 IPL_VM, IST_LEVEL, cpsw_miscintr, sc);
480 #else
481 #define FDT_INTR_FLAGS 0
482 sc->sc_rxthih = fdtbus_intr_establish(phandle, CPSW_INTROFF_RXTH, IPL_VM, FDT_INTR_FLAGS, cpsw_rxthintr, sc);
483 sc->sc_rxih = fdtbus_intr_establish(phandle, CPSW_INTROFF_RX, IPL_VM, FDT_INTR_FLAGS, cpsw_rxintr, sc);
484 sc->sc_txih = fdtbus_intr_establish(phandle, CPSW_INTROFF_TX, IPL_VM, FDT_INTR_FLAGS, cpsw_txintr, sc);
485 sc->sc_miscih = fdtbus_intr_establish(phandle, CPSW_INTROFF_MISC, IPL_VM, FDT_INTR_FLAGS, cpsw_miscintr, sc);
486 #endif
487
488 sc->sc_bst = faa->faa_bst;
489 sc->sc_bss = size;
490 sc->sc_bdt = faa->faa_dmat;
491
492 error = bus_space_map(sc->sc_bst, addr, size, 0,
493 &sc->sc_bsh);
494 if (error) {
495 aprint_error_dev(sc->sc_dev,
496 "can't map registers: %d\n", error);
497 return;
498 }
499
500 sc->sc_txdescs_pa = addr + CPSW_CPPI_RAM_TXDESCS_BASE;
501 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
502 CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
503 &sc->sc_bsh_txdescs);
504 if (error) {
505 aprint_error_dev(sc->sc_dev,
506 "can't subregion tx ring SRAM: %d\n", error);
507 return;
508 }
509 aprint_debug_dev(sc->sc_dev, "txdescs at %p\n",
510 (void *)sc->sc_bsh_txdescs);
511
512 sc->sc_rxdescs_pa = addr + CPSW_CPPI_RAM_RXDESCS_BASE;
513 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
514 CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
515 &sc->sc_bsh_rxdescs);
516 if (error) {
517 aprint_error_dev(sc->sc_dev,
518 "can't subregion rx ring SRAM: %d\n", error);
519 return;
520 }
521 aprint_debug_dev(sc->sc_dev, "rxdescs at %p\n",
522 (void *)sc->sc_bsh_rxdescs);
523
524 sc->sc_rdp = kmem_alloc(sizeof(*sc->sc_rdp), KM_SLEEP);
525
526 for (i = 0; i < CPSW_NTXDESCS; i++) {
527 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
528 CPSW_TXFRAGS, MCLBYTES, 0, 0,
529 &sc->sc_rdp->tx_dm[i])) != 0) {
530 aprint_error_dev(sc->sc_dev,
531 "unable to create tx DMA map: %d\n", error);
532 }
533 sc->sc_rdp->tx_mb[i] = NULL;
534 }
535
536 for (i = 0; i < CPSW_NRXDESCS; i++) {
537 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
538 MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
539 aprint_error_dev(sc->sc_dev,
540 "unable to create rx DMA map: %d\n", error);
541 }
542 sc->sc_rdp->rx_mb[i] = NULL;
543 }
544
545 sc->sc_txpad = kmem_zalloc(ETHER_MIN_LEN, KM_SLEEP);
546 bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
547 BUS_DMA_WAITOK, &sc->sc_txpad_dm);
548 bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
549 ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK|BUS_DMA_WRITE);
550 bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
551 BUS_DMASYNC_PREWRITE);
552
553 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
554 ether_sprintf(sc->sc_enaddr));
555
556 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
557 ifp->if_softc = sc;
558 ifp->if_capabilities = 0;
559 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
560 ifp->if_start = cpsw_start;
561 ifp->if_ioctl = cpsw_ioctl;
562 ifp->if_init = cpsw_init;
563 ifp->if_stop = cpsw_stop;
564 ifp->if_watchdog = cpsw_watchdog;
565 IFQ_SET_READY(&ifp->if_snd);
566
567 cpsw_stop(ifp, 0);
568
569 sc->sc_mii.mii_ifp = ifp;
570 sc->sc_mii.mii_readreg = cpsw_mii_readreg;
571 sc->sc_mii.mii_writereg = cpsw_mii_writereg;
572 sc->sc_mii.mii_statchg = cpsw_mii_statchg;
573
574 sc->sc_ec.ec_mii = &sc->sc_mii;
575 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
576 ether_mediastatus);
577
578 /* Initialize MDIO */
579 cpsw_write_4(sc, MDIOCONTROL,
580 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
581 /* Clear ALE */
582 cpsw_write_4(sc, CPSW_ALE_CONTROL, ALECTL_CLEAR_TABLE);
583
584 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
585 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
586 aprint_error_dev(self, "no PHY found!\n");
587 sc->sc_phy_has_1000t = false;
588 ifmedia_add(&sc->sc_mii.mii_media,
589 IFM_ETHER|IFM_MANUAL, 0, NULL);
590 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
591 } else {
592 sc->sc_phy_has_1000t = cpsw_phy_has_1000t(sc);
593 if (sc->sc_phy_has_1000t) {
594 #if 0
595 aprint_normal_dev(sc->sc_dev, "1000baseT PHY found. "
596 "Setting RGMII Mode\n");
597 /*
598 * Select the Interface RGMII Mode in the Control
599 * Module
600 */
601 sitara_cm_reg_write_4(CPSW_GMII_SEL,
602 GMIISEL_GMII2_SEL(RGMII_MODE) |
603 GMIISEL_GMII1_SEL(RGMII_MODE));
604 #endif
605 }
606
607 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
608 }
609
610 if_attach(ifp);
611 if_deferred_start_init(ifp, NULL);
612 ether_ifattach(ifp, sc->sc_enaddr);
613
614 /* The attach is successful. */
615 sc->sc_attached = true;
616
617 return;
618 }
619
620 static void
621 cpsw_start(struct ifnet *ifp)
622 {
623 struct cpsw_softc * const sc = ifp->if_softc;
624 struct cpsw_ring_data * const rdp = sc->sc_rdp;
625 struct cpsw_cpdma_bd bd;
626 uint32_t * const dw = bd.word;
627 struct mbuf *m;
628 bus_dmamap_t dm;
629 u_int eopi __diagused = ~0;
630 u_int seg;
631 u_int txfree;
632 int txstart = -1;
633 int error;
634 bool pad;
635 u_int mlen;
636
637 KERNHIST_FUNC(__func__);
638 KERNHIST_CALLED_5(cpswhist, sc, 0, 0, 0);
639
640 if (__predict_false((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
641 IFF_RUNNING)) {
642 return;
643 }
644
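/*
 * Count free descriptors.  One slot is always kept unused so that
 * sc_txnext == sc_txhead unambiguously means "ring empty" rather than
 * "ring full".
 */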
645 if (sc->sc_txnext >= sc->sc_txhead)
646 txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
647 else
648 txfree = sc->sc_txhead - sc->sc_txnext - 1;
649
650 KERNHIST_LOG(cpswhist, "start txf %x txh %x txn %x txr %x\n",
651 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
652
653 while (txfree > 0) {
654 IFQ_POLL(&ifp->if_snd, m);
655 if (m == NULL)
656 break;
657
658 dm = rdp->tx_dm[sc->sc_txnext];
659
660 error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
661 if (error == EFBIG) {
662 device_printf(sc->sc_dev, "won't fit\n");
663 IFQ_DEQUEUE(&ifp->if_snd, m);
664 m_freem(m);
665 ifp->if_oerrors++;
666 continue;
667 } else if (error != 0) {
668 device_printf(sc->sc_dev, "error\n");
669 break;
670 }
671
672 if (dm->dm_nsegs + 1 >= txfree) {
673 ifp->if_flags |= IFF_OACTIVE;
674 bus_dmamap_unload(sc->sc_bdt, dm);
675 break;
676 }
677
678 mlen = m_length(m);
679 pad = mlen < CPSW_PAD_LEN;
680
681 KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
682 rdp->tx_mb[sc->sc_txnext] = m;
683 IFQ_DEQUEUE(&ifp->if_snd, m);
684
685 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
686 BUS_DMASYNC_PREWRITE);
687
688 if (txstart == -1)
689 txstart = sc->sc_txnext;
690 eopi = sc->sc_txnext;
691 for (seg = 0; seg < dm->dm_nsegs; seg++) {
692 dw[0] = cpsw_txdesc_paddr(sc,
693 TXDESC_NEXT(sc->sc_txnext));
694 dw[1] = dm->dm_segs[seg].ds_addr;
695 dw[2] = dm->dm_segs[seg].ds_len;
696 dw[3] = 0;
697
698 if (seg == 0)
699 dw[3] |= CPDMA_BD_SOP | CPDMA_BD_OWNER |
700 MAX(mlen, CPSW_PAD_LEN);
701
702 if ((seg == dm->dm_nsegs - 1) && !pad)
703 dw[3] |= CPDMA_BD_EOP;
704
705 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
706 txfree--;
707 eopi = sc->sc_txnext;
708 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
709 }
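/*
 * Frames shorter than the Ethernet minimum get one extra descriptor
 * pointing at the shared zeroed pad buffer, bringing the frame up to
 * CPSW_PAD_LEN bytes; the EOP flag moves to this pad descriptor.
 */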
710 if (pad) {
711 dw[0] = cpsw_txdesc_paddr(sc,
712 TXDESC_NEXT(sc->sc_txnext));
713 dw[1] = sc->sc_txpad_pa;
714 dw[2] = CPSW_PAD_LEN - mlen;
715 dw[3] = CPDMA_BD_EOP;
716
717 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
718 txfree--;
719 eopi = sc->sc_txnext;
720 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
721 }
722
723 bpf_mtap(ifp, m, BPF_D_OUT);
724 }
725
726 if (txstart >= 0) {
727 ifp->if_timer = 5;
728 /* terminate the new chain */
729 KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
730 cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);
731 KERNHIST_LOG(cpswhist, "CP %x HDP %x s %x e %x\n",
732 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
733 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), txstart, eopi);
734 /* link the new chain on */
735 cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
736 cpsw_txdesc_paddr(sc, txstart));
737 if (sc->sc_txeoq) {
738 /* kick the dma engine */
739 sc->sc_txeoq = false;
740 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
741 cpsw_txdesc_paddr(sc, txstart));
742 }
743 }
744 KERNHIST_LOG(cpswhist, "end txf %x txh %x txn %x txr %x\n",
745 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
746 }
747
748 static int
749 cpsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
750 {
751 const int s = splnet();
752 int error = 0;
753
754 switch (cmd) {
755 default:
756 error = ether_ioctl(ifp, cmd, data);
757 if (error == ENETRESET) {
758 error = 0;
759 }
760 break;
761 }
762
763 splx(s);
764
765 return error;
766 }
767
768 static void
769 cpsw_watchdog(struct ifnet *ifp)
770 {
771 struct cpsw_softc *sc = ifp->if_softc;
772
773 device_printf(sc->sc_dev, "device timeout\n");
774
775 ifp->if_oerrors++;
776 cpsw_init(ifp);
777 cpsw_start(ifp);
778 }
779
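/*
 * Poll an MDIO user access register until bit 31 (the GO/busy flag)
 * clears, giving up after roughly 1000 microseconds.
 */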
780 static int
781 cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
782 {
783 u_int tries;
784
785 for (tries = 0; tries < 1000; tries++) {
786 if ((cpsw_read_4(sc, reg) & __BIT(31)) == 0)
787 return 0;
788 delay(1);
789 }
790 return ETIMEDOUT;
791 }
792
793 static int
794 cpsw_mii_readreg(device_t dev, int phy, int reg)
795 {
796 struct cpsw_softc * const sc = device_private(dev);
797 uint32_t v;
798
799 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
800 return 0;
801
802 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) |
803 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
804
805 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
806 return 0;
807
808 v = cpsw_read_4(sc, MDIOUSERACCESS0);
809 if (v & __BIT(29))
810 return v & 0xffff;
811 else
812 return 0;
813 }
814
815 static void
816 cpsw_mii_writereg(device_t dev, int phy, int reg, int val)
817 {
818 struct cpsw_softc * const sc = device_private(dev);
819 uint32_t v;
820
821 KASSERT((val & 0xffff0000UL) == 0);
822
823 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
824 goto out;
825
826 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) | (1 << 30) |
827 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);
828
829 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
830 goto out;
831
832 v = cpsw_read_4(sc, MDIOUSERACCESS0);
833 if ((v & __BIT(29)) == 0)
834 out:
835 device_printf(sc->sc_dev, "%s error\n", __func__);
836
837 }
838
839 static void
840 cpsw_mii_statchg(struct ifnet *ifp)
841 {
842 return;
843 }
844
845 static int
846 cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
847 {
848 struct cpsw_ring_data * const rdp = sc->sc_rdp;
849 const u_int h = RXDESC_PREV(i);
850 struct cpsw_cpdma_bd bd;
851 uint32_t * const dw = bd.word;
852 struct mbuf *m;
853 int error = ENOBUFS;
854
855 MGETHDR(m, M_DONTWAIT, MT_DATA);
856 if (m == NULL) {
857 goto reuse;
858 }
859
860 MCLGET(m, M_DONTWAIT);
861 if ((m->m_flags & M_EXT) == 0) {
862 m_freem(m);
863 goto reuse;
864 }
865
866 /* We have a new buffer, prepare it for the ring. */
867
868 if (rdp->rx_mb[i] != NULL)
869 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
870
871 m->m_len = m->m_pkthdr.len = MCLBYTES;
872
873 rdp->rx_mb[i] = m;
874
875 error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
876 BUS_DMA_READ|BUS_DMA_NOWAIT);
877 if (error) {
878 device_printf(sc->sc_dev, "can't load rx DMA map %d: %d\n",
879 i, error);
880 }
881
882 bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
883 0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
884
885 error = 0;
886
887 reuse:
888 /* (re-)setup the descriptor */
889 dw[0] = 0;
890 dw[1] = rdp->rx_dm[i]->dm_segs[0].ds_addr;
891 dw[2] = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
892 dw[3] = CPDMA_BD_OWNER;
893
894 cpsw_set_rxdesc(sc, i, &bd);
895 /* and link onto ring */
896 cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));
897
898 return error;
899 }
900
901 static int
902 cpsw_init(struct ifnet *ifp)
903 {
904 struct cpsw_softc * const sc = ifp->if_softc;
905 struct mii_data * const mii = &sc->sc_mii;
906 int i;
907
908 cpsw_stop(ifp, 0);
909
910 sc->sc_txnext = 0;
911 sc->sc_txhead = 0;
912
913 /* Reset wrapper */
914 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
915 while(cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1);
916
917 /* Reset SS */
918 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
919 while(cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1);
920
921 /* Clear table and enable ALE */
922 cpsw_write_4(sc, CPSW_ALE_CONTROL,
923 ALECTL_ENABLE_ALE | ALECTL_CLEAR_TABLE);
924
925 /* Reset and init Sliver port 1 and 2 */
926 for (i = 0; i < CPSW_ETH_PORTS; i++) {
927 uint32_t macctl;
928
929 /* Reset */
930 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
931 while(cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1);
932 /* Set Slave Mapping */
933 cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
934 cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
935 cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
936 /* Set MAC Address */
937 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i+1),
938 sc->sc_enaddr[0] | (sc->sc_enaddr[1] << 8) |
939 (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[3] << 24));
940 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i+1),
941 sc->sc_enaddr[4] | (sc->sc_enaddr[5] << 8));
942
943 /* Set MACCONTROL for ports 0,1 */
944 macctl = SLMACCTL_FULLDUPLEX | SLMACCTL_GMII_EN |
945 SLMACCTL_IFCTL_A;
946 if (sc->sc_phy_has_1000t)
947 macctl |= SLMACCTL_GIG;
948 cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), macctl);
949
950 /* Set ALE port to forwarding(3) */
951 cpsw_write_4(sc, CPSW_ALE_PORTCTL(i+1), 3);
952 }
953
954 /* Set Host Port Mapping */
955 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
956 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
957
958 /* Set ALE port to forwarding(3) */
959 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);
960
961 /* Initialize addrs */
962 cpsw_ale_update_addresses(sc, 1);
963
964 cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
965 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
966
967 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
968 while(cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1);
969
970 for (i = 0; i < 8; i++) {
971 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
972 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
973 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
974 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
975 }
976
977 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
978 CPSW_CPPI_RAM_TXDESCS_SIZE/4);
979
980 sc->sc_txhead = 0;
981 sc->sc_txnext = 0;
982
983 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
984
985 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
986 CPSW_CPPI_RAM_RXDESCS_SIZE/4);
987 /* Initialize RX Buffer Descriptors */
988 cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
989 for (i = 0; i < CPSW_NRXDESCS; i++) {
990 cpsw_new_rxbuf(sc, i);
991 }
992 sc->sc_rxhead = 0;
993
994 /* turn off flow control */
995 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
996
997 /* align layer 3 header to 32-bit */
998 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);
999
1000 /* Clear all interrupt Masks */
1001 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
1002 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
1003
1004 /* Enable TX & RX DMA */
1005 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
1006 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
1007
1008 /* Enable TX and RX interrupt receive for core 0 */
1009 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 1);
1010 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 1);
1011 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);
1012
1013 /* Enable host Error Interrupt */
1014 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 2);
1015
1016 /* Enable interrupts for TX and RX Channel 0 */
1017 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
1018 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);
1019
1020 /* Ack stalled irqs */
1021 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1022 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1023 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1024 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1025
1026 /* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
1027 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
1028 cpsw_write_4(sc, MDIOCONTROL,
1029 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
1030
1031 mii_mediachg(mii);
1032
1033 /* Write channel 0 RX HDP */
1034 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
1035 sc->sc_rxrun = true;
1036 sc->sc_rxeoq = false;
1037
1038 sc->sc_txrun = true;
1039 sc->sc_txeoq = true;
1040 callout_schedule(&sc->sc_tick_ch, hz);
1041 ifp->if_flags |= IFF_RUNNING;
1042 ifp->if_flags &= ~IFF_OACTIVE;
1043
1044 return 0;
1045 }
1046
1047 static void
1048 cpsw_stop(struct ifnet *ifp, int disable)
1049 {
1050 struct cpsw_softc * const sc = ifp->if_softc;
1051 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1052 u_int i;
1053
1054 aprint_debug_dev(sc->sc_dev, "%s: ifp %p disable %d\n", __func__,
1055 ifp, disable);
1056
1057 if ((ifp->if_flags & IFF_RUNNING) == 0)
1058 return;
1059
1060 callout_stop(&sc->sc_tick_ch);
1061 mii_down(&sc->sc_mii);
1062
1063 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
1064 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
1065 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0x0);
1066 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0x0);
1067 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x0);
1068
1069 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
1070 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
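/*
 * Wait for the teardown to complete.  The interrupt handlers observe the
 * TDOWNCMPLT completion markers and clear sc_txrun/sc_rxrun; they are
 * called directly here, presumably because the interrupts were just
 * masked above.
 */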
1071 i = 0;
1072 while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
1073 delay(10);
1074 if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
1075 sc->sc_txrun = false;
1076 if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
1077 sc->sc_rxrun = false;
1078 i++;
1079 }
1080 //printf("%s teardown complete in %u\n", __func__, i);
1081
1082 /* Reset wrapper */
1083 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
1084 while(cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1);
1085
1086 /* Reset SS */
1087 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
1088 while(cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1);
1089
1090 for (i = 0; i < CPSW_ETH_PORTS; i++) {
1091 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
1092 while(cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1);
1093 }
1094
1095 /* Reset CPDMA */
1096 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
1097 while(cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1);
1098
1099 /* Release any queued transmit buffers. */
1100 for (i = 0; i < CPSW_NTXDESCS; i++) {
1101 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
1102 m_freem(rdp->tx_mb[i]);
1103 rdp->tx_mb[i] = NULL;
1104 }
1105
1106 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
1107 ifp->if_timer = 0;
1108
1109 if (!disable)
1110 return;
1111
1112 for (i = 0; i < CPSW_NRXDESCS; i++) {
1113 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
1114 m_freem(rdp->rx_mb[i]);
1115 rdp->rx_mb[i] = NULL;
1116 }
1117 }
1118
1119 static void
1120 cpsw_tick(void *arg)
1121 {
1122 struct cpsw_softc * const sc = arg;
1123 struct mii_data * const mii = &sc->sc_mii;
1124 const int s = splnet();
1125
1126 mii_tick(mii);
1127
1128 splx(s);
1129
1130 callout_schedule(&sc->sc_tick_ch, hz);
1131 }
1132
1133 static int
1134 cpsw_rxthintr(void *arg)
1135 {
1136 struct cpsw_softc * const sc = arg;
1137
1138 /* this won't deassert the interrupt though */
1139 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1140
1141 return 1;
1142 }
1143
1144 static int
1145 cpsw_rxintr(void *arg)
1146 {
1147 struct cpsw_softc * const sc = arg;
1148 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1149 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1150 struct cpsw_cpdma_bd bd;
1151 const uint32_t * const dw = bd.word;
1152 bus_dmamap_t dm;
1153 struct mbuf *m;
1154 u_int i;
1155 u_int len, off;
1156
1157 KERNHIST_FUNC(__func__);
1158 KERNHIST_CALLED_5(cpswhist, sc, 0, 0, 0);
1159
1160 for (;;) {
1161 KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);
1162
1163 i = sc->sc_rxhead;
1164 KERNHIST_LOG(cpswhist, "rxhead %x CP %x\n", i,
1165 cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0)), 0, 0);
1166 dm = rdp->rx_dm[i];
1167 m = rdp->rx_mb[i];
1168
1169 KASSERT(dm != NULL);
1170 KASSERT(m != NULL);
1171
1172 cpsw_get_rxdesc(sc, i, &bd);
1173
1174 if (ISSET(dw[3], CPDMA_BD_OWNER))
1175 break;
1176
1177 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1178 sc->sc_rxrun = false;
1179 return 1;
1180 }
1181
1182 if ((dw[3] & (CPDMA_BD_SOP|CPDMA_BD_EOP)) !=
1183 (CPDMA_BD_SOP|CPDMA_BD_EOP)) {
1184 //Debugger();
1185 }
1186
1187 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
1188 BUS_DMASYNC_POSTREAD);
1189
1190 if (cpsw_new_rxbuf(sc, i) != 0) {
1191 /* drop current packet, reuse buffer for new */
1192 ifp->if_ierrors++;
1193 goto next;
1194 }
1195
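/*
 * On an SOP descriptor, word 2 bits 26:16 carry the buffer offset and
 * word 3 bits 10:0 the received packet length; the frame check sequence
 * is included in that length when CPDMA_BD_PASSCRC is set, so it is
 * trimmed off below.
 */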
1196 off = __SHIFTOUT(dw[2], (uint32_t)__BITS(26, 16));
1197 len = __SHIFTOUT(dw[3], (uint32_t)__BITS(10, 0));
1198
1199 if (ISSET(dw[3], CPDMA_BD_PASSCRC))
1200 len -= ETHER_CRC_LEN;
1201
1202 m_set_rcvif(m, ifp);
1203 m->m_pkthdr.len = m->m_len = len;
1204 m->m_data += off;
1205
1206 if_percpuq_enqueue(ifp->if_percpuq, m);
1207
1208 next:
1209 sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
1210 if (ISSET(dw[3], CPDMA_BD_EOQ)) {
1211 sc->sc_rxeoq = true;
1212 break;
1213 } else {
1214 sc->sc_rxeoq = false;
1215 }
1216 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(0),
1217 cpsw_rxdesc_paddr(sc, i));
1218 }
1219
1220 if (sc->sc_rxeoq) {
1221 device_printf(sc->sc_dev, "rxeoq\n");
1222 //Debugger();
1223 }
1224
1225 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1226
1227 return 1;
1228 }
1229
1230 static int
1231 cpsw_txintr(void *arg)
1232 {
1233 struct cpsw_softc * const sc = arg;
1234 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1235 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1236 struct cpsw_cpdma_bd bd;
1237 const uint32_t * const dw = bd.word;
1238 bool handled = false;
1239 uint32_t tx0_cp;
1240 u_int cpi;
1241
1242 KERNHIST_FUNC(__func__);
1243 KERNHIST_CALLED_5(cpswhist, sc, 0, 0, 0);
1244
1245 KASSERT(sc->sc_txrun);
1246
1247 KERNHIST_LOG(cpswhist, "before txnext %x txhead %x txrun %x\n",
1248 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1249
1250 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1251
1252 if (tx0_cp == 0xfffffffc) {
1253 /* Teardown, ack it */
1254 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
1255 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0), 0);
1256 sc->sc_txrun = false;
1257 return 0;
1258 }
1259
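/*
 * Walk completed descriptors from sc_txhead up to the completion pointer
 * reported by the hardware, unloading and freeing the mbuf that starts at
 * each SOP descriptor.
 */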
1260 for (;;) {
1261 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1262 cpi = (tx0_cp - sc->sc_txdescs_pa) / sizeof(struct cpsw_cpdma_bd);
1263 KASSERT(sc->sc_txhead < CPSW_NTXDESCS);
1264
1265 KERNHIST_LOG(cpswhist, "txnext %x txhead %x txrun %x cpi %x\n",
1266 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, cpi);
1267
1268 cpsw_get_txdesc(sc, sc->sc_txhead, &bd);
1269
1270 if (dw[2] == 0) {
1271 //Debugger();
1272 }
1273
1274 if (ISSET(dw[3], CPDMA_BD_SOP) == 0)
1275 goto next;
1276
1277 if (ISSET(dw[3], CPDMA_BD_OWNER)) {
1278 printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
1279 sc->sc_txnext);
1280 break;
1281 }
1282
1283 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1284 sc->sc_txrun = false;
1285 return 1;
1286 }
1287
1288 bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
1289 0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
1290 BUS_DMASYNC_POSTWRITE);
1291 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);
1292
1293 m_freem(rdp->tx_mb[sc->sc_txhead]);
1294 rdp->tx_mb[sc->sc_txhead] = NULL;
1295
1296 ifp->if_opackets++;
1297
1298 handled = true;
1299
1300 ifp->if_flags &= ~IFF_OACTIVE;
1301
1302 next:
1303 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1304 sc->sc_txeoq = true;
1305 }
1306 if (sc->sc_txhead == cpi) {
1307 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0),
1308 cpsw_txdesc_paddr(sc, cpi));
1309 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1310 break;
1311 }
1312 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1313 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1314 sc->sc_txeoq = true;
1315 break;
1316 }
1317 }
1318
1319 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1320
1321 if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
1322 if (cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)) == 0) {
1323 sc->sc_txeoq = false;
1324 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
1325 cpsw_txdesc_paddr(sc, sc->sc_txhead));
1326 }
1327 }
1328
1329 KERNHIST_LOG(cpswhist, "after txnext %x txhead %x txrun %x\n",
1330 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1331 KERNHIST_LOG(cpswhist, "CP %x HDP %x\n",
1332 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
1333 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), 0, 0);
1334
1335 if (handled && sc->sc_txnext == sc->sc_txhead)
1336 ifp->if_timer = 0;
1337
1338 if (handled)
1339 if_schedule_deferred_start(ifp);
1340
1341 return handled;
1342 }
1343
1344 static int
1345 cpsw_miscintr(void *arg)
1346 {
1347 struct cpsw_softc * const sc = arg;
1348 uint32_t miscstat;
1349 uint32_t dmastat;
1350 uint32_t stat;
1351
1352 miscstat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
1353 device_printf(sc->sc_dev, "%s %x FIRE\n", __func__, miscstat);
1354
1355 #define CPSW_MISC_HOST_PEND __BIT32(2)
1356 #define CPSW_MISC_STAT_PEND __BIT32(3)
1357
1358 if (ISSET(miscstat, CPSW_MISC_HOST_PEND)) {
1359 /* Host Error */
1360 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1361 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1362
1363 printf("rxhead %02x\n", sc->sc_rxhead);
1364
1365 stat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
1366 printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
1367 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0));
1368 printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
1369 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1370 printf("CPSW_CPDMA_TX0_CP %x\n", stat);
1371 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(0));
1372 printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
1373 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0));
1374 printf("CPSW_CPDMA_RX0_CP %x\n", stat);
1375
1376 //Debugger();
1377
1378 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
1379 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1380 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1381 }
1382
1383 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1384
1385 return 1;
1386 }
1387
1388 /*
1389 *
1390 * ALE support routines.
1391 *
1392 */
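/*
 * Each ALE table entry is a 68-bit record accessed as three 32-bit words
 * (TBLW0..TBLW2) through the table control register; the helpers below
 * build address entries word by word.
 */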
1393
1394 static void
1395 cpsw_ale_entry_init(uint32_t *ale_entry)
1396 {
1397 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1398 }
1399
1400 static void
1401 cpsw_ale_entry_set_mac(uint32_t *ale_entry, const uint8_t *mac)
1402 {
1403 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1404 ale_entry[1] = mac[0] << 8 | mac[1];
1405 }
1406
1407 static void
1408 cpsw_ale_entry_set_bcast_mac(uint32_t *ale_entry)
1409 {
1410 ale_entry[0] = 0xffffffff;
1411 ale_entry[1] = 0x0000ffff;
1412 }
1413
1414 static void
1415 cpsw_ale_entry_set(uint32_t *ale_entry, ale_entry_field_t field, uint32_t val)
1416 {
1417 /* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1418 switch (field) {
1419 case ALE_ENTRY_TYPE:
1420 /* [61:60] */
1421 ale_entry[1] |= (val & 0x3) << 28;
1422 break;
1423 case ALE_MCAST_FWD_STATE:
1424 /* [63:62] */
1425 ale_entry[1] |= (val & 0x3) << 30;
1426 break;
1427 case ALE_PORT_MASK:
1428 /* [68:66] */
1429 ale_entry[2] |= (val & 0x7) << 2;
1430 break;
1431 case ALE_PORT_NUMBER:
1432 /* [67:66] */
1433 ale_entry[2] |= (val & 0x3) << 2;
1434 break;
1435 default:
1436 panic("Invalid ALE entry field: %d\n", field);
1437 }
1438
1439 return;
1440 }
1441
1442 static bool
1443 cpsw_ale_entry_mac_match(const uint32_t *ale_entry, const uint8_t *mac)
1444 {
1445 return (((ale_entry[1] >> 8) & 0xff) == mac[0]) &&
1446 (((ale_entry[1] >> 0) & 0xff) == mac[1]) &&
1447 (((ale_entry[0] >>24) & 0xff) == mac[2]) &&
1448 (((ale_entry[0] >>16) & 0xff) == mac[3]) &&
1449 (((ale_entry[0] >> 8) & 0xff) == mac[4]) &&
1450 (((ale_entry[0] >> 0) & 0xff) == mac[5]);
1451 }
1452
1453 static void
1454 cpsw_ale_set_outgoing_mac(struct cpsw_softc *sc, int port, const uint8_t *mac)
1455 {
1456 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(port),
1457 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
1458 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(port),
1459 mac[5] << 8 | mac[4]);
1460 }
1461
1462 static void
1463 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1464 {
1465 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
1466 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
1467 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
1468 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
1469 }
1470
1471 static void
1472 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx,
1473 const uint32_t *ale_entry)
1474 {
1475 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
1476 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
1477 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
1478 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
1479 }
1480
1481 static int
1482 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
1483 {
1484 int i;
1485 uint32_t ale_entry[3];
1486
1487 /* First two entries are link address and broadcast. */
1488 for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
1489 cpsw_ale_read_entry(sc, i, ale_entry);
1490 if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
1491 ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */
1492 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1493 cpsw_ale_write_entry(sc, i, ale_entry);
1494 }
1495 }
1496 return CPSW_MAX_ALE_ENTRIES;
1497 }
1498
1499 static int
1500 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmask, uint8_t *mac)
1501 {
1502 int free_index = -1, matching_index = -1, i;
1503 uint32_t ale_entry[3];
1504
1505 /* Find a matching entry or a free entry. */
1506 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1507 cpsw_ale_read_entry(sc, i, ale_entry);
1508
1509 /* Entry Type[61:60] is 0 for free entry */
1510 if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
1511 free_index = i;
1512 }
1513
1514 if (cpsw_ale_entry_mac_match(ale_entry, mac)) {
1515 matching_index = i;
1516 break;
1517 }
1518 }
1519
1520 if (matching_index < 0) {
1521 if (free_index < 0)
1522 return ENOMEM;
1523 i = free_index;
1524 }
1525
1526 cpsw_ale_entry_init(ale_entry);
1527
1528 cpsw_ale_entry_set_mac(ale_entry, mac);
1529 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1530 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1531 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, portmask);
1532
1533 cpsw_ale_write_entry(sc, i, ale_entry);
1534
1535 return 0;
1536 }
1537
1538 static int
1539 cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
1540 {
1541 uint8_t *mac = sc->sc_enaddr;
1542 uint32_t ale_entry[3];
1543 int i;
1544 struct ethercom * const ec = &sc->sc_ec;
1545 struct ether_multi *ifma;
1546
1547 cpsw_ale_entry_init(ale_entry);
1548 /* Route incoming packets for our MAC address to Port 0 (host). */
1549 /* For simplicity, keep this entry at table index 0 in the ALE. */
1550 cpsw_ale_entry_set_mac(ale_entry, mac);
1551 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1552 cpsw_ale_entry_set(ale_entry, ALE_PORT_NUMBER, 0);
1553 cpsw_ale_write_entry(sc, 0, ale_entry);
1554
1555 /* Set outgoing MAC Address for Ports 1 and 2. */
1556 for (i = CPSW_CPPI_PORTS; i < (CPSW_ETH_PORTS + CPSW_CPPI_PORTS); ++i)
1557 cpsw_ale_set_outgoing_mac(sc, i, mac);
1558
1559 /* Keep the broadcast address at table entry 1. */
1560 cpsw_ale_entry_init(ale_entry);
1561 cpsw_ale_entry_set_bcast_mac(ale_entry);
1562 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1563 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1564 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, ALE_PORT_MASK_ALL);
1565 cpsw_ale_write_entry(sc, 1, ale_entry);
1566
1567 /* SIOCDELMULTI doesn't specify the particular address
1568 being removed, so we have to remove all and rebuild. */
1569 if (purge)
1570 cpsw_ale_remove_all_mc_entries(sc);
1571
1572 /* Set other multicast addrs desired. */
1573 LIST_FOREACH(ifma, &ec->ec_multiaddrs, enm_list) {
1574 cpsw_ale_mc_entry_set(sc, ALE_PORT_MASK_ALL, ifma->enm_addrlo);
1575 }
1576
1577 return 0;
1578 }
1579