1 /* $NetBSD: if_cpsw.c,v 1.5 2019/05/29 05:05:24 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*-
30 * Copyright (c) 2012 Damjan Marion <dmarion (at) Freebsd.org>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54
55 #include <sys/cdefs.h>
56 __KERNEL_RCSID(0, "$NetBSD: if_cpsw.c,v 1.5 2019/05/29 05:05:24 msaitoh Exp $");
57
58 #include <sys/param.h>
59 #include <sys/bus.h>
60 #include <sys/device.h>
61 #include <sys/ioctl.h>
62 #include <sys/intr.h>
63 #include <sys/kmem.h>
64 #include <sys/mutex.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67
68 #include <net/if.h>
69 #include <net/if_ether.h>
70 #include <net/if_media.h>
71 #include <net/bpf.h>
72
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75
76 #if 0
77 #include <arch/arm/omap/omap2_obiovar.h>
78 #else
79 #include <dev/fdt/fdtvar.h>
80 #endif
81 #include <arch/arm/omap/if_cpswreg.h>
82 #include <arch/arm/omap/sitara_cmreg.h>
83 #include <arch/arm/omap/sitara_cm.h>
84
85 #define CPSW_TXFRAGS 16
86
87 #define CPSW_CPPI_RAM_SIZE (0x2000)
88 #define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
89 #define CPSW_CPPI_RAM_RXDESCS_SIZE \
90 (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
91 #define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
92 #define CPSW_CPPI_RAM_RXDESCS_BASE \
93 (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)
94
95 #define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
96 #define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
97
98 CTASSERT(powerof2(CPSW_NTXDESCS));
99 CTASSERT(powerof2(CPSW_NRXDESCS));
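/*
 * Worked example of the sizing above, assuming the four-word (16-byte)
 * CPPI buffer descriptor that the struct cpsw_cpdma_bd word[] accesses
 * below imply: the 0x2000-byte CPPI RAM is split in half, giving 0x1000
 * bytes per ring and 0x1000 / 16 = 256 descriptors each.  Both counts are
 * powers of two, which the CTASSERTs above demand so that the
 * TXDESC/RXDESC NEXT and PREV macros below can wrap with a simple mask.
 */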
100
101 #define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
102
103 #define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
104 #define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)
105
106 #define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
107 #define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
108
109 struct cpsw_ring_data {
110 bus_dmamap_t tx_dm[CPSW_NTXDESCS];
111 struct mbuf *tx_mb[CPSW_NTXDESCS];
112 bus_dmamap_t rx_dm[CPSW_NRXDESCS];
113 struct mbuf *rx_mb[CPSW_NRXDESCS];
114 };
115
116 struct cpsw_softc {
117 device_t sc_dev;
118 bus_space_tag_t sc_bst;
119 bus_space_handle_t sc_bsh;
120 bus_size_t sc_bss;
121 bus_dma_tag_t sc_bdt;
122 bus_space_handle_t sc_bsh_txdescs;
123 bus_space_handle_t sc_bsh_rxdescs;
124 bus_addr_t sc_txdescs_pa;
125 bus_addr_t sc_rxdescs_pa;
126 struct ethercom sc_ec;
127 struct mii_data sc_mii;
128 bool sc_phy_has_1000t;
129 bool sc_attached;
130 callout_t sc_tick_ch;
131 void *sc_ih;
132 struct cpsw_ring_data *sc_rdp;
133 volatile u_int sc_txnext;
134 volatile u_int sc_txhead;
135 volatile u_int sc_rxhead;
136 void *sc_rxthih;
137 void *sc_rxih;
138 void *sc_txih;
139 void *sc_miscih;
140 void *sc_txpad;
141 bus_dmamap_t sc_txpad_dm;
142 #define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr
143 uint8_t sc_enaddr[ETHER_ADDR_LEN];
144 volatile bool sc_txrun;
145 volatile bool sc_rxrun;
146 volatile bool sc_txeoq;
147 volatile bool sc_rxeoq;
148 };
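/*
 * Ring index conventions, as can be inferred from cpsw_start() and
 * cpsw_txintr() below: sc_txnext is the producer index (the next tx
 * descriptor the driver will fill) and sc_txhead is the consumer index
 * (the oldest descriptor still owned by the hardware).  The tx ring is
 * treated as full one slot early, so sc_txnext == sc_txhead always means
 * "empty".  sc_rxhead is the next rx descriptor expected to complete.
 */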
149
150 static int cpsw_match(device_t, cfdata_t, void *);
151 static void cpsw_attach(device_t, device_t, void *);
152 static int cpsw_detach(device_t, int);
153
154 static void cpsw_start(struct ifnet *);
155 static int cpsw_ioctl(struct ifnet *, u_long, void *);
156 static void cpsw_watchdog(struct ifnet *);
157 static int cpsw_init(struct ifnet *);
158 static void cpsw_stop(struct ifnet *, int);
159
160 static int cpsw_mii_readreg(device_t, int, int, uint16_t *);
161 static int cpsw_mii_writereg(device_t, int, int, uint16_t);
162 static void cpsw_mii_statchg(struct ifnet *);
163
164 static int cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
165 static void cpsw_tick(void *);
166
167 static int cpsw_rxthintr(void *);
168 static int cpsw_rxintr(void *);
169 static int cpsw_txintr(void *);
170 static int cpsw_miscintr(void *);
171
172 /* ALE support */
173 #define CPSW_MAX_ALE_ENTRIES 1024
174
175 static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
176
177 CFATTACH_DECL_NEW(cpsw, sizeof(struct cpsw_softc),
178 cpsw_match, cpsw_attach, cpsw_detach, NULL);
179
180 #undef KERNHIST
181 #include <sys/kernhist.h>
182 KERNHIST_DEFINE(cpswhist);
183
184 #ifdef KERNHIST
185 #define KERNHIST_CALLED_5(NAME, i, j, k, l) \
186 do { \
187 _kernhist_call = atomic_inc_uint_nv(&_kernhist_cnt); \
188 KERNHIST_LOG(NAME, "called! %x %x %x %x", i, j, k, l); \
189 } while (/*CONSTCOND*/ 0)
190 #else
191 #define KERNHIST_CALLED_5(NAME, i, j, k, l)
192 #endif
193
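/*
 * The two index helpers below implement modular ring arithmetic: because
 * the descriptor counts are powers of two, masking with (N - 1) is the
 * same as reducing modulo N.  With 256 tx descriptors, for example,
 * cpsw_txdesc_adjust(255, 1) == 0 and cpsw_txdesc_adjust(0, -1) == 255,
 * so TXDESC_NEXT()/TXDESC_PREV() wrap cleanly at both ends of the ring.
 */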
194 static inline u_int
195 cpsw_txdesc_adjust(u_int x, int y)
196 {
197 return (((x) + y) & (CPSW_NTXDESCS - 1));
198 }
199
200 static inline u_int
201 cpsw_rxdesc_adjust(u_int x, int y)
202 {
203 return (((x) + y) & (CPSW_NRXDESCS - 1));
204 }
205
206 static inline uint32_t
207 cpsw_read_4(struct cpsw_softc * const sc, bus_size_t const offset)
208 {
209 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, offset);
210 }
211
212 static inline void
213 cpsw_write_4(struct cpsw_softc * const sc, bus_size_t const offset,
214 uint32_t const value)
215 {
216 bus_space_write_4(sc->sc_bst, sc->sc_bsh, offset, value);
217 }
218
219 static inline void
220 cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
221 {
222 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
223
224 KERNHIST_FUNC(__func__);
225 KERNHIST_CALLED_5(cpswhist, sc, i, n, 0);
226
227 bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
228 }
229
230 static inline void
231 cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
232 {
233 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
234
235 KERNHIST_FUNC(__func__);
236 KERNHIST_CALLED_5(cpswhist, sc, i, n, 0);
237
238 bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
239 }
240
241 static inline void
242 cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
243 struct cpsw_cpdma_bd * const bdp)
244 {
245 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
246 uint32_t * const dp = bdp->word;
247 const bus_size_t c = __arraycount(bdp->word);
248
249 KERNHIST_FUNC(__func__);
250 KERNHIST_CALLED_5(cpswhist, sc, i, bdp, 0);
251
252 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
253 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
254 dp[0], dp[1], dp[2], dp[3]);
255 }
256
257 static inline void
258 cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
259 struct cpsw_cpdma_bd * const bdp)
260 {
261 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
262 uint32_t * const dp = bdp->word;
263 const bus_size_t c = __arraycount(bdp->word);
264
265 KERNHIST_FUNC(__func__);
266 KERNHIST_CALLED_5(cpswhist, sc, i, bdp, 0);
267 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
268 dp[0], dp[1], dp[2], dp[3]);
269
270 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
271 }
272
273 static inline void
274 cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
275 struct cpsw_cpdma_bd * const bdp)
276 {
277 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
278 uint32_t * const dp = bdp->word;
279 const bus_size_t c = __arraycount(bdp->word);
280
281 KERNHIST_FUNC(__func__);
282 KERNHIST_CALLED_5(cpswhist, sc, i, bdp, 0);
283
284 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
285
286 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
287 dp[0], dp[1], dp[2], dp[3]);
288 }
289
290 static inline void
291 cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
292 struct cpsw_cpdma_bd * const bdp)
293 {
294 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
295 uint32_t * const dp = bdp->word;
296 const bus_size_t c = __arraycount(bdp->word);
297
298 KERNHIST_FUNC(__func__);
299 KERNHIST_CALLED_5(cpswhist, sc, i, bdp, 0);
300 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
301 dp[0], dp[1], dp[2], dp[3]);
302
303 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
304 }
305
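/*
 * The descriptors live in the switch's on-chip CPPI RAM rather than in
 * host memory, so a descriptor's "physical" address is just the bus
 * address of the register window plus the ring base plus the index scaled
 * by the descriptor size.  These are the values written into hardware
 * next-descriptor pointers and the head/completion pointer registers.
 */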
306 static inline bus_addr_t
307 cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
308 {
309 KASSERT(x < CPSW_NTXDESCS);
310 return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
311 }
312
313 static inline bus_addr_t
314 cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
315 {
316 KASSERT(x < CPSW_NRXDESCS);
317 return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
318 }
319
320
321 static int
322 cpsw_match(device_t parent, cfdata_t cf, void *aux)
323 {
324 struct fdt_attach_args * const faa = aux;
325
326 static const char * const compatible[] = {
327 "ti,am335x-cpsw",
328 "ti,cpsw",
329 NULL
330 };
331
332 return of_match_compatible(faa->faa_phandle, compatible);
333 }
334
335 static bool
336 cpsw_phy_has_1000t(struct cpsw_softc * const sc)
337 {
338 struct ifmedia_entry *ifm;
339
340 TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) {
341 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T)
342 return true;
343 }
344 return false;
345 }
346
347 static int
348 cpsw_detach(device_t self, int flags)
349 {
350 struct cpsw_softc * const sc = device_private(self);
351 struct ifnet *ifp = &sc->sc_ec.ec_if;
352 u_int i;
353
354 /* Succeed now if there's no work to do. */
355 if (!sc->sc_attached)
356 return 0;
357
358 sc->sc_attached = false;
359
360 /* Stop the interface. Callouts are stopped in it. */
361 cpsw_stop(ifp, 1);
362
363 /* Destroy our callout. */
364 callout_destroy(&sc->sc_tick_ch);
365
366 /* Let go of the interrupts */
367 intr_disestablish(sc->sc_rxthih);
368 intr_disestablish(sc->sc_rxih);
369 intr_disestablish(sc->sc_txih);
370 intr_disestablish(sc->sc_miscih);
371
372 /* Delete all media. */
373 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
374
375 ether_ifdetach(ifp);
376 if_detach(ifp);
377
378 /* Free the packet padding buffer */
379 kmem_free(sc->sc_txpad, ETHER_MIN_LEN);
380 bus_dmamap_destroy(sc->sc_bdt, sc->sc_txpad_dm);
381
382 /* Destroy all the descriptors */
383 for (i = 0; i < CPSW_NTXDESCS; i++)
384 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->tx_dm[i]);
385 for (i = 0; i < CPSW_NRXDESCS; i++)
386 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->rx_dm[i]);
387 kmem_free(sc->sc_rdp, sizeof(*sc->sc_rdp));
388
389 /* Unmap */
390 bus_space_unmap(sc->sc_bst, sc->sc_bsh, sc->sc_bss);
391
392
393 return 0;
394 }
395
396 static void
397 cpsw_attach(device_t parent, device_t self, void *aux)
398 {
399 struct fdt_attach_args * const faa = aux;
400 struct cpsw_softc * const sc = device_private(self);
401 prop_dictionary_t dict = device_properties(self);
402 struct ethercom * const ec = &sc->sc_ec;
403 struct ifnet * const ifp = &ec->ec_if;
404 const int phandle = faa->faa_phandle;
405 bus_addr_t addr;
406 bus_size_t size;
407 int error;
408 u_int i;
409
410 KERNHIST_INIT(cpswhist, 4096);
411
412 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
413 aprint_error(": couldn't get registers\n");
414 return;
415 }
416
417 sc->sc_dev = self;
418
419 aprint_normal(": TI Layer 2 3-Port Switch\n");
420 aprint_naive("\n");
421
422 callout_init(&sc->sc_tick_ch, 0);
423 callout_setfunc(&sc->sc_tick_ch, cpsw_tick, sc);
424
425 prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
426 if (eaprop == NULL) {
427 #if 0
428 /* grab mac_id0 from AM335x control module */
429 uint32_t reg_lo, reg_hi;
430
431 if (sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO, &reg_lo) == 0 &&
432 sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI, &reg_hi) == 0) {
433 sc->sc_enaddr[0] = (reg_hi >> 0) & 0xff;
434 sc->sc_enaddr[1] = (reg_hi >> 8) & 0xff;
435 sc->sc_enaddr[2] = (reg_hi >> 16) & 0xff;
436 sc->sc_enaddr[3] = (reg_hi >> 24) & 0xff;
437 sc->sc_enaddr[4] = (reg_lo >> 0) & 0xff;
438 sc->sc_enaddr[5] = (reg_lo >> 8) & 0xff;
439 } else
440 #endif
441 {
442 aprint_error_dev(sc->sc_dev,
443 "using fake station address\n");
444 /* 'N' happens to have the Local bit set */
445 #if 0
446 sc->sc_enaddr[0] = 'N';
447 sc->sc_enaddr[1] = 'e';
448 sc->sc_enaddr[2] = 't';
449 sc->sc_enaddr[3] = 'B';
450 sc->sc_enaddr[4] = 'S';
451 sc->sc_enaddr[5] = 'D';
452 #else
453 /* XXX Glor */
454 sc->sc_enaddr[0] = 0xd4;
455 sc->sc_enaddr[1] = 0x94;
456 sc->sc_enaddr[2] = 0xa1;
457 sc->sc_enaddr[3] = 0x97;
458 sc->sc_enaddr[4] = 0x03;
459 sc->sc_enaddr[5] = 0x94;
460 #endif
461 }
462 } else {
463 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
464 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
465 memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
466 ETHER_ADDR_LEN);
467 }
468
469 #if 0
470 sc->sc_rxthih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_RXTH,
471 IPL_VM, IST_LEVEL, cpsw_rxthintr, sc);
472 sc->sc_rxih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_RX,
473 IPL_VM, IST_LEVEL, cpsw_rxintr, sc);
474 sc->sc_txih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_TX,
475 IPL_VM, IST_LEVEL, cpsw_txintr, sc);
476 sc->sc_miscih = intr_establish(oa->obio_intrbase + CPSW_INTROFF_MISC,
477 IPL_VM, IST_LEVEL, cpsw_miscintr, sc);
478 #else
479 #define FDT_INTR_FLAGS 0
480 sc->sc_rxthih = fdtbus_intr_establish(phandle, CPSW_INTROFF_RXTH, IPL_VM, FDT_INTR_FLAGS, cpsw_rxthintr, sc);
481 sc->sc_rxih = fdtbus_intr_establish(phandle, CPSW_INTROFF_RX, IPL_VM, FDT_INTR_FLAGS, cpsw_rxintr, sc);
482 sc->sc_txih = fdtbus_intr_establish(phandle, CPSW_INTROFF_TX, IPL_VM, FDT_INTR_FLAGS, cpsw_txintr, sc);
483 sc->sc_miscih = fdtbus_intr_establish(phandle, CPSW_INTROFF_MISC, IPL_VM, FDT_INTR_FLAGS, cpsw_miscintr, sc);
484 #endif
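/*
 * The CPSW block has four interrupt outputs - rx threshold, rx, tx and
 * misc (host error, statistics, MDIO) - and the CPSW_INTROFF_* values are
 * assumed here to be the matching indices into the node's "interrupts"
 * property, so one handler is established per output.
 */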
485
486 sc->sc_bst = faa->faa_bst;
487 sc->sc_bss = size;
488 sc->sc_bdt = faa->faa_dmat;
489
490 error = bus_space_map(sc->sc_bst, addr, size, 0,
491 &sc->sc_bsh);
492 if (error) {
493 aprint_error_dev(sc->sc_dev,
494 "can't map registers: %d\n", error);
495 return;
496 }
497
498 sc->sc_txdescs_pa = addr + CPSW_CPPI_RAM_TXDESCS_BASE;
499 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
500 CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
501 &sc->sc_bsh_txdescs);
502 if (error) {
503 aprint_error_dev(sc->sc_dev,
504 "can't subregion tx ring SRAM: %d\n", error);
505 return;
506 }
507 aprint_debug_dev(sc->sc_dev, "txdescs at %p\n",
508 (void *)sc->sc_bsh_txdescs);
509
510 sc->sc_rxdescs_pa = addr + CPSW_CPPI_RAM_RXDESCS_BASE;
511 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
512 CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
513 &sc->sc_bsh_rxdescs);
514 if (error) {
515 aprint_error_dev(sc->sc_dev,
516 "can't subregion rx ring SRAM: %d\n", error);
517 return;
518 }
519 aprint_debug_dev(sc->sc_dev, "rxdescs at %p\n",
520 (void *)sc->sc_bsh_rxdescs);
521
522 sc->sc_rdp = kmem_alloc(sizeof(*sc->sc_rdp), KM_SLEEP);
523
524 for (i = 0; i < CPSW_NTXDESCS; i++) {
525 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
526 CPSW_TXFRAGS, MCLBYTES, 0, 0,
527 &sc->sc_rdp->tx_dm[i])) != 0) {
528 aprint_error_dev(sc->sc_dev,
529 "unable to create tx DMA map: %d\n", error);
530 }
531 sc->sc_rdp->tx_mb[i] = NULL;
532 }
533
534 for (i = 0; i < CPSW_NRXDESCS; i++) {
535 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
536 MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
537 aprint_error_dev(sc->sc_dev,
538 "unable to create rx DMA map: %d\n", error);
539 }
540 sc->sc_rdp->rx_mb[i] = NULL;
541 }
542
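/*
 * sc_txpad is a single zeroed ETHER_MIN_LEN buffer, DMA-mapped once and
 * left synced for writing.  Frames shorter than CPSW_PAD_LEN (60 bytes:
 * the minimum Ethernet frame less the CRC the MAC appends) get an extra
 * descriptor in cpsw_start() pointing into this shared buffer; a 42-byte
 * ARP request, for example, is followed by an 18-byte slice of padding.
 */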
543 sc->sc_txpad = kmem_zalloc(ETHER_MIN_LEN, KM_SLEEP);
544 bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
545 BUS_DMA_WAITOK, &sc->sc_txpad_dm);
546 bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
547 ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK|BUS_DMA_WRITE);
548 bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
549 BUS_DMASYNC_PREWRITE);
550
551 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
552 ether_sprintf(sc->sc_enaddr));
553
554 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
555 ifp->if_softc = sc;
556 ifp->if_capabilities = 0;
557 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
558 ifp->if_start = cpsw_start;
559 ifp->if_ioctl = cpsw_ioctl;
560 ifp->if_init = cpsw_init;
561 ifp->if_stop = cpsw_stop;
562 ifp->if_watchdog = cpsw_watchdog;
563 IFQ_SET_READY(&ifp->if_snd);
564
565 cpsw_stop(ifp, 0);
566
567 sc->sc_mii.mii_ifp = ifp;
568 sc->sc_mii.mii_readreg = cpsw_mii_readreg;
569 sc->sc_mii.mii_writereg = cpsw_mii_writereg;
570 sc->sc_mii.mii_statchg = cpsw_mii_statchg;
571
572 sc->sc_ec.ec_mii = &sc->sc_mii;
573 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
574 ether_mediastatus);
575
576 /* Initialize MDIO */
577 cpsw_write_4(sc, MDIOCONTROL,
578 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
579 /* Clear ALE */
580 cpsw_write_4(sc, CPSW_ALE_CONTROL, ALECTL_CLEAR_TABLE);
581
582 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
583 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
584 aprint_error_dev(self, "no PHY found!\n");
585 sc->sc_phy_has_1000t = false;
586 ifmedia_add(&sc->sc_mii.mii_media,
587 IFM_ETHER|IFM_MANUAL, 0, NULL);
588 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
589 } else {
590 sc->sc_phy_has_1000t = cpsw_phy_has_1000t(sc);
591 if (sc->sc_phy_has_1000t) {
592 #if 0
593 aprint_normal_dev(sc->sc_dev, "1000baseT PHY found. "
594 "Setting RGMII Mode\n");
595 /*
596 * Select the Interface RGMII Mode in the Control
597 * Module
598 */
599 sitara_cm_reg_write_4(CPSW_GMII_SEL,
600 GMIISEL_GMII2_SEL(RGMII_MODE) |
601 GMIISEL_GMII1_SEL(RGMII_MODE));
602 #endif
603 }
604
605 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
606 }
607
608 if_attach(ifp);
609 if_deferred_start_init(ifp, NULL);
610 ether_ifattach(ifp, sc->sc_enaddr);
611
612 /* The attach is successful. */
613 sc->sc_attached = true;
614
615 return;
616 }
617
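/*
 * Transmit path.  Each buffer descriptor is four 32-bit words; as used by
 * this driver (and assuming the layout defined in if_cpswreg.h), word 0 is
 * the next-descriptor pointer, word 1 the buffer address, word 2 the
 * buffer offset and length, and word 3 the flags (SOP, EOP, OWNER, EOQ,
 * ...) together with the total packet length in its low bits.
 * cpsw_start() builds one descriptor per DMA segment, plus an optional
 * trailing pad descriptor, then appends the new chain to the hardware
 * queue and restarts the channel if it had gone idle.
 */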
618 static void
619 cpsw_start(struct ifnet *ifp)
620 {
621 struct cpsw_softc * const sc = ifp->if_softc;
622 struct cpsw_ring_data * const rdp = sc->sc_rdp;
623 struct cpsw_cpdma_bd bd;
624 uint32_t * const dw = bd.word;
625 struct mbuf *m;
626 bus_dmamap_t dm;
627 u_int eopi __diagused = ~0;
628 u_int seg;
629 u_int txfree;
630 int txstart = -1;
631 int error;
632 bool pad;
633 u_int mlen;
634
635 KERNHIST_FUNC(__func__);
636 KERNHIST_CALLED_5(cpswhist, sc, 0, 0, 0);
637
638 if (__predict_false((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
639 IFF_RUNNING)) {
640 return;
641 }
642
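/*
 * Compute the number of free tx descriptors.  The ring is kept one slot
 * short of full so that sc_txnext == sc_txhead is unambiguous: it always
 * means "empty".  With 256 descriptors and head == next, for example,
 * 255 slots are reported free.
 */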
643 if (sc->sc_txnext >= sc->sc_txhead)
644 txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
645 else
646 txfree = sc->sc_txhead - sc->sc_txnext - 1;
647
648 KERNHIST_LOG(cpswhist, "start txf %x txh %x txn %x txr %x\n",
649 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
650
651 while (txfree > 0) {
652 IFQ_POLL(&ifp->if_snd, m);
653 if (m == NULL)
654 break;
655
656 dm = rdp->tx_dm[sc->sc_txnext];
657
658 error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
659 if (error == EFBIG) {
660 device_printf(sc->sc_dev, "won't fit\n");
661 IFQ_DEQUEUE(&ifp->if_snd, m);
662 m_freem(m);
663 ifp->if_oerrors++;
664 continue;
665 } else if (error != 0) {
666 device_printf(sc->sc_dev, "error\n");
667 break;
668 }
669
670 if (dm->dm_nsegs + 1 >= txfree) {
671 ifp->if_flags |= IFF_OACTIVE;
672 bus_dmamap_unload(sc->sc_bdt, dm);
673 break;
674 }
675
676 mlen = m_length(m);
677 pad = mlen < CPSW_PAD_LEN;
678
679 KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
680 rdp->tx_mb[sc->sc_txnext] = m;
681 IFQ_DEQUEUE(&ifp->if_snd, m);
682
683 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
684 BUS_DMASYNC_PREWRITE);
685
686 if (txstart == -1)
687 txstart = sc->sc_txnext;
688 eopi = sc->sc_txnext;
689 for (seg = 0; seg < dm->dm_nsegs; seg++) {
690 dw[0] = cpsw_txdesc_paddr(sc,
691 TXDESC_NEXT(sc->sc_txnext));
692 dw[1] = dm->dm_segs[seg].ds_addr;
693 dw[2] = dm->dm_segs[seg].ds_len;
694 dw[3] = 0;
695
696 if (seg == 0)
697 dw[3] |= CPDMA_BD_SOP | CPDMA_BD_OWNER |
698 MAX(mlen, CPSW_PAD_LEN);
699
700 if ((seg == dm->dm_nsegs - 1) && !pad)
701 dw[3] |= CPDMA_BD_EOP;
702
703 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
704 txfree--;
705 eopi = sc->sc_txnext;
706 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
707 }
708 if (pad) {
709 dw[0] = cpsw_txdesc_paddr(sc,
710 TXDESC_NEXT(sc->sc_txnext));
711 dw[1] = sc->sc_txpad_pa;
712 dw[2] = CPSW_PAD_LEN - mlen;
713 dw[3] = CPDMA_BD_EOP;
714
715 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
716 txfree--;
717 eopi = sc->sc_txnext;
718 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
719 }
720
721 bpf_mtap(ifp, m, BPF_D_OUT);
722 }
723
724 if (txstart >= 0) {
725 ifp->if_timer = 5;
726 /* terminate the new chain */
727 KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
728 cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);
729 KERNHIST_LOG(cpswhist, "CP %x HDP %x s %x e %x\n",
730 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
731 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), txstart, eopi);
732 /* link the new chain on */
733 cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
734 cpsw_txdesc_paddr(sc, txstart));
735 if (sc->sc_txeoq) {
736 /* kick the dma engine */
737 sc->sc_txeoq = false;
738 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
739 cpsw_txdesc_paddr(sc, txstart));
740 }
741 }
742 KERNHIST_LOG(cpswhist, "end txf %x txh %x txn %x txr %x\n",
743 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
744 }
745
746 static int
747 cpsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
748 {
749 const int s = splnet();
750 int error = 0;
751
752 switch (cmd) {
753 default:
754 error = ether_ioctl(ifp, cmd, data);
755 if (error == ENETRESET) {
756 error = 0;
757 }
758 break;
759 }
760
761 splx(s);
762
763 return error;
764 }
765
766 static void
767 cpsw_watchdog(struct ifnet *ifp)
768 {
769 struct cpsw_softc *sc = ifp->if_softc;
770
771 device_printf(sc->sc_dev, "device timeout\n");
772
773 ifp->if_oerrors++;
774 cpsw_init(ifp);
775 cpsw_start(ifp);
776 }
777
778 static int
779 cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
780 {
781 u_int tries;
782
783 for (tries = 0; tries < 1000; tries++) {
784 if ((cpsw_read_4(sc, reg) & __BIT(31)) == 0)
785 return 0;
786 delay(1);
787 }
788 return ETIMEDOUT;
789 }
790
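/*
 * MDIO access goes through MDIOUSERACCESS0.  As used below, bit 31 starts
 * a transaction and stays set while it is in progress (polled clear by
 * cpsw_mii_wait()), bit 30 selects a write, bit 29 reports that the PHY
 * acknowledged the transaction, bits 25:21 hold the register number,
 * bits 20:16 the PHY address, and the low 16 bits carry the data.  For
 * example, reading register 1 (BMSR) from PHY 0 starts with a write of
 * 0x80200000.
 */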
791 static int
792 cpsw_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
793 {
794 struct cpsw_softc * const sc = device_private(dev);
795 uint32_t v;
796
797 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
798 return -1;
799
800 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) |
801 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
802
803 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
804 return -1;
805
806 v = cpsw_read_4(sc, MDIOUSERACCESS0);
807 if (v & __BIT(29)) {
808 *val = v & 0xffff;
809 return 0;
810 }
811
812 return -1;
813 }
814
815 static int
816 cpsw_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
817 {
818 struct cpsw_softc * const sc = device_private(dev);
819 uint32_t v;
820
821 KASSERT((val & 0xffff0000UL) == 0);
822
823 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
824 goto out;
825
826 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) | (1 << 30) |
827 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);
828
829 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
830 goto out;
831
832 v = cpsw_read_4(sc, MDIOUSERACCESS0);
833 if ((v & __BIT(29)) == 0) {
834 out:
835 device_printf(sc->sc_dev, "%s error\n", __func__);
836 return -1;
837 }
838
839 return 0;
840 }
841
842 static void
843 cpsw_mii_statchg(struct ifnet *ifp)
844 {
845 return;
846 }
847
848 static int
849 cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
850 {
851 struct cpsw_ring_data * const rdp = sc->sc_rdp;
852 const u_int h = RXDESC_PREV(i);
853 struct cpsw_cpdma_bd bd;
854 uint32_t * const dw = bd.word;
855 struct mbuf *m;
856 int error = ENOBUFS;
857
858 MGETHDR(m, M_DONTWAIT, MT_DATA);
859 if (m == NULL) {
860 goto reuse;
861 }
862
863 MCLGET(m, M_DONTWAIT);
864 if ((m->m_flags & M_EXT) == 0) {
865 m_freem(m);
866 goto reuse;
867 }
868
869 /* We have a new buffer, prepare it for the ring. */
870
871 if (rdp->rx_mb[i] != NULL)
872 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
873
874 m->m_len = m->m_pkthdr.len = MCLBYTES;
875
876 rdp->rx_mb[i] = m;
877
878 error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
879 BUS_DMA_READ|BUS_DMA_NOWAIT);
880 if (error) {
881 device_printf(sc->sc_dev, "can't load rx DMA map %d: %d\n",
882 i, error);
883 }
884
885 bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
886 0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
887
888 error = 0;
889
890 reuse:
891 /* (re-)setup the descriptor */
892 dw[0] = 0;
893 dw[1] = rdp->rx_dm[i]->dm_segs[0].ds_addr;
894 dw[2] = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
895 dw[3] = CPDMA_BD_OWNER;
896
897 cpsw_set_rxdesc(sc, i, &bd);
898 /* and link onto ring */
899 cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));
900
901 return error;
902 }
903
904 static int
905 cpsw_init(struct ifnet *ifp)
906 {
907 struct cpsw_softc * const sc = ifp->if_softc;
908 struct mii_data * const mii = &sc->sc_mii;
909 int i;
910
911 cpsw_stop(ifp, 0);
912
913 sc->sc_txnext = 0;
914 sc->sc_txhead = 0;
915
916 /* Reset wrapper */
917 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
918 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
919 ;
920
921 /* Reset SS */
922 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
923 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
924 ;
925
926 /* Clear table and enable ALE */
927 cpsw_write_4(sc, CPSW_ALE_CONTROL,
928 ALECTL_ENABLE_ALE | ALECTL_CLEAR_TABLE);
929
930 /* Reset and init Sliver port 1 and 2 */
931 for (i = 0; i < CPSW_ETH_PORTS; i++) {
932 uint32_t macctl;
933
934 /* Reset */
935 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
936 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
937 ;
938 /* Set Slave Mapping */
939 cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
940 cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
941 cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
942 /* Set MAC Address */
943 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i+1),
944 sc->sc_enaddr[0] | (sc->sc_enaddr[1] << 8) |
945 (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[3] << 24));
946 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i+1),
947 sc->sc_enaddr[4] | (sc->sc_enaddr[5] << 8));
948
949 /* Set MACCONTROL for ports 0,1 */
950 macctl = SLMACCTL_FULLDUPLEX | SLMACCTL_GMII_EN |
951 SLMACCTL_IFCTL_A;
952 if (sc->sc_phy_has_1000t)
953 macctl |= SLMACCTL_GIG;
954 cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), macctl);
955
956 /* Set ALE port to forwarding(3) */
957 cpsw_write_4(sc, CPSW_ALE_PORTCTL(i+1), 3);
958 }
959
960 /* Set Host Port Mapping */
961 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
962 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
963
964 /* Set ALE port to forwarding(3) */
965 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);
966
967 /* Initialize addrs */
968 cpsw_ale_update_addresses(sc, 1);
969
970 cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
971 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
972
973 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
974 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
975 ;
976
977 for (i = 0; i < 8; i++) {
978 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
979 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
980 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
981 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
982 }
983
984 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
985 CPSW_CPPI_RAM_TXDESCS_SIZE/4);
986
987 sc->sc_txhead = 0;
988 sc->sc_txnext = 0;
989
990 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
991
992 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
993 CPSW_CPPI_RAM_RXDESCS_SIZE/4);
994 /* Initialize RX Buffer Descriptors */
995 cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
996 for (i = 0; i < CPSW_NRXDESCS; i++) {
997 cpsw_new_rxbuf(sc, i);
998 }
999 sc->sc_rxhead = 0;
1000
1001 /* turn off flow control */
1002 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
1003
1004 /* align layer 3 header to 32-bit */
1005 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);
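/*
 * ETHER_ALIGN is 2: with the 14-byte Ethernet header, a 2-byte receive
 * buffer offset leaves the IP header 4-byte aligned.  cpsw_rxintr() reads
 * the offset back out of each completed descriptor and applies it with
 * m->m_data += off.
 */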
1006
1007 /* Clear all interrupt Masks */
1008 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
1009 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
1010
1011 /* Enable TX & RX DMA */
1012 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
1013 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
1014
1015 /* Enable TX and RX interrupt receive for core 0 */
1016 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 1);
1017 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 1);
1018 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);
1019
1020 /* Enable host Error Interrupt */
1021 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 2);
1022
1023 /* Enable interrupts for TX and RX Channel 0 */
1024 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
1025 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);
1026
1027 /* Ack stalled irqs */
1028 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1029 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1030 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1031 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1032
1033 /* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
1034 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
1035 cpsw_write_4(sc, MDIOCONTROL,
1036 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
1037
1038 mii_mediachg(mii);
1039
1040 /* Write channel 0 RX HDP */
1041 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
1042 sc->sc_rxrun = true;
1043 sc->sc_rxeoq = false;
1044
1045 sc->sc_txrun = true;
1046 sc->sc_txeoq = true;
1047 callout_schedule(&sc->sc_tick_ch, hz);
1048 ifp->if_flags |= IFF_RUNNING;
1049 ifp->if_flags &= ~IFF_OACTIVE;
1050
1051 return 0;
1052 }
1053
1054 static void
1055 cpsw_stop(struct ifnet *ifp, int disable)
1056 {
1057 struct cpsw_softc * const sc = ifp->if_softc;
1058 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1059 u_int i;
1060
1061 aprint_debug_dev(sc->sc_dev, "%s: ifp %p disable %d\n", __func__,
1062 ifp, disable);
1063
1064 if ((ifp->if_flags & IFF_RUNNING) == 0)
1065 return;
1066
1067 callout_stop(&sc->sc_tick_ch);
1068 mii_down(&sc->sc_mii);
1069
1070 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
1071 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
1072 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0x0);
1073 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0x0);
1074 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x0);
1075
1076 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
1077 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
1078 i = 0;
1079 while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
1080 delay(10);
1081 if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
1082 sc->sc_txrun = false;
1083 if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
1084 sc->sc_rxrun = false;
1085 i++;
1086 }
1087 //printf("%s teardown complete in %u\n", __func__, i);
1088
1089 /* Reset wrapper */
1090 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
1091 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
1092 ;
1093
1094 /* Reset SS */
1095 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
1096 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
1097 ;
1098
1099 for (i = 0; i < CPSW_ETH_PORTS; i++) {
1100 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
1101 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
1102 ;
1103 }
1104
1105 /* Reset CPDMA */
1106 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
1107 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
1108 ;
1109
1110 /* Release any queued transmit buffers. */
1111 for (i = 0; i < CPSW_NTXDESCS; i++) {
1112 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
1113 m_freem(rdp->tx_mb[i]);
1114 rdp->tx_mb[i] = NULL;
1115 }
1116
1117 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
1118 ifp->if_timer = 0;
1119
1120 if (!disable)
1121 return;
1122
1123 for (i = 0; i < CPSW_NRXDESCS; i++) {
1124 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
1125 m_freem(rdp->rx_mb[i]);
1126 rdp->rx_mb[i] = NULL;
1127 }
1128 }
1129
1130 static void
1131 cpsw_tick(void *arg)
1132 {
1133 struct cpsw_softc * const sc = arg;
1134 struct mii_data * const mii = &sc->sc_mii;
1135 const int s = splnet();
1136
1137 mii_tick(mii);
1138
1139 splx(s);
1140
1141 callout_schedule(&sc->sc_tick_ch, hz);
1142 }
1143
1144 static int
1145 cpsw_rxthintr(void *arg)
1146 {
1147 struct cpsw_softc * const sc = arg;
1148
1149 /* this won't deassert the interrupt though */
1150 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1151
1152 return 1;
1153 }
1154
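/*
 * Receive completion.  For each descriptor the hardware has handed back
 * (OWNER bit clear) the driver swaps in a fresh mbuf via cpsw_new_rxbuf(),
 * passes the old one up the stack, and acknowledges progress by writing
 * that descriptor's address to the channel 0 completion pointer.  An
 * end-of-queue indication is only recorded in sc_rxeoq; the interrupt
 * itself is acknowledged through the CPDMA EOI vector register.
 */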
1155 static int
1156 cpsw_rxintr(void *arg)
1157 {
1158 struct cpsw_softc * const sc = arg;
1159 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1160 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1161 struct cpsw_cpdma_bd bd;
1162 const uint32_t * const dw = bd.word;
1163 bus_dmamap_t dm;
1164 struct mbuf *m;
1165 u_int i;
1166 u_int len, off;
1167
1168 KERNHIST_FUNC(__func__);
1169 KERNHIST_CALLED_5(cpswhist, sc, 0, 0, 0);
1170
1171 for (;;) {
1172 KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);
1173
1174 i = sc->sc_rxhead;
1175 KERNHIST_LOG(cpswhist, "rxhead %x CP %x\n", i,
1176 cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0)), 0, 0);
1177 dm = rdp->rx_dm[i];
1178 m = rdp->rx_mb[i];
1179
1180 KASSERT(dm != NULL);
1181 KASSERT(m != NULL);
1182
1183 cpsw_get_rxdesc(sc, i, &bd);
1184
1185 if (ISSET(dw[3], CPDMA_BD_OWNER))
1186 break;
1187
1188 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1189 sc->sc_rxrun = false;
1190 return 1;
1191 }
1192
1193 if ((dw[3] & (CPDMA_BD_SOP|CPDMA_BD_EOP)) !=
1194 (CPDMA_BD_SOP|CPDMA_BD_EOP)) {
1195 //Debugger();
1196 }
1197
1198 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
1199 BUS_DMASYNC_POSTREAD);
1200
1201 if (cpsw_new_rxbuf(sc, i) != 0) {
1202 /* drop current packet, reuse buffer for new */
1203 ifp->if_ierrors++;
1204 goto next;
1205 }
1206
1207 off = __SHIFTOUT(dw[2], (uint32_t)__BITS(26, 16));
1208 len = __SHIFTOUT(dw[3], (uint32_t)__BITS(10, 0));
1209
1210 if (ISSET(dw[3], CPDMA_BD_PASSCRC))
1211 len -= ETHER_CRC_LEN;
1212
1213 m_set_rcvif(m, ifp);
1214 m->m_pkthdr.len = m->m_len = len;
1215 m->m_data += off;
1216
1217 if_percpuq_enqueue(ifp->if_percpuq, m);
1218
1219 next:
1220 sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
1221 if (ISSET(dw[3], CPDMA_BD_EOQ)) {
1222 sc->sc_rxeoq = true;
1223 break;
1224 } else {
1225 sc->sc_rxeoq = false;
1226 }
1227 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(0),
1228 cpsw_rxdesc_paddr(sc, i));
1229 }
1230
1231 if (sc->sc_rxeoq) {
1232 device_printf(sc->sc_dev, "rxeoq\n");
1233 //Debugger();
1234 }
1235
1236 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1237
1238 return 1;
1239 }
1240
1241 static int
1242 cpsw_txintr(void *arg)
1243 {
1244 struct cpsw_softc * const sc = arg;
1245 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1246 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1247 struct cpsw_cpdma_bd bd;
1248 const uint32_t * const dw = bd.word;
1249 bool handled = false;
1250 uint32_t tx0_cp;
1251 u_int cpi;
1252
1253 KERNHIST_FUNC(__func__);
1254 KERNHIST_CALLED_5(cpswhist, sc, 0, 0, 0);
1255
1256 KASSERT(sc->sc_txrun);
1257
1258 KERNHIST_LOG(cpswhist, "before txnext %x txhead %x txrun %x\n",
1259 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1260
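/*
 * A completion pointer of 0xfffffffc is the CPDMA teardown marker: it
 * shows up after cpsw_stop() has requested a tx teardown and is
 * acknowledged by writing the same value back, leaving the channel idle.
 */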
1261 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1262
1263 if (tx0_cp == 0xfffffffc) {
1264 /* Teardown, ack it */
1265 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
1266 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0), 0);
1267 sc->sc_txrun = false;
1268 return 0;
1269 }
1270
1271 for (;;) {
1272 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1273 cpi = (tx0_cp - sc->sc_txdescs_pa) / sizeof(struct cpsw_cpdma_bd);
1274 KASSERT(sc->sc_txhead < CPSW_NTXDESCS);
1275
1276 KERNHIST_LOG(cpswhist, "txnext %x txhead %x txrun %x cpi %x\n",
1277 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, cpi);
1278
1279 cpsw_get_txdesc(sc, sc->sc_txhead, &bd);
1280
1281 if (dw[2] == 0) {
1282 //Debugger();
1283 }
1284
1285 if (ISSET(dw[3], CPDMA_BD_SOP) == 0)
1286 goto next;
1287
1288 if (ISSET(dw[3], CPDMA_BD_OWNER)) {
1289 printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
1290 sc->sc_txnext);
1291 break;
1292 }
1293
1294 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1295 sc->sc_txrun = false;
1296 return 1;
1297 }
1298
1299 bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
1300 0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
1301 BUS_DMASYNC_POSTWRITE);
1302 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);
1303
1304 m_freem(rdp->tx_mb[sc->sc_txhead]);
1305 rdp->tx_mb[sc->sc_txhead] = NULL;
1306
1307 ifp->if_opackets++;
1308
1309 handled = true;
1310
1311 ifp->if_flags &= ~IFF_OACTIVE;
1312
1313 next:
1314 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1315 sc->sc_txeoq = true;
1316 }
1317 if (sc->sc_txhead == cpi) {
1318 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0),
1319 cpsw_txdesc_paddr(sc, cpi));
1320 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1321 break;
1322 }
1323 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1324 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1325 sc->sc_txeoq = true;
1326 break;
1327 }
1328 }
1329
1330 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1331
1332 if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
1333 if (cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)) == 0) {
1334 sc->sc_txeoq = false;
1335 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
1336 cpsw_txdesc_paddr(sc, sc->sc_txhead));
1337 }
1338 }
1339
1340 KERNHIST_LOG(cpswhist, "after txnext %x txhead %x txrun %x\n",
1341 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1342 KERNHIST_LOG(cpswhist, "CP %x HDP %x\n",
1343 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
1344 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), 0, 0);
1345
1346 if (handled && sc->sc_txnext == sc->sc_txhead)
1347 ifp->if_timer = 0;
1348
1349 if (handled)
1350 if_schedule_deferred_start(ifp);
1351
1352 return handled;
1353 }
1354
1355 static int
1356 cpsw_miscintr(void *arg)
1357 {
1358 struct cpsw_softc * const sc = arg;
1359 uint32_t miscstat;
1360 uint32_t dmastat;
1361 uint32_t stat;
1362
1363 miscstat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
1364 device_printf(sc->sc_dev, "%s %x FIRE\n", __func__, miscstat);
1365
1366 #define CPSW_MISC_HOST_PEND __BIT32(2)
1367 #define CPSW_MISC_STAT_PEND __BIT32(3)
1368
1369 if (ISSET(miscstat, CPSW_MISC_HOST_PEND)) {
1370 /* Host Error */
1371 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1372 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1373
1374 printf("rxhead %02x\n", sc->sc_rxhead);
1375
1376 stat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
1377 printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
1378 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0));
1379 printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
1380 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1381 printf("CPSW_CPDMA_TX0_CP %x\n", stat);
1382 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(0));
1383 printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
1384 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0));
1385 printf("CPSW_CPDMA_RX0_CP %x\n", stat);
1386
1387 //Debugger();
1388
1389 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
1390 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1391 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1392 }
1393
1394 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1395
1396 return 1;
1397 }
1398
1399 /*
1400 *
1401 * ALE support routines.
1402 *
1403 */
1404
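/*
 * The address lookup engine (ALE) holds up to CPSW_MAX_ALE_ENTRIES table
 * entries, each read and written as three 32-bit words through
 * CPSW_ALE_TBLW0..2, with CPSW_ALE_TBLCTL supplying the index and, on
 * writes, the write strobe in bit 31.  In the address entries managed
 * here the MAC occupies bits 47:0, the entry type bits 61:60, the
 * multicast forward state bits 63:62, and the port mask or port number
 * the bits starting at 66.  A multicast entry for all ports would be
 * built roughly like this (hypothetical sketch of the helpers below,
 * mirroring cpsw_ale_mc_entry_set()):
 *
 *	uint32_t e[3];
 *	cpsw_ale_entry_init(e);
 *	cpsw_ale_entry_set_mac(e, addr);
 *	cpsw_ale_entry_set(e, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
 *	cpsw_ale_entry_set(e, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
 *	cpsw_ale_entry_set(e, ALE_PORT_MASK, ALE_PORT_MASK_ALL);
 *	cpsw_ale_write_entry(sc, idx, e);
 *
 * where "addr" and "idx" stand in for the multicast address and the table
 * slot chosen by the caller.
 */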
1405 static void
1406 cpsw_ale_entry_init(uint32_t *ale_entry)
1407 {
1408 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1409 }
1410
1411 static void
1412 cpsw_ale_entry_set_mac(uint32_t *ale_entry, const uint8_t *mac)
1413 {
1414 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1415 ale_entry[1] = mac[0] << 8 | mac[1];
1416 }
1417
1418 static void
1419 cpsw_ale_entry_set_bcast_mac(uint32_t *ale_entry)
1420 {
1421 ale_entry[0] = 0xffffffff;
1422 ale_entry[1] = 0x0000ffff;
1423 }
1424
1425 static void
1426 cpsw_ale_entry_set(uint32_t *ale_entry, ale_entry_field_t field, uint32_t val)
1427 {
1428 /* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1429 switch (field) {
1430 case ALE_ENTRY_TYPE:
1431 /* [61:60] */
1432 ale_entry[1] |= (val & 0x3) << 28;
1433 break;
1434 case ALE_MCAST_FWD_STATE:
1435 /* [63:62] */
1436 ale_entry[1] |= (val & 0x3) << 30;
1437 break;
1438 case ALE_PORT_MASK:
1439 /* [68:66] */
1440 ale_entry[2] |= (val & 0x7) << 2;
1441 break;
1442 case ALE_PORT_NUMBER:
1443 /* [67:66] */
1444 ale_entry[2] |= (val & 0x3) << 2;
1445 break;
1446 default:
1447 panic("Invalid ALE entry field: %d\n", field);
1448 }
1449
1450 return;
1451 }
1452
1453 static bool
1454 cpsw_ale_entry_mac_match(const uint32_t *ale_entry, const uint8_t *mac)
1455 {
1456 return (((ale_entry[1] >> 8) & 0xff) == mac[0]) &&
1457 (((ale_entry[1] >> 0) & 0xff) == mac[1]) &&
1458 (((ale_entry[0] >>24) & 0xff) == mac[2]) &&
1459 (((ale_entry[0] >>16) & 0xff) == mac[3]) &&
1460 (((ale_entry[0] >> 8) & 0xff) == mac[4]) &&
1461 (((ale_entry[0] >> 0) & 0xff) == mac[5]);
1462 }
1463
1464 static void
1465 cpsw_ale_set_outgoing_mac(struct cpsw_softc *sc, int port, const uint8_t *mac)
1466 {
1467 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(port),
1468 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
1469 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(port),
1470 mac[5] << 8 | mac[4]);
1471 }
1472
1473 static void
1474 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1475 {
1476 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
1477 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
1478 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
1479 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
1480 }
1481
1482 static void
1483 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx,
1484 const uint32_t *ale_entry)
1485 {
1486 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
1487 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
1488 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
1489 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
1490 }
1491
1492 static int
1493 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
1494 {
1495 int i;
1496 uint32_t ale_entry[3];
1497
1498 /* First two entries are link address and broadcast. */
1499 for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
1500 cpsw_ale_read_entry(sc, i, ale_entry);
1501 if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
1502 ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */
1503 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1504 cpsw_ale_write_entry(sc, i, ale_entry);
1505 }
1506 }
1507 return CPSW_MAX_ALE_ENTRIES;
1508 }
1509
1510 static int
1511 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmask, uint8_t *mac)
1512 {
1513 int free_index = -1, matching_index = -1, i;
1514 uint32_t ale_entry[3];
1515
1516 /* Find a matching entry or a free entry. */
1517 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1518 cpsw_ale_read_entry(sc, i, ale_entry);
1519
1520 /* Entry Type[61:60] is 0 for free entry */
1521 if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
1522 free_index = i;
1523 }
1524
1525 if (cpsw_ale_entry_mac_match(ale_entry, mac)) {
1526 matching_index = i;
1527 break;
1528 }
1529 }
1530
1531 if (matching_index < 0) {
1532 if (free_index < 0)
1533 return ENOMEM;
1534 i = free_index;
1535 }
1536
1537 cpsw_ale_entry_init(ale_entry);
1538
1539 cpsw_ale_entry_set_mac(ale_entry, mac);
1540 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1541 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1542 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, portmask);
1543
1544 cpsw_ale_write_entry(sc, i, ale_entry);
1545
1546 return 0;
1547 }
1548
1549 static int
1550 cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
1551 {
1552 uint8_t *mac = sc->sc_enaddr;
1553 uint32_t ale_entry[3];
1554 int i;
1555 struct ethercom * const ec = &sc->sc_ec;
1556 struct ether_multi *ifma;
1557
1558 cpsw_ale_entry_init(ale_entry);
1559 /* Route incoming packets for our MAC address to Port 0 (host). */
1560 /* For simplicity, keep this entry at table index 0 in the ALE. */
1561 cpsw_ale_entry_set_mac(ale_entry, mac);
1562 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1563 cpsw_ale_entry_set(ale_entry, ALE_PORT_NUMBER, 0);
1564 cpsw_ale_write_entry(sc, 0, ale_entry);
1565
1566 /* Set outgoing MAC Address for Ports 1 and 2. */
1567 for (i = CPSW_CPPI_PORTS; i < (CPSW_ETH_PORTS + CPSW_CPPI_PORTS); ++i)
1568 cpsw_ale_set_outgoing_mac(sc, i, mac);
1569
1570 /* Keep the broadcast address at table entry 1. */
1571 cpsw_ale_entry_init(ale_entry);
1572 cpsw_ale_entry_set_bcast_mac(ale_entry);
1573 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1574 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1575 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, ALE_PORT_MASK_ALL);
1576 cpsw_ale_write_entry(sc, 1, ale_entry);
1577
1578 /* SIOCDELMULTI doesn't specify the particular address
1579 being removed, so we have to remove all and rebuild. */
1580 if (purge)
1581 cpsw_ale_remove_all_mc_entries(sc);
1582
1583 /* Set other multicast addrs desired. */
1584 LIST_FOREACH(ifma, &ec->ec_multiaddrs, enm_list) {
1585 cpsw_ale_mc_entry_set(sc, ALE_PORT_MASK_ALL, ifma->enm_addrlo);
1586 }
1587
1588 return 0;
1589 }
1590