/*	$NetBSD: pq3etsec.c,v 1.45 2019/05/28 07:41:48 msaitoh Exp $	*/
2 /*-
3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
8 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
9 *
10 * This material is based upon work supported by the Defense Advanced Research
11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
12 * Contract No. N66001-09-C-2073.
13 * Approved for Public Release, Distribution Unlimited
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "opt_inet.h"
38 #include "opt_mpc85xx.h"
39 #include "opt_multiprocessor.h"
40 #include "opt_net_mpsafe.h"
41
42 #include <sys/cdefs.h>
43
44 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.45 2019/05/28 07:41:48 msaitoh Exp $");
45
46 #include <sys/param.h>
47 #include <sys/cpu.h>
48 #include <sys/device.h>
49 #include <sys/mbuf.h>
50 #include <sys/ioctl.h>
51 #include <sys/intr.h>
52 #include <sys/bus.h>
53 #include <sys/kernel.h>
54 #include <sys/kmem.h>
55 #include <sys/proc.h>
56 #include <sys/atomic.h>
57 #include <sys/callout.h>
58 #include <sys/sysctl.h>
59
60 #include <net/if.h>
61 #include <net/if_dl.h>
62 #include <net/if_ether.h>
63 #include <net/if_media.h>
64 #include <net/bpf.h>
65
66 #include <dev/mii/miivar.h>
67
68 #ifdef INET
69 #include <netinet/in.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/ip.h>
72 #include <netinet/in_offload.h>
73 #endif /* INET */
74 #ifdef INET6
75 #include <netinet6/in6.h>
76 #include <netinet/ip6.h>
77 #endif
78 #include <netinet6/in6_offload.h>
79
80 #include <powerpc/spr.h>
81 #include <powerpc/booke/spr.h>
82 #include <powerpc/booke/cpuvar.h>
83 #include <powerpc/booke/e500var.h>
84 #include <powerpc/booke/e500reg.h>
85 #include <powerpc/booke/etsecreg.h>
86
87 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */
88
89 #define ETSEC_MAXTXMBUFS 30
90 #define ETSEC_NTXSEGS 30
91 #define ETSEC_MAXRXMBUFS 511
92 #define ETSEC_MINRXMBUFS 32
93 #define ETSEC_NRXSEGS 1
94
95 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx
96 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx \
97 | IFCAP_CSUM_UDPv4_Rx \
98 | IFCAP_CSUM_TCPv6_Rx \
99 | IFCAP_CSUM_UDPv6_Rx)
100
101 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx
102 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx \
103 | IFCAP_CSUM_UDPv4_Tx \
104 | IFCAP_CSUM_TCPv6_Tx \
105 | IFCAP_CSUM_UDPv6_Tx)
106
107 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN | IFCAP_RCTRL_TUCSEN \
108 | IFCAP_TCTRL_IPCSEN | IFCAP_TCTRL_TUCSEN)
109
110 #define M_CSUM_IP (M_CSUM_CIP | M_CSUM_CTU)
111 #define M_CSUM_IP6 (M_CSUM_TCPv6 | M_CSUM_UDPv6)
112 #define M_CSUM_TUP (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)
113 #define M_CSUM_UDP (M_CSUM_UDPv4 | M_CSUM_UDPv6)
114 #define M_CSUM_IP4 (M_CSUM_IPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)
115 #define M_CSUM_CIP (M_CSUM_IPv4)
116 #define M_CSUM_CTU (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)
117
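/*
 * Software state for a transmit ring: a contiguous array of tx buffer
 * descriptors shared with the eTSEC DMA engine (txq_first..txq_last),
 * producer/consumer pointers into that ring, the queue of mbufs whose
 * descriptors are still outstanding, and the free/threshold/lastintr
 * counters used to decide when to request a tx-completion interrupt.
 */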
118 struct pq3etsec_txqueue {
119 bus_dmamap_t txq_descmap;
120 volatile struct txbd *txq_consumer;
121 volatile struct txbd *txq_producer;
122 volatile struct txbd *txq_first;
123 volatile struct txbd *txq_last;
124 struct ifqueue txq_mbufs;
125 struct mbuf *txq_next;
126 #ifdef ETSEC_DEBUG
127 struct mbuf *txq_lmbufs[512];
128 #endif
129 uint32_t txq_qmask;
130 uint32_t txq_free;
131 uint32_t txq_threshold;
132 uint32_t txq_lastintr;
133 bus_size_t txq_reg_tbase;
134 bus_dma_segment_t txq_descmap_seg;
135 };
136
137 struct pq3etsec_rxqueue {
138 bus_dmamap_t rxq_descmap;
139 volatile struct rxbd *rxq_consumer;
140 volatile struct rxbd *rxq_producer;
141 volatile struct rxbd *rxq_first;
142 volatile struct rxbd *rxq_last;
143 struct mbuf *rxq_mhead;
144 struct mbuf **rxq_mtail;
145 struct mbuf *rxq_mconsumer;
146 #ifdef ETSEC_DEBUG
147 struct mbuf *rxq_mbufs[512];
148 #endif
149 uint32_t rxq_qmask;
150 uint32_t rxq_inuse;
151 uint32_t rxq_threshold;
152 bus_size_t rxq_reg_rbase;
153 bus_size_t rxq_reg_rbptr;
154 bus_dma_segment_t rxq_descmap_seg;
155 };
156
157 struct pq3etsec_mapcache {
158 u_int dmc_nmaps;
159 u_int dmc_maxseg;
160 u_int dmc_maxmaps;
161 u_int dmc_maxmapsize;
162 bus_dmamap_t dmc_maps[0];
163 };
164
165 struct pq3etsec_softc {
166 device_t sc_dev;
167 device_t sc_mdio_dev;
168 struct ethercom sc_ec;
169 #define sc_if sc_ec.ec_if
170 struct mii_data sc_mii;
171 bus_space_tag_t sc_bst;
172 bus_space_handle_t sc_bsh;
173 bus_space_handle_t sc_mdio_bsh;
174 bus_dma_tag_t sc_dmat;
175 int sc_phy_addr;
176 prop_dictionary_t sc_intrmap;
177 uint32_t sc_intrmask;
178
179 uint32_t sc_soft_flags;
180 #define SOFT_RESET 0x0001
181 #define SOFT_RXINTR 0x0010
182 #define SOFT_RXBSY 0x0020
183 #define SOFT_TXINTR 0x0100
184 #define SOFT_TXERROR 0x0200
185
186 struct pq3etsec_txqueue sc_txq;
187 struct pq3etsec_rxqueue sc_rxq;
188 uint32_t sc_txerrors;
189 uint32_t sc_rxerrors;
190
191 size_t sc_rx_adjlen;
192
193 /*
194 * Copies of various ETSEC registers.
195 */
196 uint32_t sc_imask;
197 uint32_t sc_maccfg1;
198 uint32_t sc_maccfg2;
199 uint32_t sc_maxfrm;
200 uint32_t sc_ecntrl;
201 uint32_t sc_dmactrl;
202 uint32_t sc_macstnaddr1;
203 uint32_t sc_macstnaddr2;
204 uint32_t sc_tctrl;
205 uint32_t sc_rctrl;
206 uint32_t sc_gaddr[16];
207 uint64_t sc_macaddrs[15];
208
209 void *sc_tx_ih;
210 void *sc_rx_ih;
211 void *sc_error_ih;
212 void *sc_soft_ih;
213
214 kmutex_t *sc_lock;
215 kmutex_t *sc_hwlock;
216
217 struct evcnt sc_ev_tx_stall;
218 struct evcnt sc_ev_tx_intr;
219 struct evcnt sc_ev_rx_stall;
220 struct evcnt sc_ev_rx_intr;
221 struct evcnt sc_ev_error_intr;
222 struct evcnt sc_ev_soft_intr;
223 struct evcnt sc_ev_tx_pause;
224 struct evcnt sc_ev_rx_pause;
225 struct evcnt sc_ev_mii_ticks;
226
227 struct callout sc_mii_callout;
228 uint64_t sc_mii_last_tick;
229
230 struct ifqueue sc_rx_bufcache;
231 struct pq3etsec_mapcache *sc_rx_mapcache;
232 struct pq3etsec_mapcache *sc_tx_mapcache;
233
234 /* Interrupt Coalescing parameters */
235 int sc_ic_rx_time;
236 int sc_ic_rx_count;
237 int sc_ic_tx_time;
238 int sc_ic_tx_count;
239 };
240
241 #define ETSEC_IC_RX_ENABLED(sc) \
242 ((sc)->sc_ic_rx_time != 0 && (sc)->sc_ic_rx_count != 0)
243 #define ETSEC_IC_TX_ENABLED(sc) \
244 ((sc)->sc_ic_tx_time != 0 && (sc)->sc_ic_tx_count != 0)
245
246 struct pq3mdio_softc {
247 device_t mdio_dev;
248
249 kmutex_t *mdio_lock;
250
251 bus_space_tag_t mdio_bst;
252 bus_space_handle_t mdio_bsh;
253 };
254
255 static int pq3etsec_match(device_t, cfdata_t, void *);
256 static void pq3etsec_attach(device_t, device_t, void *);
257
258 static int pq3mdio_match(device_t, cfdata_t, void *);
259 static void pq3mdio_attach(device_t, device_t, void *);
260
261 static void pq3etsec_ifstart(struct ifnet *);
262 static void pq3etsec_ifwatchdog(struct ifnet *);
263 static int pq3etsec_ifinit(struct ifnet *);
264 static void pq3etsec_ifstop(struct ifnet *, int);
265 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *);
266
267 static int pq3etsec_mapcache_create(struct pq3etsec_softc *,
268 struct pq3etsec_mapcache **, size_t, size_t, size_t);
269 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *,
270 struct pq3etsec_mapcache *);
271 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *,
272 struct pq3etsec_mapcache *);
273 static void pq3etsec_mapcache_put(struct pq3etsec_softc *,
274 struct pq3etsec_mapcache *, bus_dmamap_t);
275
276 static int pq3etsec_txq_attach(struct pq3etsec_softc *,
277 struct pq3etsec_txqueue *, u_int);
278 static void pq3etsec_txq_purge(struct pq3etsec_softc *,
279 struct pq3etsec_txqueue *);
280 static void pq3etsec_txq_reset(struct pq3etsec_softc *,
281 struct pq3etsec_txqueue *);
282 static bool pq3etsec_txq_consume(struct pq3etsec_softc *,
283 struct pq3etsec_txqueue *);
284 static bool pq3etsec_txq_produce(struct pq3etsec_softc *,
285 struct pq3etsec_txqueue *, struct mbuf *m);
286 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *,
287 struct pq3etsec_txqueue *);
288
289 static int pq3etsec_rxq_attach(struct pq3etsec_softc *,
290 struct pq3etsec_rxqueue *, u_int);
291 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *,
292 struct pq3etsec_rxqueue *);
293 static void pq3etsec_rxq_purge(struct pq3etsec_softc *,
294 struct pq3etsec_rxqueue *, bool);
295 static void pq3etsec_rxq_reset(struct pq3etsec_softc *,
296 struct pq3etsec_rxqueue *);
297
298 static void pq3etsec_mc_setup(struct pq3etsec_softc *);
299
300 static void pq3etsec_mii_tick(void *);
301 static int pq3etsec_rx_intr(void *);
302 static int pq3etsec_tx_intr(void *);
303 static int pq3etsec_error_intr(void *);
304 static void pq3etsec_soft_intr(void *);
305
306 static void pq3etsec_set_ic_rx(struct pq3etsec_softc *);
307 static void pq3etsec_set_ic_tx(struct pq3etsec_softc *);
308
309 static void pq3etsec_sysctl_setup(struct sysctllog **, struct pq3etsec_softc *);
310
311 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc),
312 pq3etsec_match, pq3etsec_attach, NULL, NULL);
313
314 CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc),
315 pq3mdio_match, pq3mdio_attach, NULL, NULL);
316
317 CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc),
318 pq3mdio_match, pq3mdio_attach, NULL, NULL);
319
320 static inline uint32_t
321 etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off)
322 {
323 return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off);
324 }
325
326 static inline void
327 etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data)
328 {
329 bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data);
330 }
331
332 static inline uint32_t
333 etsec_read(struct pq3etsec_softc *sc, bus_size_t off)
334 {
335 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
336 }
337
338 static int
339 pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
340 {
341 return strcmp(cf->cf_name, "mdio") == 0;
342 }
343
344 static int
345 pq3mdio_match(device_t parent, cfdata_t cf, void *aux)
346 {
347 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
348 const bool p1025_p = (svr == (SVR_P1025v1 >> 16)
349 || svr == (SVR_P1016v1 >> 16));
350
351 if (device_is_a(parent, "cpunode")) {
352 if (!p1025_p
353 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
354 return 0;
355
356 return 1;
357 }
358
359 if (device_is_a(parent, "tsec")) {
360 if (p1025_p
361 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
362 return 0;
363
364 return 1;
365 }
366
367 return 0;
368 }
369
370 static void
371 pq3mdio_attach(device_t parent, device_t self, void *aux)
372 {
373 struct pq3mdio_softc * const mdio = device_private(self);
374 struct cpunode_attach_args * const cna = aux;
375 struct cpunode_locators * const cnl = &cna->cna_locs;
376
377 mdio->mdio_dev = self;
378 mdio->mdio_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
379
380 if (device_is_a(parent, "cpunode")) {
381 struct cpunode_softc * const psc = device_private(parent);
382 psc->sc_children |= cna->cna_childmask;
383
384 mdio->mdio_bst = cna->cna_memt;
385 if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr,
386 cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) {
387 aprint_error(": error mapping registers @ %#x\n",
388 cnl->cnl_addr);
389 return;
390 }
391 } else {
392 struct pq3etsec_softc * const sc = device_private(parent);
393
394 KASSERT(device_is_a(parent, "tsec"));
395 KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE
396 || cnl->cnl_addr == ETSEC2_BASE
397 || cnl->cnl_addr == ETSEC3_BASE
398 || cnl->cnl_addr == ETSEC4_BASE,
399 "unknown tsec addr %x", cnl->cnl_addr);
400
401 mdio->mdio_bst = sc->sc_bst;
402 mdio->mdio_bsh = sc->sc_bsh;
403 }
404
405 aprint_normal("\n");
406 }
407
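/*
 * Read a PHY register via the MII management interface: program MIIMADD
 * with the PHY and register numbers, pulse MIIMCOM_READ, spin until
 * MIIMIND reports the cycle has completed, then fetch the result from
 * MIIMSTAT.  If a background scan was in progress (MIIMCOM_SCAN),
 * restore it afterwards.
 */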
408 static int
409 pq3mdio_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
410 {
411 struct pq3mdio_softc * const mdio = device_private(self);
412 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);
413
414 mutex_enter(mdio->mdio_lock);
415
416 etsec_mdio_write(mdio, MIIMADD,
417 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
418
419 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */
420 etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ);
421
422 while (etsec_mdio_read(mdio, MIIMIND) != 0) {
423 delay(1);
424 }
	*val = etsec_mdio_read(mdio, MIIMSTAT) & 0xffff;
426
427 if (miimcom == MIIMCOM_SCAN)
428 etsec_mdio_write(mdio, MIIMCOM, miimcom);
429
430 #if 0
431 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
432 __func__, phy, reg, data);
433 #endif
434 mutex_exit(mdio->mdio_lock);
435 return 0;
436 }
437
438 static int
439 pq3mdio_mii_writereg(device_t self, int phy, int reg, uint16_t data)
440 {
441 struct pq3mdio_softc * const mdio = device_private(self);
442 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);
443
444 #if 0
445 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
446 __func__, phy, reg, data);
447 #endif
448
449 mutex_enter(mdio->mdio_lock);
450
451 etsec_mdio_write(mdio, MIIMADD,
452 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
453 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */
454 etsec_mdio_write(mdio, MIIMCON, data);
455
456 int timo = 1000; /* 1ms */
457 while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) {
458 delay(1);
459 }
460
461 if (miimcom == MIIMCOM_SCAN)
462 etsec_mdio_write(mdio, MIIMCOM, miimcom);
463
464 mutex_exit(mdio->mdio_lock);
465
466 return 0;
467 }
468
469 static inline void
470 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data)
471 {
472 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data);
473 }
474
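/*
 * MII status-change callback: recompute MACCFG1/MACCFG2/ECNTRL from the
 * negotiated media (duplex, flow control, speed).  The new values only
 * take effect on the next reinit, so if the interface is running we just
 * flag SOFT_RESET rather than touching the hardware here.
 */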
475 static void
476 pq3etsec_mii_statchg(struct ifnet *ifp)
477 {
478 struct pq3etsec_softc * const sc = ifp->if_softc;
479 struct mii_data * const mii = &sc->sc_mii;
480
481 uint32_t maccfg1 = sc->sc_maccfg1;
482 uint32_t maccfg2 = sc->sc_maccfg2;
483 uint32_t ecntrl = sc->sc_ecntrl;
484
485 maccfg1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
486 maccfg2 &= ~(MACCFG2_IFMODE | MACCFG2_FD);
487
488 if (sc->sc_mii.mii_media_active & IFM_FDX) {
489 maccfg2 |= MACCFG2_FD;
490 }
491
492 /*
493 * Now deal with the flow control bits.
494 */
495 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO
496 && (mii->mii_media_active & IFM_ETH_FMASK)) {
497 if (mii->mii_media_active & IFM_ETH_RXPAUSE)
498 maccfg1 |= MACCFG1_RX_FLOW;
499 if (mii->mii_media_active & IFM_ETH_TXPAUSE)
500 maccfg1 |= MACCFG1_TX_FLOW;
501 }
502
503 /*
504 * Now deal with the speed.
505 */
506 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
507 maccfg2 |= MACCFG2_IFMODE_GMII;
508 } else {
509 maccfg2 |= MACCFG2_IFMODE_MII;
510 ecntrl &= ~ECNTRL_R100M;
511 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
512 ecntrl |= ECNTRL_R100M;
513 }
514 }
515
516 /*
517 * If things are different, re-init things.
518 */
519 if (maccfg1 != sc->sc_maccfg1
520 || maccfg2 != sc->sc_maccfg2
521 || ecntrl != sc->sc_ecntrl) {
522 if (sc->sc_if.if_flags & IFF_RUNNING)
523 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
524 sc->sc_maccfg1 = maccfg1;
525 sc->sc_maccfg2 = maccfg2;
526 sc->sc_ecntrl = ecntrl;
527 }
528 }
529
530 #if 0
531 static void
532 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
533 {
534 struct pq3etsec_softc * const sc = ifp->if_softc;
535
536 mii_pollstat(&sc->sc_mii);
537 ether_mediastatus(ifp, ifmr);
538 ifmr->ifm_status = sc->sc_mii.mii_media_status;
539 ifmr->ifm_active = sc->sc_mii.mii_media_active;
540 }
541
542 static int
543 pq3etsec_mediachange(struct ifnet *ifp)
544 {
545 struct pq3etsec_softc * const sc = ifp->if_softc;
546
547 if ((ifp->if_flags & IFF_UP) == 0)
548 return 0;
549
550 int rv = mii_mediachg(&sc->sc_mii);
551 return (rv == ENXIO) ? 0 : rv;
552 }
553 #endif
554
555 static int
556 pq3etsec_match(device_t parent, cfdata_t cf, void *aux)
557 {
558
559 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
560 return 0;
561
562 return 1;
563 }
564
565 static void
566 pq3etsec_attach(device_t parent, device_t self, void *aux)
567 {
568 struct cpunode_softc * const psc = device_private(parent);
569 struct pq3etsec_softc * const sc = device_private(self);
570 struct mii_data * const mii = &sc->sc_mii;
571 struct cpunode_attach_args * const cna = aux;
572 struct cpunode_locators * const cnl = &cna->cna_locs;
573 cfdata_t cf = device_cfdata(self);
574 int error;
575
576 psc->sc_children |= cna->cna_childmask;
577 sc->sc_dev = self;
578 sc->sc_bst = cna->cna_memt;
579 sc->sc_dmat = &booke_bus_dma_tag;
580
581 /*
582 * Pull out the mdio bus and phy we are supposed to use.
583 */
584 const int mdio = cf->cf_loc[CPUNODECF_MDIO];
585 const int phy = cf->cf_loc[CPUNODECF_PHY];
586 if (mdio != CPUNODECF_MDIO_DEFAULT)
587 aprint_normal(" mdio %d", mdio);
588
589 /*
590 * See if the phy is in the config file...
591 */
592 if (phy != CPUNODECF_PHY_DEFAULT) {
593 sc->sc_phy_addr = phy;
594 } else {
		char prop_name[20];
596 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
597 cnl->cnl_instance);
598 sc->sc_phy_addr = board_info_get_number(prop_name);
599 }
600 if (sc->sc_phy_addr != MII_PHY_ANY)
601 aprint_normal(" phy %d", sc->sc_phy_addr);
602
603 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
604 &sc->sc_bsh);
605 if (error) {
606 aprint_error(": error mapping registers: %d\n", error);
607 return;
608 }
609
610 /*
	 * Assume firmware has already set the mac address and fetch it
612 * before we reinit it.
613 */
614 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2);
615 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1);
616 sc->sc_rctrl = RCTRL_DEFAULT;
617 sc->sc_ecntrl = etsec_read(sc, ECNTRL);
618 sc->sc_maccfg1 = etsec_read(sc, MACCFG1);
619 sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT;
620
621 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) {
622 size_t len;
623 const uint8_t *mac_addr =
624 board_info_get_data("tsec-mac-addr-base", &len);
625 KASSERT(len == ETHER_ADDR_LEN);
626 sc->sc_macstnaddr2 =
627 (mac_addr[1] << 24)
628 | (mac_addr[0] << 16);
629 sc->sc_macstnaddr1 =
630 ((mac_addr[5] + cnl->cnl_instance - 1) << 24)
631 | (mac_addr[4] << 16)
632 | (mac_addr[3] << 8)
633 | (mac_addr[2] << 0);
634 #if 0
635 aprint_error(": mac-address unknown\n");
636 return;
637 #endif
638 }
639
640 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
641 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
642
643 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE);
644 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc);
645
646 /* Disable interrupts */
647 etsec_write(sc, IMASK, 0);
648
649 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0);
650 if (error) {
651 aprint_error(": failed to init rxq: %d\n", error);
652 goto fail_1;
653 }
654
655 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
656 if (error) {
657 aprint_error(": failed to init txq: %d\n", error);
658 goto fail_2;
659 }
660
661 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache,
662 ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS);
663 if (error) {
664 aprint_error(": failed to allocate rx dmamaps: %d\n", error);
665 goto fail_3;
666 }
667
668 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
669 ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS);
670 if (error) {
671 aprint_error(": failed to allocate tx dmamaps: %d\n", error);
672 goto fail_4;
673 }
674
675 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP,
676 pq3etsec_tx_intr, sc);
677 if (sc->sc_tx_ih == NULL) {
678 aprint_error(": failed to establish tx interrupt: %d\n",
679 cnl->cnl_intrs[0]);
680 goto fail_5;
681 }
682
683 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP,
684 pq3etsec_rx_intr, sc);
685 if (sc->sc_rx_ih == NULL) {
686 aprint_error(": failed to establish rx interrupt: %d\n",
687 cnl->cnl_intrs[1]);
688 goto fail_6;
689 }
690
691 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP,
692 pq3etsec_error_intr, sc);
693 if (sc->sc_error_ih == NULL) {
694 aprint_error(": failed to establish error interrupt: %d\n",
695 cnl->cnl_intrs[2]);
696 goto fail_7;
697 }
698
699 int softint_flags = SOFTINT_NET;
700 #if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE)
701 softint_flags |= SOFTINT_MPSAFE;
702 #endif /* !MULTIPROCESSOR || NET_MPSAFE */
703 sc->sc_soft_ih = softint_establish(softint_flags,
704 pq3etsec_soft_intr, sc);
705 if (sc->sc_soft_ih == NULL) {
706 aprint_error(": failed to establish soft interrupt\n");
707 goto fail_8;
708 }
709
	/*
	 * If no MDIO locator was specified, search for an mdio child to
	 * attach here; otherwise look up the mdio device named in the
	 * config file.
	 */
713 if (mdio == CPUNODECF_MDIO_DEFAULT) {
714 aprint_normal("\n");
715 cfdata_t mdio_cf = config_search_ia(pq3mdio_find, self, NULL, cna);
716 if (mdio_cf != NULL) {
717 sc->sc_mdio_dev = config_attach(self, mdio_cf, cna, NULL);
718 }
719 } else {
720 sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio);
721 if (sc->sc_mdio_dev == NULL) {
722 aprint_error(": failed to locate mdio device\n");
723 goto fail_9;
724 }
725 aprint_normal("\n");
726 }
727
728 etsec_write(sc, ATTR, ATTR_DEFAULT);
729 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT);
730
	/* Enable interrupt coalescing */
732 sc->sc_ic_rx_time = 768;
733 sc->sc_ic_rx_count = 16;
734 sc->sc_ic_tx_time = 768;
735 sc->sc_ic_tx_count = 16;
736 pq3etsec_set_ic_rx(sc);
737 pq3etsec_set_ic_tx(sc);
738
739 char enaddr[ETHER_ADDR_LEN] = {
740 [0] = sc->sc_macstnaddr2 >> 16,
741 [1] = sc->sc_macstnaddr2 >> 24,
742 [2] = sc->sc_macstnaddr1 >> 0,
743 [3] = sc->sc_macstnaddr1 >> 8,
744 [4] = sc->sc_macstnaddr1 >> 16,
745 [5] = sc->sc_macstnaddr1 >> 24,
746 };
747 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
748 ether_sprintf(enaddr));
749
750 const char * const xname = device_xname(sc->sc_dev);
751 struct ethercom * const ec = &sc->sc_ec;
752 struct ifnet * const ifp = &ec->ec_if;
753
754 ec->ec_mii = mii;
755
756 mii->mii_ifp = ifp;
757 mii->mii_readreg = pq3mdio_mii_readreg;
758 mii->mii_writereg = pq3mdio_mii_writereg;
759 mii->mii_statchg = pq3etsec_mii_statchg;
760
761 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
762
763 if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) {
764 mii_attach(sc->sc_mdio_dev, mii, 0xffffffff,
765 sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
766
767 if (LIST_FIRST(&mii->mii_phys) == NULL) {
768 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE,
769 0, NULL);
770 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
771 } else {
772 callout_schedule(&sc->sc_mii_callout, hz);
773 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
774 }
775 } else {
776 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX,
777 0, NULL);
778 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX);
779 }
780
781 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
782 | ETHERCAP_JUMBO_MTU;
783
784 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
785 ifp->if_softc = sc;
786 ifp->if_capabilities = IFCAP_ETSEC;
787 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
788 ifp->if_ioctl = pq3etsec_ifioctl;
789 ifp->if_start = pq3etsec_ifstart;
790 ifp->if_watchdog = pq3etsec_ifwatchdog;
791 ifp->if_init = pq3etsec_ifinit;
792 ifp->if_stop = pq3etsec_ifstop;
793 IFQ_SET_READY(&ifp->if_snd);
794
795 /*
796 * Attach the interface.
797 */
798 error = if_initialize(ifp);
799 if (error != 0) {
800 aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
801 error);
802 goto fail_10;
803 }
804 pq3etsec_sysctl_setup(NULL, sc);
805 ether_ifattach(ifp, enaddr);
806 if_register(ifp);
807
808 pq3etsec_ifstop(ifp, true);
809
810 evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
811 NULL, xname, "rx stall");
812 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
813 NULL, xname, "tx stall");
814 evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
815 NULL, xname, "tx intr");
816 evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
817 NULL, xname, "rx intr");
818 evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
819 NULL, xname, "error intr");
820 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
821 NULL, xname, "soft intr");
822 evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
823 NULL, xname, "tx pause");
824 evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
825 NULL, xname, "rx pause");
826 evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
827 NULL, xname, "mii ticks");
828 return;
829
830 fail_10:
831 ifmedia_removeall(&mii->mii_media);
832 mii_detach(mii, sc->sc_phy_addr, MII_OFFSET_ANY);
833 fail_9:
834 softint_disestablish(sc->sc_soft_ih);
835 fail_8:
836 intr_disestablish(sc->sc_error_ih);
837 fail_7:
838 intr_disestablish(sc->sc_rx_ih);
839 fail_6:
840 intr_disestablish(sc->sc_tx_ih);
841 fail_5:
842 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
843 fail_4:
844 pq3etsec_mapcache_destroy(sc, sc->sc_rx_mapcache);
845 fail_3:
846 #if 0 /* notyet */
847 pq3etsec_txq_detach(sc);
848 #endif
849 fail_2:
850 #if 0 /* notyet */
851 pq3etsec_rxq_detach(sc);
852 #endif
853 fail_1:
854 callout_destroy(&sc->sc_mii_callout);
855 mutex_obj_free(sc->sc_lock);
856 mutex_obj_free(sc->sc_hwlock);
857 bus_space_unmap(sc->sc_bst, sc->sc_bsh, cnl->cnl_size);
858 }
859
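/*
 * Pack a 6-byte link-level address into the 64-bit representation used
 * for the MACSTNADDR/MACnADDR register pairs: the bytes are accumulated
 * in reverse order and the result is shifted left 16 bits so the address
 * occupies the top 48 bits.
 */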
860 static uint64_t
861 pq3etsec_macaddr_create(const uint8_t *lladdr)
862 {
863 uint64_t macaddr = 0;
864
865 lladdr += ETHER_ADDR_LEN;
866 for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
867 macaddr = (macaddr << 8) | *--lladdr;
868 }
869 return macaddr << 16;
870 }
871
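/*
 * (Re)initialize the interface following the eTSEC soft reset and
 * reconfiguration procedure; the numbered comments below track the
 * individual steps of that procedure.
 */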
872 static int
873 pq3etsec_ifinit(struct ifnet *ifp)
874 {
875 struct pq3etsec_softc * const sc = ifp->if_softc;
876 int error = 0;
877
878 sc->sc_maxfrm = uimax(ifp->if_mtu + 32, MCLBYTES);
879 if (ifp->if_mtu > ETHERMTU_JUMBO)
880 return error;
881
882 KASSERT(ifp->if_flags & IFF_UP);
883
884 /*
885 * Stop the interface (steps 1 to 4 in the Soft Reset and
	 * Reconfiguration Procedure).
887 */
888 pq3etsec_ifstop(ifp, 0);
889
890 /*
891 * If our frame size has changed (or it's our first time through)
892 * destroy the existing transmit mapcache.
893 */
894 if (sc->sc_tx_mapcache != NULL
895 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
896 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
897 sc->sc_tx_mapcache = NULL;
898 }
899
900 if (sc->sc_tx_mapcache == NULL) {
901 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
902 ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS);
903 if (error)
904 return error;
905 }
906
907 sc->sc_ev_mii_ticks.ev_count++;
908 mii_tick(&sc->sc_mii);
909
910 if (ifp->if_flags & IFF_PROMISC) {
911 sc->sc_rctrl |= RCTRL_PROM;
912 } else {
913 sc->sc_rctrl &= ~RCTRL_PROM;
914 }
915
916 uint32_t rctrl_prsdep = 0;
917 sc->sc_rctrl &=
918 ~(RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP);
919 if (VLAN_ATTACHED(&sc->sc_ec)) {
920 sc->sc_rctrl |= RCTRL_VLEX;
921 rctrl_prsdep = RCTRL_PRSDEP_L2;
922 }
923 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) {
924 sc->sc_rctrl |= RCTRL_IPCSEN;
925 rctrl_prsdep = RCTRL_PRSDEP_L3;
926 }
927 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) {
928 sc->sc_rctrl |= RCTRL_TUCSEN;
929 rctrl_prsdep = RCTRL_PRSDEP_L4;
930 }
931 sc->sc_rctrl |= rctrl_prsdep;
932 #if 0
933 if (sc->sc_rctrl
934 & (RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP))
935 aprint_normal_dev(sc->sc_dev,
936 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n",
937 sc->sc_rctrl,
938 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN),
939 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN),
940 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX),
941 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP));
942 #endif
943
944 sc->sc_tctrl &= ~(TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS);
945 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */
946 sc->sc_tctrl |= TCTRL_VLINS;
947 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN)
948 sc->sc_tctrl |= TCTRL_IPCSEN;
949 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN)
950 sc->sc_tctrl |= TCTRL_TUCSEN;
951 #if 0
952 if (sc->sc_tctrl & (TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS))
953 aprint_normal_dev(sc->sc_dev,
954 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n",
955 sc->sc_tctrl,
956 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN),
957 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN),
958 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS));
959 #endif
960
961 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN | MACCFG1_RX_EN);
962
963 const uint64_t macstnaddr =
964 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl));
965
966 sc->sc_imask = IEVENT_DPE;
967
	/* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */
969 pq3etsec_rxq_reset(sc, &sc->sc_rxq);
970 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */
971
	/* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */
973 pq3etsec_txq_reset(sc, &sc->sc_txq);
974
975 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */
976 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2);
977 etsec_write(sc, MAXFRM, sc->sc_maxfrm);
978 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32));
979 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0));
980 etsec_write(sc, MACCFG1, sc->sc_maccfg1);
981 etsec_write(sc, MACCFG2, sc->sc_maccfg2);
982 etsec_write(sc, ECNTRL, sc->sc_ecntrl);
983
984 /* 8. Setup group address hash table (GADDR0-GADDR15) */
985 pq3etsec_mc_setup(sc);
986
	/* 9. Setup receive frame filter table (via RQFAR, RQFCR, and RQFPR) */
988 etsec_write(sc, MRBLR, MCLBYTES);
989
990 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */
991 sc->sc_dmactrl |= DMACTRL_DEFAULT;
992 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
993
994 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
995 etsec_write(sc, TQUEUE, TQUEUE_EN0);
996 sc->sc_imask |= IEVENT_TXF | IEVENT_TXE | IEVENT_TXC;
997
998 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */
999
1000 /* 12. Enable receive queues in RQUEUE, */
1001 etsec_write(sc, RQUEUE, RQUEUE_EN0 | RQUEUE_EX0);
1002 sc->sc_imask |= IEVENT_RXF | IEVENT_BSY | IEVENT_RXC;
1003
1004 /* and optionally set TOE functionality in RCTRL. */
1005 etsec_write(sc, RCTRL, sc->sc_rctrl);
1006 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL);
1007 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF)
1008 sc->sc_rx_adjlen += sizeof(struct rxfcb);
1009
1010 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */
1011 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF);
1012
1013 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/
1014 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF);
1015
1016 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */
1017 sc->sc_dmactrl &= ~(DMACTRL_GRS | DMACTRL_GTS);
1018 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
1019
1020 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */
1021 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
1022 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
1023
1024 sc->sc_soft_flags = 0;
1025
1026 etsec_write(sc, IMASK, sc->sc_imask);
1027
1028 ifp->if_flags |= IFF_RUNNING;
1029
1030 return error;
1031 }
1032
1033 static void
1034 pq3etsec_ifstop(struct ifnet *ifp, int disable)
1035 {
1036 struct pq3etsec_softc * const sc = ifp->if_softc;
1037
1038 KASSERT(!cpu_intr_p());
1039 const uint32_t imask_gsc_mask = IEVENT_GTSC | IEVENT_GRSC;
1040 /*
1041 * Clear the GTSC and GRSC from the interrupt mask until
1042 * we are ready for them. Then clear them from IEVENT,
1043 * request the graceful shutdown, and then enable the
1044 * GTSC and GRSC bits in the mask. This should cause the
1045 * error interrupt to fire which will issue a wakeup to
1046 * allow us to resume.
1047 */
1048
1049 /*
1050 * 1. Set GRS/GTS bits in DMACTRL register
1051 */
1052 sc->sc_dmactrl |= DMACTRL_GRS | DMACTRL_GTS;
1053 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask);
1054 etsec_write(sc, IEVENT, imask_gsc_mask);
1055 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
1056
1057 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN | MACCFG1_RX_EN)) {
1058 /*
1059 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set
1060 */
1061 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask);
1062
1063 u_int timo = 1000;
1064 uint32_t ievent = etsec_read(sc, IEVENT);
1065 while ((ievent & imask_gsc_mask) != imask_gsc_mask) {
1066 if (--timo == 0) {
1067 aprint_error_dev(sc->sc_dev,
1068 "WARNING: "
1069 "request to stop failed (IEVENT=%#x)\n",
1070 ievent);
1071 break;
1072 }
1073 delay(10);
1074 ievent = etsec_read(sc, IEVENT);
1075 }
1076 }
1077
1078 /*
1079 * Now reset the controller.
1080 *
1081 * 3. Set SOFT_RESET bit in MACCFG1 register
1082 * 4. Clear SOFT_RESET bit in MACCFG1 register
1083 */
1084 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
1085 etsec_write(sc, MACCFG1, 0);
1086 etsec_write(sc, IMASK, 0);
1087 etsec_write(sc, IEVENT, ~0);
1088 sc->sc_imask = 0;
1089 ifp->if_flags &= ~IFF_RUNNING;
1090
1091 uint32_t tbipa = etsec_read(sc, TBIPA);
1092 if (tbipa == sc->sc_phy_addr) {
1093 aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
1094 etsec_write(sc, TBIPA, 0x1f);
1095 }
1096 uint32_t miimcfg = etsec_read(sc, MIIMCFG);
1097 etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
1098 etsec_write(sc, MIIMCFG, miimcfg);
1099
1100 /*
	 * Let's consume any remaining transmitted packets.  And if we are
1102 * disabling the interface, purge ourselves of any untransmitted
1103 * packets. But don't consume any received packets, just drop them.
1104 * If we aren't disabling the interface, save the mbufs in the
1105 * receive queue for reuse.
1106 */
1107 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable);
1108 pq3etsec_txq_consume(sc, &sc->sc_txq);
1109 if (disable) {
1110 pq3etsec_txq_purge(sc, &sc->sc_txq);
1111 IFQ_PURGE(&ifp->if_snd);
1112 }
1113 }
1114
1115 static void
1116 pq3etsec_ifwatchdog(struct ifnet *ifp)
1117 {
1118 }
1119
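/*
 * Program the multicast filters.  Individual multicast addresses go into
 * the exact-match MACnADDR registers when RCTRL_EMEN is set and slots
 * remain; everything else is hashed into the IGADDR/GADDR group hash
 * tables.  A range-based multicast request falls back to ALLMULTI.
 */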
1120 static void
1121 pq3etsec_mc_setup(
1122 struct pq3etsec_softc *sc)
1123 {
1124 struct ethercom * const ec = &sc->sc_ec;
1125 struct ifnet * const ifp = &sc->sc_if;
1126 struct ether_multi *enm;
1127 struct ether_multistep step;
1128 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8);
1129 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8);
1130
1131 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr));
1132 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
1133
1134 ifp->if_flags &= ~IFF_ALLMULTI;
1135
1136 ETHER_LOCK(ec);
1137 ETHER_FIRST_MULTI(step, ec, enm);
1138 for (u_int i = 0; enm != NULL; ) {
1139 const char *addr = enm->enm_addrlo;
1140 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
1141 ifp->if_flags |= IFF_ALLMULTI;
1142 memset(gaddr, 0xff, 32 << (crc_shift & 1));
1143 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
1144 break;
1145 }
1146 if ((sc->sc_rctrl & RCTRL_EMEN)
1147 && i < __arraycount(sc->sc_macaddrs)) {
1148 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr);
1149 } else {
1150 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
1151 #if 0
1152 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__,
1153 ether_sprintf(addr), crc,
1154 crc >> crc_shift,
1155 crc >> (crc_shift + 5),
1156 (crc >> crc_shift) & 31,
1157 1 << (((crc >> crc_shift) & 31) ^ 31));
1158 #endif
1159 /*
1160 * The documentation doesn't completely follow PowerPC
1161 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01
1162 * is 0x7fa32d9b. By empirical testing, the
1163 * corresponding hash bit is word 3, bit 31 (ppc bit
			 * order). Since (3 << 5) | 31 is 0x7f, we deduce
1165 * H[0:2] selects the register while H[3:7] selects
1166 * the bit (ppc bit order).
1167 */
1168 crc >>= crc_shift;
1169 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31);
1170 }
1171 ETHER_NEXT_MULTI(step, enm);
1172 }
1173 ETHER_UNLOCK(ec);
1174 for (u_int i = 0; i < 8; i++) {
1175 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]);
1176 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]);
1177 #if 0
1178 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8])
1179 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__,
1180 i, IGADDR(i), etsec_read(sc, IGADDR(i)),
1181 i, GADDR(i), etsec_read(sc, GADDR(i)));
1182 #endif
1183 }
1184 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) {
1185 uint64_t macaddr = sc->sc_macaddrs[i];
1186 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32));
1187 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0));
1188 #if 0
1189 if (macaddr)
			printf("%s: MAC%02uADDR1(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__,
1191 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)),
1192 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i)));
1193 #endif
1194 }
1195 }
1196
1197 static int
1198 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
1199 {
1200 struct pq3etsec_softc *sc = ifp->if_softc;
1201 struct ifreq * const ifr = data;
1202 const int s = splnet();
1203 int error;
1204
1205 switch (cmd) {
1206 case SIOCSIFMEDIA:
1207 /* Flow control requires full-duplex mode. */
1208 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1209 (ifr->ifr_media & IFM_FDX) == 0)
1210 ifr->ifr_media &= ~IFM_ETH_FMASK;
1211 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1212 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1213 /* We can do both TXPAUSE and RXPAUSE. */
1214 ifr->ifr_media |=
1215 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1216 }
1217 }
1218 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1219 break;
1220
1221 default:
1222 error = ether_ioctl(ifp, cmd, data);
1223 if (error != ENETRESET)
1224 break;
1225
1226 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
1227 error = 0;
1228 if (ifp->if_flags & IFF_RUNNING)
1229 pq3etsec_mc_setup(sc);
1230 break;
1231 }
1232 error = pq3etsec_ifinit(ifp);
1233 break;
1234 }
1235
1236 splx(s);
1237 return error;
1238 }
1239
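/*
 * bus_dma sync helpers for the rx/tx descriptor rings: "presync" is used
 * before handing descriptors to the eTSEC, "postsync" before the CPU
 * re-reads descriptors the hardware may have updated.
 */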
1240 static void
1241 pq3etsec_rxq_desc_presync(
1242 struct pq3etsec_softc *sc,
1243 struct pq3etsec_rxqueue *rxq,
1244 volatile struct rxbd *rxbd,
1245 size_t count)
1246 {
1247 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1248 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1249 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1250 }
1251
1252 static void
1253 pq3etsec_rxq_desc_postsync(
1254 struct pq3etsec_softc *sc,
1255 struct pq3etsec_rxqueue *rxq,
1256 volatile struct rxbd *rxbd,
1257 size_t count)
1258 {
1259 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1260 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1261 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1262 }
1263
1264 static void
1265 pq3etsec_txq_desc_presync(
1266 struct pq3etsec_softc *sc,
1267 struct pq3etsec_txqueue *txq,
1268 volatile struct txbd *txbd,
1269 size_t count)
1270 {
1271 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1272 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1273 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1274 }
1275
1276 static void
1277 pq3etsec_txq_desc_postsync(
1278 struct pq3etsec_softc *sc,
1279 struct pq3etsec_txqueue *txq,
1280 volatile struct txbd *txbd,
1281 size_t count)
1282 {
1283 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1284 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1285 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1286 }
1287
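/*
 * The map cache is a simple LIFO stack of preallocated bus_dmamaps so
 * that maps can be obtained and released from interrupt context without
 * calling bus_dmamap_create()/bus_dmamap_destroy() in the hot path.
 */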
1288 static bus_dmamap_t
1289 pq3etsec_mapcache_get(
1290 struct pq3etsec_softc *sc,
1291 struct pq3etsec_mapcache *dmc)
1292 {
1293 KASSERT(dmc->dmc_nmaps > 0);
1294 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
1295 return dmc->dmc_maps[--dmc->dmc_nmaps];
1296 }
1297
1298 static void
1299 pq3etsec_mapcache_put(
1300 struct pq3etsec_softc *sc,
1301 struct pq3etsec_mapcache *dmc,
1302 bus_dmamap_t map)
1303 {
1304 KASSERT(map != NULL);
1305 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
1306 dmc->dmc_maps[dmc->dmc_nmaps++] = map;
1307 }
1308
1309 static void
1310 pq3etsec_mapcache_destroy(
1311 struct pq3etsec_softc *sc,
1312 struct pq3etsec_mapcache *dmc)
1313 {
1314 const size_t dmc_size =
1315 offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);
1316
1317 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
1318 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
1319 }
1320 kmem_intr_free(dmc, dmc_size);
1321 }
1322
1323 static int
1324 pq3etsec_mapcache_create(
1325 struct pq3etsec_softc *sc,
1326 struct pq3etsec_mapcache **dmc_p,
1327 size_t maxmaps,
1328 size_t maxmapsize,
1329 size_t maxseg)
1330 {
1331 const size_t dmc_size =
1332 offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
1333 struct pq3etsec_mapcache * const dmc =
1334 kmem_intr_zalloc(dmc_size, KM_NOSLEEP);
1335
1336 dmc->dmc_maxmaps = maxmaps;
1337 dmc->dmc_nmaps = maxmaps;
1338 dmc->dmc_maxmapsize = maxmapsize;
1339 dmc->dmc_maxseg = maxseg;
1340
1341 for (u_int i = 0; i < maxmaps; i++) {
1342 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
1343 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
1344 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
1345 if (error) {
1346 aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
1348 "entry %u of %zu: %d\n",
1349 i, maxmaps, error);
1350 while (i-- > 0) {
1351 bus_dmamap_destroy(sc->sc_dmat,
1352 dmc->dmc_maps[i]);
1353 }
1354 kmem_intr_free(dmc, dmc_size);
1355 return error;
1356 }
1357 KASSERT(dmc->dmc_maps[i] != NULL);
1358 }
1359
1360 *dmc_p = dmc;
1361
1362 return 0;
1363 }
1364
1365 #if 0
1366 static void
1367 pq3etsec_dmamem_free(
1368 bus_dma_tag_t dmat,
1369 size_t map_size,
1370 bus_dma_segment_t *seg,
1371 bus_dmamap_t map,
1372 void *kvap)
1373 {
1374 bus_dmamap_destroy(dmat, map);
1375 bus_dmamem_unmap(dmat, kvap, map_size);
1376 bus_dmamem_free(dmat, seg, 1);
1377 }
1378 #endif
1379
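/*
 * Allocate, map, and load a single physically contiguous DMA region
 * (used for the descriptor rings), returning the kva and the loaded
 * dmamap to the caller.
 */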
1380 static int
1381 pq3etsec_dmamem_alloc(
1382 bus_dma_tag_t dmat,
1383 size_t map_size,
1384 bus_dma_segment_t *seg,
1385 bus_dmamap_t *map,
1386 void **kvap)
1387 {
1388 int error;
1389 int nseg;
1390
1391 *kvap = NULL;
1392 *map = NULL;
1393
1394 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
1395 seg, 1, &nseg, 0);
1396 if (error)
1397 return error;
1398
1399 KASSERT(nseg == 1);
1400
1401 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
1402 BUS_DMA_COHERENT);
1403 if (error == 0) {
1404 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
1405 map);
1406 if (error == 0) {
1407 error = bus_dmamap_load(dmat, *map, *kvap, map_size,
1408 NULL, 0);
1409 if (error == 0)
1410 return 0;
1411 bus_dmamap_destroy(dmat, *map);
1412 *map = NULL;
1413 }
1414 bus_dmamem_unmap(dmat, *kvap, map_size);
1415 *kvap = NULL;
1416 }
1417 bus_dmamem_free(dmat, seg, nseg);
	return error;
1419 }
1420
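/*
 * Allocate a single receive buffer: an mbuf cluster loaded into a dmamap
 * taken from the rx map cache and pre-synced for the eTSEC to DMA into.
 * The dmamap is stashed in the mbuf's context pointer (M_SETCTX).
 */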
1421 static struct mbuf *
1422 pq3etsec_rx_buf_alloc(
1423 struct pq3etsec_softc *sc)
1424 {
1425 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
1426 if (m == NULL) {
1427 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
1428 return NULL;
1429 }
1430 MCLGET(m, M_DONTWAIT);
1431 if ((m->m_flags & M_EXT) == 0) {
1432 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
1433 m_freem(m);
1434 return NULL;
1435 }
1436 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1437
1438 bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
1439 if (map == NULL) {
1440 printf("%s:%d: %s\n", __func__, __LINE__, "map get");
1441 m_freem(m);
1442 return NULL;
1443 }
1444 M_SETCTX(m, map);
1445 m->m_len = m->m_pkthdr.len = MCLBYTES;
1446 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1447 BUS_DMA_READ | BUS_DMA_NOWAIT);
1448 if (error) {
1449 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
1450 error);
1451 M_SETCTX(m, NULL);
1452 m_freem(m);
1453 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1454 return NULL;
1455 }
1456 KASSERT(map->dm_mapsize == MCLBYTES);
1457 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1458 BUS_DMASYNC_PREREAD);
1459
1460 return m;
1461 }
1462
1463 static void
1464 pq3etsec_rx_map_unload(
1465 struct pq3etsec_softc *sc,
1466 struct mbuf *m)
1467 {
1468 KASSERT(m);
1469 for (; m != NULL; m = m->m_next) {
1470 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1471 KASSERT(map);
1472 KASSERT(map->dm_mapsize == MCLBYTES);
1473 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
1474 BUS_DMASYNC_POSTREAD);
1475 bus_dmamap_unload(sc->sc_dmat, map);
1476 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1477 M_SETCTX(m, NULL);
1478 }
1479 }
1480
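/*
 * Refill the receive ring up to its threshold, reusing buffers from the
 * rx bufcache when possible, and hand the newly armed descriptors to the
 * hardware.  If the receive queue had halted (RSTAT_QHLT), clear the
 * halt so reception resumes.
 */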
1481 static bool
1482 pq3etsec_rxq_produce(
1483 struct pq3etsec_softc *sc,
1484 struct pq3etsec_rxqueue *rxq)
1485 {
1486 volatile struct rxbd *producer = rxq->rxq_producer;
1487 #if 0
1488 size_t inuse = rxq->rxq_inuse;
1489 #endif
1490 while (rxq->rxq_inuse < rxq->rxq_threshold) {
1491 struct mbuf *m;
1492 IF_DEQUEUE(&sc->sc_rx_bufcache, m);
1493 if (m == NULL) {
1494 m = pq3etsec_rx_buf_alloc(sc);
1495 if (m == NULL) {
1496 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
1497 break;
1498 }
1499 }
1500 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1501 KASSERT(map);
1502
1503 #ifdef ETSEC_DEBUG
1504 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
1505 rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
1506 #endif
1507
1508 /* rxbd_len is write-only by the ETSEC */
1509 producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
1510 membar_producer();
1511 producer->rxbd_flags |= RXBD_E;
1512 if (__predict_false(rxq->rxq_mhead == NULL)) {
1513 KASSERT(producer == rxq->rxq_consumer);
1514 rxq->rxq_mconsumer = m;
1515 }
1516 *rxq->rxq_mtail = m;
1517 rxq->rxq_mtail = &m->m_next;
1518 m->m_len = MCLBYTES;
1519 m->m_next = NULL;
1520 rxq->rxq_inuse++;
1521 if (++producer == rxq->rxq_last) {
1522 membar_producer();
1523 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1524 rxq->rxq_last - rxq->rxq_producer);
1525 producer = rxq->rxq_producer = rxq->rxq_first;
1526 }
1527 }
1528 if (producer != rxq->rxq_producer) {
1529 membar_producer();
1530 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1531 producer - rxq->rxq_producer);
1532 rxq->rxq_producer = producer;
1533 }
1534 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
1535 if (qhlt) {
1536 KASSERT(qhlt & rxq->rxq_qmask);
1537 sc->sc_ev_rx_stall.ev_count++;
1538 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
1539 }
1540 #if 0
1541 aprint_normal_dev(sc->sc_dev,
1542 "%s: buffers inuse went from %zu to %zu\n",
1543 __func__, inuse, rxq->rxq_inuse);
1544 #endif
1545 return true;
1546 }
1547
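/*
 * Translate the receive frame control block (FCB) that the eTSEC
 * prepends to received frames into mbuf metadata: the VLAN tag plus
 * IP/TCP/UDP checksum status in m_pkthdr.csum_flags.
 */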
1548 static bool
1549 pq3etsec_rx_offload(
1550 struct pq3etsec_softc *sc,
1551 struct mbuf *m,
1552 const struct rxfcb *fcb)
1553 {
1554 if (fcb->rxfcb_flags & RXFCB_VLN) {
1555 vlan_set_tag(m, fcb->rxfcb_vlctl);
1556 }
1557 if ((fcb->rxfcb_flags & RXFCB_IP) == 0
1558 || (fcb->rxfcb_flags & (RXFCB_CIP | RXFCB_CTU)) == 0)
1559 return true;
1560 int csum_flags = 0;
1561 if ((fcb->rxfcb_flags & (RXFCB_IP6 | RXFCB_CIP)) == RXFCB_CIP) {
1562 csum_flags |= M_CSUM_IPv4;
1563 if (fcb->rxfcb_flags & RXFCB_EIP)
1564 csum_flags |= M_CSUM_IPv4_BAD;
1565 }
1566 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
1567 int ipv_flags;
1568 if (fcb->rxfcb_flags & RXFCB_IP6)
1569 ipv_flags = M_CSUM_TCPv6 | M_CSUM_UDPv6;
1570 else
1571 ipv_flags = M_CSUM_TCPv4 | M_CSUM_UDPv4;
1572 if (fcb->rxfcb_pro == IPPROTO_TCP) {
1573 csum_flags |= (M_CSUM_TCPv4 |M_CSUM_TCPv6) & ipv_flags;
1574 } else {
1575 csum_flags |= (M_CSUM_UDPv4 |M_CSUM_UDPv6) & ipv_flags;
1576 }
1577 if (fcb->rxfcb_flags & RXFCB_ETU)
1578 csum_flags |= M_CSUM_TCP_UDP_BAD;
1579 }
1580
1581 m->m_pkthdr.csum_flags = csum_flags;
1582 return true;
1583 }
1584
1585 static void
1586 pq3etsec_rx_input(
1587 struct pq3etsec_softc *sc,
1588 struct mbuf *m,
1589 uint16_t rxbd_flags)
1590 {
1591 struct ifnet * const ifp = &sc->sc_if;
1592
1593 pq3etsec_rx_map_unload(sc, m);
1594
1595 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
1596 struct rxfcb fcb = *mtod(m, struct rxfcb *);
1597 if (!pq3etsec_rx_offload(sc, m, &fcb))
1598 return;
1599 }
1600 m_adj(m, sc->sc_rx_adjlen);
1601
1602 if (rxbd_flags & RXBD_M)
1603 m->m_flags |= M_PROMISC;
1604 if (rxbd_flags & RXBD_BC)
1605 m->m_flags |= M_BCAST;
1606 if (rxbd_flags & RXBD_MC)
1607 m->m_flags |= M_MCAST;
1608 m->m_flags |= M_HASFCS;
1609 m_set_rcvif(m, &sc->sc_if);
1610
1611 ifp->if_ibytes += m->m_pkthdr.len;
1612
1613 /*
	 * Let's give it to the network subsystem to deal with.
1615 */
1616 int s = splnet();
1617 if_input(ifp, m);
1618 splx(s);
1619 }
1620
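/*
 * Walk the receive ring from the consumer index, handing completed
 * frames to pq3etsec_rx_input(), recycling the mbufs of errored frames
 * through the rx bufcache, and returning each consumed descriptor to the
 * driver with all flags cleared except wrap/interrupt.
 */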
1621 static void
1622 pq3etsec_rxq_consume(
1623 struct pq3etsec_softc *sc,
1624 struct pq3etsec_rxqueue *rxq)
1625 {
1626 struct ifnet * const ifp = &sc->sc_if;
1627 volatile struct rxbd *consumer = rxq->rxq_consumer;
1628 size_t rxconsumed = 0;
1629
1630 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);
1631
1632 for (;;) {
1633 if (consumer == rxq->rxq_producer) {
1634 rxq->rxq_consumer = consumer;
1635 rxq->rxq_inuse -= rxconsumed;
1636 KASSERT(rxq->rxq_inuse == 0);
1637 return;
1638 }
1639 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
1640 const uint16_t rxbd_flags = consumer->rxbd_flags;
1641 if (rxbd_flags & RXBD_E) {
1642 rxq->rxq_consumer = consumer;
1643 rxq->rxq_inuse -= rxconsumed;
1644 return;
1645 }
1646 KASSERT(rxq->rxq_mconsumer != NULL);
1647 #ifdef ETSEC_DEBUG
1648 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1649 #endif
1650 #if 0
1651 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
1652 __func__,
1653 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
1654 mtod(rxq->rxq_mconsumer, int *)[0],
1655 mtod(rxq->rxq_mconsumer, int *)[1],
1656 mtod(rxq->rxq_mconsumer, int *)[2],
1657 mtod(rxq->rxq_mconsumer, int *)[3]);
1658 #endif
1659 /*
1660 * We own this packet again. Clear all flags except wrap.
1661 */
1662 rxconsumed++;
1663 consumer->rxbd_flags = rxbd_flags & (RXBD_W | RXBD_I);
1664
1665 /*
1666 * If this descriptor has the LAST bit set and no errors,
1667 * it's a valid input packet.
1668 */
1669 if ((rxbd_flags & (RXBD_L | RXBD_ERRORS)) == RXBD_L) {
1670 size_t rxbd_len = consumer->rxbd_len;
1671 struct mbuf *m = rxq->rxq_mhead;
1672 struct mbuf *m_last = rxq->rxq_mconsumer;
1673 if ((rxq->rxq_mhead = m_last->m_next) == NULL)
1674 rxq->rxq_mtail = &rxq->rxq_mhead;
1675 rxq->rxq_mconsumer = rxq->rxq_mhead;
1676 m_last->m_next = NULL;
1677 m_last->m_len = rxbd_len & (MCLBYTES - 1);
1678 m->m_pkthdr.len = rxbd_len;
1679 pq3etsec_rx_input(sc, m, rxbd_flags);
1680 } else if (rxbd_flags & RXBD_L) {
1681 KASSERT(rxbd_flags & RXBD_ERRORS);
1682 struct mbuf *m;
1683 /*
			 * We encountered an error; take the mbufs and add
			 * them to the rx bufcache so we can reuse them.
1686 */
1687 ifp->if_ierrors++;
1688 for (m = rxq->rxq_mhead;
1689 m != rxq->rxq_mconsumer;
1690 m = m->m_next) {
1691 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1692 }
1693 m = rxq->rxq_mconsumer;
1694 if ((rxq->rxq_mhead = m->m_next) == NULL)
1695 rxq->rxq_mtail = &rxq->rxq_mhead;
1696 rxq->rxq_mconsumer = m->m_next;
1697 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1698 } else {
1699 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next;
1700 }
1701 #ifdef ETSEC_DEBUG
1702 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL;
1703 #endif
1704
1705 /*
1706 * Wrap at the last entry!
1707 */
1708 if (rxbd_flags & RXBD_W) {
1709 KASSERT(consumer + 1 == rxq->rxq_last);
1710 consumer = rxq->rxq_first;
1711 } else {
1712 consumer++;
1713 }
1714 #ifdef ETSEC_DEBUG
1715 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1716 #endif
1717 }
1718 }
1719
1720 static void
1721 pq3etsec_rxq_purge(
1722 struct pq3etsec_softc *sc,
1723 struct pq3etsec_rxqueue *rxq,
1724 bool discard)
1725 {
1726 struct mbuf *m;
1727
1728 if ((m = rxq->rxq_mhead) != NULL) {
1729 #ifdef ETSEC_DEBUG
1730 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs));
1731 #endif
1732
1733 if (discard) {
1734 pq3etsec_rx_map_unload(sc, m);
1735 m_freem(m);
1736 } else {
1737 while (m != NULL) {
1738 struct mbuf *m0 = m->m_next;
1739 m->m_next = NULL;
1740 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1741 m = m0;
1742 }
1743 }
1744 }
1745
1746 rxq->rxq_mconsumer = NULL;
1747 rxq->rxq_mhead = NULL;
1748 rxq->rxq_mtail = &rxq->rxq_mhead;
1749 rxq->rxq_inuse = 0;
1750 }
1751
1752 static void
1753 pq3etsec_rxq_reset(
1754 struct pq3etsec_softc *sc,
1755 struct pq3etsec_rxqueue *rxq)
1756 {
1757 /*
1758 * sync all the descriptors
1759 */
1760 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1761 rxq->rxq_last - rxq->rxq_first);
1762
1763 /*
1764 * Make sure we own all descriptors in the ring.
1765 */
1766 volatile struct rxbd *rxbd;
1767 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) {
1768 rxbd->rxbd_flags = RXBD_I;
1769 }
1770
1771 /*
1772 * Last descriptor has the wrap flag.
1773 */
1774 rxbd->rxbd_flags = RXBD_W | RXBD_I;
1775
1776 /*
	 * Reset the producer and consumer indexes.
1778 */
1779 rxq->rxq_consumer = rxq->rxq_first;
1780 rxq->rxq_producer = rxq->rxq_first;
1781 rxq->rxq_inuse = 0;
1782 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
1783 rxq->rxq_threshold = ETSEC_MINRXMBUFS;
1784
1785 sc->sc_imask |= IEVENT_RXF | IEVENT_BSY;
1786
1787 /*
	 * Restart the receive ring at the first descriptor
1789 */
1790 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
1791 }
1792
1793 static int
1794 pq3etsec_rxq_attach(
1795 struct pq3etsec_softc *sc,
1796 struct pq3etsec_rxqueue *rxq,
1797 u_int qno)
1798 {
1799 size_t map_size = PAGE_SIZE;
1800 size_t desc_count = map_size / sizeof(struct rxbd);
1801 int error;
1802 void *descs;
1803
1804 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1805 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1806 if (error)
1807 return error;
1808
1809 memset(descs, 0, map_size);
1810 rxq->rxq_first = descs;
1811 rxq->rxq_last = rxq->rxq_first + desc_count;
1812 rxq->rxq_consumer = descs;
1813 rxq->rxq_producer = descs;
1814
1815 pq3etsec_rxq_purge(sc, rxq, true);
1816 pq3etsec_rxq_reset(sc, rxq);
1817
1818 rxq->rxq_reg_rbase = RBASEn(qno);
1819 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);
1820
1821 return 0;
1822 }
1823
1824 static bool
1825 pq3etsec_txq_active_p(
1826 struct pq3etsec_softc * const sc,
1827 struct pq3etsec_txqueue *txq)
1828 {
1829 return !IF_IS_EMPTY(&txq->txq_mbufs);
1830 }
1831
1832 static bool
1833 pq3etsec_txq_fillable_p(
1834 struct pq3etsec_softc * const sc,
1835 struct pq3etsec_txqueue *txq)
1836 {
1837 return txq->txq_free >= txq->txq_threshold;
1838 }
1839
1840 static int
1841 pq3etsec_txq_attach(
1842 struct pq3etsec_softc *sc,
1843 struct pq3etsec_txqueue *txq,
1844 u_int qno)
1845 {
1846 size_t map_size = PAGE_SIZE;
1847 size_t desc_count = map_size / sizeof(struct txbd);
1848 int error;
1849 void *descs;
1850
1851 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1852 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1853 if (error)
1854 return error;
1855
1856 memset(descs, 0, map_size);
1857 txq->txq_first = descs;
1858 txq->txq_last = txq->txq_first + desc_count;
1859 txq->txq_consumer = descs;
1860 txq->txq_producer = descs;
1861
1862 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS);
1863
1864 txq->txq_reg_tbase = TBASEn(qno);
1865 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno);
1866
1867 pq3etsec_txq_reset(sc, txq);
1868
1869 return 0;
1870 }
1871
1872 static int
1873 pq3etsec_txq_map_load(
1874 struct pq3etsec_softc *sc,
1875 struct pq3etsec_txqueue *txq,
1876 struct mbuf *m)
1877 {
1878 bus_dmamap_t map;
1879 int error;
1880
1881 map = M_GETCTX(m, bus_dmamap_t);
1882 if (map != NULL)
1883 return 0;
1884
1885 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache);
1886 if (map == NULL)
1887 return ENOMEM;
1888
1889 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1890 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1891 if (error)
1892 return error;
1893
1894 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1895 BUS_DMASYNC_PREWRITE);
1896 M_SETCTX(m, map);
1897 return 0;
1898 }
1899
1900 static void
1901 pq3etsec_txq_map_unload(
1902 struct pq3etsec_softc *sc,
1903 struct pq3etsec_txqueue *txq,
1904 struct mbuf *m)
1905 {
1906 KASSERT(m);
1907 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1908 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1909 BUS_DMASYNC_POSTWRITE);
1910 bus_dmamap_unload(sc->sc_dmat, map);
1911 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map);
1912 }
1913
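/*
 * Turn the loaded DMA map of an mbuf chain into transmit descriptors.
 * The last descriptor of the chain gets TXBD_L (and TXBD_I when a
 * reclaim interrupt is due); the first descriptor's TXBD_R bit is set
 * last, after a memory barrier, so the hardware never sees a partially
 * built chain.  Returns false if the ring does not have enough free
 * descriptors for all of the map's segments.
 */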
1914 static bool
1915 pq3etsec_txq_produce(
1916 struct pq3etsec_softc *sc,
1917 struct pq3etsec_txqueue *txq,
1918 struct mbuf *m)
1919 {
1920 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1921
1922 if (map->dm_nsegs > txq->txq_free)
1923 return false;
1924
1925 /*
1926 * The TOE (offload) flag must be set in the first descriptor of any packet with an FCB prepended.
1927 */
1928 volatile struct txbd *producer = txq->txq_producer;
1929 uint16_t last_flags = TXBD_L;
1930 uint16_t first_flags = TXBD_R
1931 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0);
1932
1933 /*
1934 * If we've produced enough descriptors without consuming any,
1935 * we need to ask for an interrupt to reclaim some.
1936 */
1937 txq->txq_lastintr += map->dm_nsegs;
1938 if (ETSEC_IC_TX_ENABLED(sc)
1939 || txq->txq_lastintr >= txq->txq_threshold
1940 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1941 txq->txq_lastintr = 0;
1942 last_flags |= TXBD_I;
1943 }
1944
1945 #ifdef ETSEC_DEBUG
1946 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1947 #endif
1948 KASSERT(producer != txq->txq_last);
1949 producer->txbd_bufptr = map->dm_segs[0].ds_addr;
1950 producer->txbd_len = map->dm_segs[0].ds_len;
1951
1952 if (map->dm_nsegs > 1) {
1953 volatile struct txbd *start = producer + 1;
1954 size_t count = map->dm_nsegs - 1;
1955 for (u_int i = 1; i < map->dm_nsegs; i++) {
1956 if (__predict_false(++producer == txq->txq_last)) {
1957 producer = txq->txq_first;
1958 if (start < txq->txq_last) {
1959 pq3etsec_txq_desc_presync(sc, txq,
1960 start, txq->txq_last - start);
1961 count -= txq->txq_last - start;
1962 }
1963 start = txq->txq_first;
1964 }
1965 #ifdef ETSEC_DEBUG
1966 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1967 #endif
1968 producer->txbd_bufptr = map->dm_segs[i].ds_addr;
1969 producer->txbd_len = map->dm_segs[i].ds_len;
1970 producer->txbd_flags = TXBD_R
1971 | (producer->txbd_flags & TXBD_W)
1972 | (i == map->dm_nsegs - 1 ? last_flags : 0);
1973 #if 0
1974 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first,
1975 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr);
1976 #endif
1977 }
1978 pq3etsec_txq_desc_presync(sc, txq, start, count);
1979 } else {
1980 first_flags |= last_flags;
1981 }
1982
1983 membar_producer();
1984 txq->txq_producer->txbd_flags =
1985 first_flags | (txq->txq_producer->txbd_flags & TXBD_W);
1986 #if 0
1987 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
1988 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags,
1989 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr);
1990 #endif
1991 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1);
1992
1993 /*
1994 * Reduce free count by the number of segments we consumed.
1995 */
1996 txq->txq_free -= map->dm_nsegs;
1997 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1998 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
1999 KASSERT(producer->txbd_flags & TXBD_L);
2000 #ifdef ETSEC_DEBUG
2001 txq->txq_lmbufs[producer - txq->txq_first] = m;
2002 #endif
2003
2004 #if 0
2005 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
2006 __func__, m, m->m_pkthdr.len, map->dm_nsegs,
2007 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
2008 #endif
2009
2010 if (++producer == txq->txq_last)
2011 txq->txq_producer = txq->txq_first;
2012 else
2013 txq->txq_producer = producer;
2014 IF_ENQUEUE(&txq->txq_mbufs, m);
2015
2016 /*
2017 * Restart the transmitter.
2018 */
2019 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */
2020
2021 return true;
2022 }
2023
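/*
 * If the packet needs checksum offload or a VLAN tag, prepend a frame
 * control block (FCB) describing the work and mark the mbuf with
 * M_HASFCB.  The FCB goes into leading space if available; otherwise the
 * data is shifted within the mbuf or a new mbuf is prepended.  If no
 * mbuf can be allocated, the checksum is completed in software instead.
 */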
2024 static void
2025 pq3etsec_tx_offload(
2026 struct pq3etsec_softc *sc,
2027 struct pq3etsec_txqueue *txq,
2028 struct mbuf **mp)
2029 {
2030 struct mbuf *m = *mp;
2031 u_int csum_flags = m->m_pkthdr.csum_flags;
2032 bool have_vtag;
2033 uint16_t vtag;
2034
2035 KASSERT(m->m_flags & M_PKTHDR);
2036
2037 have_vtag = vlan_has_tag(m);
2038 vtag = (have_vtag) ? vlan_get_tag(m) : 0;
2039
2040 /*
2041 * Let's see if we are doing any offload first.
2042 */
2043 if (csum_flags == 0 && !have_vtag) {
2044 m->m_flags &= ~M_HASFCB;
2045 return;
2046 }
2047
2048 uint16_t flags = 0;
2049 if (csum_flags & M_CSUM_IP) {
2050 flags |= TXFCB_IP
2051 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
2052 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
2053 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
2054 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
2055 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
2056 }
2057 if (have_vtag) {
2058 flags |= TXFCB_VLN;
2059 }
2060 if (flags == 0) {
2061 m->m_flags &= ~M_HASFCB;
2062 return;
2063 }
2064
2065 struct txfcb fcb;
2066 fcb.txfcb_flags = flags;
2067 if (csum_flags & M_CSUM_IPv4)
2068 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2069 else
2070 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2071 fcb.txfcb_l3os = ETHER_HDR_LEN;
2072 fcb.txfcb_phcs = 0;
2073 fcb.txfcb_vlctl = vtag;
2074
2075 #if 0
2076 printf("%s: csum_flags=%#x: txfcb flags=%#x l3os=%u l4os=%u phcs=%u vlctl=%#x\n",
2077 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os,
2078 fcb.txfcb_phcs, fcb.txfcb_vlctl);
2079 #endif
2080
2081 if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
2082 m->m_data -= sizeof(fcb);
2083 m->m_len += sizeof(fcb);
2084 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
2085 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
2086 m->m_data = m->m_pktdat;
2087 m->m_len += sizeof(fcb);
2088 } else {
2089 struct mbuf *mn;
2090 MGET(mn, M_DONTWAIT, m->m_type);
2091 if (mn == NULL) {
2092 if (csum_flags & M_CSUM_IP4) {
2093 #ifdef INET
2094 in_undefer_cksum(m, ETHER_HDR_LEN,
2095 csum_flags & M_CSUM_IP4);
2096 #else
2097 panic("%s: impossible M_CSUM flags %#x",
2098 device_xname(sc->sc_dev), csum_flags);
2099 #endif
2100 } else if (csum_flags & M_CSUM_IP6) {
2101 #ifdef INET6
2102 in6_undefer_cksum(m, ETHER_HDR_LEN,
2103 csum_flags & M_CSUM_IP6);
2104 #else
2105 panic("%s: impossible M_CSUM flags %#x",
2106 device_xname(sc->sc_dev), csum_flags);
2107 #endif
2108 }
2109
2110 m->m_flags &= ~M_HASFCB;
2111 return;
2112 }
2113
2114 m_move_pkthdr(mn, m);
2115 mn->m_next = m;
2116 m = mn;
2117 m_align(m, sizeof(fcb));
2118 m->m_len = sizeof(fcb);
2119 *mp = m;
2120 }
2121 m->m_pkthdr.len += sizeof(fcb);
2122 m->m_flags |= M_HASFCB;
2123 *mtod(m, struct txfcb *) = fcb;
2124 return;
2125 }
2126
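/*
 * Pull packets off the interface send queue, apply checksum/VLAN offload
 * and hand them to pq3etsec_txq_produce until the queue is drained or
 * the descriptor ring fills up.  A packet that could not be produced is
 * parked in txq_next and retried first on the next call; packets whose
 * dmamap load fails are dropped.  Returns true if the send queue was
 * drained.
 */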
2127 static bool
2128 pq3etsec_txq_enqueue(
2129 struct pq3etsec_softc *sc,
2130 struct pq3etsec_txqueue *txq)
2131 {
2132 for (;;) {
2133 if (IF_QFULL(&txq->txq_mbufs))
2134 return false;
2135 struct mbuf *m = txq->txq_next;
2136 if (m == NULL) {
2137 int s = splnet();
2138 IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
2139 splx(s);
2140 if (m == NULL)
2141 return true;
2142 M_SETCTX(m, NULL);
2143 pq3etsec_tx_offload(sc, txq, &m);
2144 } else {
2145 txq->txq_next = NULL;
2146 }
2147 int error = pq3etsec_txq_map_load(sc, txq, m);
2148 if (error) {
2149 aprint_error_dev(sc->sc_dev,
2150 "discarded packet due to "
2151 "dmamap load failure: %d\n", error);
2152 m_freem(m);
2153 continue;
2154 }
2155 KASSERT(txq->txq_next == NULL);
2156 if (!pq3etsec_txq_produce(sc, txq, m)) {
2157 txq->txq_next = m;
2158 return false;
2159 }
2160 KASSERT(txq->txq_next == NULL);
2161 }
2162 }
2163
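/*
 * Walk the transmit ring reclaiming descriptors the hardware has
 * finished with, unloading the corresponding dmamaps, updating the
 * interface statistics and freeing the mbufs.  Returns true if the ring
 * is now empty, otherwise whether enough descriptors were freed to make
 * it fillable again.
 */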
2164 static bool
2165 pq3etsec_txq_consume(
2166 struct pq3etsec_softc *sc,
2167 struct pq3etsec_txqueue *txq)
2168 {
2169 struct ifnet * const ifp = &sc->sc_if;
2170 volatile struct txbd *consumer = txq->txq_consumer;
2171 size_t txfree = 0;
2172
2173 #if 0
2174 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
2175 #endif
2176 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);
2177
2178 for (;;) {
2179 if (consumer == txq->txq_producer) {
2180 txq->txq_consumer = consumer;
2181 txq->txq_free += txfree;
2182 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
2183 #if 0
2184 printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
2185 __func__, txfree, txq->txq_free - txfree, txq->txq_free);
2186 #endif
2187 KASSERT(txq->txq_lastintr == 0);
2188 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
2189 return true;
2190 }
2191 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
2192 const uint16_t txbd_flags = consumer->txbd_flags;
2193 if (txbd_flags & TXBD_R) {
2194 txq->txq_consumer = consumer;
2195 txq->txq_free += txfree;
2196 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
2197 #if 0
2198 printf("%s: freed %zu descriptors\n",
2199 __func__, txfree);
2200 #endif
2201 return pq3etsec_txq_fillable_p(sc, txq);
2202 }
2203
2204 /*
2205 * If this is the last descriptor in the chain, get the
2206 * mbuf, unload its dmamap, and free the mbuf chain itself.
2207 */
2208 if (txbd_flags & TXBD_L) {
2209 struct mbuf *m;
2210
2211 IF_DEQUEUE(&txq->txq_mbufs, m);
2212 #ifdef ETSEC_DEBUG
2213 KASSERTMSG(
2214 m == txq->txq_lmbufs[consumer-txq->txq_first],
2215 "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
2216 __func__, consumer, consumer - txq->txq_first,
2217 txbd_flags, m,
2218 &txq->txq_lmbufs[consumer-txq->txq_first],
2219 txq->txq_lmbufs[consumer-txq->txq_first]);
2220 #endif
2221 KASSERT(m);
2222 pq3etsec_txq_map_unload(sc, txq, m);
2223 #if 0
2224 printf("%s: mbuf %p: consumed a %u byte packet\n",
2225 __func__, m, m->m_pkthdr.len);
2226 #endif
2227 if (m->m_flags & M_HASFCB)
2228 m_adj(m, sizeof(struct txfcb));
2229 bpf_mtap(ifp, m, BPF_D_OUT);
2230 ifp->if_opackets++;
2231 ifp->if_obytes += m->m_pkthdr.len;
2232 if (m->m_flags & M_MCAST)
2233 ifp->if_omcasts++;
2234 if (txbd_flags & TXBD_ERRORS)
2235 ifp->if_oerrors++;
2236 m_freem(m);
2237 #ifdef ETSEC_DEBUG
2238 txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
2239 #endif
2240 } else {
2241 #ifdef ETSEC_DEBUG
2242 KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
2243 #endif
2244 }
2245
2246 /*
2247 * We own this descriptor again. Clear all flags except wrap.
2248 */
2249 txfree++;
2250 //consumer->txbd_flags = txbd_flags & TXBD_W;
2251
2252 /*
2253 * Wrap at the last entry!
2254 */
2255 if (txbd_flags & TXBD_W) {
2256 KASSERT(consumer + 1 == txq->txq_last);
2257 consumer = txq->txq_first;
2258 } else {
2259 consumer++;
2260 KASSERT(consumer < txq->txq_last);
2261 }
2262 }
2263 }
2264
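/*
 * Discard every packet still queued or pending for transmit.  Only
 * legal while the transmitter is disabled.
 */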
2265 static void
2266 pq3etsec_txq_purge(
2267 struct pq3etsec_softc *sc,
2268 struct pq3etsec_txqueue *txq)
2269 {
2270 struct mbuf *m;
2271 KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);
2272
2273 for (;;) {
2274 IF_DEQUEUE(&txq->txq_mbufs, m);
2275 if (m == NULL)
2276 break;
2277 pq3etsec_txq_map_unload(sc, txq, m);
2278 m_freem(m);
2279 }
2280 if ((m = txq->txq_next) != NULL) {
2281 txq->txq_next = NULL;
2282 pq3etsec_txq_map_unload(sc, txq, m);
2283 m_freem(m);
2284 }
2285 #ifdef ETSEC_DEBUG
2286 memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
2287 #endif
2288 }
2289
2290 static void
2291 pq3etsec_txq_reset(
2292 struct pq3etsec_softc *sc,
2293 struct pq3etsec_txqueue *txq)
2294 {
2295 /*
2296 * sync all the descriptors
2297 */
2298 pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
2299 txq->txq_last - txq->txq_first);
2300
2301 /*
2302 * Make sure we own all descriptors in the ring.
2303 */
2304 volatile struct txbd *txbd;
2305 for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
2306 txbd->txbd_flags = 0;
2307 }
2308
2309 /*
2310 * Last descriptor has the wrap flag.
2311 */
2312 txbd->txbd_flags = TXBD_W;
2313
2314 /*
2315 * Reset the producer and consumer indexes.
2316 */
2317 txq->txq_consumer = txq->txq_first;
2318 txq->txq_producer = txq->txq_first;
2319 txq->txq_free = txq->txq_last - txq->txq_first - 1;
2320 txq->txq_threshold = txq->txq_free / 2;
2321 txq->txq_lastintr = 0;
2322
2323 /*
2324 * What do we want to get interrupted on?
2325 */
2326 sc->sc_imask |= IEVENT_TXF | IEVENT_TXE;
2327
2328 /*
2329 * Restart the transmit at the first descriptor
2330 */
2331 etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
2332 }
2333
2334 static void
2335 pq3etsec_ifstart(struct ifnet *ifp)
2336 {
2337 struct pq3etsec_softc * const sc = ifp->if_softc;
2338
2339 if (__predict_false((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)) {
2340 return;
2341 }
2342
2343 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2344 softint_schedule(sc->sc_soft_ih);
2345 }
2346
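/*
 * Recover from a transmit error: reclaim completed descriptors, clear
 * the transmit halt if packets are still queued, and try to queue more.
 * The individual IEVENT error bits accumulated in sc_txerrors are
 * currently only cleared here; the empty branches below are placeholders
 * for per-error handling.
 */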
2347 static void
2348 pq3etsec_tx_error(
2349 struct pq3etsec_softc * const sc)
2350 {
2351 struct pq3etsec_txqueue * const txq = &sc->sc_txq;
2352
2353 pq3etsec_txq_consume(sc, txq);
2354
2355 if (pq3etsec_txq_fillable_p(sc, txq))
2356 sc->sc_if.if_flags &= ~IFF_OACTIVE;
2357 if (sc->sc_txerrors
2358 & (IEVENT_LC | IEVENT_CRL | IEVENT_XFUN | IEVENT_BABT)) {
2359 } else if (sc->sc_txerrors & IEVENT_EBERR) {
2360 }
2361
2362 if (pq3etsec_txq_active_p(sc, txq))
2363 etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
2364 if (!pq3etsec_txq_enqueue(sc, txq)) {
2365 sc->sc_ev_tx_stall.ev_count++;
2366 sc->sc_if.if_flags |= IFF_OACTIVE;
2367 }
2368
2369 sc->sc_txerrors = 0;
2370 }
2371
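/*
 * Hard interrupt handlers for transmit and receive completion.  Each
 * acknowledges its IEVENT bits, masks them in IMASK and defers the real
 * work to the soft interrupt handler, which re-enables them when done.
 */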
2372 int
2373 pq3etsec_tx_intr(void *arg)
2374 {
2375 struct pq3etsec_softc * const sc = arg;
2376
2377 mutex_enter(sc->sc_hwlock);
2378
2379 sc->sc_ev_tx_intr.ev_count++;
2380
2381 uint32_t ievent = etsec_read(sc, IEVENT);
2382 ievent &= IEVENT_TXF | IEVENT_TXB;
2383 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2384
2385 #if 0
2386 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2387 __func__, ievent, etsec_read(sc, IMASK));
2388 #endif
2389
2390 if (ievent == 0) {
2391 mutex_exit(sc->sc_hwlock);
2392 return 0;
2393 }
2394
2395 sc->sc_imask &= ~(IEVENT_TXF | IEVENT_TXB);
2396 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2397 etsec_write(sc, IMASK, sc->sc_imask);
2398 softint_schedule(sc->sc_soft_ih);
2399
2400 mutex_exit(sc->sc_hwlock);
2401
2402 return 1;
2403 }
2404
2405 int
2406 pq3etsec_rx_intr(void *arg)
2407 {
2408 struct pq3etsec_softc * const sc = arg;
2409
2410 mutex_enter(sc->sc_hwlock);
2411
2412 sc->sc_ev_rx_intr.ev_count++;
2413
2414 uint32_t ievent = etsec_read(sc, IEVENT);
2415 ievent &= IEVENT_RXF | IEVENT_RXB;
2416 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2417 if (ievent == 0) {
2418 mutex_exit(sc->sc_hwlock);
2419 return 0;
2420 }
2421
2422 #if 0
2423 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
2424 #endif
2425
2426 sc->sc_imask &= ~(IEVENT_RXF | IEVENT_RXB);
2427 atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
2428 etsec_write(sc, IMASK, sc->sc_imask);
2429 softint_schedule(sc->sc_soft_ih);
2430
2431 mutex_exit(sc->sc_hwlock);
2432
2433 return 1;
2434 }
2435
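/*
 * Everything other than plain RX/TX completion is handled here.  MII
 * management and graceful-stop completions wake their waiters directly;
 * BSY, TXE and DPE conditions are flagged for the soft interrupt
 * handler to deal with.
 */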
2436 int
2437 pq3etsec_error_intr(void *arg)
2438 {
2439 struct pq3etsec_softc * const sc = arg;
2440
2441 mutex_enter(sc->sc_hwlock);
2442
2443 sc->sc_ev_error_intr.ev_count++;
2444
2445 for (int rv = 0, soft_flags = 0;; rv = 1) {
2446 uint32_t ievent = etsec_read(sc, IEVENT);
2447 ievent &= ~(IEVENT_RXF | IEVENT_RXB | IEVENT_TXF | IEVENT_TXB);
2448 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2449 if (ievent == 0) {
2450 if (soft_flags) {
2451 atomic_or_uint(&sc->sc_soft_flags, soft_flags);
2452 softint_schedule(sc->sc_soft_ih);
2453 }
2454 mutex_exit(sc->sc_hwlock);
2455 return rv;
2456 }
2457 #if 0
2458 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2459 __func__, ievent, etsec_read(sc, IMASK));
2460 #endif
2461
2462 if (ievent & (IEVENT_GRSC | IEVENT_GTSC)) {
2463 sc->sc_imask &= ~(IEVENT_GRSC | IEVENT_GTSC);
2464 etsec_write(sc, IMASK, sc->sc_imask);
2465 wakeup(sc);
2466 }
2467 if (ievent & (IEVENT_MMRD | IEVENT_MMWR)) {
2468 sc->sc_imask &= ~(IEVENT_MMRD | IEVENT_MMWR);
2469 etsec_write(sc, IMASK, sc->sc_imask);
2470 wakeup(&sc->sc_mii);
2471 }
2472 if (ievent & IEVENT_BSY) {
2473 soft_flags |= SOFT_RXBSY;
2474 sc->sc_imask &= ~IEVENT_BSY;
2475 etsec_write(sc, IMASK, sc->sc_imask);
2476 }
2477 if (ievent & IEVENT_TXE) {
2478 soft_flags |= SOFT_TXERROR;
2479 sc->sc_imask &= ~IEVENT_TXE;
2480 sc->sc_txerrors |= ievent;
2481 }
2482 if (ievent & IEVENT_TXC) {
2483 sc->sc_ev_tx_pause.ev_count++;
2484 }
2485 if (ievent & IEVENT_RXC) {
2486 sc->sc_ev_rx_pause.ev_count++;
2487 }
2488 if (ievent & IEVENT_DPE) {
2489 soft_flags |= SOFT_RESET;
2490 sc->sc_imask &= ~IEVENT_DPE;
2491 etsec_write(sc, IMASK, sc->sc_imask);
2492 }
2493 }
2494 }
2495
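/*
 * Soft interrupt handler: takes sc_lock and does the bulk of the work
 * flagged by the hard interrupt handlers: reinitializing after a reset,
 * raising the receive threshold after an RX busy condition, reclaiming
 * and refilling the transmit ring, consuming received packets, and
 * finally unmasking the serviced interrupts.
 */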
2496 void
2497 pq3etsec_soft_intr(void *arg)
2498 {
2499 struct pq3etsec_softc * const sc = arg;
2500 struct ifnet * const ifp = &sc->sc_if;
2501 uint32_t imask = 0;
2502
2503 mutex_enter(sc->sc_lock);
2504
2505 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
2506
2507 sc->sc_ev_soft_intr.ev_count++;
2508
2509 if (soft_flags & SOFT_RESET) {
2510 int s = splnet();
2511 pq3etsec_ifinit(ifp);
2512 splx(s);
2513 soft_flags = 0;
2514 }
2515
2516 if (soft_flags & SOFT_RXBSY) {
2517 struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
2518 size_t threshold = 5 * rxq->rxq_threshold / 4;
2519 if (threshold >= rxq->rxq_last - rxq->rxq_first) {
2520 threshold = rxq->rxq_last - rxq->rxq_first - 1;
2521 } else {
2522 imask |= IEVENT_BSY;
2523 }
2524 aprint_normal_dev(sc->sc_dev,
2525 "increasing receive buffers from %zu to %zu\n",
2526 rxq->rxq_threshold, threshold);
2527 rxq->rxq_threshold = threshold;
2528 }
2529
2530 if ((soft_flags & SOFT_TXINTR)
2531 || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
2532 /*
2533 * Let's do what we came here for. Consume transmitted
2534 * packets off the transmit ring.
2535 */
2536 if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
2537 || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
2538 sc->sc_ev_tx_stall.ev_count++;
2539 ifp->if_flags |= IFF_OACTIVE;
2540 } else {
2541 ifp->if_flags &= ~IFF_OACTIVE;
2542 }
2543 imask |= IEVENT_TXF;
2544 }
2545
2546 if (soft_flags & (SOFT_RXINTR | SOFT_RXBSY)) {
2547 /* Let's consume the received packets. */
2548 pq3etsec_rxq_consume(sc, &sc->sc_rxq);
2549 imask |= IEVENT_RXF;
2550 }
2551
2552 if (soft_flags & SOFT_TXERROR) {
2553 pq3etsec_tx_error(sc);
2554 imask |= IEVENT_TXE;
2555 }
2556
2557 if (ifp->if_flags & IFF_RUNNING) {
2558 pq3etsec_rxq_produce(sc, &sc->sc_rxq);
2559 mutex_spin_enter(sc->sc_hwlock);
2560 sc->sc_imask |= imask;
2561 etsec_write(sc, IMASK, sc->sc_imask);
2562 mutex_spin_exit(sc->sc_hwlock);
2563 } else {
2564 KASSERT((soft_flags & SOFT_RXBSY) == 0);
2565 }
2566
2567 mutex_exit(sc->sc_lock);
2568 }
2569
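/*
 * Once-a-second callout used to drive mii_tick() for link monitoring.
 */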
2570 static void
2571 pq3etsec_mii_tick(void *arg)
2572 {
2573 struct pq3etsec_softc * const sc = arg;
2574 mutex_enter(sc->sc_lock);
2575 callout_ack(&sc->sc_mii_callout);
2576 sc->sc_ev_mii_ticks.ev_count++;
2577 #ifdef DEBUG
2578 uint64_t now = mftb();
2579 if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
2580 aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
2581 __func__, now - sc->sc_mii_last_tick);
2582 callout_stop(&sc->sc_mii_callout);
2583 }
2584 #endif
2585 mii_tick(&sc->sc_mii);
2586 int s = splnet();
2587 if (sc->sc_soft_flags & SOFT_RESET)
2588 softint_schedule(sc->sc_soft_ih);
2589 splx(s);
2590 callout_schedule(&sc->sc_mii_callout, hz);
2591 #ifdef DEBUG
2592 sc->sc_mii_last_tick = now;
2593 #endif
2594 mutex_exit(sc->sc_lock);
2595 }
2596
2597 static void
2598 pq3etsec_set_ic_rx(struct pq3etsec_softc *sc)
2599 {
2600 uint32_t reg;
2601
2602 if (ETSEC_IC_RX_ENABLED(sc)) {
2603 reg = RXIC_ICEN;
2604 reg |= RXIC_ICFT_SET(sc->sc_ic_rx_count);
2605 reg |= RXIC_ICTT_SET(sc->sc_ic_rx_time);
2606 } else {
2607 /* Disable RX interrupt coalescing */
2608 reg = 0;
2609 }
2610
2611 etsec_write(sc, RXIC, reg);
2612 }
2613
2614 static void
2615 pq3etsec_set_ic_tx(struct pq3etsec_softc *sc)
2616 {
2617 uint32_t reg;
2618
2619 if (ETSEC_IC_TX_ENABLED(sc)) {
2620 reg = TXIC_ICEN;
2621 reg |= TXIC_ICFT_SET(sc->sc_ic_tx_count);
2622 reg |= TXIC_ICTT_SET(sc->sc_ic_tx_time);
2623 } else {
2624 /* Disable TX interrupt coalescing */
2625 reg = 0;
2626 }
2627
2628 etsec_write(sc, TXIC, reg);
2629 }
2630
2631 /*
2632 * sysctl
2633 */
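/*
 * The helpers below back four read/write integer nodes for the
 * interrupt-coalescing tunables, created under hw.<device> (e.g.
 * hw.tsec0.int_coal.rx_time, assuming the device attached as tsec0):
 *
 *	int_coal.rx_time	RX timer threshold		(0-65535)
 *	int_coal.rx_count	RX frame count threshold	(0-255)
 *	int_coal.tx_time	TX timer threshold		(0-65535)
 *	int_coal.tx_count	TX frame count threshold	(0-255)
 *
 * New values are range checked, stored under sc_lock and pushed to the
 * RXIC/TXIC registers immediately.
 */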
2634 static int
2635 pq3etsec_sysctl_ic_time_helper(SYSCTLFN_ARGS, int *valuep)
2636 {
2637 struct sysctlnode node = *rnode;
2638 struct pq3etsec_softc *sc = rnode->sysctl_data;
2639 int value = *valuep;
2640 int error;
2641
2642 node.sysctl_data = &value;
2643 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2644 if (error != 0 || newp == NULL)
2645 return error;
2646
2647 if (value < 0 || value > 65535)
2648 return EINVAL;
2649
2650 mutex_enter(sc->sc_lock);
2651 *valuep = value;
2652 if (valuep == &sc->sc_ic_rx_time)
2653 pq3etsec_set_ic_rx(sc);
2654 else
2655 pq3etsec_set_ic_tx(sc);
2656 mutex_exit(sc->sc_lock);
2657
2658 return 0;
2659 }
2660
2661 static int
2662 pq3etsec_sysctl_ic_count_helper(SYSCTLFN_ARGS, int *valuep)
2663 {
2664 struct sysctlnode node = *rnode;
2665 struct pq3etsec_softc *sc = rnode->sysctl_data;
2666 int value = *valuep;
2667 int error;
2668
2669 node.sysctl_data = &value;
2670 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2671 if (error != 0 || newp == NULL)
2672 return error;
2673
2674 if (value < 0 || value > 255)
2675 return EINVAL;
2676
2677 mutex_enter(sc->sc_lock);
2678 *valuep = value;
2679 if (valuep == &sc->sc_ic_rx_count)
2680 pq3etsec_set_ic_rx(sc);
2681 else
2682 pq3etsec_set_ic_tx(sc);
2683 mutex_exit(sc->sc_lock);
2684
2685 return 0;
2686 }
2687
2688 static int
2689 pq3etsec_sysctl_ic_rx_time_helper(SYSCTLFN_ARGS)
2690 {
2691 struct pq3etsec_softc *sc = rnode->sysctl_data;
2692
2693 return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode),
2694 &sc->sc_ic_rx_time);
2695 }
2696
2697 static int
2698 pq3etsec_sysctl_ic_rx_count_helper(SYSCTLFN_ARGS)
2699 {
2700 struct pq3etsec_softc *sc = rnode->sysctl_data;
2701
2702 return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode),
2703 &sc->sc_ic_rx_count);
2704 }
2705
2706 static int
2707 pq3etsec_sysctl_ic_tx_time_helper(SYSCTLFN_ARGS)
2708 {
2709 struct pq3etsec_softc *sc = rnode->sysctl_data;
2710
2711 return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode),
2712 &sc->sc_ic_tx_time);
2713 }
2714
2715 static int
2716 pq3etsec_sysctl_ic_tx_count_helper(SYSCTLFN_ARGS)
2717 {
2718 struct pq3etsec_softc *sc = rnode->sysctl_data;
2719
2720 return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode),
2721 &sc->sc_ic_tx_count);
2722 }
2723
2724 static void
2725 pq3etsec_sysctl_setup(struct sysctllog **clog, struct pq3etsec_softc *sc)
2726 {
2727 const struct sysctlnode *cnode, *rnode;
2728
2729 if (sysctl_createv(clog, 0, NULL, &rnode,
2730 CTLFLAG_PERMANENT,
2731 CTLTYPE_NODE, device_xname(sc->sc_dev),
2732 SYSCTL_DESCR("TSEC interface"),
2733 NULL, 0, NULL, 0,
2734 CTL_HW, CTL_CREATE, CTL_EOL) != 0)
2735 goto bad;
2736
2737 if (sysctl_createv(clog, 0, &rnode, &rnode,
2738 CTLFLAG_PERMANENT,
2739 CTLTYPE_NODE, "int_coal",
2740 SYSCTL_DESCR("Interrupts coalescing"),
2741 NULL, 0, NULL, 0,
2742 CTL_CREATE, CTL_EOL) != 0)
2743 goto bad;
2744
2745 if (sysctl_createv(clog, 0, &rnode, &cnode,
2746 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
2747 CTLTYPE_INT, "rx_time",
2748 SYSCTL_DESCR("RX time threshold (0-65535)"),
2749 pq3etsec_sysctl_ic_rx_time_helper, 0, (void *)sc, 0,
2750 CTL_CREATE, CTL_EOL) != 0)
2751 goto bad;
2752
2753 if (sysctl_createv(clog, 0, &rnode, &cnode,
2754 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
2755 CTLTYPE_INT, "rx_count",
2756 SYSCTL_DESCR("RX frame count threshold (0-255)"),
2757 pq3etsec_sysctl_ic_rx_count_helper, 0, (void *)sc, 0,
2758 CTL_CREATE, CTL_EOL) != 0)
2759 goto bad;
2760
2761 if (sysctl_createv(clog, 0, &rnode, &cnode,
2762 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
2763 CTLTYPE_INT, "tx_time",
2764 SYSCTL_DESCR("TX time threshold (0-65535)"),
2765 pq3etsec_sysctl_ic_tx_time_helper, 0, (void *)sc, 0,
2766 CTL_CREATE, CTL_EOL) != 0)
2767 goto bad;
2768
2769 if (sysctl_createv(clog, 0, &rnode, &cnode,
2770 CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
2771 CTLTYPE_INT, "tx_count",
2772 SYSCTL_DESCR("TX frame count threshold (0-255)"),
2773 pq3etsec_sysctl_ic_tx_count_helper, 0, (void *)sc, 0,
2774 CTL_CREATE, CTL_EOL) != 0)
2775 goto bad;
2776
2777 return;
2778
2779 bad:
2780 aprint_error_dev(sc->sc_dev, "could not attach sysctl nodes\n");
2781 }
2782