     1 /*	$NetBSD: pq3etsec.c,v 1.24 2015/01/23 06:58:32 nonaka Exp $	*/
2 /*-
3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
8 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
9 *
10 * This material is based upon work supported by the Defense Advanced Research
11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
12 * Contract No. N66001-09-C-2073.
13 * Approved for Public Release, Distribution Unlimited
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "opt_inet.h"
38 #include "opt_mpc85xx.h"
39 #include "opt_multiprocessor.h"
40 #include "opt_net_mpsafe.h"
41
42 #include <sys/cdefs.h>
43
44 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.24 2015/01/23 06:58:32 nonaka Exp $");
45
46 #include <sys/param.h>
47 #include <sys/cpu.h>
48 #include <sys/device.h>
49 #include <sys/mbuf.h>
50 #include <sys/ioctl.h>
51 #include <sys/intr.h>
52 #include <sys/bus.h>
53 #include <sys/kernel.h>
54 #include <sys/kmem.h>
55 #include <sys/proc.h>
56 #include <sys/atomic.h>
57 #include <sys/callout.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_ether.h>
62 #include <net/if_media.h>
63
64 #include <dev/mii/miivar.h>
65
66 #include <net/bpf.h>
67
68 #ifdef INET
69 #include <netinet/in.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/ip.h>
72 #include <netinet/in_offload.h>
73 #endif /* INET */
74 #ifdef INET6
75 #include <netinet6/in6.h>
76 #include <netinet/ip6.h>
77 #endif
78 #include <netinet6/in6_offload.h>
79
80
81 #include <powerpc/spr.h>
82 #include <powerpc/booke/spr.h>
83
84 #include <powerpc/booke/cpuvar.h>
85 #include <powerpc/booke/e500var.h>
86 #include <powerpc/booke/e500reg.h>
87 #include <powerpc/booke/etsecreg.h>
88
89 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */
90
91 #define ETSEC_MAXTXMBUFS 30
92 #define ETSEC_NTXSEGS 30
93 #define ETSEC_MAXRXMBUFS 511
94 #define ETSEC_MINRXMBUFS 32
95 #define ETSEC_NRXSEGS 1
96
97 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx
98 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx\
99 |IFCAP_CSUM_UDPv4_Rx\
100 |IFCAP_CSUM_TCPv6_Rx\
101 |IFCAP_CSUM_UDPv6_Rx)
102
103 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx
104 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx\
105 |IFCAP_CSUM_UDPv4_Tx\
106 |IFCAP_CSUM_TCPv6_Tx\
107 |IFCAP_CSUM_UDPv6_Tx)
108
109 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN\
110 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN)
111
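/*
 * Shorthand groupings of mbuf checksum flags, used when translating a
 * packet's csum_flags into the transmit frame control block (FCB)
 * offload bits.
 */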
112 #define M_CSUM_IP (M_CSUM_CIP|M_CSUM_CTU)
113 #define M_CSUM_IP6 (M_CSUM_TCPv6|M_CSUM_UDPv6)
114 #define M_CSUM_TUP (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
115 #define M_CSUM_UDP (M_CSUM_UDPv4|M_CSUM_UDPv6)
116 #define M_CSUM_IP4 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4)
117 #define M_CSUM_CIP (M_CSUM_IPv4)
118 #define M_CSUM_CTU (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
119
120 struct pq3etsec_txqueue {
121 bus_dmamap_t txq_descmap;
122 volatile struct txbd *txq_consumer;
123 volatile struct txbd *txq_producer;
124 volatile struct txbd *txq_first;
125 volatile struct txbd *txq_last;
126 struct ifqueue txq_mbufs;
127 struct mbuf *txq_next;
128 #ifdef ETSEC_DEBUG
129 struct mbuf *txq_lmbufs[512];
130 #endif
131 uint32_t txq_qmask;
132 uint32_t txq_free;
133 uint32_t txq_threshold;
134 uint32_t txq_lastintr;
135 bus_size_t txq_reg_tbase;
136 bus_dma_segment_t txq_descmap_seg;
137 };
138
139 struct pq3etsec_rxqueue {
140 bus_dmamap_t rxq_descmap;
141 volatile struct rxbd *rxq_consumer;
142 volatile struct rxbd *rxq_producer;
143 volatile struct rxbd *rxq_first;
144 volatile struct rxbd *rxq_last;
145 struct mbuf *rxq_mhead;
146 struct mbuf **rxq_mtail;
147 struct mbuf *rxq_mconsumer;
148 #ifdef ETSEC_DEBUG
149 struct mbuf *rxq_mbufs[512];
150 #endif
151 uint32_t rxq_qmask;
152 uint32_t rxq_inuse;
153 uint32_t rxq_threshold;
154 bus_size_t rxq_reg_rbase;
155 bus_size_t rxq_reg_rbptr;
156 bus_dma_segment_t rxq_descmap_seg;
157 };
158
159 struct pq3etsec_mapcache {
160 u_int dmc_nmaps;
161 u_int dmc_maxseg;
162 u_int dmc_maxmaps;
163 u_int dmc_maxmapsize;
164 bus_dmamap_t dmc_maps[0];
165 };
166
167 struct pq3etsec_softc {
168 device_t sc_dev;
169 device_t sc_mdio_dev;
170 struct ethercom sc_ec;
171 #define sc_if sc_ec.ec_if
172 struct mii_data sc_mii;
173 bus_space_tag_t sc_bst;
174 bus_space_handle_t sc_bsh;
175 bus_space_handle_t sc_mdio_bsh;
176 bus_dma_tag_t sc_dmat;
177 int sc_phy_addr;
178 prop_dictionary_t sc_intrmap;
179 uint32_t sc_intrmask;
180
181 uint32_t sc_soft_flags;
182 #define SOFT_RESET 0x0001
183 #define SOFT_RXINTR 0x0010
184 #define SOFT_RXBSY 0x0020
185 #define SOFT_TXINTR 0x0100
186 #define SOFT_TXERROR 0x0200
187
188 struct pq3etsec_txqueue sc_txq;
189 struct pq3etsec_rxqueue sc_rxq;
190 uint32_t sc_txerrors;
191 uint32_t sc_rxerrors;
192
193 size_t sc_rx_adjlen;
194
195 /*
196 * Copies of various ETSEC registers.
197 */
198 uint32_t sc_imask;
199 uint32_t sc_maccfg1;
200 uint32_t sc_maccfg2;
201 uint32_t sc_maxfrm;
202 uint32_t sc_ecntrl;
203 uint32_t sc_dmactrl;
204 uint32_t sc_macstnaddr1;
205 uint32_t sc_macstnaddr2;
206 uint32_t sc_tctrl;
207 uint32_t sc_rctrl;
208 uint32_t sc_gaddr[16];
209 uint64_t sc_macaddrs[15];
210
211 void *sc_tx_ih;
212 void *sc_rx_ih;
213 void *sc_error_ih;
214 void *sc_soft_ih;
215
216 kmutex_t *sc_lock;
217 kmutex_t *sc_hwlock;
218
219 struct evcnt sc_ev_tx_stall;
220 struct evcnt sc_ev_tx_intr;
221 struct evcnt sc_ev_rx_stall;
222 struct evcnt sc_ev_rx_intr;
223 struct evcnt sc_ev_error_intr;
224 struct evcnt sc_ev_soft_intr;
225 struct evcnt sc_ev_tx_pause;
226 struct evcnt sc_ev_rx_pause;
227 struct evcnt sc_ev_mii_ticks;
228
229 struct callout sc_mii_callout;
230 uint64_t sc_mii_last_tick;
231
232 struct ifqueue sc_rx_bufcache;
233 struct pq3etsec_mapcache *sc_rx_mapcache;
234 struct pq3etsec_mapcache *sc_tx_mapcache;
235 };
236
237 struct pq3mdio_softc {
238 device_t mdio_dev;
239
240 kmutex_t *mdio_lock;
241
242 bus_space_tag_t mdio_bst;
243 bus_space_handle_t mdio_bsh;
244 };
245
246 static int pq3etsec_match(device_t, cfdata_t, void *);
247 static void pq3etsec_attach(device_t, device_t, void *);
248
249 static int pq3mdio_match(device_t, cfdata_t, void *);
250 static void pq3mdio_attach(device_t, device_t, void *);
251
252 static void pq3etsec_ifstart(struct ifnet *);
253 static void pq3etsec_ifwatchdog(struct ifnet *);
254 static int pq3etsec_ifinit(struct ifnet *);
255 static void pq3etsec_ifstop(struct ifnet *, int);
256 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *);
257
258 static int pq3etsec_mapcache_create(struct pq3etsec_softc *,
259 struct pq3etsec_mapcache **, size_t, size_t, size_t);
260 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *,
261 struct pq3etsec_mapcache *);
262 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *,
263 struct pq3etsec_mapcache *);
264 static void pq3etsec_mapcache_put(struct pq3etsec_softc *,
265 struct pq3etsec_mapcache *, bus_dmamap_t);
266
267 static int pq3etsec_txq_attach(struct pq3etsec_softc *,
268 struct pq3etsec_txqueue *, u_int);
269 static void pq3etsec_txq_purge(struct pq3etsec_softc *,
270 struct pq3etsec_txqueue *);
271 static void pq3etsec_txq_reset(struct pq3etsec_softc *,
272 struct pq3etsec_txqueue *);
273 static bool pq3etsec_txq_consume(struct pq3etsec_softc *,
274 struct pq3etsec_txqueue *);
275 static bool pq3etsec_txq_produce(struct pq3etsec_softc *,
276 struct pq3etsec_txqueue *, struct mbuf *m);
277 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *,
278 struct pq3etsec_txqueue *);
279
280 static int pq3etsec_rxq_attach(struct pq3etsec_softc *,
281 struct pq3etsec_rxqueue *, u_int);
282 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *,
283 struct pq3etsec_rxqueue *);
284 static void pq3etsec_rxq_purge(struct pq3etsec_softc *,
285 struct pq3etsec_rxqueue *, bool);
286 static void pq3etsec_rxq_reset(struct pq3etsec_softc *,
287 struct pq3etsec_rxqueue *);
288
289 static void pq3etsec_mc_setup(struct pq3etsec_softc *);
290
291 static void pq3etsec_mii_tick(void *);
292 static int pq3etsec_rx_intr(void *);
293 static int pq3etsec_tx_intr(void *);
294 static int pq3etsec_error_intr(void *);
295 static void pq3etsec_soft_intr(void *);
296
297 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc),
298 pq3etsec_match, pq3etsec_attach, NULL, NULL);
299
300 CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc),
301 pq3mdio_match, pq3mdio_attach, NULL, NULL);
302
303 CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc),
304 pq3mdio_match, pq3mdio_attach, NULL, NULL);
305
306 static inline uint32_t
307 etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off)
308 {
309 return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off);
310 }
311
312 static inline void
313 etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data)
314 {
315 bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data);
316 }
317
318 static inline uint32_t
319 etsec_read(struct pq3etsec_softc *sc, bus_size_t off)
320 {
321 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
322 }
323
324 static int
325 pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
326 {
327 return strcmp(cf->cf_name, "mdio") == 0;
328 }
329
330 static int
331 pq3mdio_match(device_t parent, cfdata_t cf, void *aux)
332 {
333 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
334 const bool p1025_p = (svr == (SVR_P1025v1 >> 16)
335 || svr == (SVR_P1016v1 >> 16));
336
337 if (device_is_a(parent, "cpunode")) {
338 if (!p1025_p
339 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
340 return 0;
341
342 return 1;
343 }
344
345 if (device_is_a(parent, "tsec")) {
346 if (p1025_p
347 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
348 return 0;
349
350 return 1;
351 }
352
353 return 0;
354 }
355
356 static void
357 pq3mdio_attach(device_t parent, device_t self, void *aux)
358 {
359 struct pq3mdio_softc * const mdio = device_private(self);
360 struct cpunode_attach_args * const cna = aux;
361 struct cpunode_locators * const cnl = &cna->cna_locs;
362
363 mdio->mdio_dev = self;
364 mdio->mdio_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
365
366 if (device_is_a(parent, "cpunode")) {
367 struct cpunode_softc * const psc = device_private(parent);
368 psc->sc_children |= cna->cna_childmask;
369
370 mdio->mdio_bst = cna->cna_memt;
371 if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr,
372 cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) {
373 aprint_error(": error mapping registers @ %#x\n",
374 cnl->cnl_addr);
375 return;
376 }
377 } else {
378 struct pq3etsec_softc * const sc = device_private(parent);
379
380 KASSERT(device_is_a(parent, "tsec"));
381 KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE
382 || cnl->cnl_addr == ETSEC2_BASE
383 || cnl->cnl_addr == ETSEC3_BASE
384 || cnl->cnl_addr == ETSEC4_BASE,
385 "unknown tsec addr %x", cnl->cnl_addr);
386
387 mdio->mdio_bst = sc->sc_bst;
388 mdio->mdio_bsh = sc->sc_bsh;
389 }
390
391 aprint_normal("\n");
392 }
393
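/*
 * Read a PHY register through the MII management interface: save the
 * current MIIMCOM (so an in-progress scan can be resumed), program
 * MIIMADD with the phy/register pair, pulse MIIMCOM_READ, wait for
 * MIIMIND to go idle, and return the value latched in MIIMSTAT.
 */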
394 static int
395 pq3mdio_mii_readreg(device_t self, int phy, int reg)
396 {
397 struct pq3mdio_softc * const mdio = device_private(self);
398 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);
399
400 mutex_enter(mdio->mdio_lock);
401
402 etsec_mdio_write(mdio, MIIMADD,
403 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
404
405 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */
406 etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ);
407
408 while (etsec_mdio_read(mdio, MIIMIND) != 0) {
409 delay(1);
410 }
411 int data = etsec_mdio_read(mdio, MIIMSTAT);
412
413 if (miimcom == MIIMCOM_SCAN)
414 etsec_mdio_write(mdio, MIIMCOM, miimcom);
415
416 #if 0
417 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
418 __func__, phy, reg, data);
419 #endif
420 mutex_exit(mdio->mdio_lock);
421 return data;
422 }
423
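/*
 * Write a PHY register: same sequence as the read path, except the
 * data goes to MIIMCON and the wait for MIIMIND_BUSY to clear is
 * bounded at roughly 1ms.
 */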
424 static void
425 pq3mdio_mii_writereg(device_t self, int phy, int reg, int data)
426 {
427 struct pq3mdio_softc * const mdio = device_private(self);
428 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);
429
430 #if 0
431 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
432 __func__, phy, reg, data);
433 #endif
434
435 mutex_enter(mdio->mdio_lock);
436
437 etsec_mdio_write(mdio, MIIMADD,
438 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
439 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */
440 etsec_mdio_write(mdio, MIIMCON, data);
441
442 int timo = 1000; /* 1ms */
443 while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) {
444 delay(1);
445 }
446
447 if (miimcom == MIIMCOM_SCAN)
448 etsec_mdio_write(mdio, MIIMCOM, miimcom);
449
450 mutex_exit(mdio->mdio_lock);
451 }
452
453 static inline void
454 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data)
455 {
456 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data);
457 }
458
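/*
 * MII status change callback: recompute MACCFG1/MACCFG2/ECNTRL from
 * the negotiated duplex, pause, and speed settings.  If anything
 * changed while the interface is running, flag SOFT_RESET so the
 * interface is re-initialized with the new settings.
 */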
459 static void
460 pq3etsec_mii_statchg(struct ifnet *ifp)
461 {
462 struct pq3etsec_softc * const sc = ifp->if_softc;
463 struct mii_data * const mii = &sc->sc_mii;
464
465 uint32_t maccfg1 = sc->sc_maccfg1;
466 uint32_t maccfg2 = sc->sc_maccfg2;
467 uint32_t ecntrl = sc->sc_ecntrl;
468
469 maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW);
470 maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD);
471
472 if (sc->sc_mii.mii_media_active & IFM_FDX) {
473 maccfg2 |= MACCFG2_FD;
474 }
475
476 /*
477 * Now deal with the flow control bits.
478 */
479 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO
480 && (mii->mii_media_active & IFM_ETH_FMASK)) {
481 if (mii->mii_media_active & IFM_ETH_RXPAUSE)
482 maccfg1 |= MACCFG1_RX_FLOW;
483 if (mii->mii_media_active & IFM_ETH_TXPAUSE)
484 maccfg1 |= MACCFG1_TX_FLOW;
485 }
486
487 /*
488 * Now deal with the speed.
489 */
490 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
491 maccfg2 |= MACCFG2_IFMODE_GMII;
492 } else {
493 maccfg2 |= MACCFG2_IFMODE_MII;
494 ecntrl &= ~ECNTRL_R100M;
495 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
496 ecntrl |= ECNTRL_R100M;
497 }
498 }
499
500 /*
501 * If things are different, re-init things.
502 */
503 if (maccfg1 != sc->sc_maccfg1
504 || maccfg2 != sc->sc_maccfg2
505 || ecntrl != sc->sc_ecntrl) {
506 if (sc->sc_if.if_flags & IFF_RUNNING)
507 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
508 sc->sc_maccfg1 = maccfg1;
509 sc->sc_maccfg2 = maccfg2;
510 sc->sc_ecntrl = ecntrl;
511 }
512 }
513
514 #if 0
515 static void
516 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
517 {
518 struct pq3etsec_softc * const sc = ifp->if_softc;
519
520 mii_pollstat(&sc->sc_mii);
521 ether_mediastatus(ifp, ifmr);
522 ifmr->ifm_status = sc->sc_mii.mii_media_status;
523 ifmr->ifm_active = sc->sc_mii.mii_media_active;
524 }
525
526 static int
527 pq3etsec_mediachange(struct ifnet *ifp)
528 {
529 struct pq3etsec_softc * const sc = ifp->if_softc;
530
531 if ((ifp->if_flags & IFF_UP) == 0)
532 return 0;
533
534 int rv = mii_mediachg(&sc->sc_mii);
535 return (rv == ENXIO) ? 0 : rv;
536 }
537 #endif
538
539 static int
540 pq3etsec_match(device_t parent, cfdata_t cf, void *aux)
541 {
542
543 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
544 return 0;
545
546 return 1;
547 }
548
549 static void
550 pq3etsec_attach(device_t parent, device_t self, void *aux)
551 {
552 struct cpunode_softc * const psc = device_private(parent);
553 struct pq3etsec_softc * const sc = device_private(self);
554 struct cpunode_attach_args * const cna = aux;
555 struct cpunode_locators * const cnl = &cna->cna_locs;
556 cfdata_t cf = device_cfdata(self);
557 int error;
558
559 psc->sc_children |= cna->cna_childmask;
560 sc->sc_dev = self;
561 sc->sc_bst = cna->cna_memt;
562 sc->sc_dmat = &booke_bus_dma_tag;
563
564 /*
565 * Pull out the mdio bus and phy we are supposed to use.
566 */
567 const int mdio = cf->cf_loc[CPUNODECF_MDIO];
568 const int phy = cf->cf_loc[CPUNODECF_PHY];
569 if (mdio != CPUNODECF_MDIO_DEFAULT)
570 aprint_normal(" mdio %d", mdio);
571
572 /*
573 * See if the phy is in the config file...
574 */
575 if (phy != CPUNODECF_PHY_DEFAULT) {
576 sc->sc_phy_addr = phy;
577 } else {
   578 		char prop_name[20];
579 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
580 cnl->cnl_instance);
581 sc->sc_phy_addr = board_info_get_number(prop_name);
582 }
583 if (sc->sc_phy_addr != MII_PHY_ANY)
584 aprint_normal(" phy %d", sc->sc_phy_addr);
585
586 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
587 &sc->sc_bsh);
588 if (error) {
589 aprint_error(": error mapping registers: %d\n", error);
590 return;
591 }
592
593 /*
   594 	 * Assume firmware has already set the mac address and fetch it
595 * before we reinit it.
596 */
597 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2);
598 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1);
599 sc->sc_rctrl = RCTRL_DEFAULT;
600 sc->sc_ecntrl = etsec_read(sc, ECNTRL);
601 sc->sc_maccfg1 = etsec_read(sc, MACCFG1);
602 sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT;
603
604 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) {
605 size_t len;
606 const uint8_t *mac_addr =
607 board_info_get_data("tsec-mac-addr-base", &len);
608 KASSERT(len == ETHER_ADDR_LEN);
609 sc->sc_macstnaddr2 =
610 (mac_addr[1] << 24)
611 | (mac_addr[0] << 16);
612 sc->sc_macstnaddr1 =
613 ((mac_addr[5] + cnl->cnl_instance - 1) << 24)
614 | (mac_addr[4] << 16)
615 | (mac_addr[3] << 8)
616 | (mac_addr[2] << 0);
617 #if 0
618 aprint_error(": mac-address unknown\n");
619 return;
620 #endif
621 }
622
623 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
624 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
625
626 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE);
627 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc);
628
629 /* Disable interrupts */
630 etsec_write(sc, IMASK, 0);
631
632 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0);
633 if (error) {
634 aprint_error(": failed to init rxq: %d\n", error);
635 return;
636 }
637
638 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
639 if (error) {
640 aprint_error(": failed to init txq: %d\n", error);
641 return;
642 }
643
644 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache,
645 ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS);
646 if (error) {
647 aprint_error(": failed to allocate rx dmamaps: %d\n", error);
648 return;
649 }
650
651 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
652 ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS);
653 if (error) {
654 aprint_error(": failed to allocate tx dmamaps: %d\n", error);
655 return;
656 }
657
658 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP,
659 pq3etsec_tx_intr, sc);
660 if (sc->sc_tx_ih == NULL) {
661 aprint_error(": failed to establish tx interrupt: %d\n",
662 cnl->cnl_intrs[0]);
663 return;
664 }
665
666 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP,
667 pq3etsec_rx_intr, sc);
668 if (sc->sc_rx_ih == NULL) {
669 aprint_error(": failed to establish rx interrupt: %d\n",
670 cnl->cnl_intrs[1]);
671 return;
672 }
673
674 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP,
675 pq3etsec_error_intr, sc);
676 if (sc->sc_error_ih == NULL) {
677 aprint_error(": failed to establish error interrupt: %d\n",
678 cnl->cnl_intrs[2]);
679 return;
680 }
681
682 int softint_flags = SOFTINT_NET;
683 #if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE)
684 softint_flags |= SOFTINT_MPSAFE;
685 #endif /* !MULTIPROCESSOR || NET_MPSAFE */
686 sc->sc_soft_ih = softint_establish(softint_flags,
687 pq3etsec_soft_intr, sc);
688 if (sc->sc_soft_ih == NULL) {
689 aprint_error(": failed to establish soft interrupt\n");
690 return;
691 }
692
693 /*
   694 	 * If no mdio locator was given, find and attach our own mdio device.
695 */
696 if (mdio == CPUNODECF_MDIO_DEFAULT) {
697 aprint_normal("\n");
698 cfdata_t mdio_cf = config_search_ia(pq3mdio_find, self, NULL, cna);
699 if (mdio_cf != NULL) {
700 sc->sc_mdio_dev = config_attach(self, mdio_cf, cna, NULL);
701 }
702 } else {
703 sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio);
704 if (sc->sc_mdio_dev == NULL) {
705 aprint_error(": failed to locate mdio device\n");
706 return;
707 }
708 aprint_normal("\n");
709 }
710
711 etsec_write(sc, ATTR, ATTR_DEFAULT);
712 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT);
713
714 char enaddr[ETHER_ADDR_LEN] = {
715 [0] = sc->sc_macstnaddr2 >> 16,
716 [1] = sc->sc_macstnaddr2 >> 24,
717 [2] = sc->sc_macstnaddr1 >> 0,
718 [3] = sc->sc_macstnaddr1 >> 8,
719 [4] = sc->sc_macstnaddr1 >> 16,
720 [5] = sc->sc_macstnaddr1 >> 24,
721 };
722 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
723 ether_sprintf(enaddr));
724
725 const char * const xname = device_xname(sc->sc_dev);
726 struct ethercom * const ec = &sc->sc_ec;
727 struct ifnet * const ifp = &ec->ec_if;
728
729 ec->ec_mii = &sc->sc_mii;
730
731 sc->sc_mii.mii_ifp = ifp;
732 sc->sc_mii.mii_readreg = pq3mdio_mii_readreg;
733 sc->sc_mii.mii_writereg = pq3mdio_mii_writereg;
734 sc->sc_mii.mii_statchg = pq3etsec_mii_statchg;
735
736 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
737 ether_mediastatus);
738
739 if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) {
740 mii_attach(sc->sc_mdio_dev, &sc->sc_mii, 0xffffffff,
741 sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
742
743 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
744 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
745 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
746 } else {
747 callout_schedule(&sc->sc_mii_callout, hz);
748 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
749 }
750 } else {
751 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
752 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
753 }
754
755 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
756 | ETHERCAP_JUMBO_MTU;
757
758 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
759 ifp->if_softc = sc;
760 ifp->if_capabilities = IFCAP_ETSEC;
761 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
762 ifp->if_ioctl = pq3etsec_ifioctl;
763 ifp->if_start = pq3etsec_ifstart;
764 ifp->if_watchdog = pq3etsec_ifwatchdog;
765 ifp->if_init = pq3etsec_ifinit;
766 ifp->if_stop = pq3etsec_ifstop;
767 IFQ_SET_READY(&ifp->if_snd);
768
769 pq3etsec_ifstop(ifp, true);
770
771 /*
772 * Attach the interface.
773 */
774 if_initialize(ifp);
775 ether_ifattach(ifp, enaddr);
776 if_register(ifp);
777
778 evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
779 NULL, xname, "rx stall");
780 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
781 NULL, xname, "tx stall");
782 evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
783 NULL, xname, "tx intr");
784 evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
785 NULL, xname, "rx intr");
786 evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
787 NULL, xname, "error intr");
788 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
789 NULL, xname, "soft intr");
790 evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
791 NULL, xname, "tx pause");
792 evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
793 NULL, xname, "rx pause");
794 evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
795 NULL, xname, "mii ticks");
796 }
797
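/*
 * Pack a 6-byte link-layer address into the 64-bit layout used by the
 * MACSTNADDR and MACnADDR register pairs (the last octet ends up in
 * the most significant byte, and the whole value is shifted up 16 bits).
 */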
798 static uint64_t
799 pq3etsec_macaddr_create(const uint8_t *lladdr)
800 {
801 uint64_t macaddr = 0;
802
803 lladdr += ETHER_ADDR_LEN;
804 for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
805 macaddr = (macaddr << 8) | *--lladdr;
806 }
807 return macaddr << 16;
808 }
809
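/*
 * Bring the interface up following the eTSEC soft reset and
 * reconfiguration procedure: stop the MAC, rebuild the tx mapcache if
 * the maximum frame size changed, program RCTRL/TCTRL for the
 * requested VLAN and checksum offloads, reload the descriptor ring
 * pointers, and finally re-enable DMA, interrupts, and the MAC.
 */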
810 static int
811 pq3etsec_ifinit(struct ifnet *ifp)
812 {
813 struct pq3etsec_softc * const sc = ifp->if_softc;
814 int error = 0;
815
816 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
817 if (ifp->if_mtu > ETHERMTU_JUMBO)
818 return error;
819
820 KASSERT(ifp->if_flags & IFF_UP);
821
822 /*
823 * Stop the interface (steps 1 to 4 in the Soft Reset and
   824 	 * Reconfiguration Procedure).
825 */
826 pq3etsec_ifstop(ifp, 0);
827
828 /*
829 * If our frame size has changed (or it's our first time through)
830 * destroy the existing transmit mapcache.
831 */
832 if (sc->sc_tx_mapcache != NULL
833 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
834 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
835 sc->sc_tx_mapcache = NULL;
836 }
837
838 if (sc->sc_tx_mapcache == NULL) {
839 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
840 ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS);
841 if (error)
842 return error;
843 }
844
845 sc->sc_ev_mii_ticks.ev_count++;
846 mii_tick(&sc->sc_mii);
847
848 if (ifp->if_flags & IFF_PROMISC) {
849 sc->sc_rctrl |= RCTRL_PROM;
850 } else {
851 sc->sc_rctrl &= ~RCTRL_PROM;
852 }
853
854 uint32_t rctrl_prsdep = 0;
855 sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP);
856 if (VLAN_ATTACHED(&sc->sc_ec)) {
857 sc->sc_rctrl |= RCTRL_VLEX;
858 rctrl_prsdep = RCTRL_PRSDEP_L2;
859 }
860 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) {
861 sc->sc_rctrl |= RCTRL_IPCSEN;
862 rctrl_prsdep = RCTRL_PRSDEP_L3;
863 }
864 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) {
865 sc->sc_rctrl |= RCTRL_TUCSEN;
866 rctrl_prsdep = RCTRL_PRSDEP_L4;
867 }
868 sc->sc_rctrl |= rctrl_prsdep;
869 #if 0
870 if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP))
871 aprint_normal_dev(sc->sc_dev,
872 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n",
873 sc->sc_rctrl,
874 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN),
875 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN),
876 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX),
877 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP));
878 #endif
879
880 sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS);
881 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */
882 sc->sc_tctrl |= TCTRL_VLINS;
883 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN)
884 sc->sc_tctrl |= TCTRL_IPCSEN;
885 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN)
886 sc->sc_tctrl |= TCTRL_TUCSEN;
887 #if 0
888 if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS))
889 aprint_normal_dev(sc->sc_dev,
890 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n",
891 sc->sc_tctrl,
892 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN),
893 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN),
894 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS));
895 #endif
896
897 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN);
898
899 const uint64_t macstnaddr =
900 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl));
901
902 sc->sc_imask = IEVENT_DPE;
903
   904 	/* 5. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */
905 pq3etsec_rxq_reset(sc, &sc->sc_rxq);
906 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */
907
   908 	/* 6. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */
909 pq3etsec_txq_reset(sc, &sc->sc_txq);
910
911 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */
912 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2);
913 etsec_write(sc, MAXFRM, sc->sc_maxfrm);
914 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32));
915 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0));
916 etsec_write(sc, MACCFG1, sc->sc_maccfg1);
917 etsec_write(sc, MACCFG2, sc->sc_maccfg2);
918 etsec_write(sc, ECNTRL, sc->sc_ecntrl);
919
920 /* 8. Setup group address hash table (GADDR0-GADDR15) */
921 pq3etsec_mc_setup(sc);
922
   923 	/* 9. Setup receive frame filter table (via RQFAR, RQFCR, and RQFPR) */
924 etsec_write(sc, MRBLR, MCLBYTES);
925
926 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */
927 sc->sc_dmactrl |= DMACTRL_DEFAULT;
928 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
929
930 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
931 etsec_write(sc, TQUEUE, TQUEUE_EN0);
932 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC;
933
934 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */
935
936 /* 12. Enable receive queues in RQUEUE, */
937 etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0);
938 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC;
939
940 /* and optionally set TOE functionality in RCTRL. */
941 etsec_write(sc, RCTRL, sc->sc_rctrl);
942 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL);
943 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF)
944 sc->sc_rx_adjlen += sizeof(struct rxfcb);
945
946 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */
947 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF);
948
949 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/
950 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF);
951
952 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */
953 sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS);
954 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
955
956 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */
957 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
959
960 sc->sc_soft_flags = 0;
961
962 etsec_write(sc, IMASK, sc->sc_imask);
963
964 ifp->if_flags |= IFF_RUNNING;
965
966 return error;
967 }
968
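/*
 * Stop the interface: request a graceful transmit/receive stop, wait
 * for the GTSC/GRSC acknowledgements, soft-reset the MAC, and then
 * reclaim (or recycle) the mbufs still held by the rings.
 */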
969 static void
970 pq3etsec_ifstop(struct ifnet *ifp, int disable)
971 {
972 struct pq3etsec_softc * const sc = ifp->if_softc;
973
974 KASSERT(!cpu_intr_p());
975 const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC;
976 /*
977 * Clear the GTSC and GRSC from the interrupt mask until
978 * we are ready for them. Then clear them from IEVENT,
979 * request the graceful shutdown, and then enable the
980 * GTSC and GRSC bits in the mask. This should cause the
981 * error interrupt to fire which will issue a wakeup to
982 * allow us to resume.
983 */
984
985 /*
986 * 1. Set GRS/GTS bits in DMACTRL register
987 */
988 sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS;
989 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask);
990 etsec_write(sc, IEVENT, imask_gsc_mask);
991 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
992
993 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) {
994 /*
995 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set
996 */
997 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask);
998
999 u_int timo = 1000;
1000 uint32_t ievent = etsec_read(sc, IEVENT);
1001 while ((ievent & imask_gsc_mask) != imask_gsc_mask) {
1002 if (--timo == 0) {
1003 aprint_error_dev(sc->sc_dev,
1004 "WARNING: "
1005 "request to stop failed (IEVENT=%#x)\n",
1006 ievent);
1007 break;
1008 }
1009 delay(10);
1010 ievent = etsec_read(sc, IEVENT);
1011 }
1012 }
1013
1014 /*
1015 * Now reset the controller.
1016 *
1017 * 3. Set SOFT_RESET bit in MACCFG1 register
1018 * 4. Clear SOFT_RESET bit in MACCFG1 register
1019 */
1020 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
1021 etsec_write(sc, MACCFG1, 0);
1022 etsec_write(sc, IMASK, 0);
1023 etsec_write(sc, IEVENT, ~0);
1024 sc->sc_imask = 0;
1025 ifp->if_flags &= ~IFF_RUNNING;
1026
1027 uint32_t tbipa = etsec_read(sc, TBIPA);
1028 if (tbipa == sc->sc_phy_addr) {
1029 aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
1030 etsec_write(sc, TBIPA, 0x1f);
1031 }
1032 uint32_t miimcfg = etsec_read(sc, MIIMCFG);
1033 etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
1034 etsec_write(sc, MIIMCFG, miimcfg);
1035
1036 /*
  1037 	 * Let's consume any remaining transmitted packets. And if we are
1038 * disabling the interface, purge ourselves of any untransmitted
1039 * packets. But don't consume any received packets, just drop them.
1040 * If we aren't disabling the interface, save the mbufs in the
1041 * receive queue for reuse.
1042 */
1043 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable);
1044 pq3etsec_txq_consume(sc, &sc->sc_txq);
1045 if (disable) {
1046 pq3etsec_txq_purge(sc, &sc->sc_txq);
1047 IFQ_PURGE(&ifp->if_snd);
1048 }
1049 }
1050
1051 static void
1052 pq3etsec_ifwatchdog(struct ifnet *ifp)
1053 {
1054 }
1055
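/*
 * Program the multicast filters.  Individual addresses are placed in
 * the exact-match MACnADDR registers when RCTRL_EMEN is set and slots
 * remain; everything else is hashed with a big-endian CRC32 into the
 * IGADDR/GADDR group hash table.  An address range forces ALLMULTI.
 */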
1056 static void
1057 pq3etsec_mc_setup(
1058 struct pq3etsec_softc *sc)
1059 {
1060 struct ethercom * const ec = &sc->sc_ec;
1061 struct ifnet * const ifp = &sc->sc_if;
1062 struct ether_multi *enm;
1063 struct ether_multistep step;
1064 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8);
1065 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8);
1066
1067 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr));
1068 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
1069
1070 ifp->if_flags &= ~IFF_ALLMULTI;
1071
1072 ETHER_FIRST_MULTI(step, ec, enm);
1073 for (u_int i = 0; enm != NULL; ) {
1074 const char *addr = enm->enm_addrlo;
1075 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
1076 ifp->if_flags |= IFF_ALLMULTI;
1077 memset(gaddr, 0xff, 32 << (crc_shift & 1));
1078 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
1079 break;
1080 }
1081 if ((sc->sc_rctrl & RCTRL_EMEN)
1082 && i < __arraycount(sc->sc_macaddrs)) {
1083 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr);
1084 } else {
1085 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
1086 #if 0
1087 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__,
1088 ether_sprintf(addr), crc,
1089 crc >> crc_shift,
1090 crc >> (crc_shift + 5),
1091 (crc >> crc_shift) & 31,
1092 1 << (((crc >> crc_shift) & 31) ^ 31));
1093 #endif
1094 /*
1095 * The documentation doesn't completely follow PowerPC
1096 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01
1097 * is 0x7fa32d9b. By empirical testing, the
1098 * corresponding hash bit is word 3, bit 31 (ppc bit
  1099 			 * order).  Since (3 << 5) | 31 is 0x7f, we deduce
1100 * H[0:2] selects the register while H[3:7] selects
1101 * the bit (ppc bit order).
1102 */
1103 crc >>= crc_shift;
1104 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31);
1105 }
1106 ETHER_NEXT_MULTI(step, enm);
1107 }
1108 for (u_int i = 0; i < 8; i++) {
1109 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]);
1110 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]);
1111 #if 0
1112 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8])
1113 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__,
1114 i, IGADDR(i), etsec_read(sc, IGADDR(i)),
1115 i, GADDR(i), etsec_read(sc, GADDR(i)));
1116 #endif
1117 }
1118 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) {
1119 uint64_t macaddr = sc->sc_macaddrs[i];
1120 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32));
1121 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0));
1122 #if 0
1123 if (macaddr)
1124 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__,
1125 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)),
1126 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i)));
1127 #endif
1128 }
1129 }
1130
1131 static int
1132 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
1133 {
1134 struct pq3etsec_softc *sc = ifp->if_softc;
1135 struct ifreq * const ifr = data;
1136 const int s = splnet();
1137 int error;
1138
1139 switch (cmd) {
1140 case SIOCSIFMEDIA:
1141 case SIOCGIFMEDIA:
1142 /* Flow control requires full-duplex mode. */
1143 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1144 (ifr->ifr_media & IFM_FDX) == 0)
1145 ifr->ifr_media &= ~IFM_ETH_FMASK;
1146 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1147 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1148 /* We can do both TXPAUSE and RXPAUSE. */
1149 ifr->ifr_media |=
1150 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1151 }
1152 }
1153 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1154 break;
1155
1156 default:
1157 error = ether_ioctl(ifp, cmd, data);
1158 if (error != ENETRESET)
1159 break;
1160
1161 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
1162 error = 0;
1163 if (ifp->if_flags & IFF_RUNNING)
1164 pq3etsec_mc_setup(sc);
1165 break;
1166 }
1167 error = pq3etsec_ifinit(ifp);
1168 break;
1169 }
1170
1171 splx(s);
1172 return error;
1173 }
1174
1175 static void
1176 pq3etsec_rxq_desc_presync(
1177 struct pq3etsec_softc *sc,
1178 struct pq3etsec_rxqueue *rxq,
1179 volatile struct rxbd *rxbd,
1180 size_t count)
1181 {
1182 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1183 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1184 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1185 }
1186
1187 static void
1188 pq3etsec_rxq_desc_postsync(
1189 struct pq3etsec_softc *sc,
1190 struct pq3etsec_rxqueue *rxq,
1191 volatile struct rxbd *rxbd,
1192 size_t count)
1193 {
1194 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1195 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1196 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1197 }
1198
1199 static void
1200 pq3etsec_txq_desc_presync(
1201 struct pq3etsec_softc *sc,
1202 struct pq3etsec_txqueue *txq,
1203 volatile struct txbd *txbd,
1204 size_t count)
1205 {
1206 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1207 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1208 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1209 }
1210
1211 static void
1212 pq3etsec_txq_desc_postsync(
1213 struct pq3etsec_softc *sc,
1214 struct pq3etsec_txqueue *txq,
1215 volatile struct txbd *txbd,
1216 size_t count)
1217 {
1218 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1219 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1220 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1221 }
1222
1223 static bus_dmamap_t
1224 pq3etsec_mapcache_get(
1225 struct pq3etsec_softc *sc,
1226 struct pq3etsec_mapcache *dmc)
1227 {
1228 KASSERT(dmc->dmc_nmaps > 0);
1229 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
1230 return dmc->dmc_maps[--dmc->dmc_nmaps];
1231 }
1232
1233 static void
1234 pq3etsec_mapcache_put(
1235 struct pq3etsec_softc *sc,
1236 struct pq3etsec_mapcache *dmc,
1237 bus_dmamap_t map)
1238 {
1239 KASSERT(map != NULL);
1240 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
1241 dmc->dmc_maps[dmc->dmc_nmaps++] = map;
1242 }
1243
1244 static void
1245 pq3etsec_mapcache_destroy(
1246 struct pq3etsec_softc *sc,
1247 struct pq3etsec_mapcache *dmc)
1248 {
1249 const size_t dmc_size =
1250 offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);
1251
1252 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
1253 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
1254 }
1255 kmem_intr_free(dmc, dmc_size);
1256 }
1257
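/*
 * Pre-create a pool of dmamaps so the rx/tx hot paths never need to
 * create maps on the fly; maps are handed out and returned LIFO by
 * pq3etsec_mapcache_get/put.
 */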
1258 static int
1259 pq3etsec_mapcache_create(
1260 struct pq3etsec_softc *sc,
1261 struct pq3etsec_mapcache **dmc_p,
1262 size_t maxmaps,
1263 size_t maxmapsize,
1264 size_t maxseg)
1265 {
1266 const size_t dmc_size =
1267 offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
1268 struct pq3etsec_mapcache * const dmc =
1269 kmem_intr_zalloc(dmc_size, KM_NOSLEEP);
1270
1271 dmc->dmc_maxmaps = maxmaps;
1272 dmc->dmc_nmaps = maxmaps;
1273 dmc->dmc_maxmapsize = maxmapsize;
1274 dmc->dmc_maxseg = maxseg;
1275
1276 for (u_int i = 0; i < maxmaps; i++) {
1277 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
1278 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
1279 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
1280 if (error) {
1281 aprint_error_dev(sc->sc_dev,
1282 "failed to creat dma map cache "
1283 "entry %u of %zu: %d\n",
1284 i, maxmaps, error);
1285 while (i-- > 0) {
1286 bus_dmamap_destroy(sc->sc_dmat,
1287 dmc->dmc_maps[i]);
1288 }
1289 kmem_intr_free(dmc, dmc_size);
1290 return error;
1291 }
1292 KASSERT(dmc->dmc_maps[i] != NULL);
1293 }
1294
1295 *dmc_p = dmc;
1296
1297 return 0;
1298 }
1299
1300 #if 0
1301 static void
1302 pq3etsec_dmamem_free(
1303 bus_dma_tag_t dmat,
1304 size_t map_size,
1305 bus_dma_segment_t *seg,
1306 bus_dmamap_t map,
1307 void *kvap)
1308 {
1309 bus_dmamap_destroy(dmat, map);
1310 bus_dmamem_unmap(dmat, kvap, map_size);
1311 bus_dmamem_free(dmat, seg, 1);
1312 }
1313 #endif
1314
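/*
 * Allocate a physically contiguous, coherent DMA region and hand back
 * the segment, a loaded dmamap, and its kernel mapping; used for the
 * transmit and receive descriptor rings.
 */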
1315 static int
1316 pq3etsec_dmamem_alloc(
1317 bus_dma_tag_t dmat,
1318 size_t map_size,
1319 bus_dma_segment_t *seg,
1320 bus_dmamap_t *map,
1321 void **kvap)
1322 {
1323 int error;
1324 int nseg;
1325
1326 *kvap = NULL;
1327 *map = NULL;
1328
1329 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
1330 seg, 1, &nseg, 0);
1331 if (error)
1332 return error;
1333
1334 KASSERT(nseg == 1);
1335
1336 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
1337 BUS_DMA_COHERENT);
1338 if (error == 0) {
1339 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
1340 map);
1341 if (error == 0) {
1342 error = bus_dmamap_load(dmat, *map, *kvap, map_size,
1343 NULL, 0);
1344 if (error == 0)
1345 return 0;
1346 bus_dmamap_destroy(dmat, *map);
1347 *map = NULL;
1348 }
1349 bus_dmamem_unmap(dmat, *kvap, map_size);
1350 *kvap = NULL;
1351 }
1352 bus_dmamem_free(dmat, seg, nseg);
  1353 	return error;
1354 }
1355
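/*
 * Allocate and map a single receive buffer: an mbuf cluster with a
 * dmamap taken from the rx mapcache stashed in its packet header
 * context (M_SETCTX).
 */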
1356 static struct mbuf *
1357 pq3etsec_rx_buf_alloc(
1358 struct pq3etsec_softc *sc)
1359 {
1360 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
1361 if (m == NULL) {
1362 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
1363 return NULL;
1364 }
1365 MCLGET(m, M_DONTWAIT);
1366 if ((m->m_flags & M_EXT) == 0) {
1367 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
1368 m_freem(m);
1369 return NULL;
1370 }
1371 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1372
1373 bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
1374 if (map == NULL) {
1375 printf("%s:%d: %s\n", __func__, __LINE__, "map get");
1376 m_freem(m);
1377 return NULL;
1378 }
1379 M_SETCTX(m, map);
1380 m->m_len = m->m_pkthdr.len = MCLBYTES;
1381 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1382 BUS_DMA_READ|BUS_DMA_NOWAIT);
1383 if (error) {
  1384 		aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
1385 error);
1386 M_SETCTX(m, NULL);
1387 m_freem(m);
1388 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1389 return NULL;
1390 }
1391 KASSERT(map->dm_mapsize == MCLBYTES);
1392 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1393 BUS_DMASYNC_PREREAD);
1394
1395 return m;
1396 }
1397
1398 static void
1399 pq3etsec_rx_map_unload(
1400 struct pq3etsec_softc *sc,
1401 struct mbuf *m)
1402 {
1403 KASSERT(m);
1404 for (; m != NULL; m = m->m_next) {
1405 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1406 KASSERT(map);
1407 KASSERT(map->dm_mapsize == MCLBYTES);
1408 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
1409 BUS_DMASYNC_POSTREAD);
1410 bus_dmamap_unload(sc->sc_dmat, map);
1411 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1412 M_SETCTX(m, NULL);
1413 }
1414 }
1415
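/*
 * Refill the receive ring up to its threshold, reusing mbufs from the
 * rx bufcache when possible.  Each descriptor is handed to the
 * hardware by setting RXBD_E, and the queue is restarted if it halted
 * while we were short of buffers.
 */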
1416 static bool
1417 pq3etsec_rxq_produce(
1418 struct pq3etsec_softc *sc,
1419 struct pq3etsec_rxqueue *rxq)
1420 {
1421 volatile struct rxbd *producer = rxq->rxq_producer;
1422 #if 0
1423 size_t inuse = rxq->rxq_inuse;
1424 #endif
1425 while (rxq->rxq_inuse < rxq->rxq_threshold) {
1426 struct mbuf *m;
1427 IF_DEQUEUE(&sc->sc_rx_bufcache, m);
1428 if (m == NULL) {
1429 m = pq3etsec_rx_buf_alloc(sc);
1430 if (m == NULL) {
1431 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
1432 break;
1433 }
1434 }
1435 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1436 KASSERT(map);
1437
1438 #ifdef ETSEC_DEBUG
1439 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
1440 rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
1441 #endif
1442
  1443 		/* rxbd_len is written only by the eTSEC; no need to set it */
1444 producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
1445 membar_producer();
1446 producer->rxbd_flags |= RXBD_E;
1447 if (__predict_false(rxq->rxq_mhead == NULL)) {
1448 KASSERT(producer == rxq->rxq_consumer);
1449 rxq->rxq_mconsumer = m;
1450 }
1451 *rxq->rxq_mtail = m;
1452 rxq->rxq_mtail = &m->m_next;
1453 m->m_len = MCLBYTES;
1454 m->m_next = NULL;
1455 rxq->rxq_inuse++;
1456 if (++producer == rxq->rxq_last) {
1457 membar_producer();
1458 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1459 rxq->rxq_last - rxq->rxq_producer);
1460 producer = rxq->rxq_producer = rxq->rxq_first;
1461 }
1462 }
1463 if (producer != rxq->rxq_producer) {
1464 membar_producer();
1465 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1466 producer - rxq->rxq_producer);
1467 rxq->rxq_producer = producer;
1468 }
1469 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
1470 if (qhlt) {
1471 KASSERT(qhlt & rxq->rxq_qmask);
1472 sc->sc_ev_rx_stall.ev_count++;
1473 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
1474 }
1475 #if 0
1476 aprint_normal_dev(sc->sc_dev,
1477 "%s: buffers inuse went from %zu to %zu\n",
1478 __func__, inuse, rxq->rxq_inuse);
1479 #endif
1480 return true;
1481 }
1482
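/*
 * Translate the receive frame control block (FCB) that the hardware
 * prepends to each packet into mbuf metadata: VLAN tag and
 * IP/TCP/UDP checksum status flags.
 */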
1483 static bool
1484 pq3etsec_rx_offload(
1485 struct pq3etsec_softc *sc,
1486 struct mbuf *m,
1487 const struct rxfcb *fcb)
1488 {
1489 if (fcb->rxfcb_flags & RXFCB_VLN) {
1490 VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl,
1491 m_freem(m); return false);
1492 }
1493 if ((fcb->rxfcb_flags & RXFCB_IP) == 0
1494 || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0)
1495 return true;
1496 int csum_flags = 0;
1497 if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) {
1498 csum_flags |= M_CSUM_IPv4;
1499 if (fcb->rxfcb_flags & RXFCB_EIP)
1500 csum_flags |= M_CSUM_IPv4_BAD;
1501 }
1502 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
1503 int ipv_flags;
1504 if (fcb->rxfcb_flags & RXFCB_IP6)
1505 ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6;
1506 else
1507 ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4;
1508 if (fcb->rxfcb_pro == IPPROTO_TCP) {
1509 csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags;
1510 } else {
1511 csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags;
1512 }
1513 if (fcb->rxfcb_flags & RXFCB_ETU)
1514 csum_flags |= M_CSUM_TCP_UDP_BAD;
1515 }
1516
1517 m->m_pkthdr.csum_flags = csum_flags;
1518 return true;
1519 }
1520
1521 static void
1522 pq3etsec_rx_input(
1523 struct pq3etsec_softc *sc,
1524 struct mbuf *m,
1525 uint16_t rxbd_flags)
1526 {
1527 struct ifnet * const ifp = &sc->sc_if;
1528
1529 pq3etsec_rx_map_unload(sc, m);
1530
1531 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
1532 struct rxfcb fcb = *mtod(m, struct rxfcb *);
1533 if (!pq3etsec_rx_offload(sc, m, &fcb))
1534 return;
1535 }
1536 m_adj(m, sc->sc_rx_adjlen);
1537
1538 if (rxbd_flags & RXBD_M)
1539 m->m_flags |= M_PROMISC;
1540 if (rxbd_flags & RXBD_BC)
1541 m->m_flags |= M_BCAST;
1542 if (rxbd_flags & RXBD_MC)
1543 m->m_flags |= M_MCAST;
1544 m->m_flags |= M_HASFCS;
1545 m->m_pkthdr.rcvif = &sc->sc_if;
1546
1547 ifp->if_ipackets++;
1548 ifp->if_ibytes += m->m_pkthdr.len;
1549
1550 /*
  1551 	 * Let's give it to the network subsystem to deal with.
1552 */
1553 int s = splnet();
1554 bpf_mtap(ifp, m);
1555 (*ifp->if_input)(ifp, m);
1556 splx(s);
1557 }
1558
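/*
 * Walk the receive ring from the consumer index: hand completed
 * frames to the network stack, recycle the buffers of frames that
 * ended in error, and reclaim each descriptor for the producer side.
 */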
1559 static void
1560 pq3etsec_rxq_consume(
1561 struct pq3etsec_softc *sc,
1562 struct pq3etsec_rxqueue *rxq)
1563 {
1564 struct ifnet * const ifp = &sc->sc_if;
1565 volatile struct rxbd *consumer = rxq->rxq_consumer;
1566 size_t rxconsumed = 0;
1567
1568 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);
1569
1570 for (;;) {
1571 if (consumer == rxq->rxq_producer) {
1572 rxq->rxq_consumer = consumer;
1573 rxq->rxq_inuse -= rxconsumed;
1574 KASSERT(rxq->rxq_inuse == 0);
1575 return;
1576 }
1577 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
1578 const uint16_t rxbd_flags = consumer->rxbd_flags;
1579 if (rxbd_flags & RXBD_E) {
1580 rxq->rxq_consumer = consumer;
1581 rxq->rxq_inuse -= rxconsumed;
1582 return;
1583 }
1584 KASSERT(rxq->rxq_mconsumer != NULL);
1585 #ifdef ETSEC_DEBUG
1586 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1587 #endif
1588 #if 0
1589 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
1590 __func__,
1591 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
1592 mtod(rxq->rxq_mconsumer, int *)[0],
1593 mtod(rxq->rxq_mconsumer, int *)[1],
1594 mtod(rxq->rxq_mconsumer, int *)[2],
1595 mtod(rxq->rxq_mconsumer, int *)[3]);
1596 #endif
1597 /*
  1598 		 * We own this packet again.  Clear all flags except wrap and interrupt.
1599 */
1600 rxconsumed++;
1601 consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I);
1602
1603 /*
1604 * If this descriptor has the LAST bit set and no errors,
1605 * it's a valid input packet.
1606 */
1607 if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) {
1608 size_t rxbd_len = consumer->rxbd_len;
1609 struct mbuf *m = rxq->rxq_mhead;
1610 struct mbuf *m_last = rxq->rxq_mconsumer;
1611 if ((rxq->rxq_mhead = m_last->m_next) == NULL)
1612 rxq->rxq_mtail = &rxq->rxq_mhead;
1613 rxq->rxq_mconsumer = rxq->rxq_mhead;
1614 m_last->m_next = NULL;
1615 m_last->m_len = rxbd_len & (MCLBYTES - 1);
1616 m->m_pkthdr.len = rxbd_len;
1617 pq3etsec_rx_input(sc, m, rxbd_flags);
1618 } else if (rxbd_flags & RXBD_L) {
1619 KASSERT(rxbd_flags & RXBD_ERRORS);
1620 struct mbuf *m;
1621 /*
1622 * We encountered an error, take the mbufs and add
  1623 			 * them to the rx bufcache so we can reuse them.
1624 */
1625 ifp->if_ierrors++;
1626 for (m = rxq->rxq_mhead;
1627 m != rxq->rxq_mconsumer;
1628 m = m->m_next) {
1629 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1630 }
1631 m = rxq->rxq_mconsumer;
1632 if ((rxq->rxq_mhead = m->m_next) == NULL)
1633 rxq->rxq_mtail = &rxq->rxq_mhead;
1634 rxq->rxq_mconsumer = m->m_next;
1635 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1636 } else {
1637 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next;
1638 }
1639 #ifdef ETSEC_DEBUG
1640 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL;
1641 #endif
1642
1643 /*
1644 * Wrap at the last entry!
1645 */
1646 if (rxbd_flags & RXBD_W) {
1647 KASSERT(consumer + 1 == rxq->rxq_last);
1648 consumer = rxq->rxq_first;
1649 } else {
1650 consumer++;
1651 }
1652 #ifdef ETSEC_DEBUG
1653 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1654 #endif
1655 }
1656 }
1657
1658 static void
1659 pq3etsec_rxq_purge(
1660 struct pq3etsec_softc *sc,
1661 struct pq3etsec_rxqueue *rxq,
1662 bool discard)
1663 {
1664 struct mbuf *m;
1665
1666 if ((m = rxq->rxq_mhead) != NULL) {
1667 #ifdef ETSEC_DEBUG
1668 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs));
1669 #endif
1670
1671 if (discard) {
1672 pq3etsec_rx_map_unload(sc, m);
1673 m_freem(m);
1674 } else {
1675 while (m != NULL) {
1676 struct mbuf *m0 = m->m_next;
1677 m->m_next = NULL;
1678 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1679 m = m0;
1680 }
1681 }
1682
1683 }
1684
1685 rxq->rxq_mconsumer = NULL;
1686 rxq->rxq_mhead = NULL;
1687 rxq->rxq_mtail = &rxq->rxq_mhead;
1688 rxq->rxq_inuse = 0;
1689 }
1690
1691 static void
1692 pq3etsec_rxq_reset(
1693 struct pq3etsec_softc *sc,
1694 struct pq3etsec_rxqueue *rxq)
1695 {
1696 /*
1697 * sync all the descriptors
1698 */
1699 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1700 rxq->rxq_last - rxq->rxq_first);
1701
1702 /*
1703 * Make sure we own all descriptors in the ring.
1704 */
1705 volatile struct rxbd *rxbd;
1706 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) {
1707 rxbd->rxbd_flags = RXBD_I;
1708 }
1709
1710 /*
1711 * Last descriptor has the wrap flag.
1712 */
1713 rxbd->rxbd_flags = RXBD_W|RXBD_I;
1714
1715 /*
  1716 	 * Reset the producer and consumer indexes.
1717 */
1718 rxq->rxq_consumer = rxq->rxq_first;
1719 rxq->rxq_producer = rxq->rxq_first;
1720 rxq->rxq_inuse = 0;
1721 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
1722 rxq->rxq_threshold = ETSEC_MINRXMBUFS;
1723
1724 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY;
1725
1726 /*
  1727 	 * Restart the receive at the first descriptor
1728 */
1729 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
1730 }
1731
1732 static int
1733 pq3etsec_rxq_attach(
1734 struct pq3etsec_softc *sc,
1735 struct pq3etsec_rxqueue *rxq,
1736 u_int qno)
1737 {
1738 size_t map_size = PAGE_SIZE;
1739 size_t desc_count = map_size / sizeof(struct rxbd);
1740 int error;
1741 void *descs;
1742
1743 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1744 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1745 if (error)
1746 return error;
1747
1748 memset(descs, 0, map_size);
1749 rxq->rxq_first = descs;
1750 rxq->rxq_last = rxq->rxq_first + desc_count;
1751 rxq->rxq_consumer = descs;
1752 rxq->rxq_producer = descs;
1753
1754 pq3etsec_rxq_purge(sc, rxq, true);
1755 pq3etsec_rxq_reset(sc, rxq);
1756
1757 rxq->rxq_reg_rbase = RBASEn(qno);
1758 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);
1759
1760 return 0;
1761 }
1762
1763 static bool
1764 pq3etsec_txq_active_p(
1765 struct pq3etsec_softc * const sc,
1766 struct pq3etsec_txqueue *txq)
1767 {
1768 return !IF_IS_EMPTY(&txq->txq_mbufs);
1769 }
1770
1771 static bool
1772 pq3etsec_txq_fillable_p(
1773 struct pq3etsec_softc * const sc,
1774 struct pq3etsec_txqueue *txq)
1775 {
1776 return txq->txq_free >= txq->txq_threshold;
1777 }
1778
1779 static int
1780 pq3etsec_txq_attach(
1781 struct pq3etsec_softc *sc,
1782 struct pq3etsec_txqueue *txq,
1783 u_int qno)
1784 {
1785 size_t map_size = PAGE_SIZE;
1786 size_t desc_count = map_size / sizeof(struct txbd);
1787 int error;
1788 void *descs;
1789
1790 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1791 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1792 if (error)
1793 return error;
1794
1795 memset(descs, 0, map_size);
1796 txq->txq_first = descs;
1797 txq->txq_last = txq->txq_first + desc_count;
1798 txq->txq_consumer = descs;
1799 txq->txq_producer = descs;
1800
1801 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS);
1802
1803 txq->txq_reg_tbase = TBASEn(qno);
1804 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno);
1805
1806 pq3etsec_txq_reset(sc, txq);
1807
1808 return 0;
1809 }
1810
1811 static int
1812 pq3etsec_txq_map_load(
1813 struct pq3etsec_softc *sc,
1814 struct pq3etsec_txqueue *txq,
1815 struct mbuf *m)
1816 {
1817 bus_dmamap_t map;
1818 int error;
1819
1820 map = M_GETCTX(m, bus_dmamap_t);
1821 if (map != NULL)
1822 return 0;
1823
1824 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache);
1825 if (map == NULL)
1826 return ENOMEM;
1827
1828 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1829 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1830 if (error)
1831 return error;
1832
1833 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1834 BUS_DMASYNC_PREWRITE);
1835 M_SETCTX(m, map);
1836 return 0;
1837 }
1838
1839 static void
1840 pq3etsec_txq_map_unload(
1841 struct pq3etsec_softc *sc,
1842 struct pq3etsec_txqueue *txq,
1843 struct mbuf *m)
1844 {
1845 KASSERT(m);
1846 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1847 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1848 BUS_DMASYNC_POSTWRITE);
1849 bus_dmamap_unload(sc->sc_dmat, map);
1850 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map);
1851 }
1852
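/*
 * Convert a DMA-mapped mbuf chain into transmit descriptors.  The
 * first descriptor carries TXBD_TOE when an FCB was prepended, the
 * last carries TXBD_L (plus TXBD_I when a completion interrupt is
 * wanted), and ownership of the first descriptor is transferred last
 * so the hardware never sees a partially built chain.
 */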
1853 static bool
1854 pq3etsec_txq_produce(
1855 struct pq3etsec_softc *sc,
1856 struct pq3etsec_txqueue *txq,
1857 struct mbuf *m)
1858 {
1859 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1860
1861 if (map->dm_nsegs > txq->txq_free)
1862 return false;
1863
1864 /*
1865 * TCP Offload flag must be set in the first descriptor.
1866 */
1867 volatile struct txbd *producer = txq->txq_producer;
1868 uint16_t last_flags = TXBD_L;
1869 uint16_t first_flags = TXBD_R
1870 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0);
1871
1872 /*
1873 * If we've produced enough descriptors without consuming any
1874 * we need to ask for an interrupt to reclaim some.
1875 */
1876 txq->txq_lastintr += map->dm_nsegs;
1877 if (txq->txq_lastintr >= txq->txq_threshold
1878 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1879 txq->txq_lastintr = 0;
1880 last_flags |= TXBD_I;
1881 }
1882
1883 #ifdef ETSEC_DEBUG
1884 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1885 #endif
1886 KASSERT(producer != txq->txq_last);
1887 producer->txbd_bufptr = map->dm_segs[0].ds_addr;
1888 producer->txbd_len = map->dm_segs[0].ds_len;
1889
1890 if (map->dm_nsegs > 1) {
1891 volatile struct txbd *start = producer + 1;
1892 size_t count = map->dm_nsegs - 1;
1893 for (u_int i = 1; i < map->dm_nsegs; i++) {
1894 if (__predict_false(++producer == txq->txq_last)) {
1895 producer = txq->txq_first;
1896 if (start < txq->txq_last) {
1897 pq3etsec_txq_desc_presync(sc, txq,
1898 start, txq->txq_last - start);
1899 count -= txq->txq_last - start;
1900 }
1901 start = txq->txq_first;
1902 }
1903 #ifdef ETSEC_DEBUG
1904 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1905 #endif
1906 producer->txbd_bufptr = map->dm_segs[i].ds_addr;
1907 producer->txbd_len = map->dm_segs[i].ds_len;
1908 producer->txbd_flags = TXBD_R
1909 | (producer->txbd_flags & TXBD_W)
1910 | (i == map->dm_nsegs - 1 ? last_flags : 0);
1911 #if 0
1912 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first,
1913 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr);
1914 #endif
1915 }
1916 pq3etsec_txq_desc_presync(sc, txq, start, count);
1917 } else {
1918 first_flags |= last_flags;
1919 }
1920
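	/*
	 * Publish the first descriptor last: the write barrier makes the
	 * buffer pointers and lengths written above visible before TXBD_R
	 * hands ownership of the chain to the hardware.
	 */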
1921 membar_producer();
1922 txq->txq_producer->txbd_flags =
1923 first_flags | (txq->txq_producer->txbd_flags & TXBD_W);
1924 #if 0
1925 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
1926 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags,
1927 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr);
1928 #endif
1929 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1);
1930
1931 /*
1932 * Reduce free count by the number of segments we consumed.
1933 */
1934 txq->txq_free -= map->dm_nsegs;
1935 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1936 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
1937 KASSERT(producer->txbd_flags & TXBD_L);
1938 #ifdef ETSEC_DEBUG
1939 txq->txq_lmbufs[producer - txq->txq_first] = m;
1940 #endif
1941
1942 #if 0
1943 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
1944 __func__, m, m->m_pkthdr.len, map->dm_nsegs,
1945 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
1946 #endif
1947
1948 if (++producer == txq->txq_last)
1949 txq->txq_producer = txq->txq_first;
1950 else
1951 txq->txq_producer = producer;
1952 IF_ENQUEUE(&txq->txq_mbufs, m);
1953
1954 /*
1955 * Restart the transmitter.
1956 */
1957 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */
1958
1959 return true;
1960 }
1961
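/*
 * Prepend a frame control block (FCB) when the packet needs checksum
 * offload or VLAN tag insertion.  The FCB goes into leading space, into
 * the packet-header mbuf, or into a freshly allocated mbuf; M_HASFCB
 * marks packets that carry one.  If no mbuf can be allocated, any
 * deferred IP/IPv6 checksums are completed in software instead.
 */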
1962 static void
1963 pq3etsec_tx_offload(
1964 struct pq3etsec_softc *sc,
1965 struct pq3etsec_txqueue *txq,
1966 struct mbuf **mp)
1967 {
1968 struct mbuf *m = *mp;
1969 u_int csum_flags = m->m_pkthdr.csum_flags;
1970 struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m);
1971
1972 KASSERT(m->m_flags & M_PKTHDR);
1973
1974 /*
1975	 * Let's see if we are doing any offload first.
1976 */
1977 if (csum_flags == 0 && vtag == 0) {
1978 m->m_flags &= ~M_HASFCB;
1979 return;
1980 }
1981
1982 uint16_t flags = 0;
1983 if (csum_flags & M_CSUM_IP) {
1984 flags |= TXFCB_IP
1985 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
1986 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
1987 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
1988 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
1989 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
1990 }
1991 if (vtag) {
1992 flags |= TXFCB_VLN;
1993 }
1994 if (flags == 0) {
1995 m->m_flags &= ~M_HASFCB;
1996 return;
1997 }
1998
1999 struct txfcb fcb;
2000 fcb.txfcb_flags = flags;
2001 if (csum_flags & M_CSUM_IPv4)
2002 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2003 else
2004 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
2005 fcb.txfcb_l3os = ETHER_HDR_LEN;
2006 fcb.txfcb_phcs = 0;
2007 fcb.txfcb_vlctl = vtag ? VLAN_TAG_VALUE(vtag) & 0xffff : 0;
2008
2009 #if 0
2010 printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n",
2011 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os,
2012 fcb.txfcb_phcs, fcb.txfcb_vlctl);
2013 #endif
2014
2015 if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
2016 m->m_data -= sizeof(fcb);
2017 m->m_len += sizeof(fcb);
2018 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
2019 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
2020 m->m_data = m->m_pktdat;
2021 m->m_len += sizeof(fcb);
2022 } else {
2023 struct mbuf *mn;
2024 MGET(mn, M_DONTWAIT, m->m_type);
2025 if (mn == NULL) {
2026 if (csum_flags & M_CSUM_IP4) {
2027 #ifdef INET
2028 ip_undefer_csum(m, ETHER_HDR_LEN,
2029 csum_flags & M_CSUM_IP4);
2030 #else
2031 panic("%s: impossible M_CSUM flags %#x",
2032 device_xname(sc->sc_dev), csum_flags);
2033 #endif
2034 } else if (csum_flags & M_CSUM_IP6) {
2035 #ifdef INET6
2036 ip6_undefer_csum(m, ETHER_HDR_LEN,
2037 csum_flags & M_CSUM_IP6);
2038 #else
2039 panic("%s: impossible M_CSUM flags %#x",
2040 device_xname(sc->sc_dev), csum_flags);
2041 #endif
2042 } else if (vtag) {
2043 }
2044
2045 m->m_flags &= ~M_HASFCB;
2046 return;
2047 }
2048
2049 M_MOVE_PKTHDR(mn, m);
2050 mn->m_next = m;
2051 m = mn;
2052 MH_ALIGN(m, sizeof(fcb));
2053 m->m_len = sizeof(fcb);
2054 *mp = m;
2055 }
2056 m->m_pkthdr.len += sizeof(fcb);
2057 m->m_flags |= M_HASFCB;
2058 *mtod(m, struct txfcb *) = fcb;
2059 return;
2060 }
2061
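/*
 * Move packets from the interface send queue onto the transmit ring:
 * apply offload, load the DMA map, and produce descriptors.  A packet
 * that does not fit is parked in txq_next and retried on the next
 * call.  Returns false when the ring (or the queue of in-flight
 * mbufs) is full, true when the send queue has been drained.
 */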
2062 static bool
2063 pq3etsec_txq_enqueue(
2064 struct pq3etsec_softc *sc,
2065 struct pq3etsec_txqueue *txq)
2066 {
2067 for (;;) {
2068 if (IF_QFULL(&txq->txq_mbufs))
2069 return false;
2070 struct mbuf *m = txq->txq_next;
2071 if (m == NULL) {
2072 int s = splnet();
2073 IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
2074 splx(s);
2075 if (m == NULL)
2076 return true;
2077 M_SETCTX(m, NULL);
2078 pq3etsec_tx_offload(sc, txq, &m);
2079 } else {
2080 txq->txq_next = NULL;
2081 }
2082 int error = pq3etsec_txq_map_load(sc, txq, m);
2083 if (error) {
2084 aprint_error_dev(sc->sc_dev,
2085 "discarded packet due to "
2086 "dmamap load failure: %d\n", error);
2087 m_freem(m);
2088 continue;
2089 }
2090 KASSERT(txq->txq_next == NULL);
2091 if (!pq3etsec_txq_produce(sc, txq, m)) {
2092 txq->txq_next = m;
2093 return false;
2094 }
2095 KASSERT(txq->txq_next == NULL);
2096 }
2097 }
2098
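/*
 * Reclaim completed transmit descriptors.  Walk the ring from the
 * consumer index until the producer index, or a descriptor the
 * hardware still owns (TXBD_R), is reached.  On each last-in-packet
 * descriptor (TXBD_L) the mbuf is dequeued, its DMA map unloaded and
 * returned to the cache, statistics are updated and the chain freed.
 */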
2099 static bool
2100 pq3etsec_txq_consume(
2101 struct pq3etsec_softc *sc,
2102 struct pq3etsec_txqueue *txq)
2103 {
2104 struct ifnet * const ifp = &sc->sc_if;
2105 volatile struct txbd *consumer = txq->txq_consumer;
2106 size_t txfree = 0;
2107
2108 #if 0
2109 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
2110 #endif
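	/*
	 * Acknowledge (write one to clear) this queue's transmit frame
	 * event before scanning the ring for completed descriptors.
	 */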
2111 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);
2112
2113 for (;;) {
2114 if (consumer == txq->txq_producer) {
2115 txq->txq_consumer = consumer;
2116 txq->txq_free += txfree;
2117 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
2118 #if 0
2119			printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
2120 __func__, txfree, txq->txq_free - txfree, txq->txq_free);
2121 #endif
2122 KASSERT(txq->txq_lastintr == 0);
2123 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
2124 return true;
2125 }
2126 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
2127 const uint16_t txbd_flags = consumer->txbd_flags;
2128 if (txbd_flags & TXBD_R) {
2129 txq->txq_consumer = consumer;
2130 txq->txq_free += txfree;
2131 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
2132 #if 0
2133 printf("%s: freed %zu descriptors\n",
2134 __func__, txfree);
2135 #endif
2136 return pq3etsec_txq_fillable_p(sc, txq);
2137 }
2138
2139 /*
2140 * If this is the last descriptor in the chain, get the
2141		 * mbuf, unload its dmamap, and free the mbuf chain itself.
2142 */
2143 if (txbd_flags & TXBD_L) {
2144 struct mbuf *m;
2145
2146 IF_DEQUEUE(&txq->txq_mbufs, m);
2147 #ifdef ETSEC_DEBUG
2148 KASSERTMSG(
2149 m == txq->txq_lmbufs[consumer-txq->txq_first],
2150 "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
2151 __func__, consumer, consumer - txq->txq_first,
2152 txbd_flags, m,
2153 &txq->txq_lmbufs[consumer-txq->txq_first],
2154 txq->txq_lmbufs[consumer-txq->txq_first]);
2155 #endif
2156 KASSERT(m);
2157 pq3etsec_txq_map_unload(sc, txq, m);
2158 #if 0
2159 printf("%s: mbuf %p: consumed a %u byte packet\n",
2160 __func__, m, m->m_pkthdr.len);
2161 #endif
2162 if (m->m_flags & M_HASFCB)
2163 m_adj(m, sizeof(struct txfcb));
2164 bpf_mtap(ifp, m);
2165 ifp->if_opackets++;
2166 ifp->if_obytes += m->m_pkthdr.len;
2167 if (m->m_flags & M_MCAST)
2168 ifp->if_omcasts++;
2169 if (txbd_flags & TXBD_ERRORS)
2170 ifp->if_oerrors++;
2171 m_freem(m);
2172 #ifdef ETSEC_DEBUG
2173 txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
2174 #endif
2175 } else {
2176 #ifdef ETSEC_DEBUG
2177 KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
2178 #endif
2179 }
2180
2181 /*
2182		 * We own this descriptor again.  Clear all flags except wrap.
2183 */
2184 txfree++;
2185 //consumer->txbd_flags = txbd_flags & TXBD_W;
2186
2187 /*
2188 * Wrap at the last entry!
2189 */
2190 if (txbd_flags & TXBD_W) {
2191 KASSERT(consumer + 1 == txq->txq_last);
2192 consumer = txq->txq_first;
2193 } else {
2194 consumer++;
2195 KASSERT(consumer < txq->txq_last);
2196 }
2197 }
2198 }
2199
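/*
 * Discard everything queued on the transmit side.  Only called with
 * the transmitter disabled; both the in-flight mbufs and any packet
 * parked in txq_next are unloaded and freed.
 */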
2200 static void
2201 pq3etsec_txq_purge(
2202 struct pq3etsec_softc *sc,
2203 struct pq3etsec_txqueue *txq)
2204 {
2205 struct mbuf *m;
2206 KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);
2207
2208 for (;;) {
2209 IF_DEQUEUE(&txq->txq_mbufs, m);
2210 if (m == NULL)
2211 break;
2212 pq3etsec_txq_map_unload(sc, txq, m);
2213 m_freem(m);
2214 }
2215 if ((m = txq->txq_next) != NULL) {
2216 txq->txq_next = NULL;
2217 pq3etsec_txq_map_unload(sc, txq, m);
2218 m_freem(m);
2219 }
2220 #ifdef ETSEC_DEBUG
2221 memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
2222 #endif
2223 }
2224
2225 static void
2226 pq3etsec_txq_reset(
2227 struct pq3etsec_softc *sc,
2228 struct pq3etsec_txqueue *txq)
2229 {
2230 /*
2231 * sync all the descriptors
2232 */
2233 pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
2234 txq->txq_last - txq->txq_first);
2235
2236 /*
2237 * Make sure we own all descriptors in the ring.
2238 */
2239 volatile struct txbd *txbd;
2240 for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
2241 txbd->txbd_flags = 0;
2242 }
2243
2244 /*
2245 * Last descriptor has the wrap flag.
2246 */
2247 txbd->txbd_flags = TXBD_W;
2248
2249 /*
2250	 * Reset the producer and consumer indexes.
2251 */
2252 txq->txq_consumer = txq->txq_first;
2253 txq->txq_producer = txq->txq_first;
2254 txq->txq_free = txq->txq_last - txq->txq_first - 1;
2255 txq->txq_threshold = txq->txq_free / 2;
2256 txq->txq_lastintr = 0;
2257
2258 /*
2259 * What do we want to get interrupted on?
2260 */
2261 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE;
2262
2263 /*
2264	 * Restart the transmitter at the first descriptor.
2265 */
2266 etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
2267 }
2268
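/*
 * if_start handler: the actual work happens in the soft interrupt,
 * so just flag a transmit request and schedule it.
 */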
2269 static void
2270 pq3etsec_ifstart(struct ifnet *ifp)
2271 {
2272 struct pq3etsec_softc * const sc = ifp->if_softc;
2273
2274 if (__predict_false((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) {
2275 return;
2276 }
2277
2278 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2279 softint_schedule(sc->sc_soft_ih);
2280 }
2281
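/*
 * Handle an accumulated transmit error (SOFT_TXERROR).  Completed
 * descriptors are reclaimed, the transmitter is restarted if packets
 * are still outstanding, and the ring is refilled.  The per-error
 * classification branches are currently empty placeholders.
 */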
2282 static void
2283 pq3etsec_tx_error(
2284 struct pq3etsec_softc * const sc)
2285 {
2286 struct pq3etsec_txqueue * const txq = &sc->sc_txq;
2287
2288 pq3etsec_txq_consume(sc, txq);
2289
2290 if (pq3etsec_txq_fillable_p(sc, txq))
2291 sc->sc_if.if_flags &= ~IFF_OACTIVE;
2292 if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) {
2293 } else if (sc->sc_txerrors & IEVENT_EBERR) {
2294 }
2295
2296 if (pq3etsec_txq_active_p(sc, txq))
2297 etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
2298 if (!pq3etsec_txq_enqueue(sc, txq)) {
2299 sc->sc_ev_tx_stall.ev_count++;
2300 sc->sc_if.if_flags |= IFF_OACTIVE;
2301 }
2302
2303 sc->sc_txerrors = 0;
2304 }
2305
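/*
 * Hardware interrupt handlers.  The transmit and receive handlers
 * acknowledge their IEVENT bits, mask further interrupts of that
 * class, and defer the real work to the soft interrupt.
 */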
2306 int
2307 pq3etsec_tx_intr(void *arg)
2308 {
2309 struct pq3etsec_softc * const sc = arg;
2310
2311 mutex_enter(sc->sc_hwlock);
2312
2313 sc->sc_ev_tx_intr.ev_count++;
2314
2315 uint32_t ievent = etsec_read(sc, IEVENT);
2316 ievent &= IEVENT_TXF|IEVENT_TXB;
2317 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2318
2319 #if 0
2320 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2321 __func__, ievent, etsec_read(sc, IMASK));
2322 #endif
2323
2324 if (ievent == 0) {
2325 mutex_exit(sc->sc_hwlock);
2326 return 0;
2327 }
2328
2329 sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB);
2330 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2331 etsec_write(sc, IMASK, sc->sc_imask);
2332 softint_schedule(sc->sc_soft_ih);
2333
2334 mutex_exit(sc->sc_hwlock);
2335
2336 return 1;
2337 }
2338
2339 int
2340 pq3etsec_rx_intr(void *arg)
2341 {
2342 struct pq3etsec_softc * const sc = arg;
2343
2344 mutex_enter(sc->sc_hwlock);
2345
2346 sc->sc_ev_rx_intr.ev_count++;
2347
2348 uint32_t ievent = etsec_read(sc, IEVENT);
2349 ievent &= IEVENT_RXF|IEVENT_RXB;
2350 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2351 if (ievent == 0) {
2352 mutex_exit(sc->sc_hwlock);
2353 return 0;
2354 }
2355
2356 #if 0
2357 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
2358 #endif
2359
2360 sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB);
2361 atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
2362 etsec_write(sc, IMASK, sc->sc_imask);
2363 softint_schedule(sc->sc_soft_ih);
2364
2365 mutex_exit(sc->sc_hwlock);
2366
2367 return 1;
2368 }
2369
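/*
 * Miscellaneous/error interrupt handler.  Keep draining IEVENT until
 * no more bits are set: graceful stop completions and MII events wake
 * their waiters directly, while busy (out of receive buffers),
 * transmit error, and parity error events are masked and handed to
 * the soft interrupt.
 */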
2370 int
2371 pq3etsec_error_intr(void *arg)
2372 {
2373 struct pq3etsec_softc * const sc = arg;
2374
2375 mutex_enter(sc->sc_hwlock);
2376
2377 sc->sc_ev_error_intr.ev_count++;
2378
2379 for (int rv = 0, soft_flags = 0;; rv = 1) {
2380 uint32_t ievent = etsec_read(sc, IEVENT);
2381 ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB);
2382 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2383 if (ievent == 0) {
2384 if (soft_flags) {
2385 atomic_or_uint(&sc->sc_soft_flags, soft_flags);
2386 softint_schedule(sc->sc_soft_ih);
2387 }
2388 mutex_exit(sc->sc_hwlock);
2389 return rv;
2390 }
2391 #if 0
2392 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2393 __func__, ievent, etsec_read(sc, IMASK));
2394 #endif
2395
2396 if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) {
2397 sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC);
2398 etsec_write(sc, IMASK, sc->sc_imask);
2399 wakeup(sc);
2400 }
2401 if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) {
2402 sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR);
2403 etsec_write(sc, IMASK, sc->sc_imask);
2404 wakeup(&sc->sc_mii);
2405 }
2406 if (ievent & IEVENT_BSY) {
2407 soft_flags |= SOFT_RXBSY;
2408 sc->sc_imask &= ~IEVENT_BSY;
2409 etsec_write(sc, IMASK, sc->sc_imask);
2410 }
2411 if (ievent & IEVENT_TXE) {
2412 soft_flags |= SOFT_TXERROR;
2413 sc->sc_imask &= ~IEVENT_TXE;
2414 sc->sc_txerrors |= ievent;
2415 }
2416 if (ievent & IEVENT_TXC) {
2417 sc->sc_ev_tx_pause.ev_count++;
2418 }
2419 if (ievent & IEVENT_RXC) {
2420 sc->sc_ev_rx_pause.ev_count++;
2421 }
2422 if (ievent & IEVENT_DPE) {
2423 soft_flags |= SOFT_RESET;
2424 sc->sc_imask &= ~IEVENT_DPE;
2425 etsec_write(sc, IMASK, sc->sc_imask);
2426 }
2427 }
2428 }
2429
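/*
 * Soft interrupt: runs under sc_lock and does the work deferred by
 * the hardware interrupt handlers.  It reinitializes the interface
 * after a fatal error, grows the receive threshold after a busy
 * event, reclaims and refills the transmit ring, consumes received
 * packets, and finally re-enables the interrupt sources that were
 * masked.
 */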
2430 void
2431 pq3etsec_soft_intr(void *arg)
2432 {
2433 struct pq3etsec_softc * const sc = arg;
2434 struct ifnet * const ifp = &sc->sc_if;
2435 uint32_t imask = 0;
2436
2437 mutex_enter(sc->sc_lock);
2438
2439 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
2440
2441 sc->sc_ev_soft_intr.ev_count++;
2442
2443 if (soft_flags & SOFT_RESET) {
2444 int s = splnet();
2445 pq3etsec_ifinit(ifp);
2446 splx(s);
2447 soft_flags = 0;
2448 }
2449
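	/*
	 * A busy event means the receiver ran out of buffers.  Grow the
	 * refill threshold by 25%, capping it just below the ring size;
	 * IEVENT_BSY is only re-enabled while there is still room to grow.
	 */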
2450 if (soft_flags & SOFT_RXBSY) {
2451 struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
2452 size_t threshold = 5 * rxq->rxq_threshold / 4;
2453 if (threshold >= rxq->rxq_last - rxq->rxq_first) {
2454 threshold = rxq->rxq_last - rxq->rxq_first - 1;
2455 } else {
2456 imask |= IEVENT_BSY;
2457 }
2458 aprint_normal_dev(sc->sc_dev,
2459 "increasing receive buffers from %zu to %zu\n",
2460 rxq->rxq_threshold, threshold);
2461 rxq->rxq_threshold = threshold;
2462 }
2463
2464 if ((soft_flags & SOFT_TXINTR)
2465 || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
2466 /*
2467 * Let's do what we came here for. Consume transmitted
2468		 * packets off the transmit ring.
2469 */
2470 if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
2471 || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
2472 sc->sc_ev_tx_stall.ev_count++;
2473 ifp->if_flags |= IFF_OACTIVE;
2474 } else {
2475 ifp->if_flags &= ~IFF_OACTIVE;
2476 }
2477 imask |= IEVENT_TXF;
2478 }
2479
2480 if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) {
2481 /*
2482		 * Consume any received packets.
2483 */
2484 pq3etsec_rxq_consume(sc, &sc->sc_rxq);
2485 imask |= IEVENT_RXF;
2486 }
2487
2488 if (soft_flags & SOFT_TXERROR) {
2489 pq3etsec_tx_error(sc);
2490 imask |= IEVENT_TXE;
2491 }
2492
2493 if (ifp->if_flags & IFF_RUNNING) {
2494 pq3etsec_rxq_produce(sc, &sc->sc_rxq);
2495 mutex_spin_enter(sc->sc_hwlock);
2496 sc->sc_imask |= imask;
2497 etsec_write(sc, IMASK, sc->sc_imask);
2498 mutex_spin_exit(sc->sc_hwlock);
2499 } else {
2500 KASSERT((soft_flags & SOFT_RXBSY) == 0);
2501 }
2502
2503 mutex_exit(sc->sc_lock);
2504 }
2505
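/*
 * Once-a-second MII tick.  Drives mii_tick(), reschedules itself, and
 * kicks the soft interrupt if a reset has been requested meanwhile.
 */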
2506 static void
2507 pq3etsec_mii_tick(void *arg)
2508 {
2509 struct pq3etsec_softc * const sc = arg;
2510 mutex_enter(sc->sc_lock);
2511 callout_ack(&sc->sc_mii_callout);
2512 sc->sc_ev_mii_ticks.ev_count++;
2513 #ifdef DEBUG
2514 uint64_t now = mftb();
2515 if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
2516 aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
2517 __func__, now - sc->sc_mii_last_tick);
2518 callout_stop(&sc->sc_mii_callout);
2519 }
2520 #endif
2521 mii_tick(&sc->sc_mii);
2522 int s = splnet();
2523 if (sc->sc_soft_flags & SOFT_RESET)
2524 softint_schedule(sc->sc_soft_ih);
2525 splx(s);
2526 callout_schedule(&sc->sc_mii_callout, hz);
2527 #ifdef DEBUG
2528 sc->sc_mii_last_tick = now;
2529 #endif
2530 mutex_exit(sc->sc_lock);
2531 }
2532