1 /*	$NetBSD: pq3etsec.c,v 1.3.2.1 2011/06/23 14:19:28 cherry Exp $	*/
2 /*-
3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
8 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
9 *
10 * This material is based upon work supported by the Defense Advanced Research
11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
12 * Contract No. N66001-09-C-2073.
13 * Approved for Public Release, Distribution Unlimited
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "opt_inet.h"
38
39 #include <sys/cdefs.h>
40
41 #include <sys/param.h>
42 #include <sys/cpu.h>
43 #include <sys/device.h>
44 #include <sys/mbuf.h>
45 #include <sys/ioctl.h>
46 #include <sys/intr.h>
47 #include <sys/bus.h>
48 #include <sys/kernel.h>
49 #include <sys/kmem.h>
50 #include <sys/proc.h>
51 #include <sys/atomic.h>
52 #include <sys/callout.h>
53
54 #include <net/if.h>
55 #include <net/if_dl.h>
56 #include <net/if_ether.h>
57 #include <net/if_media.h>
58
59 #include <dev/mii/miivar.h>
60
61 #include "ioconf.h"
62
63 #include <net/bpf.h>
64
65 #ifdef INET
66 #include <netinet/in.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/ip.h>
69 #include <netinet/in_offload.h>
70 #endif /* INET */
71 #ifdef INET6
72 #include <netinet6/in6.h>
73 #include <netinet/ip6.h>
74 #endif
75 #include <netinet6/in6_offload.h>
76
77
78 #include <powerpc/spr.h>
79 #include <powerpc/booke/spr.h>
80
81 #include <powerpc/booke/cpuvar.h>
82 #include <powerpc/booke/e500var.h>
83 #include <powerpc/booke/e500reg.h>
84 #include <powerpc/booke/etsecreg.h>
85
86 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */
87
88 #define ETSEC_MAXTXMBUFS 30
89 #define ETSEC_NTXSEGS 30
90 #define ETSEC_MAXRXMBUFS 511
91 #define ETSEC_MINRXMBUFS 32
92 #define ETSEC_NRXSEGS 1
93
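/*
 * Groups of interface checksum-offload capability bits that correspond to
 * the RCTRL/TCTRL IP and TCP/UDP checksum-enable controls below.
 */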
94 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx
95 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx\
96 |IFCAP_CSUM_UDPv4_Rx\
97 |IFCAP_CSUM_TCPv6_Rx\
98 |IFCAP_CSUM_UDPv6_Rx)
99
100 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx
101 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx\
102 |IFCAP_CSUM_UDPv4_Tx\
103 |IFCAP_CSUM_TCPv6_Tx\
104 |IFCAP_CSUM_UDPv6_Tx)
105
106 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN\
107 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN)
108
109 #define M_CSUM_IP (M_CSUM_CIP|M_CSUM_CTU)
110 #define M_CSUM_IP6 (M_CSUM_TCPv6|M_CSUM_UDPv6)
111 #define M_CSUM_TUP (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
112 #define M_CSUM_UDP (M_CSUM_UDPv4|M_CSUM_UDPv6)
113 #define M_CSUM_IP4 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4)
114 #define M_CSUM_CIP (M_CSUM_IPv4)
115 #define M_CSUM_CTU (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
116
117 struct pq3etsec_txqueue {
118 bus_dmamap_t txq_descmap;
119 volatile struct txbd *txq_consumer;
120 volatile struct txbd *txq_producer;
121 volatile struct txbd *txq_first;
122 volatile struct txbd *txq_last;
123 struct ifqueue txq_mbufs;
124 struct mbuf *txq_next;
125 #ifdef ETSEC_DEBUG
126 struct mbuf *txq_lmbufs[512];
127 #endif
128 uint32_t txq_qmask;
129 uint32_t txq_free;
130 uint32_t txq_threshold;
131 uint32_t txq_lastintr;
132 bus_size_t txq_reg_tbase;
133 bus_dma_segment_t txq_descmap_seg;
134 };
135
136 struct pq3etsec_rxqueue {
137 bus_dmamap_t rxq_descmap;
138 volatile struct rxbd *rxq_consumer;
139 volatile struct rxbd *rxq_producer;
140 volatile struct rxbd *rxq_first;
141 volatile struct rxbd *rxq_last;
142 struct mbuf *rxq_mhead;
143 struct mbuf **rxq_mtail;
144 struct mbuf *rxq_mconsumer;
145 #ifdef ETSEC_DEBUG
146 struct mbuf *rxq_mbufs[512];
147 #endif
148 uint32_t rxq_qmask;
149 uint32_t rxq_inuse;
150 uint32_t rxq_threshold;
151 bus_size_t rxq_reg_rbase;
152 bus_size_t rxq_reg_rbptr;
153 bus_dma_segment_t rxq_descmap_seg;
154 };
155
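/*
 * A small stack of preallocated bus_dma maps shared by the rx and tx paths;
 * dmc_maps[] is allocated with dmc_maxmaps entries and dmc_nmaps counts how
 * many of them are currently available.
 */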
156 struct pq3etsec_mapcache {
157 u_int dmc_nmaps;
158 u_int dmc_maxseg;
159 u_int dmc_maxmaps;
160 u_int dmc_maxmapsize;
161 bus_dmamap_t dmc_maps[0];
162 };
163
164 struct pq3etsec_softc {
165 device_t sc_dev;
166 struct ethercom sc_ec;
167 #define sc_if sc_ec.ec_if
168 struct mii_data sc_mii;
169 bus_space_tag_t sc_bst;
170 bus_space_handle_t sc_bsh;
171 bus_dma_tag_t sc_dmat;
172 int sc_phy_addr;
173 prop_dictionary_t sc_intrmap;
174 uint32_t sc_intrmask;
175
176 uint32_t sc_soft_flags;
177 #define SOFT_RESET 0x0001
178 #define SOFT_RXINTR 0x0010
179 #define SOFT_RXBSY 0x0020
180 #define SOFT_TXINTR 0x0100
181 #define SOFT_TXERROR 0x0200
182
183 struct pq3etsec_txqueue sc_txq;
184 struct pq3etsec_rxqueue sc_rxq;
185 uint32_t sc_txerrors;
186 uint32_t sc_rxerrors;
187
188 size_t sc_rx_adjlen;
189
190 /*
191 * Copies of various ETSEC registers.
192 */
193 uint32_t sc_imask;
194 uint32_t sc_maccfg1;
195 uint32_t sc_maccfg2;
196 uint32_t sc_maxfrm;
197 uint32_t sc_ecntrl;
198 uint32_t sc_dmactrl;
199 uint32_t sc_macstnaddr1;
200 uint32_t sc_macstnaddr2;
201 uint32_t sc_tctrl;
202 uint32_t sc_rctrl;
203 uint32_t sc_gaddr[16];
204 uint64_t sc_macaddrs[15];
205
206 void *sc_tx_ih;
207 void *sc_rx_ih;
208 void *sc_error_ih;
209 void *sc_soft_ih;
210
211 kmutex_t *sc_lock;
212
213 struct evcnt sc_ev_tx_stall;
214 struct evcnt sc_ev_tx_intr;
215 struct evcnt sc_ev_rx_stall;
216 struct evcnt sc_ev_rx_intr;
217 struct evcnt sc_ev_error_intr;
218 struct evcnt sc_ev_soft_intr;
219 struct evcnt sc_ev_tx_pause;
220 struct evcnt sc_ev_rx_pause;
221 struct evcnt sc_ev_mii_ticks;
222
223 struct callout sc_mii_callout;
224 uint64_t sc_mii_last_tick;
225
226 struct ifqueue sc_rx_bufcache;
227 struct pq3etsec_mapcache *sc_rx_mapcache;
228 struct pq3etsec_mapcache *sc_tx_mapcache;
229 };
230
231 static int pq3etsec_match(device_t, cfdata_t, void *);
232 static void pq3etsec_attach(device_t, device_t, void *);
233
234 static void pq3etsec_ifstart(struct ifnet *);
235 static void pq3etsec_ifwatchdog(struct ifnet *);
236 static int pq3etsec_ifinit(struct ifnet *);
237 static void pq3etsec_ifstop(struct ifnet *, int);
238 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *);
239
240 static int pq3etsec_mapcache_create(struct pq3etsec_softc *,
241 struct pq3etsec_mapcache **, size_t, size_t, size_t, size_t);
242 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *,
243 struct pq3etsec_mapcache *);
244 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *,
245 struct pq3etsec_mapcache *);
246 static void pq3etsec_mapcache_put(struct pq3etsec_softc *,
247 struct pq3etsec_mapcache *, bus_dmamap_t);
248
249 static int pq3etsec_txq_attach(struct pq3etsec_softc *,
250 struct pq3etsec_txqueue *, u_int);
251 static void pq3etsec_txq_purge(struct pq3etsec_softc *,
252 struct pq3etsec_txqueue *);
253 static void pq3etsec_txq_reset(struct pq3etsec_softc *,
254 struct pq3etsec_txqueue *);
255 static bool pq3etsec_txq_consume(struct pq3etsec_softc *,
256 struct pq3etsec_txqueue *);
257 static bool pq3etsec_txq_produce(struct pq3etsec_softc *,
258 struct pq3etsec_txqueue *, struct mbuf *m);
259 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *,
260 struct pq3etsec_txqueue *);
261
262 static int pq3etsec_rxq_attach(struct pq3etsec_softc *,
263 struct pq3etsec_rxqueue *, u_int);
264 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *,
265 struct pq3etsec_rxqueue *);
266 static void pq3etsec_rxq_purge(struct pq3etsec_softc *,
267 struct pq3etsec_rxqueue *, bool);
268 static void pq3etsec_rxq_reset(struct pq3etsec_softc *,
269 struct pq3etsec_rxqueue *);
270
271 static void pq3etsec_mc_setup(struct pq3etsec_softc *);
272
273 static void pq3etsec_mii_tick(void *);
274 static int pq3etsec_rx_intr(void *);
275 static int pq3etsec_tx_intr(void *);
276 static int pq3etsec_error_intr(void *);
277 static void pq3etsec_soft_intr(void *);
278
279 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc),
280 pq3etsec_match, pq3etsec_attach, NULL, NULL);
281
282 static int
283 pq3etsec_match(device_t parent, cfdata_t cf, void *aux)
284 {
285
286 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
287 return 0;
288
289 return 1;
290 }
291
292 static inline uint32_t
293 etsec_read(struct pq3etsec_softc *sc, bus_size_t off)
294 {
295 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
296 }
297
298 static inline void
299 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data)
300 {
301 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data);
302 }
303
304 static int
305 pq3etsec_mii_readreg(device_t self, int phy, int reg)
306 {
307 struct pq3etsec_softc * const sc = device_private(self);
308 uint32_t miimcom = etsec_read(sc, MIIMCOM);
309
310 // int s = splnet();
311
312 etsec_write(sc, MIIMADD,
313 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
314
315 etsec_write(sc, IEVENT, IEVENT_MMRD);
316 etsec_write(sc, MIIMCOM, 0); /* clear any past bits */
317 etsec_write(sc, MIIMCOM, MIIMCOM_READ);
318 #if 0
319 sc->sc_imask |= IEVENT_MMRD;
320 etsec_write(sc, IMASK, sc->sc_imask);
321 #endif
322
323 while (etsec_read(sc, MIIMIND) != 0) {
324 delay(1);
325 }
326 int data = etsec_read(sc, MIIMSTAT);
327
328 if (miimcom == MIIMCOM_SCAN)
329 etsec_write(sc, MIIMCOM, miimcom);
330
331 #if 0
332 aprint_normal_dev(sc->sc_dev, "%s: phy %d reg %d: %#x\n",
333 __func__, phy, reg, data);
334 #endif
335 etsec_write(sc, IEVENT, IEVENT_MMRD);
336 // splx(s);
337 return data;
338 }
339
340 static void
341 pq3etsec_mii_writereg(device_t self, int phy, int reg, int data)
342 {
343 struct pq3etsec_softc * const sc = device_private(self);
344 uint32_t miimcom = etsec_read(sc, MIIMCOM);
345
346 #if 0
347 aprint_normal_dev(sc->sc_dev, "%s: phy %d reg %d: %#x\n",
348 __func__, phy, reg, data);
349 #endif
350
351 // int s = splnet();
352 etsec_write(sc, IEVENT, IEVENT_MMWR);
353 etsec_write(sc, MIIMADD,
354 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
355 etsec_write(sc, MIIMCOM, 0); /* clear any past bits */
356 etsec_write(sc, MIIMCON, data);
357
358 #if 0
359 sc->sc_imask |= IEVENT_MMWR;
360 etsec_write(sc, IMASK, sc->sc_imask);
361 #endif
362
363 int timo = 1000; /* 1ms */
364 while ((etsec_read(sc, MIIMIND) & MIIMIND_BUSY) && --timo > 0) {
365 delay(1);
366 }
367
368 if (miimcom == MIIMCOM_SCAN)
369 etsec_write(sc, MIIMCOM, miimcom);
370 etsec_write(sc, IEVENT, IEVENT_MMWR);
371 // splx(s);
372 }
373
374 static void
375 pq3etsec_mii_statchg(device_t self)
376 {
377 struct pq3etsec_softc * const sc = device_private(self);
378 struct mii_data * const mii = &sc->sc_mii;
379
380 uint32_t maccfg1 = sc->sc_maccfg1;
381 uint32_t maccfg2 = sc->sc_maccfg2;
382 uint32_t ecntrl = sc->sc_ecntrl;
383
384 maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW);
385 maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD);
386
387 if (sc->sc_mii.mii_media_active & IFM_FDX) {
388 maccfg2 |= MACCFG2_FD;
389 }
390
391 /*
392 * Now deal with the flow control bits.
393 */
394 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO
395 && (mii->mii_media_active & IFM_ETH_FMASK)) {
396 if (mii->mii_media_active & IFM_ETH_RXPAUSE)
397 maccfg1 |= MACCFG1_RX_FLOW;
398 if (mii->mii_media_active & IFM_ETH_TXPAUSE)
399 maccfg1 |= MACCFG1_TX_FLOW;
400 }
401
402 /*
403 * Now deal with the speed.
404 */
405 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
406 maccfg2 |= MACCFG2_IFMODE_GMII;
407 } else {
408 maccfg2 |= MACCFG2_IFMODE_MII;
409 ecntrl &= ~ECNTRL_R100M;
410 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
411 ecntrl |= ECNTRL_R100M;
412 }
413 }
414
415 /*
416 * If things are different, re-init things.
417 */
418 if (maccfg1 != sc->sc_maccfg1
419 || maccfg2 != sc->sc_maccfg2
420 || ecntrl != sc->sc_ecntrl) {
421 if (sc->sc_if.if_flags & IFF_RUNNING)
422 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
423 sc->sc_maccfg1 = maccfg1;
424 sc->sc_maccfg2 = maccfg2;
425 sc->sc_ecntrl = ecntrl;
426 }
427 }
428
429 #if 0
430 static void
431 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
432 {
433 struct pq3etsec_softc * const sc = ifp->if_softc;
434
435 mii_pollstat(&sc->sc_mii);
436 ether_mediastatus(ifp, ifmr);
437 ifmr->ifm_status = sc->sc_mii.mii_media_status;
438 ifmr->ifm_active = sc->sc_mii.mii_media_active;
439 }
440
441 static int
442 pq3etsec_mediachange(struct ifnet *ifp)
443 {
444 struct pq3etsec_softc * const sc = ifp->if_softc;
445
446 if ((ifp->if_flags & IFF_UP) == 0)
447 return 0;
448
449 int rv = mii_mediachg(&sc->sc_mii);
450 return (rv == ENXIO) ? 0 : rv;
451 }
452 #endif
453
454 static void
455 pq3etsec_attach(device_t parent, device_t self, void *aux)
456 {
457 struct cpunode_softc * const psc = device_private(parent);
458 struct pq3etsec_softc * const sc = device_private(self);
459 struct cpunode_attach_args * const cna = aux;
460 struct cpunode_locators * const cnl = &cna->cna_locs;
461 cfdata_t cf = device_cfdata(self);
462 int error;
463
464 psc->sc_children |= cna->cna_childmask;
465 sc->sc_dev = self;
466 sc->sc_bst = cna->cna_memt;
467 sc->sc_dmat = &booke_bus_dma_tag;
468
469 /*
470 	 * If we have a common MDIO bus, hang all of the PHYs off of the first instance.
471 */
472 device_t miiself = (cf->cf_flags & 0x100) ? tsec_cd.cd_devs[0] : self;
473
474 /*
475 * See if the phy is in the config file...
476 */
477 if (cf->cf_flags & 0x3f) {
478 sc->sc_phy_addr = (cf->cf_flags & 0x3f) - 1;
479 } else {
480 		char prop_name[20];
481 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
482 cnl->cnl_instance);
483 sc->sc_phy_addr = board_info_get_number(prop_name);
484 }
485 aprint_normal(" phy %d", sc->sc_phy_addr);
486
487 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
488 &sc->sc_bsh);
489 if (error) {
490 aprint_error(": error mapping registers: %d\n", error);
491 return;
492 }
493
494 /*
495 	 * Assume the firmware has already set the MAC address and fetch it
496 * before we reinit it.
497 */
498 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2);
499 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1);
500 sc->sc_rctrl = RCTRL_DEFAULT;
501 sc->sc_maccfg2 = MACCFG2_DEFAULT;
502
503 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) {
504 size_t len;
505 const uint8_t *mac_addr =
506 board_info_get_data("tsec-mac-addr-base", &len);
507 KASSERT(len == ETHER_ADDR_LEN);
508 sc->sc_macstnaddr2 =
509 (mac_addr[1] << 24)
510 | (mac_addr[0] << 16);
511 sc->sc_macstnaddr1 =
512 ((mac_addr[5] + cnl->cnl_instance - 1) << 24)
513 | (mac_addr[4] << 16)
514 | (mac_addr[3] << 8)
515 | (mac_addr[2] << 0);
516 #if 0
517 aprint_error(": mac-address unknown\n");
518 return;
519 #endif
520 }
521
522 char enaddr[ETHER_ADDR_LEN] = {
523 [0] = sc->sc_macstnaddr2 >> 16,
524 [1] = sc->sc_macstnaddr2 >> 24,
525 [2] = sc->sc_macstnaddr1 >> 0,
526 [3] = sc->sc_macstnaddr1 >> 8,
527 [4] = sc->sc_macstnaddr1 >> 16,
528 [5] = sc->sc_macstnaddr1 >> 24,
529 };
530
531 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0);
532 if (error) {
533 aprint_error(": failed to init rxq: %d\n", error);
534 return;
535 }
536
537 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
538 if (error) {
539 aprint_error(": failed to init txq: %d\n", error);
540 return;
541 }
542
543 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache,
544 ETSEC_MAXRXMBUFS, ETSEC_MINRXMBUFS, MCLBYTES, ETSEC_NRXSEGS);
545 if (error) {
546 aprint_error(": failed to allocate rx dmamaps: %d\n", error);
547 return;
548 }
549
550 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
551 ETSEC_MAXTXMBUFS, ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS);
552 if (error) {
553 aprint_error(": failed to allocate tx dmamaps: %d\n", error);
554 return;
555 }
556
557 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP,
558 pq3etsec_tx_intr, sc);
559 if (sc->sc_tx_ih == NULL) {
560 aprint_error(": failed to establish tx interrupt: %d\n",
561 cnl->cnl_intrs[0]);
562 return;
563 }
564
565 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP,
566 pq3etsec_rx_intr, sc);
567 if (sc->sc_rx_ih == NULL) {
568 aprint_error(": failed to establish rx interrupt: %d\n",
569 cnl->cnl_intrs[1]);
570 return;
571 }
572
573 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP,
574 pq3etsec_error_intr, sc);
575 if (sc->sc_error_ih == NULL) {
576 aprint_error(": failed to establish error interrupt: %d\n",
577 cnl->cnl_intrs[2]);
578 return;
579 }
580
581 sc->sc_soft_ih = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
582 pq3etsec_soft_intr, sc);
583 if (sc->sc_soft_ih == NULL) {
584 aprint_error(": failed to establish soft interrupt\n");
585 return;
586 }
587
588 aprint_normal("\n");
589
590 etsec_write(sc, ATTR, ATTR_DEFAULT);
591 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT);
592
593 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
594
595 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE);
596 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc);
597
598 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
599 ether_sprintf(enaddr));
600
601 const char * const xname = device_xname(sc->sc_dev);
602 struct ethercom * const ec = &sc->sc_ec;
603 struct ifnet * const ifp = &ec->ec_if;
604
605 ec->ec_mii = &sc->sc_mii;
606
607 sc->sc_mii.mii_ifp = ifp;
608 sc->sc_mii.mii_readreg = pq3etsec_mii_readreg;
609 sc->sc_mii.mii_writereg = pq3etsec_mii_writereg;
610 sc->sc_mii.mii_statchg = pq3etsec_mii_statchg;
611
612 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
613 ether_mediastatus);
614
615 if (sc->sc_phy_addr < 32) {
616 mii_attach(miiself, &sc->sc_mii, 0xffffffff,
617 sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
618
619 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
620 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
621 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
622 } else {
623 callout_schedule(&sc->sc_mii_callout, hz);
624 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
625 }
626 } else {
627 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
628 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
629 }
630
631 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
632 | ETHERCAP_JUMBO_MTU;
633
634 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
635 ifp->if_softc = sc;
636 ifp->if_capabilities = IFCAP_ETSEC;
637 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
638 ifp->if_ioctl = pq3etsec_ifioctl;
639 ifp->if_start = pq3etsec_ifstart;
640 ifp->if_watchdog = pq3etsec_ifwatchdog;
641 ifp->if_init = pq3etsec_ifinit;
642 ifp->if_stop = pq3etsec_ifstop;
643 IFQ_SET_READY(&ifp->if_snd);
644
645 pq3etsec_ifstop(ifp, true);
646
647 /*
648 * Attach the interface.
649 */
650 if_attach(ifp);
651 ether_ifattach(ifp, enaddr);
652
653 evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
654 NULL, xname, "rx stall");
655 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
656 NULL, xname, "tx stall");
657 evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
658 NULL, xname, "tx intr");
659 evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
660 NULL, xname, "rx intr");
661 evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
662 NULL, xname, "error intr");
663 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
664 NULL, xname, "soft intr");
665 evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
666 NULL, xname, "tx pause");
667 evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
668 NULL, xname, "rx pause");
669 evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
670 NULL, xname, "mii ticks");
671 }
672
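/*
 * Pack an ethernet address into the byte-reversed 64-bit layout used by the
 * MACSTNADDR1/MACSTNADDR2 (and MACnADDR1/2) register pairs: the top 32 bits
 * of the result are written to xxADDR1 and the bottom 32 bits (of which only
 * the upper halfword is used) to xxADDR2.
 */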
673 static uint64_t
674 pq3etsec_macaddr_create(const uint8_t *lladdr)
675 {
676 uint64_t macaddr = 0;
677
678 lladdr += ETHER_ADDR_LEN;
679 for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
680 macaddr = (macaddr << 8) | *--lladdr;
681 }
682 return macaddr << 16;
683 }
684
685 static int
686 pq3etsec_ifinit(struct ifnet *ifp)
687 {
688 struct pq3etsec_softc * const sc = ifp->if_softc;
689 int error = 0;
690
691 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
692 if (ifp->if_mtu > ETHERMTU_JUMBO)
693 return error;
694
695 KASSERT(ifp->if_flags & IFF_UP);
696
697 /*
698 * Stop the interface (steps 1 to 4 in the Soft Reset and
699 	 * Reconfiguration Procedure).
700 */
701 pq3etsec_ifstop(ifp, 0);
702
703 /*
704 * If our frame size has changed (or it's our first time through)
705 * destroy the existing transmit mapcache.
706 */
707 if (sc->sc_tx_mapcache != NULL
708 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
709 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
710 sc->sc_tx_mapcache = NULL;
711 }
712
713 if (sc->sc_tx_mapcache == NULL) {
714 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
715 ETSEC_MAXTXMBUFS, ETSEC_MAXTXMBUFS, sc->sc_maxfrm,
716 ETSEC_NTXSEGS);
717 if (error)
718 return error;
719 }
720
721 sc->sc_ev_mii_ticks.ev_count++;
722 mii_tick(&sc->sc_mii);
723
724 if (ifp->if_flags & IFF_PROMISC) {
725 sc->sc_rctrl |= RCTRL_PROM;
726 } else {
727 sc->sc_rctrl &= ~RCTRL_PROM;
728 }
729
730 uint32_t rctrl_prsdep = 0;
731 sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP);
732 if (VLAN_ATTACHED(&sc->sc_ec)) {
733 sc->sc_rctrl |= RCTRL_VLEX;
734 rctrl_prsdep = RCTRL_PRSDEP_L2;
735 }
736 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) {
737 sc->sc_rctrl |= RCTRL_IPCSEN;
738 rctrl_prsdep = RCTRL_PRSDEP_L3;
739 }
740 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) {
741 sc->sc_rctrl |= RCTRL_TUCSEN;
742 rctrl_prsdep = RCTRL_PRSDEP_L4;
743 }
744 sc->sc_rctrl |= rctrl_prsdep;
745 #if 0
746 if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP))
747 aprint_normal_dev(sc->sc_dev,
748 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n",
749 sc->sc_rctrl,
750 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN),
751 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN),
752 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX),
753 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP));
754 #endif
755
756 sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS);
757 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */
758 sc->sc_tctrl |= TCTRL_VLINS;
759 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN)
760 sc->sc_tctrl |= TCTRL_IPCSEN;
761 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN)
762 sc->sc_tctrl |= TCTRL_TUCSEN;
763 #if 0
764 if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS))
765 aprint_normal_dev(sc->sc_dev,
766 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n",
767 sc->sc_tctrl,
768 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN),
769 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN),
770 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS));
771 #endif
772
773 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN);
774
775 const uint64_t macstnaddr =
776 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl));
777
778 sc->sc_imask = IEVENT_DPE;
779
780 	/* 5. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */
781 pq3etsec_rxq_reset(sc, &sc->sc_rxq);
782 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */
783
784 	/* 6. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */
785 pq3etsec_txq_reset(sc, &sc->sc_txq);
786
787 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */
788 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2);
789 etsec_write(sc, MAXFRM, sc->sc_maxfrm);
790 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32));
791 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0));
792 etsec_write(sc, MACCFG1, sc->sc_maccfg1);
793 etsec_write(sc, MACCFG2, sc->sc_maccfg2);
794 etsec_write(sc, ECNTRL, sc->sc_ecntrl);
795
796 /* 8. Setup group address hash table (GADDR0-GADDR15) */
797 pq3etsec_mc_setup(sc);
798
799 /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */
800 etsec_write(sc, MRBLR, MCLBYTES);
801
802 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */
803 sc->sc_dmactrl |= DMACTRL_DEFAULT;
804 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
805
806 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
807 etsec_write(sc, TQUEUE, TQUEUE_EN0);
808 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC;
809
810 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */
811
812 /* 12. Enable receive queues in RQUEUE, */
813 etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0);
814 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC;
815
816 /* and optionally set TOE functionality in RCTRL. */
817 etsec_write(sc, RCTRL, sc->sc_rctrl);
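	/*
	 * Frames arrive with RCTRL[PAL] bytes of alignment padding and, when
	 * any parsing is enabled, a receive FCB prepended; record how much
	 * pq3etsec_rx_input() has to trim from the front of each packet.
	 */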
818 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL);
819 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF)
820 sc->sc_rx_adjlen += sizeof(struct rxfcb);
821
822 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */
823 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF);
824
825 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/
826 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF);
827
828 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */
829 sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS);
830 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
831
832 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */
833 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
834 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
835
836 sc->sc_soft_flags = 0;
837
838 etsec_write(sc, IMASK, sc->sc_imask);
839
840 ifp->if_flags |= IFF_RUNNING;
841
842 return error;
843 }
844
845 static void
846 pq3etsec_ifstop(struct ifnet *ifp, int disable)
847 {
848 struct pq3etsec_softc * const sc = ifp->if_softc;
849
850 KASSERT(!cpu_intr_p());
851 const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC;
852 /*
853 * Clear the GTSC and GRSC from the interrupt mask until
854 * we are ready for them. Then clear them from IEVENT,
855 * request the graceful shutdown, and then enable the
856 * GTSC and GRSC bits in the mask. This should cause the
857 * error interrupt to fire which will issue a wakeup to
858 * allow us to resume.
859 */
860
861 /*
862 * 1. Set GRS/GTS bits in DMACTRL register
863 */
864 sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS;
865 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask);
866 etsec_write(sc, IEVENT, imask_gsc_mask);
867 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
868
869 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) {
870 /*
871 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set
872 */
873 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask);
874
875 u_int timo = 1000;
876 uint32_t ievent = etsec_read(sc, IEVENT);
877 while ((ievent & imask_gsc_mask) != imask_gsc_mask) {
878 if (--timo == 0) {
879 aprint_error_dev(sc->sc_dev,
880 "WARNING: "
881 "request to stop failed (IEVENT=%#x)\n",
882 ievent);
883 break;
884 }
885 delay(10);
886 ievent = etsec_read(sc, IEVENT);
887 }
888 }
889
890 /*
891 * Now reset the controller.
892 *
893 * 3. Set SOFT_RESET bit in MACCFG1 register
894 * 4. Clear SOFT_RESET bit in MACCFG1 register
895 */
896 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
897 etsec_write(sc, MACCFG1, 0);
898 etsec_write(sc, IMASK, 0);
899 etsec_write(sc, IEVENT, ~0);
900 sc->sc_imask = 0;
901 ifp->if_flags &= ~IFF_RUNNING;
902
903 uint32_t tbipa = etsec_read(sc, TBIPA);
904 if (tbipa == sc->sc_phy_addr) {
905 aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
906 etsec_write(sc, TBIPA, 0x1f);
907 }
908 uint32_t miimcfg = etsec_read(sc, MIIMCFG);
909 etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
910 etsec_write(sc, MIIMCFG, miimcfg);
911
912 /*
913 	 * Let's consume any remaining transmitted packets.  And if we are
914 * disabling the interface, purge ourselves of any untransmitted
915 * packets. But don't consume any received packets, just drop them.
916 * If we aren't disabling the interface, save the mbufs in the
917 * receive queue for reuse.
918 */
919 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable);
920 pq3etsec_txq_consume(sc, &sc->sc_txq);
921 if (disable) {
922 pq3etsec_txq_purge(sc, &sc->sc_txq);
923 IF_PURGE(&ifp->if_snd);
924 }
925 }
926
927 static void
928 pq3etsec_ifwatchdog(struct ifnet *ifp)
929 {
930 }
931
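/*
 * Program the multicast filter.  Individual groups are loaded into the
 * exact-match MACnADDR registers while RCTRL[EMEN] is set and slots remain;
 * everything else is hashed into the GADDR/IGADDR hash table.  A multicast
 * range forces ALLMULTI.
 */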
932 static void
933 pq3etsec_mc_setup(
934 struct pq3etsec_softc *sc)
935 {
936 struct ethercom * const ec = &sc->sc_ec;
937 struct ifnet * const ifp = &sc->sc_if;
938 struct ether_multi *enm;
939 struct ether_multistep step;
940 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8);
941 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8);
942
943 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr));
944 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
945
946 ifp->if_flags &= ~IFF_ALLMULTI;
947
948 ETHER_FIRST_MULTI(step, ec, enm);
949 for (u_int i = 0; enm != NULL; ) {
950 const char *addr = enm->enm_addrlo;
951 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
952 ifp->if_flags |= IFF_ALLMULTI;
953 memset(gaddr, 0xff, 32 << (crc_shift & 1));
954 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
955 break;
956 }
957 if ((sc->sc_rctrl & RCTRL_EMEN)
958 && i < __arraycount(sc->sc_macaddrs)) {
959 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr);
960 } else {
961 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
962 #if 0
963 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__,
964 ether_sprintf(addr), crc,
965 crc >> crc_shift,
966 crc >> (crc_shift + 5),
967 (crc >> crc_shift) & 31,
968 1 << (((crc >> crc_shift) & 31) ^ 31));
969 #endif
970 /*
971 * The documentation doesn't completely follow PowerPC
972 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01
973 * is 0x7fa32d9b. By empirical testing, the
974 * corresponding hash bit is word 3, bit 31 (ppc bit
975 	 * order).  Since 3 << 5 | 31 is 0x7f, we deduce
976 * H[0:2] selects the register while H[3:7] selects
977 * the bit (ppc bit order).
978 */
979 crc >>= crc_shift;
980 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31);
981 }
982 ETHER_NEXT_MULTI(step, enm);
983 }
984 for (u_int i = 0; i < 8; i++) {
985 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]);
986 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]);
987 #if 0
988 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8])
989 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__,
990 i, IGADDR(i), etsec_read(sc, IGADDR(i)),
991 i, GADDR(i), etsec_read(sc, GADDR(i)));
992 #endif
993 }
994 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) {
995 uint64_t macaddr = sc->sc_macaddrs[i];
996 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32));
997 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0));
998 #if 0
999 if (macaddr)
1000 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__,
1001 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)),
1002 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i)));
1003 #endif
1004 }
1005 }
1006
1007 static int
1008 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
1009 {
1010 struct pq3etsec_softc *sc = ifp->if_softc;
1011 struct ifreq * const ifr = data;
1012 const int s = splnet();
1013 int error;
1014
1015 switch (cmd) {
1016 case SIOCSIFMEDIA:
1017 case SIOCGIFMEDIA:
1018 /* Flow control requires full-duplex mode. */
1019 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1020 (ifr->ifr_media & IFM_FDX) == 0)
1021 ifr->ifr_media &= ~IFM_ETH_FMASK;
1022 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1023 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1024 /* We can do both TXPAUSE and RXPAUSE. */
1025 ifr->ifr_media |=
1026 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1027 }
1028 }
1029 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1030 break;
1031
1032 default:
1033 error = ether_ioctl(ifp, cmd, data);
1034 if (error != ENETRESET)
1035 break;
1036
1037 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
1038 error = 0;
1039 if (ifp->if_flags & IFF_RUNNING)
1040 pq3etsec_mc_setup(sc);
1041 break;
1042 }
1043 error = pq3etsec_ifinit(ifp);
1044 break;
1045 }
1046
1047 splx(s);
1048 return error;
1049 }
1050
1051 static void
1052 pq3etsec_rxq_desc_presync(
1053 struct pq3etsec_softc *sc,
1054 struct pq3etsec_rxqueue *rxq,
1055 volatile struct rxbd *rxbd,
1056 size_t count)
1057 {
1058 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1059 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1060 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1061 }
1062
1063 static void
1064 pq3etsec_rxq_desc_postsync(
1065 struct pq3etsec_softc *sc,
1066 struct pq3etsec_rxqueue *rxq,
1067 volatile struct rxbd *rxbd,
1068 size_t count)
1069 {
1070 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1071 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1072 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1073 }
1074
1075 static void
1076 pq3etsec_txq_desc_presync(
1077 struct pq3etsec_softc *sc,
1078 struct pq3etsec_txqueue *txq,
1079 volatile struct txbd *txbd,
1080 size_t count)
1081 {
1082 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1083 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1084 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1085 }
1086
1087 static void
1088 pq3etsec_txq_desc_postsync(
1089 struct pq3etsec_softc *sc,
1090 struct pq3etsec_txqueue *txq,
1091 volatile struct txbd *txbd,
1092 size_t count)
1093 {
1094 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1095 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1096 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1097 }
1098
1099 static bus_dmamap_t
1100 pq3etsec_mapcache_get(
1101 struct pq3etsec_softc *sc,
1102 struct pq3etsec_mapcache *dmc)
1103 {
1104 if (dmc->dmc_nmaps == 0) {
1105 bus_dmamap_t map;
1106 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
1107 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
1108 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &map);
1109 if (error) {
1110 aprint_error_dev(sc->sc_dev,
1111 "failed to allocate a %zuB map: %d\n",
1112 dmc->dmc_maxmapsize, error);
1113 return NULL;
1114 }
1115 return map;
1116 }
1117
1118 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
1119 return dmc->dmc_maps[--dmc->dmc_nmaps];
1120 }
1121
1122 static void
1123 pq3etsec_mapcache_put(
1124 struct pq3etsec_softc *sc,
1125 struct pq3etsec_mapcache *dmc,
1126 bus_dmamap_t map)
1127 {
1128 KASSERT(map != NULL);
1129 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
1130 dmc->dmc_maps[dmc->dmc_nmaps++] = map;
1131 }
1132
1133 static void
1134 pq3etsec_mapcache_destroy(
1135 struct pq3etsec_softc *sc,
1136 struct pq3etsec_mapcache *dmc)
1137 {
1138 const size_t dmc_size =
1139 offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);
1140
1141 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
1142 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
1143 }
1144 kmem_free(dmc, dmc_size);
1145 }
1146
1147 static int
1148 pq3etsec_mapcache_create(
1149 struct pq3etsec_softc *sc,
1150 struct pq3etsec_mapcache **dmc_p,
1151 size_t maxmaps,
1152 size_t minmaps,
1153 size_t maxmapsize,
1154 size_t maxseg)
1155 {
1156 const size_t dmc_size =
1157 offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
1158 struct pq3etsec_mapcache * const dmc = kmem_zalloc(dmc_size, KM_SLEEP);
1159
1160 dmc->dmc_maxmaps = maxmaps;
1161 dmc->dmc_nmaps = minmaps;
1162 dmc->dmc_maxmapsize = maxmapsize;
1163 dmc->dmc_maxseg = maxseg;
1164
1165 for (u_int i = 0; i < minmaps; i++) {
1166 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
1167 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
1168 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
1169 if (error) {
1170 aprint_error_dev(sc->sc_dev,
1171 "failed to creat dma map cache "
1172 "entry %u of %zu (max %zu): %d\n",
1173 i, minmaps, maxmaps, error);
1174 while (i-- > 0) {
1175 bus_dmamap_destroy(sc->sc_dmat,
1176 dmc->dmc_maps[i]);
1177 }
1178 kmem_free(dmc, dmc_size);
1179 return error;
1180 }
1181 KASSERT(dmc->dmc_maps[i] != NULL);
1182 }
1183
1184 *dmc_p = dmc;
1185
1186 return 0;
1187 }
1188
1189 #if 0
1190 static void
1191 pq3etsec_dmamem_free(
1192 bus_dma_tag_t dmat,
1193 size_t map_size,
1194 bus_dma_segment_t *seg,
1195 bus_dmamap_t map,
1196 void *kvap)
1197 {
1198 bus_dmamap_destroy(dmat, map);
1199 bus_dmamem_unmap(dmat, kvap, map_size);
1200 bus_dmamem_free(dmat, seg, 1);
1201 }
1202 #endif
1203
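/*
 * Allocate a single physically contiguous DMA segment, map it into kernel
 * virtual address space, and load it into a newly created DMA map.  Used
 * for the transmit and receive buffer descriptor rings.
 */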
1204 static int
1205 pq3etsec_dmamem_alloc(
1206 bus_dma_tag_t dmat,
1207 size_t map_size,
1208 bus_dma_segment_t *seg,
1209 bus_dmamap_t *map,
1210 void **kvap)
1211 {
1212 int error;
1213 int nseg;
1214
1215 *kvap = NULL;
1216 *map = NULL;
1217
1218 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
1219 seg, 1, &nseg, 0);
1220 if (error)
1221 return error;
1222
1223 KASSERT(nseg == 1);
1224
1225 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
1226 BUS_DMA_COHERENT);
1227 if (error == 0) {
1228 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
1229 map);
1230 if (error == 0) {
1231 error = bus_dmamap_load(dmat, *map, *kvap, map_size,
1232 NULL, 0);
1233 if (error == 0)
1234 return 0;
1235 bus_dmamap_destroy(dmat, *map);
1236 *map = NULL;
1237 }
1238 bus_dmamem_unmap(dmat, *kvap, map_size);
1239 *kvap = NULL;
1240 }
1241 bus_dmamem_free(dmat, seg, nseg);
1242 	return error;
1243 }
1244
1245 static struct mbuf *
1246 pq3etsec_rx_buf_alloc(
1247 struct pq3etsec_softc *sc)
1248 {
1249 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
1250 if (m == NULL) {
1251 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
1252 return NULL;
1253 }
1254 MCLGET(m, M_DONTWAIT);
1255 if ((m->m_flags & M_EXT) == 0) {
1256 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
1257 m_freem(m);
1258 return NULL;
1259 }
1260 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1261
1262 bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
1263 if (map == NULL) {
1264 printf("%s:%d: %s\n", __func__, __LINE__, "map get");
1265 m_freem(m);
1266 return NULL;
1267 }
1268 M_SETCTX(m, map);
1269 m->m_len = m->m_pkthdr.len = MCLBYTES;
1270 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1271 BUS_DMA_READ|BUS_DMA_NOWAIT);
1272 if (error) {
1273 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
1274 error);
1275 M_SETCTX(m, NULL);
1276 m_freem(m);
1277 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1278 return NULL;
1279 }
1280 KASSERT(map->dm_mapsize == MCLBYTES);
1281 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1282 BUS_DMASYNC_PREREAD);
1283
1284 return m;
1285 }
1286
1287 static void
1288 pq3etsec_rx_map_unload(
1289 struct pq3etsec_softc *sc,
1290 struct mbuf *m)
1291 {
1292 KASSERT(m);
1293 for (; m != NULL; m = m->m_next) {
1294 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1295 KASSERT(map);
1296 KASSERT(map->dm_mapsize == MCLBYTES);
1297 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
1298 BUS_DMASYNC_POSTREAD);
1299 bus_dmamap_unload(sc->sc_dmat, map);
1300 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1301 M_SETCTX(m, NULL);
1302 }
1303 }
1304
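/*
 * Refill the receive ring: hand empty clusters (recycled from the rx
 * bufcache or freshly allocated) to the hardware until rxq_inuse reaches
 * rxq_threshold, then clear any queue-halt condition so reception resumes.
 */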
1305 static bool
1306 pq3etsec_rxq_produce(
1307 struct pq3etsec_softc *sc,
1308 struct pq3etsec_rxqueue *rxq)
1309 {
1310 volatile struct rxbd *producer = rxq->rxq_producer;
1311 #if 0
1312 size_t inuse = rxq->rxq_inuse;
1313 #endif
1314 while (rxq->rxq_inuse < rxq->rxq_threshold) {
1315 struct mbuf *m;
1316 IF_DEQUEUE(&sc->sc_rx_bufcache, m);
1317 if (m == NULL) {
1318 m = pq3etsec_rx_buf_alloc(sc);
1319 if (m == NULL) {
1320 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
1321 break;
1322 }
1323 }
1324 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1325 KASSERT(map);
1326
1327 #ifdef ETSEC_DEBUG
1328 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
1329 rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
1330 #endif
1331
1332 		/* rxbd_len is written only by the eTSEC */
1333 producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
1334 membar_producer();
1335 producer->rxbd_flags |= RXBD_E;
1336 if (__predict_false(rxq->rxq_mhead == NULL)) {
1337 KASSERT(producer == rxq->rxq_consumer);
1338 rxq->rxq_mconsumer = m;
1339 }
1340 *rxq->rxq_mtail = m;
1341 rxq->rxq_mtail = &m->m_next;
1342 m->m_len = MCLBYTES;
1343 m->m_next = NULL;
1344 rxq->rxq_inuse++;
1345 if (++producer == rxq->rxq_last) {
1346 membar_producer();
1347 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1348 rxq->rxq_last - rxq->rxq_producer);
1349 producer = rxq->rxq_producer = rxq->rxq_first;
1350 }
1351 }
1352 if (producer != rxq->rxq_producer) {
1353 membar_producer();
1354 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1355 producer - rxq->rxq_producer);
1356 rxq->rxq_producer = producer;
1357 }
1358 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
1359 if (qhlt) {
1360 KASSERT(qhlt & rxq->rxq_qmask);
1361 sc->sc_ev_rx_stall.ev_count++;
1362 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
1363 }
1364 #if 0
1365 aprint_normal_dev(sc->sc_dev,
1366 "%s: buffers inuse went from %zu to %zu\n",
1367 __func__, inuse, rxq->rxq_inuse);
1368 #endif
1369 return true;
1370 }
1371
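/*
 * Translate a receive FCB into mbuf metadata: attach the VLAN tag, if any,
 * and convert the hardware checksum results into M_CSUM_* flags.
 */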
1372 static bool
1373 pq3etsec_rx_offload(
1374 struct pq3etsec_softc *sc,
1375 struct mbuf *m,
1376 const struct rxfcb *fcb)
1377 {
1378 if (fcb->rxfcb_flags & RXFCB_VLN) {
1379 VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl,
1380 m_freem(m); return false);
1381 }
1382 if ((fcb->rxfcb_flags & RXFCB_IP) == 0
1383 || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0)
1384 return true;
1385 int csum_flags = 0;
1386 if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) {
1387 csum_flags |= M_CSUM_IPv4;
1388 if (fcb->rxfcb_flags & RXFCB_EIP)
1389 csum_flags |= M_CSUM_IPv4_BAD;
1390 }
1391 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
1392 int ipv_flags;
1393 if (fcb->rxfcb_flags & RXFCB_IP6)
1394 ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6;
1395 else
1396 ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4;
1397 if (fcb->rxfcb_pro == IPPROTO_TCP) {
1398 csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags;
1399 } else {
1400 csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags;
1401 }
1402 if (fcb->rxfcb_flags & RXFCB_ETU)
1403 csum_flags |= M_CSUM_TCP_UDP_BAD;
1404 }
1405
1406 m->m_pkthdr.csum_flags = csum_flags;
1407 return true;
1408 }
1409
1410 static void
1411 pq3etsec_rx_input(
1412 struct pq3etsec_softc *sc,
1413 struct mbuf *m,
1414 uint16_t rxbd_flags)
1415 {
1416 struct ifnet * const ifp = &sc->sc_if;
1417
1418 pq3etsec_rx_map_unload(sc, m);
1419
1420 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
1421 struct rxfcb fcb = *mtod(m, struct rxfcb *);
1422 if (!pq3etsec_rx_offload(sc, m, &fcb))
1423 return;
1424 }
1425 m_adj(m, sc->sc_rx_adjlen);
1426
1427 if (rxbd_flags & RXBD_M)
1428 m->m_flags |= M_PROMISC;
1429 if (rxbd_flags & RXBD_BC)
1430 m->m_flags |= M_BCAST;
1431 if (rxbd_flags & RXBD_MC)
1432 m->m_flags |= M_MCAST;
1433 m->m_flags |= M_HASFCS;
1434 m->m_pkthdr.rcvif = &sc->sc_if;
1435
1436 ifp->if_ipackets++;
1437 ifp->if_ibytes += m->m_pkthdr.len;
1438
1439 /*
1440 	 * Let's give it to the network subsystem to deal with.
1441 */
1442 int s = splnet();
1443 bpf_mtap(ifp, m);
1444 (*ifp->if_input)(ifp, m);
1445 splx(s);
1446 }
1447
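/*
 * Walk the receive ring from the consumer index, handing completed frames
 * to pq3etsec_rx_input() and recycling errored buffers, until a descriptor
 * that is still owned by the hardware (RXBD_E set) is reached.
 */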
1448 static void
1449 pq3etsec_rxq_consume(
1450 struct pq3etsec_softc *sc,
1451 struct pq3etsec_rxqueue *rxq)
1452 {
1453 struct ifnet * const ifp = &sc->sc_if;
1454 volatile struct rxbd *consumer = rxq->rxq_consumer;
1455 size_t rxconsumed = 0;
1456
1457 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);
1458
1459 for (;;) {
1460 if (consumer == rxq->rxq_producer) {
1461 rxq->rxq_consumer = consumer;
1462 rxq->rxq_inuse -= rxconsumed;
1463 KASSERT(rxq->rxq_inuse == 0);
1464 return;
1465 }
1466 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
1467 const uint16_t rxbd_flags = consumer->rxbd_flags;
1468 if (rxbd_flags & RXBD_E) {
1469 rxq->rxq_consumer = consumer;
1470 rxq->rxq_inuse -= rxconsumed;
1471 return;
1472 }
1473 KASSERT(rxq->rxq_mconsumer != NULL);
1474 #ifdef ETSEC_DEBUG
1475 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1476 #endif
1477 #if 0
1478 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
1479 __func__,
1480 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
1481 mtod(rxq->rxq_mconsumer, int *)[0],
1482 mtod(rxq->rxq_mconsumer, int *)[1],
1483 mtod(rxq->rxq_mconsumer, int *)[2],
1484 mtod(rxq->rxq_mconsumer, int *)[3]);
1485 #endif
1486 /*
1487 		 * We own this descriptor again.  Clear all flags except wrap and interrupt.
1488 */
1489 rxconsumed++;
1490 consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I);
1491
1492 /*
1493 * If this descriptor has the LAST bit set and no errors,
1494 * it's a valid input packet.
1495 */
1496 if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) {
1497 size_t rxbd_len = consumer->rxbd_len;
1498 struct mbuf *m = rxq->rxq_mhead;
1499 struct mbuf *m_last = rxq->rxq_mconsumer;
1500 if ((rxq->rxq_mhead = m_last->m_next) == NULL)
1501 rxq->rxq_mtail = &rxq->rxq_mhead;
1502 rxq->rxq_mconsumer = rxq->rxq_mhead;
1503 m_last->m_next = NULL;
1504 m_last->m_len = rxbd_len & (MCLBYTES - 1);
1505 m->m_pkthdr.len = rxbd_len;
1506 pq3etsec_rx_input(sc, m, rxbd_flags);
1507 } else if (rxbd_flags & RXBD_L) {
1508 KASSERT(rxbd_flags & RXBD_ERRORS);
1509 struct mbuf *m;
1510 /*
1511 * We encountered an error, take the mbufs and add
1512 			 * them to the rx bufcache so we can reuse them.
1513 */
1514 ifp->if_ierrors++;
1515 for (m = rxq->rxq_mhead;
1516 m != rxq->rxq_mconsumer;
1517 m = m->m_next) {
1518 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1519 }
1520 m = rxq->rxq_mconsumer;
1521 if ((rxq->rxq_mhead = m->m_next) == NULL)
1522 rxq->rxq_mtail = &rxq->rxq_mhead;
1523 rxq->rxq_mconsumer = m->m_next;
1524 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1525 } else {
1526 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next;
1527 }
1528 #ifdef ETSEC_DEBUG
1529 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL;
1530 #endif
1531
1532 /*
1533 * Wrap at the last entry!
1534 */
1535 if (rxbd_flags & RXBD_W) {
1536 KASSERT(consumer + 1 == rxq->rxq_last);
1537 consumer = rxq->rxq_first;
1538 } else {
1539 consumer++;
1540 }
1541 #ifdef ETSEC_DEBUG
1542 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1543 #endif
1544 }
1545 }
1546
1547 static void
1548 pq3etsec_rxq_purge(
1549 struct pq3etsec_softc *sc,
1550 struct pq3etsec_rxqueue *rxq,
1551 bool discard)
1552 {
1553 struct mbuf *m;
1554
1555 if ((m = rxq->rxq_mhead) != NULL) {
1556 #ifdef ETSEC_DEBUG
1557 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs));
1558 #endif
1559
1560 if (discard) {
1561 pq3etsec_rx_map_unload(sc, m);
1562 m_freem(m);
1563 } else {
1564 while (m != NULL) {
1565 struct mbuf *m0 = m->m_next;
1566 m->m_next = NULL;
1567 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1568 m = m0;
1569 }
1570 }
1571
1572 }
1573
1574 rxq->rxq_mconsumer = NULL;
1575 rxq->rxq_mhead = NULL;
1576 rxq->rxq_mtail = &rxq->rxq_mhead;
1577 rxq->rxq_inuse = 0;
1578 }
1579
1580 static void
1581 pq3etsec_rxq_reset(
1582 struct pq3etsec_softc *sc,
1583 struct pq3etsec_rxqueue *rxq)
1584 {
1585 /*
1586 * sync all the descriptors
1587 */
1588 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1589 rxq->rxq_last - rxq->rxq_first);
1590
1591 /*
1592 * Make sure we own all descriptors in the ring.
1593 */
1594 volatile struct rxbd *rxbd;
1595 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) {
1596 rxbd->rxbd_flags = RXBD_I;
1597 }
1598
1599 /*
1600 * Last descriptor has the wrap flag.
1601 */
1602 rxbd->rxbd_flags = RXBD_W|RXBD_I;
1603
1604 /*
1605 * Reset the producer consumer indexes.
1606 */
1607 rxq->rxq_consumer = rxq->rxq_first;
1608 rxq->rxq_producer = rxq->rxq_first;
1609 rxq->rxq_inuse = 0;
1610 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
1611 rxq->rxq_threshold = ETSEC_MINRXMBUFS;
1612
1613 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY;
1614
1615 /*
1616 	 * Restart the receive at the first descriptor
1617 */
1618 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
1619 }
1620
1621 static int
1622 pq3etsec_rxq_attach(
1623 struct pq3etsec_softc *sc,
1624 struct pq3etsec_rxqueue *rxq,
1625 u_int qno)
1626 {
1627 size_t map_size = PAGE_SIZE;
1628 size_t desc_count = map_size / sizeof(struct rxbd);
1629 int error;
1630 void *descs;
1631
1632 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1633 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1634 if (error)
1635 return error;
1636
1637 memset(descs, 0, map_size);
1638 rxq->rxq_first = descs;
1639 rxq->rxq_last = rxq->rxq_first + desc_count;
1640 rxq->rxq_consumer = descs;
1641 rxq->rxq_producer = descs;
1642
1643 pq3etsec_rxq_purge(sc, rxq, true);
1644 pq3etsec_rxq_reset(sc, rxq);
1645
1646 rxq->rxq_reg_rbase = RBASEn(qno);
1647 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);
1648
1649 return 0;
1650 }
1651
1652 static bool
1653 pq3etsec_txq_active_p(
1654 struct pq3etsec_softc * const sc,
1655 struct pq3etsec_txqueue *txq)
1656 {
1657 return !IF_IS_EMPTY(&txq->txq_mbufs);
1658 }
1659
1660 static bool
1661 pq3etsec_txq_fillable_p(
1662 struct pq3etsec_softc * const sc,
1663 struct pq3etsec_txqueue *txq)
1664 {
1665 return txq->txq_free >= txq->txq_threshold;
1666 }
1667
1668 static int
1669 pq3etsec_txq_attach(
1670 struct pq3etsec_softc *sc,
1671 struct pq3etsec_txqueue *txq,
1672 u_int qno)
1673 {
1674 size_t map_size = PAGE_SIZE;
1675 size_t desc_count = map_size / sizeof(struct txbd);
1676 int error;
1677 void *descs;
1678
1679 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1680 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1681 if (error)
1682 return error;
1683
1684 memset(descs, 0, map_size);
1685 txq->txq_first = descs;
1686 txq->txq_last = txq->txq_first + desc_count;
1687 txq->txq_consumer = descs;
1688 txq->txq_producer = descs;
1689
1690 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS);
1691
1692 txq->txq_reg_tbase = TBASEn(qno);
1693 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno);
1694
1695 pq3etsec_txq_reset(sc, txq);
1696
1697 return 0;
1698 }
1699
1700 static int
1701 pq3etsec_txq_map_load(
1702 struct pq3etsec_softc *sc,
1703 struct pq3etsec_txqueue *txq,
1704 struct mbuf *m)
1705 {
1706 bus_dmamap_t map;
1707 int error;
1708
1709 map = M_GETCTX(m, bus_dmamap_t);
1710 if (map != NULL)
1711 return 0;
1712
1713 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache);
1714 if (map == NULL)
1715 return ENOMEM;
1716
1717 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1718 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1719 if (error)
1720 return error;
1721
1722 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1723 BUS_DMASYNC_PREWRITE);
1724 M_SETCTX(m, map);
1725 return 0;
1726 }
1727
1728 static void
1729 pq3etsec_txq_map_unload(
1730 struct pq3etsec_softc *sc,
1731 struct pq3etsec_txqueue *txq,
1732 struct mbuf *m)
1733 {
1734 KASSERT(m);
1735 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1736 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1737 BUS_DMASYNC_POSTWRITE);
1738 bus_dmamap_unload(sc->sc_dmat, map);
1739 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map);
1740 }
1741
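/*
 * Turn a DMA-loaded mbuf chain into a chain of transmit buffer descriptors.
 * The ready (R) bit of the first descriptor is set last, behind a write
 * barrier, so the hardware never sees a partially built chain.
 */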
1742 static bool
1743 pq3etsec_txq_produce(
1744 struct pq3etsec_softc *sc,
1745 struct pq3etsec_txqueue *txq,
1746 struct mbuf *m)
1747 {
1748 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1749
1750 if (map->dm_nsegs > txq->txq_free)
1751 return false;
1752
1753 /*
1754 * TCP Offload flag must be set in the first descriptor.
1755 */
1756 volatile struct txbd *producer = txq->txq_producer;
1757 uint16_t last_flags = TXBD_L;
1758 uint16_t first_flags = TXBD_R
1759 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0);
1760
1761 /*
1762 * If we've produced enough descriptors without consuming any
1763 * we need to ask for an interrupt to reclaim some.
1764 */
1765 txq->txq_lastintr += map->dm_nsegs;
1766 if (txq->txq_lastintr >= txq->txq_threshold
1767 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1768 txq->txq_lastintr = 0;
1769 last_flags |= TXBD_I;
1770 }
1771
1772 #ifdef ETSEC_DEBUG
1773 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1774 #endif
1775 KASSERT(producer != txq->txq_last);
1776 producer->txbd_bufptr = map->dm_segs[0].ds_addr;
1777 producer->txbd_len = map->dm_segs[0].ds_len;
1778
1779 if (map->dm_nsegs > 1) {
1780 volatile struct txbd *start = producer + 1;
1781 size_t count = map->dm_nsegs - 1;
1782 for (u_int i = 1; i < map->dm_nsegs; i++) {
1783 if (__predict_false(++producer == txq->txq_last)) {
1784 producer = txq->txq_first;
1785 if (start < txq->txq_last) {
1786 pq3etsec_txq_desc_presync(sc, txq,
1787 start, txq->txq_last - start);
1788 count -= txq->txq_last - start;
1789 }
1790 start = txq->txq_first;
1791 }
1792 #ifdef ETSEC_DEBUG
1793 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1794 #endif
1795 producer->txbd_bufptr = map->dm_segs[i].ds_addr;
1796 producer->txbd_len = map->dm_segs[i].ds_len;
1797 producer->txbd_flags = TXBD_R
1798 | (producer->txbd_flags & TXBD_W)
1799 | (i == map->dm_nsegs - 1 ? last_flags : 0);
1800 #if 0
1801 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first,
1802 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr);
1803 #endif
1804 }
1805 pq3etsec_txq_desc_presync(sc, txq, start, count);
1806 } else {
1807 first_flags |= last_flags;
1808 }
1809
1810 membar_producer();
1811 txq->txq_producer->txbd_flags =
1812 first_flags | (txq->txq_producer->txbd_flags & TXBD_W);
1813 #if 0
1814 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
1815 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags,
1816 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr);
1817 #endif
1818 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1);
1819
1820 /*
1821 * Reduce free count by the number of segments we consumed.
1822 */
1823 txq->txq_free -= map->dm_nsegs;
1824 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1825 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
1826 KASSERT(producer->txbd_flags & TXBD_L);
1827 #ifdef ETSEC_DEBUG
1828 txq->txq_lmbufs[producer - txq->txq_first] = m;
1829 #endif
1830
1831 #if 0
1832 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
1833 __func__, m, m->m_pkthdr.len, map->dm_nsegs,
1834 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
1835 #endif
1836
1837 if (++producer == txq->txq_last)
1838 txq->txq_producer = txq->txq_first;
1839 else
1840 txq->txq_producer = producer;
1841 IF_ENQUEUE(&txq->txq_mbufs, m);
1842
1843 /*
1844 * Restart the transmitter.
1845 */
1846 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */
1847
1848 return true;
1849 }
1850
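/*
 * If checksum offload or VLAN tag insertion was requested, build a transmit
 * FCB and prepend it to the mbuf chain, marking the packet with M_HASFCB so
 * that pq3etsec_txq_produce() sets TXBD_TOE in the first descriptor.
 */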
1851 static void
1852 pq3etsec_tx_offload(
1853 struct pq3etsec_softc *sc,
1854 struct pq3etsec_txqueue *txq,
1855 struct mbuf **mp)
1856 {
1857 struct mbuf *m = *mp;
1858 u_int csum_flags = m->m_pkthdr.csum_flags;
1859 struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m);
1860
1861 KASSERT(m->m_flags & M_PKTHDR);
1862
1863 /*
1864 	 * Let's see if we are doing any offload first.
1865 */
1866 if (csum_flags == 0 && vtag == 0) {
1867 m->m_flags &= ~M_HASFCB;
1868 return;
1869 }
1870
1871 uint16_t flags = 0;
1872 if (csum_flags & M_CSUM_IP) {
1873 flags |= TXFCB_IP
1874 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
1875 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
1876 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
1877 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
1878 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
1879 }
1880 if (vtag) {
1881 flags |= TXFCB_VLN;
1882 }
1883 if (flags == 0) {
1884 m->m_flags &= ~M_HASFCB;
1885 return;
1886 }
1887
1888 struct txfcb fcb;
1889 fcb.txfcb_flags = flags;
1890 if (csum_flags & M_CSUM_IPv4)
1891 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
1892 else
1893 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
1894 fcb.txfcb_l3os = ETHER_HDR_LEN;
1895 fcb.txfcb_phcs = 0;
1896 fcb.txfcb_vlctl = vtag ? VLAN_TAG_VALUE(vtag) & 0xffff : 0;
1897
1898 #if 0
1899 printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n",
1900 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os,
1901 fcb.txfcb_phcs, fcb.txfcb_vlctl);
1902 #endif
1903
1904 if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
1905 m->m_data -= sizeof(fcb);
1906 m->m_len += sizeof(fcb);
1907 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
1908 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
1909 m->m_data = m->m_pktdat;
1910 m->m_len += sizeof(fcb);
1911 } else {
1912 struct mbuf *mn;
1913 MGET(mn, M_DONTWAIT, m->m_type);
1914 if (mn == NULL) {
1915 if (csum_flags & M_CSUM_IP4) {
1916 #ifdef INET
1917 ip_undefer_csum(m, ETHER_HDR_LEN,
1918 csum_flags & M_CSUM_IP4);
1919 #else
1920 panic("%s: impossible M_CSUM flags %#x",
1921 device_xname(sc->sc_dev), csum_flags);
1922 #endif
1923 } else if (csum_flags & M_CSUM_IP6) {
1924 #ifdef INET6
1925 ip6_undefer_csum(m, ETHER_HDR_LEN,
1926 csum_flags & M_CSUM_IP6);
1927 #else
1928 panic("%s: impossible M_CSUM flags %#x",
1929 device_xname(sc->sc_dev), csum_flags);
1930 #endif
1931 } else if (vtag) {
1932 }
1933
1934 m->m_flags &= ~M_HASFCB;
1935 return;
1936 }
1937
1938 M_MOVE_PKTHDR(mn, m);
1939 mn->m_next = m;
1940 m = mn;
1941 MH_ALIGN(m, sizeof(fcb));
1942 m->m_len = sizeof(fcb);
1943 *mp = m;
1944 }
1945 m->m_pkthdr.len += sizeof(fcb);
1946 m->m_flags |= M_HASFCB;
1947 *mtod(m, struct txfcb *) = fcb;
1948 return;
1949 }
1950
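/*
 * Pull packets from the interface send queue (or retry the one stashed in
 * txq_next), prepend any offload FCB, load the DMA map, and hand the
 * resulting segments to the transmit ring.  Returns true once the send
 * queue has been drained, false if we had to stop because the ring or the
 * queued-mbuf list is full.
 */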
1951 static bool
1952 pq3etsec_txq_enqueue(
1953 struct pq3etsec_softc *sc,
1954 struct pq3etsec_txqueue *txq)
1955 {
1956 for (;;) {
1957 if (IF_QFULL(&txq->txq_mbufs))
1958 return false;
1959 struct mbuf *m = txq->txq_next;
1960 if (m == NULL) {
1961 int s = splnet();
1962 IF_DEQUEUE(&sc->sc_if.if_snd, m);
1963 splx(s);
1964 if (m == NULL)
1965 return true;
1966 M_SETCTX(m, NULL);
1967 pq3etsec_tx_offload(sc, txq, &m);
1968 } else {
1969 txq->txq_next = NULL;
1970 }
1971 int error = pq3etsec_txq_map_load(sc, txq, m);
1972 if (error) {
1973 aprint_error_dev(sc->sc_dev,
1974 "discarded packet due to "
1975 "dmamap load failure: %d\n", error);
1976 m_freem(m);
1977 continue;
1978 }
1979 KASSERT(txq->txq_next == NULL);
1980 if (!pq3etsec_txq_produce(sc, txq, m)) {
1981 txq->txq_next = m;
1982 return false;
1983 }
1984 KASSERT(txq->txq_next == NULL);
1985 }
1986 }
1987
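/*
 * Reclaim transmit descriptors the hardware has finished with.  Walk from
 * the consumer index toward the producer, stopping at the first descriptor
 * that still has TXBD_R set; each TXBD_L (last) descriptor marks the end of
 * a packet whose mbuf chain and DMA map can now be released.  Returns true
 * if the ring ended up completely empty, otherwise whether it is fillable.
 */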
1988 static bool
1989 pq3etsec_txq_consume(
1990 struct pq3etsec_softc *sc,
1991 struct pq3etsec_txqueue *txq)
1992 {
1993 struct ifnet * const ifp = &sc->sc_if;
1994 volatile struct txbd *consumer = txq->txq_consumer;
1995 size_t txfree = 0;
1996
1997 #if 0
1998 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
1999 #endif
2000 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);
2001
2002 for (;;) {
2003 if (consumer == txq->txq_producer) {
2004 txq->txq_consumer = consumer;
2005 txq->txq_free += txfree;
2006 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
2007 #if 0
2008			printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
2009 __func__, txfree, txq->txq_free - txfree, txq->txq_free);
2010 #endif
2011 KASSERT(txq->txq_lastintr == 0);
2012 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
2013 return true;
2014 }
2015 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
2016 const uint16_t txbd_flags = consumer->txbd_flags;
2017 if (txbd_flags & TXBD_R) {
2018 txq->txq_consumer = consumer;
2019 txq->txq_free += txfree;
2020 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
2021 #if 0
2022 printf("%s: freed %zu descriptors\n",
2023 __func__, txfree);
2024 #endif
2025 return pq3etsec_txq_fillable_p(sc, txq);
2026 }
2027
2028 /*
2029 * If this is the last descriptor in the chain, get the
2030		 * mbuf, unload its dmamap, and free the mbuf chain itself.
2031 */
2032 if (txbd_flags & TXBD_L) {
2033 struct mbuf *m;
2034
2035 IF_DEQUEUE(&txq->txq_mbufs, m);
2036 #ifdef ETSEC_DEBUG
2037 KASSERTMSG(m == txq->txq_lmbufs[consumer-txq->txq_first],
2038 ("%s: %p [%u]: flags %#x m (%p) != %p (%p)", __func__,
2039 consumer, consumer - txq->txq_first, txbd_flags,
2040 m, &txq->txq_lmbufs[consumer-txq->txq_first],
2041 txq->txq_lmbufs[consumer-txq->txq_first]));
2042 #endif
2043 KASSERT(m);
2044 pq3etsec_txq_map_unload(sc, txq, m);
2045 #if 0
2046 printf("%s: mbuf %p: consumed a %u byte packet\n",
2047 __func__, m, m->m_pkthdr.len);
2048 #endif
2049 if (m->m_flags & M_HASFCB)
2050 m_adj(m, sizeof(struct txfcb));
2051 ifp->if_opackets++;
2052 ifp->if_obytes += m->m_pkthdr.len;
2053 if (m->m_flags & M_MCAST)
2054 ifp->if_omcasts++;
2055 if (txbd_flags & TXBD_ERRORS)
2056 ifp->if_oerrors++;
2057 m_freem(m);
2058 #ifdef ETSEC_DEBUG
2059 txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
2060 #endif
2061 } else {
2062 #ifdef ETSEC_DEBUG
2063 KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
2064 #endif
2065 }
2066
2067 /*
2068		 * We own this descriptor again; clearing all flags (except wrap) is left disabled below.
2069 */
2070 txfree++;
2071 //consumer->txbd_flags = txbd_flags & TXBD_W;
2072
2073 /*
2074 * Wrap at the last entry!
2075 */
2076 if (txbd_flags & TXBD_W) {
2077 KASSERT(consumer + 1 == txq->txq_last);
2078 consumer = txq->txq_first;
2079 } else {
2080 consumer++;
2081 KASSERT(consumer < txq->txq_last);
2082 }
2083 }
2084 }
2085
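/*
 * Drop every packet still held by the transmit queue, including any packet
 * stashed in txq_next, unloading each DMA map before freeing the mbufs.
 * Only legal while the transmitter is disabled (asserted via MACCFG1_TX_EN).
 */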
2086 static void
2087 pq3etsec_txq_purge(
2088 struct pq3etsec_softc *sc,
2089 struct pq3etsec_txqueue *txq)
2090 {
2091 struct mbuf *m;
2092 KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);
2093
2094 for (;;) {
2095 IF_DEQUEUE(&txq->txq_mbufs, m);
2096 if (m == NULL)
2097 break;
2098 pq3etsec_txq_map_unload(sc, txq, m);
2099 m_freem(m);
2100 }
2101 if ((m = txq->txq_next) != NULL) {
2102 txq->txq_next = NULL;
2103 pq3etsec_txq_map_unload(sc, txq, m);
2104 m_freem(m);
2105 }
2106 #ifdef ETSEC_DEBUG
2107 memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
2108 #endif
2109 }
2110
2111 static void
2112 pq3etsec_txq_reset(
2113 struct pq3etsec_softc *sc,
2114 struct pq3etsec_txqueue *txq)
2115 {
2116 /*
2117	 * Sync all the descriptors.
2118 */
2119 pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
2120 txq->txq_last - txq->txq_first);
2121
2122 /*
2123 * Make sure we own all descriptors in the ring.
2124 */
2125 volatile struct txbd *txbd;
2126 for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
2127 txbd->txbd_flags = 0;
2128 }
2129
2130 /*
2131 * Last descriptor has the wrap flag.
2132 */
2133 txbd->txbd_flags = TXBD_W;
2134
2135 /*
2136	 * Reset the producer and consumer indexes.
2137 */
2138 txq->txq_consumer = txq->txq_first;
2139 txq->txq_producer = txq->txq_first;
2140 txq->txq_free = txq->txq_last - txq->txq_first - 1;
2141 txq->txq_threshold = txq->txq_free / 2;
2142 txq->txq_lastintr = 0;
2143
2144 /*
2145 * What do we want to get interrupted on?
2146 */
2147 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE;
2148
2149 /*
2150	 * Restart the transmitter at the first descriptor.
2151 */
2152 etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
2153 }
2154
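/*
 * if_start handler: all the real work happens in pq3etsec_soft_intr()
 * under sc_lock, so just note that transmit work is pending and schedule
 * the soft interrupt.
 */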
2155 static void
2156 pq3etsec_ifstart(struct ifnet *ifp)
2157 {
2158 struct pq3etsec_softc * const sc = ifp->if_softc;
2159
2160 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2161 softint_schedule(sc->sc_soft_ih);
2162 }
2163
2164 static void
2165 pq3etsec_tx_error(
2166 struct pq3etsec_softc * const sc)
2167 {
2168 struct pq3etsec_txqueue * const txq = &sc->sc_txq;
2169
2170 pq3etsec_txq_consume(sc, txq);
2171
2172 if (pq3etsec_txq_fillable_p(sc, txq))
2173 sc->sc_if.if_flags &= ~IFF_OACTIVE;
2174 if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) {
2175 } else if (sc->sc_txerrors & IEVENT_EBERR) {
2176 }
2177
2178 if (pq3etsec_txq_active_p(sc, txq))
2179 etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
2180 if (!pq3etsec_txq_enqueue(sc, txq)) {
2181 sc->sc_ev_tx_stall.ev_count++;
2182 sc->sc_if.if_flags |= IFF_OACTIVE;
2183 }
2184
2185 sc->sc_txerrors = 0;
2186 }
2187
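/*
 * Hard interrupt for transmit completion: acknowledge the TXF/TXB events
 * (IEVENT is write-one-to-clear), mask them in IMASK, and defer the ring
 * processing to the soft interrupt, which re-enables TXF once it has
 * consumed the completed descriptors.
 */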
2188 int
2189 pq3etsec_tx_intr(void *arg)
2190 {
2191 struct pq3etsec_softc * const sc = arg;
2192
2193 sc->sc_ev_tx_intr.ev_count++;
2194
2195 uint32_t ievent = etsec_read(sc, IEVENT);
2196 ievent &= IEVENT_TXF|IEVENT_TXB;
2197 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2198
2199 #if 0
2200 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2201 __func__, ievent, etsec_read(sc, IMASK));
2202 #endif
2203
2204 if (ievent == 0)
2205 return 0;
2206
2207 sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB);
2208 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2209 etsec_write(sc, IMASK, sc->sc_imask);
2210 softint_schedule(sc->sc_soft_ih);
2211 return 1;
2212 }
2213
2214 int
2215 pq3etsec_rx_intr(void *arg)
2216 {
2217 struct pq3etsec_softc * const sc = arg;
2218
2219 sc->sc_ev_rx_intr.ev_count++;
2220
2221 uint32_t ievent = etsec_read(sc, IEVENT);
2222 ievent &= IEVENT_RXF|IEVENT_RXB;
2223 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2224 if (ievent == 0)
2225 return 0;
2226
2227 #if 0
2228 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
2229 #endif
2230
2231 sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB);
2232 atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
2233 etsec_write(sc, IMASK, sc->sc_imask);
2234 softint_schedule(sc->sc_soft_ih);
2235 return 1;
2236 }
2237
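/*
 * Hard interrupt for everything other than normal RX/TX completion.  Keep
 * reading and acknowledging IEVENT until no more bits are pending: MII and
 * graceful-stop completions just wake their waiters, while BSY, TXE and DPE
 * are recorded in soft_flags and handed to the soft interrupt.
 */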
2238 int
2239 pq3etsec_error_intr(void *arg)
2240 {
2241 struct pq3etsec_softc * const sc = arg;
2242
2243 sc->sc_ev_error_intr.ev_count++;
2244
2245 for (int rv = 0, soft_flags = 0;; rv = 1) {
2246 uint32_t ievent = etsec_read(sc, IEVENT);
2247 ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB);
2248 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2249 if (ievent == 0) {
2250 if (soft_flags) {
2251 atomic_or_uint(&sc->sc_soft_flags, soft_flags);
2252 softint_schedule(sc->sc_soft_ih);
2253 }
2254 return rv;
2255 }
2256 #if 0
2257 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2258 __func__, ievent, etsec_read(sc, IMASK));
2259 #endif
2260
2261 if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) {
2262 sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC);
2263 etsec_write(sc, IMASK, sc->sc_imask);
2264 wakeup(sc);
2265 }
2266 if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) {
2267 sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR);
2268 etsec_write(sc, IMASK, sc->sc_imask);
2269 wakeup(&sc->sc_mii);
2270 }
2271 if (ievent & IEVENT_BSY) {
2272 soft_flags |= SOFT_RXBSY;
2273 sc->sc_imask &= ~IEVENT_BSY;
2274 etsec_write(sc, IMASK, sc->sc_imask);
2275 }
2276 if (ievent & IEVENT_TXE) {
2277 soft_flags |= SOFT_TXERROR;
2278 sc->sc_imask &= ~IEVENT_TXE;
2279 sc->sc_txerrors |= ievent;
2280 }
2281 if (ievent & IEVENT_TXC) {
2282 sc->sc_ev_tx_pause.ev_count++;
2283 }
2284 if (ievent & IEVENT_RXC) {
2285 sc->sc_ev_rx_pause.ev_count++;
2286 }
2287 if (ievent & IEVENT_DPE) {
2288 soft_flags |= SOFT_RESET;
2289 sc->sc_imask &= ~IEVENT_DPE;
2290 etsec_write(sc, IMASK, sc->sc_imask);
2291 }
2292 }
2293 }
2294
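/*
 * Soft interrupt: runs under sc_lock and dispatches on the flags collected
 * by the hard interrupt handlers: reinitialize after a fatal error, grow
 * the receive threshold after a busy event, reclaim and refill the transmit
 * ring, and drain the receive ring, then re-enable the deferred interrupts
 * if the interface is still running.
 */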
2295 void
2296 pq3etsec_soft_intr(void *arg)
2297 {
2298 struct pq3etsec_softc * const sc = arg;
2299 struct ifnet * const ifp = &sc->sc_if;
2300
2301 mutex_enter(sc->sc_lock);
2302
2303 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
2304
2305 sc->sc_ev_soft_intr.ev_count++;
2306
2307 if (soft_flags & SOFT_RESET) {
2308 int s = splnet();
2309 pq3etsec_ifinit(ifp);
2310 splx(s);
2311 soft_flags = 0;
2312 }
2313
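	/*
	 * A busy (out of receive buffers) event: grow the receive replenish
	 * threshold by 25%, capping it one short of the ring size; the BSY
	 * interrupt is only re-enabled when that cap was not hit.
	 */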
2314 if (soft_flags & SOFT_RXBSY) {
2315 struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
2316 size_t threshold = 5 * rxq->rxq_threshold / 4;
2317 if (threshold >= rxq->rxq_last - rxq->rxq_first) {
2318 threshold = rxq->rxq_last - rxq->rxq_first - 1;
2319 } else {
2320 sc->sc_imask |= IEVENT_BSY;
2321 }
2322 aprint_normal_dev(sc->sc_dev,
2323 "increasing receive buffers from %zu to %zu\n",
2324 rxq->rxq_threshold, threshold);
2325 rxq->rxq_threshold = threshold;
2326 }
2327
2328 if ((soft_flags & SOFT_TXINTR)
2329 || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
2330 /*
2331 * Let's do what we came here for. Consume transmitted
2332		 * packets off the transmit ring.
2333 */
2334 if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
2335 || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
2336 sc->sc_ev_tx_stall.ev_count++;
2337 ifp->if_flags |= IFF_OACTIVE;
2338 } else {
2339 ifp->if_flags &= ~IFF_OACTIVE;
2340 }
2341 sc->sc_imask |= IEVENT_TXF;
2342 }
2343
2344 if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) {
2345 /*
2346		 * Consume received packets off the receive ring.
2347 */
2348 pq3etsec_rxq_consume(sc, &sc->sc_rxq);
2349 sc->sc_imask |= IEVENT_RXF;
2350 }
2351
2352 if (soft_flags & SOFT_TXERROR) {
2353 pq3etsec_tx_error(sc);
2354 sc->sc_imask |= IEVENT_TXE;
2355 }
2356
2357 if (ifp->if_flags & IFF_RUNNING) {
2358 pq3etsec_rxq_produce(sc, &sc->sc_rxq);
2359 etsec_write(sc, IMASK, sc->sc_imask);
2360 } else {
2361 KASSERT((soft_flags & SOFT_RXBSY) == 0);
2362 }
2363
2364 mutex_exit(sc->sc_lock);
2365 }
2366
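/*
 * Once-a-second MII tick, run from a callout that reschedules itself.  In
 * DEBUG kernels the timebase delta since the previous tick is compared
 * against cpu_timebase (timebase ticks per second) and an early firing is
 * logged.  A pending SOFT_RESET also gets the soft interrupt kicked here.
 */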
2367 static void
2368 pq3etsec_mii_tick(void *arg)
2369 {
2370 struct pq3etsec_softc * const sc = arg;
2371 mutex_enter(sc->sc_lock);
2372 callout_ack(&sc->sc_mii_callout);
2373 sc->sc_ev_mii_ticks.ev_count++;
2374 #ifdef DEBUG
2375 uint64_t now = mftb();
2376 if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
2377 aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
2378 __func__, now - sc->sc_mii_last_tick);
2379 callout_stop(&sc->sc_mii_callout);
2380 }
2381 #endif
2382 mii_tick(&sc->sc_mii);
2383 int s = splnet();
2384 if (sc->sc_soft_flags & SOFT_RESET)
2385 softint_schedule(sc->sc_soft_ih);
2386 splx(s);
2387 callout_schedule(&sc->sc_mii_callout, hz);
2388 #ifdef DEBUG
2389 sc->sc_mii_last_tick = now;
2390 #endif
2391 mutex_exit(sc->sc_lock);
2392 }
2393