     1 /* $NetBSD: pq3etsec.c,v 1.9.8.1 2012/05/17 18:09:44 riz Exp $ */
2 /*-
3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
8 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
9 *
10 * This material is based upon work supported by the Defense Advanced Research
11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
12 * Contract No. N66001-09-C-2073.
13 * Approved for Public Release, Distribution Unlimited
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "opt_inet.h"
38
39 #include <sys/cdefs.h>
40
41 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.9.8.1 2012/05/17 18:09:44 riz Exp $");
42
43 #include <sys/param.h>
44 #include <sys/cpu.h>
45 #include <sys/device.h>
46 #include <sys/mbuf.h>
47 #include <sys/ioctl.h>
48 #include <sys/intr.h>
49 #include <sys/bus.h>
50 #include <sys/kernel.h>
51 #include <sys/kmem.h>
52 #include <sys/proc.h>
53 #include <sys/atomic.h>
54 #include <sys/callout.h>
55
56 #include <net/if.h>
57 #include <net/if_dl.h>
58 #include <net/if_ether.h>
59 #include <net/if_media.h>
60
61 #include <dev/mii/miivar.h>
62
63 #include "ioconf.h"
64
65 #include <net/bpf.h>
66
67 #ifdef INET
68 #include <netinet/in.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/ip.h>
71 #include <netinet/in_offload.h>
72 #endif /* INET */
73 #ifdef INET6
74 #include <netinet6/in6.h>
75 #include <netinet/ip6.h>
76 #endif
77 #include <netinet6/in6_offload.h>
78
79
80 #include <powerpc/spr.h>
81 #include <powerpc/booke/spr.h>
82
83 #include <powerpc/booke/cpuvar.h>
84 #include <powerpc/booke/e500var.h>
85 #include <powerpc/booke/e500reg.h>
86 #include <powerpc/booke/etsecreg.h>
87
88 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */
89
90 #define ETSEC_MAXTXMBUFS 30
91 #define ETSEC_NTXSEGS 30
92 #define ETSEC_MAXRXMBUFS 511
93 #define ETSEC_MINRXMBUFS 32
94 #define ETSEC_NRXSEGS 1
95
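/*
 * Map MI interface capability bits onto the RCTRL/TCTRL checksum-enable
 * bits they control; pq3etsec_ifinit() tests these against if_capenable.
 */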
96 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx
97 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx\
98 |IFCAP_CSUM_UDPv4_Rx\
99 |IFCAP_CSUM_TCPv6_Rx\
100 |IFCAP_CSUM_UDPv6_Rx)
101
102 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx
103 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx\
104 |IFCAP_CSUM_UDPv4_Tx\
105 |IFCAP_CSUM_TCPv6_Tx\
106 |IFCAP_CSUM_UDPv6_Tx)
107
108 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN\
109 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN)
110
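/*
 * Driver-local groupings of the MI M_CSUM_* flags; pq3etsec_tx_offload()
 * uses them to translate mbuf checksum requests into TXFCB_* bits.
 */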
111 #define M_CSUM_IP (M_CSUM_CIP|M_CSUM_CTU)
112 #define M_CSUM_IP6 (M_CSUM_TCPv6|M_CSUM_UDPv6)
113 #define M_CSUM_TUP (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
114 #define M_CSUM_UDP (M_CSUM_UDPv4|M_CSUM_UDPv6)
115 #define M_CSUM_IP4 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4)
116 #define M_CSUM_CIP (M_CSUM_IPv4)
117 #define M_CSUM_CTU (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
118
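/*
 * Tx ring state: txq_producer/txq_consumer walk the descriptor ring between
 * txq_first and txq_last, txq_free counts unused descriptors, and
 * txq_lastintr/txq_threshold decide when pq3etsec_txq_produce() sets TXBD_I
 * to request a transmit-completion interrupt.
 */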
119 struct pq3etsec_txqueue {
120 bus_dmamap_t txq_descmap;
121 volatile struct txbd *txq_consumer;
122 volatile struct txbd *txq_producer;
123 volatile struct txbd *txq_first;
124 volatile struct txbd *txq_last;
125 struct ifqueue txq_mbufs;
126 struct mbuf *txq_next;
127 #ifdef ETSEC_DEBUG
128 struct mbuf *txq_lmbufs[512];
129 #endif
130 uint32_t txq_qmask;
131 uint32_t txq_free;
132 uint32_t txq_threshold;
133 uint32_t txq_lastintr;
134 bus_size_t txq_reg_tbase;
135 bus_dma_segment_t txq_descmap_seg;
136 };
137
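/*
 * Rx ring state: rxq_mhead/rxq_mtail/rxq_mconsumer keep a chain of mbufs
 * that parallels the descriptor ring, so the mbuf belonging to a completed
 * descriptor can be found without a per-slot array (except under
 * ETSEC_DEBUG).
 */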
138 struct pq3etsec_rxqueue {
139 bus_dmamap_t rxq_descmap;
140 volatile struct rxbd *rxq_consumer;
141 volatile struct rxbd *rxq_producer;
142 volatile struct rxbd *rxq_first;
143 volatile struct rxbd *rxq_last;
144 struct mbuf *rxq_mhead;
145 struct mbuf **rxq_mtail;
146 struct mbuf *rxq_mconsumer;
147 #ifdef ETSEC_DEBUG
148 struct mbuf *rxq_mbufs[512];
149 #endif
150 uint32_t rxq_qmask;
151 uint32_t rxq_inuse;
152 uint32_t rxq_threshold;
153 bus_size_t rxq_reg_rbase;
154 bus_size_t rxq_reg_rbptr;
155 bus_dma_segment_t rxq_descmap_seg;
156 };
157
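/*
 * A simple stack of preallocated bus_dmamaps: pq3etsec_mapcache_get() pops
 * one (creating a fresh map only when the cache is empty) and
 * pq3etsec_mapcache_put() pushes it back for reuse.
 */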
158 struct pq3etsec_mapcache {
159 u_int dmc_nmaps;
160 u_int dmc_maxseg;
161 u_int dmc_maxmaps;
162 u_int dmc_maxmapsize;
163 bus_dmamap_t dmc_maps[0];
164 };
165
166 struct pq3etsec_softc {
167 device_t sc_dev;
168 struct ethercom sc_ec;
169 #define sc_if sc_ec.ec_if
170 struct mii_data sc_mii;
171 bus_space_tag_t sc_bst;
172 bus_space_handle_t sc_bsh;
173 bus_dma_tag_t sc_dmat;
174 int sc_phy_addr;
175 prop_dictionary_t sc_intrmap;
176 uint32_t sc_intrmask;
177
178 uint32_t sc_soft_flags;
179 #define SOFT_RESET 0x0001
180 #define SOFT_RXINTR 0x0010
181 #define SOFT_RXBSY 0x0020
182 #define SOFT_TXINTR 0x0100
183 #define SOFT_TXERROR 0x0200
184
185 struct pq3etsec_txqueue sc_txq;
186 struct pq3etsec_rxqueue sc_rxq;
187 uint32_t sc_txerrors;
188 uint32_t sc_rxerrors;
189
190 size_t sc_rx_adjlen;
191
192 /*
193 * Copies of various ETSEC registers.
194 */
195 uint32_t sc_imask;
196 uint32_t sc_maccfg1;
197 uint32_t sc_maccfg2;
198 uint32_t sc_maxfrm;
199 uint32_t sc_ecntrl;
200 uint32_t sc_dmactrl;
201 uint32_t sc_macstnaddr1;
202 uint32_t sc_macstnaddr2;
203 uint32_t sc_tctrl;
204 uint32_t sc_rctrl;
205 uint32_t sc_gaddr[16];
206 uint64_t sc_macaddrs[15];
207
208 void *sc_tx_ih;
209 void *sc_rx_ih;
210 void *sc_error_ih;
211 void *sc_soft_ih;
212
213 kmutex_t *sc_lock;
214
215 struct evcnt sc_ev_tx_stall;
216 struct evcnt sc_ev_tx_intr;
217 struct evcnt sc_ev_rx_stall;
218 struct evcnt sc_ev_rx_intr;
219 struct evcnt sc_ev_error_intr;
220 struct evcnt sc_ev_soft_intr;
221 struct evcnt sc_ev_tx_pause;
222 struct evcnt sc_ev_rx_pause;
223 struct evcnt sc_ev_mii_ticks;
224
225 struct callout sc_mii_callout;
226 uint64_t sc_mii_last_tick;
227
228 struct ifqueue sc_rx_bufcache;
229 struct pq3etsec_mapcache *sc_rx_mapcache;
230 struct pq3etsec_mapcache *sc_tx_mapcache;
231 };
232
233 static int pq3etsec_match(device_t, cfdata_t, void *);
234 static void pq3etsec_attach(device_t, device_t, void *);
235
236 static void pq3etsec_ifstart(struct ifnet *);
237 static void pq3etsec_ifwatchdog(struct ifnet *);
238 static int pq3etsec_ifinit(struct ifnet *);
239 static void pq3etsec_ifstop(struct ifnet *, int);
240 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *);
241
242 static int pq3etsec_mapcache_create(struct pq3etsec_softc *,
243 struct pq3etsec_mapcache **, size_t, size_t, size_t, size_t);
244 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *,
245 struct pq3etsec_mapcache *);
246 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *,
247 struct pq3etsec_mapcache *);
248 static void pq3etsec_mapcache_put(struct pq3etsec_softc *,
249 struct pq3etsec_mapcache *, bus_dmamap_t);
250
251 static int pq3etsec_txq_attach(struct pq3etsec_softc *,
252 struct pq3etsec_txqueue *, u_int);
253 static void pq3etsec_txq_purge(struct pq3etsec_softc *,
254 struct pq3etsec_txqueue *);
255 static void pq3etsec_txq_reset(struct pq3etsec_softc *,
256 struct pq3etsec_txqueue *);
257 static bool pq3etsec_txq_consume(struct pq3etsec_softc *,
258 struct pq3etsec_txqueue *);
259 static bool pq3etsec_txq_produce(struct pq3etsec_softc *,
260 struct pq3etsec_txqueue *, struct mbuf *m);
261 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *,
262 struct pq3etsec_txqueue *);
263
264 static int pq3etsec_rxq_attach(struct pq3etsec_softc *,
265 struct pq3etsec_rxqueue *, u_int);
266 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *,
267 struct pq3etsec_rxqueue *);
268 static void pq3etsec_rxq_purge(struct pq3etsec_softc *,
269 struct pq3etsec_rxqueue *, bool);
270 static void pq3etsec_rxq_reset(struct pq3etsec_softc *,
271 struct pq3etsec_rxqueue *);
272
273 static void pq3etsec_mc_setup(struct pq3etsec_softc *);
274
275 static void pq3etsec_mii_tick(void *);
276 static int pq3etsec_rx_intr(void *);
277 static int pq3etsec_tx_intr(void *);
278 static int pq3etsec_error_intr(void *);
279 static void pq3etsec_soft_intr(void *);
280
281 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc),
282 pq3etsec_match, pq3etsec_attach, NULL, NULL);
283
284 static int
285 pq3etsec_match(device_t parent, cfdata_t cf, void *aux)
286 {
287
288 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
289 return 0;
290
291 return 1;
292 }
293
294 static inline uint32_t
295 etsec_read(struct pq3etsec_softc *sc, bus_size_t off)
296 {
297 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
298 }
299
300 static inline void
301 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data)
302 {
303 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data);
304 }
305
306 static int
307 pq3etsec_mii_readreg(device_t self, int phy, int reg)
308 {
309 struct pq3etsec_softc * const sc = device_private(self);
310 uint32_t miimcom = etsec_read(sc, MIIMCOM);
311
312 // int s = splnet();
313
314 etsec_write(sc, MIIMADD,
315 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
316
317 etsec_write(sc, IEVENT, IEVENT_MMRD);
318 etsec_write(sc, MIIMCOM, 0); /* clear any past bits */
319 etsec_write(sc, MIIMCOM, MIIMCOM_READ);
320 #if 0
321 sc->sc_imask |= IEVENT_MMRD;
322 etsec_write(sc, IMASK, sc->sc_imask);
323 #endif
324
325 while (etsec_read(sc, MIIMIND) != 0) {
326 delay(1);
327 }
328 int data = etsec_read(sc, MIIMSTAT);
329
330 if (miimcom == MIIMCOM_SCAN)
331 etsec_write(sc, MIIMCOM, miimcom);
332
333 #if 0
334 aprint_normal_dev(sc->sc_dev, "%s: phy %d reg %d: %#x\n",
335 __func__, phy, reg, data);
336 #endif
337 etsec_write(sc, IEVENT, IEVENT_MMRD);
338 // splx(s);
339 return data;
340 }
341
342 static void
343 pq3etsec_mii_writereg(device_t self, int phy, int reg, int data)
344 {
345 struct pq3etsec_softc * const sc = device_private(self);
346 uint32_t miimcom = etsec_read(sc, MIIMCOM);
347
348 #if 0
349 aprint_normal_dev(sc->sc_dev, "%s: phy %d reg %d: %#x\n",
350 __func__, phy, reg, data);
351 #endif
352
353 // int s = splnet();
354 etsec_write(sc, IEVENT, IEVENT_MMWR);
355 etsec_write(sc, MIIMADD,
356 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
357 etsec_write(sc, MIIMCOM, 0); /* clear any past bits */
358 etsec_write(sc, MIIMCON, data);
359
360 #if 0
361 sc->sc_imask |= IEVENT_MMWR;
362 etsec_write(sc, IMASK, sc->sc_imask);
363 #endif
364
365 int timo = 1000; /* 1ms */
366 while ((etsec_read(sc, MIIMIND) & MIIMIND_BUSY) && --timo > 0) {
367 delay(1);
368 }
369
370 if (miimcom == MIIMCOM_SCAN)
371 etsec_write(sc, MIIMCOM, miimcom);
372 etsec_write(sc, IEVENT, IEVENT_MMWR);
373 // splx(s);
374 }
375
376 static void
377 pq3etsec_mii_statchg(device_t self)
378 {
379 struct pq3etsec_softc * const sc = device_private(self);
380 struct mii_data * const mii = &sc->sc_mii;
381
382 uint32_t maccfg1 = sc->sc_maccfg1;
383 uint32_t maccfg2 = sc->sc_maccfg2;
384 uint32_t ecntrl = sc->sc_ecntrl;
385
386 maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW);
387 maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD);
388
389 if (sc->sc_mii.mii_media_active & IFM_FDX) {
390 maccfg2 |= MACCFG2_FD;
391 }
392
393 /*
394 * Now deal with the flow control bits.
395 */
396 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO
397 && (mii->mii_media_active & IFM_ETH_FMASK)) {
398 if (mii->mii_media_active & IFM_ETH_RXPAUSE)
399 maccfg1 |= MACCFG1_RX_FLOW;
400 if (mii->mii_media_active & IFM_ETH_TXPAUSE)
401 maccfg1 |= MACCFG1_TX_FLOW;
402 }
403
404 /*
405 * Now deal with the speed.
406 */
407 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
408 maccfg2 |= MACCFG2_IFMODE_GMII;
409 } else {
410 maccfg2 |= MACCFG2_IFMODE_MII;
411 ecntrl &= ~ECNTRL_R100M;
412 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
413 ecntrl |= ECNTRL_R100M;
414 }
415 }
416
417 /*
418 * If things are different, re-init things.
419 */
420 if (maccfg1 != sc->sc_maccfg1
421 || maccfg2 != sc->sc_maccfg2
422 || ecntrl != sc->sc_ecntrl) {
423 if (sc->sc_if.if_flags & IFF_RUNNING)
424 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
425 sc->sc_maccfg1 = maccfg1;
426 sc->sc_maccfg2 = maccfg2;
427 sc->sc_ecntrl = ecntrl;
428 }
429 }
430
431 #if 0
432 static void
433 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
434 {
435 struct pq3etsec_softc * const sc = ifp->if_softc;
436
437 mii_pollstat(&sc->sc_mii);
438 ether_mediastatus(ifp, ifmr);
439 ifmr->ifm_status = sc->sc_mii.mii_media_status;
440 ifmr->ifm_active = sc->sc_mii.mii_media_active;
441 }
442
443 static int
444 pq3etsec_mediachange(struct ifnet *ifp)
445 {
446 struct pq3etsec_softc * const sc = ifp->if_softc;
447
448 if ((ifp->if_flags & IFF_UP) == 0)
449 return 0;
450
451 int rv = mii_mediachg(&sc->sc_mii);
452 return (rv == ENXIO) ? 0 : rv;
453 }
454 #endif
455
456 static void
457 pq3etsec_attach(device_t parent, device_t self, void *aux)
458 {
459 struct cpunode_softc * const psc = device_private(parent);
460 struct pq3etsec_softc * const sc = device_private(self);
461 struct cpunode_attach_args * const cna = aux;
462 struct cpunode_locators * const cnl = &cna->cna_locs;
463 cfdata_t cf = device_cfdata(self);
464 int error;
465
466 psc->sc_children |= cna->cna_childmask;
467 sc->sc_dev = self;
468 sc->sc_bst = cna->cna_memt;
469 sc->sc_dmat = &booke_bus_dma_tag;
470
471 /*
   472 	 * If we have a common MDIO bus, hang it all off instance 1.
473 */
474 device_t miiself = (cf->cf_flags & 0x100) ? tsec_cd.cd_devs[0] : self;
475
476 /*
477 * See if the phy is in the config file...
478 */
479 if (cf->cf_flags & 0x3f) {
480 sc->sc_phy_addr = (cf->cf_flags & 0x3f) - 1;
481 } else {
   482 		char prop_name[20];
483 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
484 cnl->cnl_instance);
485 sc->sc_phy_addr = board_info_get_number(prop_name);
486 }
487 if (sc->sc_phy_addr != MII_PHY_ANY)
488 aprint_normal(" phy %d", sc->sc_phy_addr);
489
490 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
491 &sc->sc_bsh);
492 if (error) {
493 aprint_error(": error mapping registers: %d\n", error);
494 return;
495 }
496
497 /*
   498 	 * Assume the firmware has already set the mac address and fetch it
499 * before we reinit it.
500 */
501 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2);
502 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1);
503 sc->sc_rctrl = RCTRL_DEFAULT;
504 sc->sc_ecntrl = etsec_read(sc, ECNTRL);
505 sc->sc_maccfg1 = etsec_read(sc, MACCFG1);
506 sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT;
507
508 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) {
509 size_t len;
510 const uint8_t *mac_addr =
511 board_info_get_data("tsec-mac-addr-base", &len);
512 KASSERT(len == ETHER_ADDR_LEN);
513 sc->sc_macstnaddr2 =
514 (mac_addr[1] << 24)
515 | (mac_addr[0] << 16);
516 sc->sc_macstnaddr1 =
517 ((mac_addr[5] + cnl->cnl_instance - 1) << 24)
518 | (mac_addr[4] << 16)
519 | (mac_addr[3] << 8)
520 | (mac_addr[2] << 0);
521 #if 0
522 aprint_error(": mac-address unknown\n");
523 return;
524 #endif
525 }
526
527 char enaddr[ETHER_ADDR_LEN] = {
528 [0] = sc->sc_macstnaddr2 >> 16,
529 [1] = sc->sc_macstnaddr2 >> 24,
530 [2] = sc->sc_macstnaddr1 >> 0,
531 [3] = sc->sc_macstnaddr1 >> 8,
532 [4] = sc->sc_macstnaddr1 >> 16,
533 [5] = sc->sc_macstnaddr1 >> 24,
534 };
535
536 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0);
537 if (error) {
538 aprint_error(": failed to init rxq: %d\n", error);
539 return;
540 }
541
542 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
543 if (error) {
544 aprint_error(": failed to init txq: %d\n", error);
545 return;
546 }
547
548 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache,
549 ETSEC_MAXRXMBUFS, ETSEC_MINRXMBUFS, MCLBYTES, ETSEC_NRXSEGS);
550 if (error) {
551 aprint_error(": failed to allocate rx dmamaps: %d\n", error);
552 return;
553 }
554
555 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
556 ETSEC_MAXTXMBUFS, ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS);
557 if (error) {
558 aprint_error(": failed to allocate tx dmamaps: %d\n", error);
559 return;
560 }
561
562 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP,
563 pq3etsec_tx_intr, sc);
564 if (sc->sc_tx_ih == NULL) {
565 aprint_error(": failed to establish tx interrupt: %d\n",
566 cnl->cnl_intrs[0]);
567 return;
568 }
569
570 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP,
571 pq3etsec_rx_intr, sc);
572 if (sc->sc_rx_ih == NULL) {
573 aprint_error(": failed to establish rx interrupt: %d\n",
574 cnl->cnl_intrs[1]);
575 return;
576 }
577
578 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP,
579 pq3etsec_error_intr, sc);
580 if (sc->sc_error_ih == NULL) {
581 aprint_error(": failed to establish error interrupt: %d\n",
582 cnl->cnl_intrs[2]);
583 return;
584 }
585
586 sc->sc_soft_ih = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
587 pq3etsec_soft_intr, sc);
588 if (sc->sc_soft_ih == NULL) {
589 aprint_error(": failed to establish soft interrupt\n");
590 return;
591 }
592
593 aprint_normal("\n");
594
595 etsec_write(sc, ATTR, ATTR_DEFAULT);
596 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT);
597
598 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
599
600 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE);
601 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc);
602
603 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
604 ether_sprintf(enaddr));
605
606 const char * const xname = device_xname(sc->sc_dev);
607 struct ethercom * const ec = &sc->sc_ec;
608 struct ifnet * const ifp = &ec->ec_if;
609
610 ec->ec_mii = &sc->sc_mii;
611
612 sc->sc_mii.mii_ifp = ifp;
613 sc->sc_mii.mii_readreg = pq3etsec_mii_readreg;
614 sc->sc_mii.mii_writereg = pq3etsec_mii_writereg;
615 sc->sc_mii.mii_statchg = pq3etsec_mii_statchg;
616
617 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
618 ether_mediastatus);
619
620 if (sc->sc_phy_addr < 32) {
621 mii_attach(miiself, &sc->sc_mii, 0xffffffff,
622 sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
623
624 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
625 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
626 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
627 } else {
628 callout_schedule(&sc->sc_mii_callout, hz);
629 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
630 }
631 } else {
632 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
633 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
634 }
635
636 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
637 | ETHERCAP_JUMBO_MTU;
638
639 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
640 ifp->if_softc = sc;
641 ifp->if_capabilities = IFCAP_ETSEC;
642 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
643 ifp->if_ioctl = pq3etsec_ifioctl;
644 ifp->if_start = pq3etsec_ifstart;
645 ifp->if_watchdog = pq3etsec_ifwatchdog;
646 ifp->if_init = pq3etsec_ifinit;
647 ifp->if_stop = pq3etsec_ifstop;
648 IFQ_SET_READY(&ifp->if_snd);
649
650 pq3etsec_ifstop(ifp, true);
651
652 /*
653 * Attach the interface.
654 */
655 if_attach(ifp);
656 ether_ifattach(ifp, enaddr);
657
658 evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
659 NULL, xname, "rx stall");
660 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
661 NULL, xname, "tx stall");
662 evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
663 NULL, xname, "tx intr");
664 evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
665 NULL, xname, "rx intr");
666 evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
667 NULL, xname, "error intr");
668 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
669 NULL, xname, "soft intr");
670 evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
671 NULL, xname, "tx pause");
672 evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
673 NULL, xname, "rx pause");
674 evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
675 NULL, xname, "mii ticks");
676 }
677
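/*
 * Pack a 6-byte Ethernet address into the 64-bit layout used for the
 * MACSTNADDR1/MACSTNADDR2 (and MACnADDR1/2) register pair.  For example,
 * the (hypothetical) address 00:04:9f:01:02:03 becomes 0x0302019f04000000,
 * i.e. MACSTNADDR1 = 0x0302019f and MACSTNADDR2 = 0x04000000, matching the
 * byte order used when the address is read back in pq3etsec_attach().
 */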
678 static uint64_t
679 pq3etsec_macaddr_create(const uint8_t *lladdr)
680 {
681 uint64_t macaddr = 0;
682
683 lladdr += ETHER_ADDR_LEN;
684 for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
685 macaddr = (macaddr << 8) | *--lladdr;
686 }
687 return macaddr << 16;
688 }
689
690 static int
691 pq3etsec_ifinit(struct ifnet *ifp)
692 {
693 struct pq3etsec_softc * const sc = ifp->if_softc;
694 int error = 0;
695
696 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
697 if (ifp->if_mtu > ETHERMTU_JUMBO)
698 return error;
699
700 KASSERT(ifp->if_flags & IFF_UP);
701
702 /*
   703 	 * Stop the interface (steps 1 to 4 in the Soft Reset and
   704 	 * Reconfiguration Procedure).
705 */
706 pq3etsec_ifstop(ifp, 0);
707
708 /*
709 * If our frame size has changed (or it's our first time through)
710 * destroy the existing transmit mapcache.
711 */
712 if (sc->sc_tx_mapcache != NULL
713 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
714 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
715 sc->sc_tx_mapcache = NULL;
716 }
717
718 if (sc->sc_tx_mapcache == NULL) {
719 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
720 ETSEC_MAXTXMBUFS, ETSEC_MAXTXMBUFS, sc->sc_maxfrm,
721 ETSEC_NTXSEGS);
722 if (error)
723 return error;
724 }
725
726 sc->sc_ev_mii_ticks.ev_count++;
727 mii_tick(&sc->sc_mii);
728
729 if (ifp->if_flags & IFF_PROMISC) {
730 sc->sc_rctrl |= RCTRL_PROM;
731 } else {
732 sc->sc_rctrl &= ~RCTRL_PROM;
733 }
734
735 uint32_t rctrl_prsdep = 0;
736 sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP);
737 if (VLAN_ATTACHED(&sc->sc_ec)) {
738 sc->sc_rctrl |= RCTRL_VLEX;
739 rctrl_prsdep = RCTRL_PRSDEP_L2;
740 }
741 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) {
742 sc->sc_rctrl |= RCTRL_IPCSEN;
743 rctrl_prsdep = RCTRL_PRSDEP_L3;
744 }
745 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) {
746 sc->sc_rctrl |= RCTRL_TUCSEN;
747 rctrl_prsdep = RCTRL_PRSDEP_L4;
748 }
749 sc->sc_rctrl |= rctrl_prsdep;
750 #if 0
751 if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP))
752 aprint_normal_dev(sc->sc_dev,
753 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n",
754 sc->sc_rctrl,
755 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN),
756 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN),
757 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX),
758 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP));
759 #endif
760
761 sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS);
762 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */
763 sc->sc_tctrl |= TCTRL_VLINS;
764 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN)
765 sc->sc_tctrl |= TCTRL_IPCSEN;
766 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN)
767 sc->sc_tctrl |= TCTRL_TUCSEN;
768 #if 0
769 if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS))
770 aprint_normal_dev(sc->sc_dev,
771 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n",
772 sc->sc_tctrl,
773 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN),
774 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN),
775 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS));
776 #endif
777
778 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN);
779
780 const uint64_t macstnaddr =
781 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl));
782
783 sc->sc_imask = IEVENT_DPE;
784
785 /* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */
786 pq3etsec_rxq_reset(sc, &sc->sc_rxq);
787 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */
788
789 /* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */
790 pq3etsec_txq_reset(sc, &sc->sc_txq);
791
792 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */
793 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2);
794 etsec_write(sc, MAXFRM, sc->sc_maxfrm);
795 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32));
796 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0));
797 etsec_write(sc, MACCFG1, sc->sc_maccfg1);
798 etsec_write(sc, MACCFG2, sc->sc_maccfg2);
799 etsec_write(sc, ECNTRL, sc->sc_ecntrl);
800
801 /* 8. Setup group address hash table (GADDR0-GADDR15) */
802 pq3etsec_mc_setup(sc);
803
804 /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */
805 etsec_write(sc, MRBLR, MCLBYTES);
806
807 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */
808 sc->sc_dmactrl |= DMACTRL_DEFAULT;
809 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
810
811 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
812 etsec_write(sc, TQUEUE, TQUEUE_EN0);
813 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC;
814
815 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */
816
817 /* 12. Enable receive queues in RQUEUE, */
818 etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0);
819 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC;
820
821 /* and optionally set TOE functionality in RCTRL. */
822 etsec_write(sc, RCTRL, sc->sc_rctrl);
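	/*
	 * sc_rx_adjlen is the number of bytes pq3etsec_rx_input() trims from
	 * the front of each received frame: the RCTRL_PAL alignment padding
	 * plus the receive FCB when the parser is enabled.
	 */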
823 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL);
824 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF)
825 sc->sc_rx_adjlen += sizeof(struct rxfcb);
826
827 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */
828 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF);
829
830 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/
831 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF);
832
833 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */
834 sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS);
835 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
836
837 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */
838 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
839 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
840
841 sc->sc_soft_flags = 0;
842
843 etsec_write(sc, IMASK, sc->sc_imask);
844
845 ifp->if_flags |= IFF_RUNNING;
846
847 return error;
848 }
849
850 static void
851 pq3etsec_ifstop(struct ifnet *ifp, int disable)
852 {
853 struct pq3etsec_softc * const sc = ifp->if_softc;
854
855 KASSERT(!cpu_intr_p());
856 const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC;
857 /*
858 * Clear the GTSC and GRSC from the interrupt mask until
859 * we are ready for them. Then clear them from IEVENT,
860 * request the graceful shutdown, and then enable the
861 * GTSC and GRSC bits in the mask. This should cause the
862 * error interrupt to fire which will issue a wakeup to
863 * allow us to resume.
864 */
865
866 /*
867 * 1. Set GRS/GTS bits in DMACTRL register
868 */
869 sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS;
870 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask);
871 etsec_write(sc, IEVENT, imask_gsc_mask);
872 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
873
874 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) {
875 /*
876 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set
877 */
878 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask);
879
880 u_int timo = 1000;
881 uint32_t ievent = etsec_read(sc, IEVENT);
882 while ((ievent & imask_gsc_mask) != imask_gsc_mask) {
883 if (--timo == 0) {
884 aprint_error_dev(sc->sc_dev,
885 "WARNING: "
886 "request to stop failed (IEVENT=%#x)\n",
887 ievent);
888 break;
889 }
890 delay(10);
891 ievent = etsec_read(sc, IEVENT);
892 }
893 }
894
895 /*
896 * Now reset the controller.
897 *
898 * 3. Set SOFT_RESET bit in MACCFG1 register
899 * 4. Clear SOFT_RESET bit in MACCFG1 register
900 */
901 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
902 etsec_write(sc, MACCFG1, 0);
903 etsec_write(sc, IMASK, 0);
904 etsec_write(sc, IEVENT, ~0);
905 sc->sc_imask = 0;
906 ifp->if_flags &= ~IFF_RUNNING;
907
908 uint32_t tbipa = etsec_read(sc, TBIPA);
909 if (tbipa == sc->sc_phy_addr) {
910 aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
911 etsec_write(sc, TBIPA, 0x1f);
912 }
913 uint32_t miimcfg = etsec_read(sc, MIIMCFG);
914 etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
915 etsec_write(sc, MIIMCFG, miimcfg);
916
917 /*
   918 	 * Let's consume any remaining transmitted packets. And if we are
919 * disabling the interface, purge ourselves of any untransmitted
920 * packets. But don't consume any received packets, just drop them.
921 * If we aren't disabling the interface, save the mbufs in the
922 * receive queue for reuse.
923 */
924 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable);
925 pq3etsec_txq_consume(sc, &sc->sc_txq);
926 if (disable) {
927 pq3etsec_txq_purge(sc, &sc->sc_txq);
928 IF_PURGE(&ifp->if_snd);
929 }
930 }
931
932 static void
933 pq3etsec_ifwatchdog(struct ifnet *ifp)
934 {
935 }
936
937 static void
938 pq3etsec_mc_setup(
939 struct pq3etsec_softc *sc)
940 {
941 struct ethercom * const ec = &sc->sc_ec;
942 struct ifnet * const ifp = &sc->sc_if;
943 struct ether_multi *enm;
944 struct ether_multistep step;
945 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8);
946 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8);
947
948 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr));
949 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
950
951 ifp->if_flags &= ~IFF_ALLMULTI;
952
953 ETHER_FIRST_MULTI(step, ec, enm);
954 for (u_int i = 0; enm != NULL; ) {
955 const char *addr = enm->enm_addrlo;
956 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
957 ifp->if_flags |= IFF_ALLMULTI;
958 memset(gaddr, 0xff, 32 << (crc_shift & 1));
959 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
960 break;
961 }
962 if ((sc->sc_rctrl & RCTRL_EMEN)
963 && i < __arraycount(sc->sc_macaddrs)) {
964 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr);
965 } else {
966 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
967 #if 0
968 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__,
969 ether_sprintf(addr), crc,
970 crc >> crc_shift,
971 crc >> (crc_shift + 5),
972 (crc >> crc_shift) & 31,
973 1 << (((crc >> crc_shift) & 31) ^ 31));
974 #endif
975 /*
976 * The documentation doesn't completely follow PowerPC
977 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01
978 * is 0x7fa32d9b. By empirical testing, the
979 * corresponding hash bit is word 3, bit 31 (ppc bit
980 * order). Since 3 << 31 | 31 is 0x7f, we deduce
981 * H[0:2] selects the register while H[3:7] selects
982 * the bit (ppc bit order).
983 */
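			/*
			 * Worked example with crc_shift == 24 (GHTX clear):
			 * 0x7fa32d9b >> 24 = 0x7f = 127, so word 127/32 = 3
			 * and bit (127 & 31) ^ 31 = 0, i.e. 1 << 0, which is
			 * ppc bit 31 of word 3 as described above.
			 */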
984 crc >>= crc_shift;
985 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31);
986 }
987 ETHER_NEXT_MULTI(step, enm);
988 }
989 for (u_int i = 0; i < 8; i++) {
990 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]);
991 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]);
992 #if 0
993 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8])
994 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__,
995 i, IGADDR(i), etsec_read(sc, IGADDR(i)),
996 i, GADDR(i), etsec_read(sc, GADDR(i)));
997 #endif
998 }
999 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) {
1000 uint64_t macaddr = sc->sc_macaddrs[i];
1001 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32));
1002 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0));
1003 #if 0
1004 if (macaddr)
1005 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__,
1006 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)),
1007 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i)));
1008 #endif
1009 }
1010 }
1011
1012 static int
1013 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
1014 {
1015 struct pq3etsec_softc *sc = ifp->if_softc;
1016 struct ifreq * const ifr = data;
1017 const int s = splnet();
1018 int error;
1019
1020 switch (cmd) {
1021 case SIOCSIFMEDIA:
1022 case SIOCGIFMEDIA:
1023 /* Flow control requires full-duplex mode. */
1024 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1025 (ifr->ifr_media & IFM_FDX) == 0)
1026 ifr->ifr_media &= ~IFM_ETH_FMASK;
1027 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1028 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1029 /* We can do both TXPAUSE and RXPAUSE. */
1030 ifr->ifr_media |=
1031 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1032 }
1033 }
1034 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1035 break;
1036
1037 default:
1038 error = ether_ioctl(ifp, cmd, data);
1039 if (error != ENETRESET)
1040 break;
1041
1042 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
1043 error = 0;
1044 if (ifp->if_flags & IFF_RUNNING)
1045 pq3etsec_mc_setup(sc);
1046 break;
1047 }
1048 error = pq3etsec_ifinit(ifp);
1049 break;
1050 }
1051
1052 splx(s);
1053 return error;
1054 }
1055
1056 static void
1057 pq3etsec_rxq_desc_presync(
1058 struct pq3etsec_softc *sc,
1059 struct pq3etsec_rxqueue *rxq,
1060 volatile struct rxbd *rxbd,
1061 size_t count)
1062 {
1063 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1064 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1065 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1066 }
1067
1068 static void
1069 pq3etsec_rxq_desc_postsync(
1070 struct pq3etsec_softc *sc,
1071 struct pq3etsec_rxqueue *rxq,
1072 volatile struct rxbd *rxbd,
1073 size_t count)
1074 {
1075 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1076 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1077 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1078 }
1079
1080 static void
1081 pq3etsec_txq_desc_presync(
1082 struct pq3etsec_softc *sc,
1083 struct pq3etsec_txqueue *txq,
1084 volatile struct txbd *txbd,
1085 size_t count)
1086 {
1087 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1088 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1089 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1090 }
1091
1092 static void
1093 pq3etsec_txq_desc_postsync(
1094 struct pq3etsec_softc *sc,
1095 struct pq3etsec_txqueue *txq,
1096 volatile struct txbd *txbd,
1097 size_t count)
1098 {
1099 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1100 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1101 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1102 }
1103
1104 static bus_dmamap_t
1105 pq3etsec_mapcache_get(
1106 struct pq3etsec_softc *sc,
1107 struct pq3etsec_mapcache *dmc)
1108 {
1109 if (dmc->dmc_nmaps == 0) {
1110 bus_dmamap_t map;
1111 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
1112 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
1113 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &map);
1114 if (error) {
1115 aprint_error_dev(sc->sc_dev,
1116 "failed to allocate a %zuB map: %d\n",
1117 dmc->dmc_maxmapsize, error);
1118 return NULL;
1119 }
1120 return map;
1121 }
1122
1123 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
1124 return dmc->dmc_maps[--dmc->dmc_nmaps];
1125 }
1126
1127 static void
1128 pq3etsec_mapcache_put(
1129 struct pq3etsec_softc *sc,
1130 struct pq3etsec_mapcache *dmc,
1131 bus_dmamap_t map)
1132 {
1133 KASSERT(map != NULL);
1134 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
1135 dmc->dmc_maps[dmc->dmc_nmaps++] = map;
1136 }
1137
1138 static void
1139 pq3etsec_mapcache_destroy(
1140 struct pq3etsec_softc *sc,
1141 struct pq3etsec_mapcache *dmc)
1142 {
1143 const size_t dmc_size =
1144 offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);
1145
1146 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
1147 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
1148 }
1149 kmem_free(dmc, dmc_size);
1150 }
1151
1152 static int
1153 pq3etsec_mapcache_create(
1154 struct pq3etsec_softc *sc,
1155 struct pq3etsec_mapcache **dmc_p,
1156 size_t maxmaps,
1157 size_t minmaps,
1158 size_t maxmapsize,
1159 size_t maxseg)
1160 {
1161 const size_t dmc_size =
1162 offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
1163 struct pq3etsec_mapcache * const dmc = kmem_zalloc(dmc_size, KM_SLEEP);
1164
1165 dmc->dmc_maxmaps = maxmaps;
1166 dmc->dmc_nmaps = minmaps;
1167 dmc->dmc_maxmapsize = maxmapsize;
1168 dmc->dmc_maxseg = maxseg;
1169
1170 for (u_int i = 0; i < minmaps; i++) {
1171 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
1172 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
1173 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
1174 if (error) {
1175 aprint_error_dev(sc->sc_dev,
  1176 			    "failed to create dma map cache "
1177 "entry %u of %zu (max %zu): %d\n",
1178 i, minmaps, maxmaps, error);
1179 while (i-- > 0) {
1180 bus_dmamap_destroy(sc->sc_dmat,
1181 dmc->dmc_maps[i]);
1182 }
1183 kmem_free(dmc, dmc_size);
1184 return error;
1185 }
1186 KASSERT(dmc->dmc_maps[i] != NULL);
1187 }
1188
1189 *dmc_p = dmc;
1190
1191 return 0;
1192 }
1193
1194 #if 0
1195 static void
1196 pq3etsec_dmamem_free(
1197 bus_dma_tag_t dmat,
1198 size_t map_size,
1199 bus_dma_segment_t *seg,
1200 bus_dmamap_t map,
1201 void *kvap)
1202 {
1203 bus_dmamap_destroy(dmat, map);
1204 bus_dmamem_unmap(dmat, kvap, map_size);
1205 bus_dmamem_free(dmat, seg, 1);
1206 }
1207 #endif
1208
1209 static int
1210 pq3etsec_dmamem_alloc(
1211 bus_dma_tag_t dmat,
1212 size_t map_size,
1213 bus_dma_segment_t *seg,
1214 bus_dmamap_t *map,
1215 void **kvap)
1216 {
1217 int error;
1218 int nseg;
1219
1220 *kvap = NULL;
1221 *map = NULL;
1222
1223 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
1224 seg, 1, &nseg, 0);
1225 if (error)
1226 return error;
1227
1228 KASSERT(nseg == 1);
1229
1230 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
1231 BUS_DMA_COHERENT);
1232 if (error == 0) {
1233 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
1234 map);
1235 if (error == 0) {
1236 error = bus_dmamap_load(dmat, *map, *kvap, map_size,
1237 NULL, 0);
1238 if (error == 0)
1239 return 0;
1240 bus_dmamap_destroy(dmat, *map);
1241 *map = NULL;
1242 }
1243 bus_dmamem_unmap(dmat, *kvap, map_size);
1244 *kvap = NULL;
1245 }
1246 bus_dmamem_free(dmat, seg, nseg);
  1247 	return error;
1248 }
1249
1250 static struct mbuf *
1251 pq3etsec_rx_buf_alloc(
1252 struct pq3etsec_softc *sc)
1253 {
1254 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
1255 if (m == NULL) {
1256 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
1257 return NULL;
1258 }
1259 MCLGET(m, M_DONTWAIT);
1260 if ((m->m_flags & M_EXT) == 0) {
1261 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
1262 m_freem(m);
1263 return NULL;
1264 }
1265 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1266
1267 bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
1268 if (map == NULL) {
1269 printf("%s:%d: %s\n", __func__, __LINE__, "map get");
1270 m_freem(m);
1271 return NULL;
1272 }
1273 M_SETCTX(m, map);
1274 m->m_len = m->m_pkthdr.len = MCLBYTES;
1275 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1276 BUS_DMA_READ|BUS_DMA_NOWAIT);
1277 if (error) {
  1278 		aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
1279 error);
1280 M_SETCTX(m, NULL);
1281 m_freem(m);
1282 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1283 return NULL;
1284 }
1285 KASSERT(map->dm_mapsize == MCLBYTES);
1286 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1287 BUS_DMASYNC_PREREAD);
1288
1289 return m;
1290 }
1291
1292 static void
1293 pq3etsec_rx_map_unload(
1294 struct pq3etsec_softc *sc,
1295 struct mbuf *m)
1296 {
1297 KASSERT(m);
1298 for (; m != NULL; m = m->m_next) {
1299 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1300 KASSERT(map);
1301 KASSERT(map->dm_mapsize == MCLBYTES);
1302 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
1303 BUS_DMASYNC_POSTREAD);
1304 bus_dmamap_unload(sc->sc_dmat, map);
1305 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1306 M_SETCTX(m, NULL);
1307 }
1308 }
1309
1310 static bool
1311 pq3etsec_rxq_produce(
1312 struct pq3etsec_softc *sc,
1313 struct pq3etsec_rxqueue *rxq)
1314 {
1315 volatile struct rxbd *producer = rxq->rxq_producer;
1316 #if 0
1317 size_t inuse = rxq->rxq_inuse;
1318 #endif
1319 while (rxq->rxq_inuse < rxq->rxq_threshold) {
1320 struct mbuf *m;
1321 IF_DEQUEUE(&sc->sc_rx_bufcache, m);
1322 if (m == NULL) {
1323 m = pq3etsec_rx_buf_alloc(sc);
1324 if (m == NULL) {
1325 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
1326 break;
1327 }
1328 }
1329 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1330 KASSERT(map);
1331
1332 #ifdef ETSEC_DEBUG
1333 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
1334 rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
1335 #endif
1336
1337 /* rxbd_len is write-only by the ETSEC */
1338 producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
1339 membar_producer();
1340 producer->rxbd_flags |= RXBD_E;
1341 if (__predict_false(rxq->rxq_mhead == NULL)) {
1342 KASSERT(producer == rxq->rxq_consumer);
1343 rxq->rxq_mconsumer = m;
1344 }
1345 *rxq->rxq_mtail = m;
1346 rxq->rxq_mtail = &m->m_next;
1347 m->m_len = MCLBYTES;
1348 m->m_next = NULL;
1349 rxq->rxq_inuse++;
1350 if (++producer == rxq->rxq_last) {
1351 membar_producer();
1352 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1353 rxq->rxq_last - rxq->rxq_producer);
1354 producer = rxq->rxq_producer = rxq->rxq_first;
1355 }
1356 }
1357 if (producer != rxq->rxq_producer) {
1358 membar_producer();
1359 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1360 producer - rxq->rxq_producer);
1361 rxq->rxq_producer = producer;
1362 }
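	/*
	 * If the receive queue has halted (e.g. because it ran out of empty
	 * descriptors), clear QHLT (write 1 to clear) now that fresh buffers
	 * have been posted so reception can resume.
	 */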
1363 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
1364 if (qhlt) {
1365 KASSERT(qhlt & rxq->rxq_qmask);
1366 sc->sc_ev_rx_stall.ev_count++;
1367 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
1368 }
1369 #if 0
1370 aprint_normal_dev(sc->sc_dev,
1371 "%s: buffers inuse went from %zu to %zu\n",
1372 __func__, inuse, rxq->rxq_inuse);
1373 #endif
1374 return true;
1375 }
1376
1377 static bool
1378 pq3etsec_rx_offload(
1379 struct pq3etsec_softc *sc,
1380 struct mbuf *m,
1381 const struct rxfcb *fcb)
1382 {
1383 if (fcb->rxfcb_flags & RXFCB_VLN) {
1384 VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl,
1385 m_freem(m); return false);
1386 }
1387 if ((fcb->rxfcb_flags & RXFCB_IP) == 0
1388 || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0)
1389 return true;
1390 int csum_flags = 0;
1391 if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) {
1392 csum_flags |= M_CSUM_IPv4;
1393 if (fcb->rxfcb_flags & RXFCB_EIP)
1394 csum_flags |= M_CSUM_IPv4_BAD;
1395 }
1396 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
1397 int ipv_flags;
1398 if (fcb->rxfcb_flags & RXFCB_IP6)
1399 ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6;
1400 else
1401 ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4;
1402 if (fcb->rxfcb_pro == IPPROTO_TCP) {
1403 csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags;
1404 } else {
1405 csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags;
1406 }
1407 if (fcb->rxfcb_flags & RXFCB_ETU)
1408 csum_flags |= M_CSUM_TCP_UDP_BAD;
1409 }
1410
1411 m->m_pkthdr.csum_flags = csum_flags;
1412 return true;
1413 }
1414
1415 static void
1416 pq3etsec_rx_input(
1417 struct pq3etsec_softc *sc,
1418 struct mbuf *m,
1419 uint16_t rxbd_flags)
1420 {
1421 struct ifnet * const ifp = &sc->sc_if;
1422
1423 pq3etsec_rx_map_unload(sc, m);
1424
1425 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
1426 struct rxfcb fcb = *mtod(m, struct rxfcb *);
1427 if (!pq3etsec_rx_offload(sc, m, &fcb))
1428 return;
1429 }
1430 m_adj(m, sc->sc_rx_adjlen);
1431
1432 if (rxbd_flags & RXBD_M)
1433 m->m_flags |= M_PROMISC;
1434 if (rxbd_flags & RXBD_BC)
1435 m->m_flags |= M_BCAST;
1436 if (rxbd_flags & RXBD_MC)
1437 m->m_flags |= M_MCAST;
1438 m->m_flags |= M_HASFCS;
1439 m->m_pkthdr.rcvif = &sc->sc_if;
1440
1441 ifp->if_ipackets++;
1442 ifp->if_ibytes += m->m_pkthdr.len;
1443
1444 /*
  1445 	 * Let's give it to the network subsystem to deal with.
1446 */
1447 int s = splnet();
1448 bpf_mtap(ifp, m);
1449 (*ifp->if_input)(ifp, m);
1450 splx(s);
1451 }
1452
1453 static void
1454 pq3etsec_rxq_consume(
1455 struct pq3etsec_softc *sc,
1456 struct pq3etsec_rxqueue *rxq)
1457 {
1458 struct ifnet * const ifp = &sc->sc_if;
1459 volatile struct rxbd *consumer = rxq->rxq_consumer;
1460 size_t rxconsumed = 0;
1461
1462 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);
1463
1464 for (;;) {
1465 if (consumer == rxq->rxq_producer) {
1466 rxq->rxq_consumer = consumer;
1467 rxq->rxq_inuse -= rxconsumed;
1468 KASSERT(rxq->rxq_inuse == 0);
1469 return;
1470 }
1471 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
1472 const uint16_t rxbd_flags = consumer->rxbd_flags;
1473 if (rxbd_flags & RXBD_E) {
1474 rxq->rxq_consumer = consumer;
1475 rxq->rxq_inuse -= rxconsumed;
1476 return;
1477 }
1478 KASSERT(rxq->rxq_mconsumer != NULL);
1479 #ifdef ETSEC_DEBUG
1480 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1481 #endif
1482 #if 0
1483 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
1484 __func__,
1485 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
1486 mtod(rxq->rxq_mconsumer, int *)[0],
1487 mtod(rxq->rxq_mconsumer, int *)[1],
1488 mtod(rxq->rxq_mconsumer, int *)[2],
1489 mtod(rxq->rxq_mconsumer, int *)[3]);
1490 #endif
1491 /*
1492 * We own this packet again. Clear all flags except wrap.
1493 */
1494 rxconsumed++;
1495 consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I);
1496
1497 /*
1498 * If this descriptor has the LAST bit set and no errors,
1499 * it's a valid input packet.
1500 */
1501 if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) {
1502 size_t rxbd_len = consumer->rxbd_len;
1503 struct mbuf *m = rxq->rxq_mhead;
1504 struct mbuf *m_last = rxq->rxq_mconsumer;
1505 if ((rxq->rxq_mhead = m_last->m_next) == NULL)
1506 rxq->rxq_mtail = &rxq->rxq_mhead;
1507 rxq->rxq_mconsumer = rxq->rxq_mhead;
1508 m_last->m_next = NULL;
1509 m_last->m_len = rxbd_len & (MCLBYTES - 1);
1510 m->m_pkthdr.len = rxbd_len;
1511 pq3etsec_rx_input(sc, m, rxbd_flags);
1512 } else if (rxbd_flags & RXBD_L) {
1513 KASSERT(rxbd_flags & RXBD_ERRORS);
1514 struct mbuf *m;
1515 /*
1516 * We encountered an error, take the mbufs and add
  1517 			 * them to the rx bufcache so we can reuse them.
1518 */
1519 ifp->if_ierrors++;
1520 for (m = rxq->rxq_mhead;
1521 m != rxq->rxq_mconsumer;
1522 m = m->m_next) {
1523 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1524 }
1525 m = rxq->rxq_mconsumer;
1526 if ((rxq->rxq_mhead = m->m_next) == NULL)
1527 rxq->rxq_mtail = &rxq->rxq_mhead;
1528 rxq->rxq_mconsumer = m->m_next;
1529 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1530 } else {
1531 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next;
1532 }
1533 #ifdef ETSEC_DEBUG
1534 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL;
1535 #endif
1536
1537 /*
1538 * Wrap at the last entry!
1539 */
1540 if (rxbd_flags & RXBD_W) {
1541 KASSERT(consumer + 1 == rxq->rxq_last);
1542 consumer = rxq->rxq_first;
1543 } else {
1544 consumer++;
1545 }
1546 #ifdef ETSEC_DEBUG
1547 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1548 #endif
1549 }
1550 }
1551
1552 static void
1553 pq3etsec_rxq_purge(
1554 struct pq3etsec_softc *sc,
1555 struct pq3etsec_rxqueue *rxq,
1556 bool discard)
1557 {
1558 struct mbuf *m;
1559
1560 if ((m = rxq->rxq_mhead) != NULL) {
1561 #ifdef ETSEC_DEBUG
1562 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs));
1563 #endif
1564
1565 if (discard) {
1566 pq3etsec_rx_map_unload(sc, m);
1567 m_freem(m);
1568 } else {
1569 while (m != NULL) {
1570 struct mbuf *m0 = m->m_next;
1571 m->m_next = NULL;
1572 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1573 m = m0;
1574 }
1575 }
1576
1577 }
1578
1579 rxq->rxq_mconsumer = NULL;
1580 rxq->rxq_mhead = NULL;
1581 rxq->rxq_mtail = &rxq->rxq_mhead;
1582 rxq->rxq_inuse = 0;
1583 }
1584
1585 static void
1586 pq3etsec_rxq_reset(
1587 struct pq3etsec_softc *sc,
1588 struct pq3etsec_rxqueue *rxq)
1589 {
1590 /*
1591 * sync all the descriptors
1592 */
1593 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1594 rxq->rxq_last - rxq->rxq_first);
1595
1596 /*
1597 * Make sure we own all descriptors in the ring.
1598 */
1599 volatile struct rxbd *rxbd;
1600 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) {
1601 rxbd->rxbd_flags = RXBD_I;
1602 }
1603
1604 /*
1605 * Last descriptor has the wrap flag.
1606 */
1607 rxbd->rxbd_flags = RXBD_W|RXBD_I;
1608
1609 /*
  1610 	 * Reset the producer and consumer indexes.
1611 */
1612 rxq->rxq_consumer = rxq->rxq_first;
1613 rxq->rxq_producer = rxq->rxq_first;
1614 rxq->rxq_inuse = 0;
1615 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
1616 rxq->rxq_threshold = ETSEC_MINRXMBUFS;
1617
1618 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY;
1619
1620 /*
  1621 	 * Restart the receiver at the first descriptor
1622 */
1623 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
1624 }
1625
1626 static int
1627 pq3etsec_rxq_attach(
1628 struct pq3etsec_softc *sc,
1629 struct pq3etsec_rxqueue *rxq,
1630 u_int qno)
1631 {
1632 size_t map_size = PAGE_SIZE;
1633 size_t desc_count = map_size / sizeof(struct rxbd);
1634 int error;
1635 void *descs;
1636
1637 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1638 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1639 if (error)
1640 return error;
1641
1642 memset(descs, 0, map_size);
1643 rxq->rxq_first = descs;
1644 rxq->rxq_last = rxq->rxq_first + desc_count;
1645 rxq->rxq_consumer = descs;
1646 rxq->rxq_producer = descs;
1647
1648 pq3etsec_rxq_purge(sc, rxq, true);
1649 pq3etsec_rxq_reset(sc, rxq);
1650
1651 rxq->rxq_reg_rbase = RBASEn(qno);
1652 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);
1653
1654 return 0;
1655 }
1656
1657 static bool
1658 pq3etsec_txq_active_p(
1659 struct pq3etsec_softc * const sc,
1660 struct pq3etsec_txqueue *txq)
1661 {
1662 return !IF_IS_EMPTY(&txq->txq_mbufs);
1663 }
1664
1665 static bool
1666 pq3etsec_txq_fillable_p(
1667 struct pq3etsec_softc * const sc,
1668 struct pq3etsec_txqueue *txq)
1669 {
1670 return txq->txq_free >= txq->txq_threshold;
1671 }
1672
1673 static int
1674 pq3etsec_txq_attach(
1675 struct pq3etsec_softc *sc,
1676 struct pq3etsec_txqueue *txq,
1677 u_int qno)
1678 {
1679 size_t map_size = PAGE_SIZE;
1680 size_t desc_count = map_size / sizeof(struct txbd);
1681 int error;
1682 void *descs;
1683
1684 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1685 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1686 if (error)
1687 return error;
1688
1689 memset(descs, 0, map_size);
1690 txq->txq_first = descs;
1691 txq->txq_last = txq->txq_first + desc_count;
1692 txq->txq_consumer = descs;
1693 txq->txq_producer = descs;
1694
1695 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS);
1696
1697 txq->txq_reg_tbase = TBASEn(qno);
1698 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno);
1699
1700 pq3etsec_txq_reset(sc, txq);
1701
1702 return 0;
1703 }
1704
1705 static int
1706 pq3etsec_txq_map_load(
1707 struct pq3etsec_softc *sc,
1708 struct pq3etsec_txqueue *txq,
1709 struct mbuf *m)
1710 {
1711 bus_dmamap_t map;
1712 int error;
1713
1714 map = M_GETCTX(m, bus_dmamap_t);
1715 if (map != NULL)
1716 return 0;
1717
1718 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache);
1719 if (map == NULL)
1720 return ENOMEM;
1721
1722 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1723 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1724 if (error)
1725 return error;
1726
1727 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1728 BUS_DMASYNC_PREWRITE);
1729 M_SETCTX(m, map);
1730 return 0;
1731 }
1732
1733 static void
1734 pq3etsec_txq_map_unload(
1735 struct pq3etsec_softc *sc,
1736 struct pq3etsec_txqueue *txq,
1737 struct mbuf *m)
1738 {
1739 KASSERT(m);
1740 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1741 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1742 BUS_DMASYNC_POSTWRITE);
1743 bus_dmamap_unload(sc->sc_dmat, map);
1744 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map);
1745 }
1746
1747 static bool
1748 pq3etsec_txq_produce(
1749 struct pq3etsec_softc *sc,
1750 struct pq3etsec_txqueue *txq,
1751 struct mbuf *m)
1752 {
1753 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1754
1755 if (map->dm_nsegs > txq->txq_free)
1756 return false;
1757
1758 /*
1759 * TCP Offload flag must be set in the first descriptor.
1760 */
1761 volatile struct txbd *producer = txq->txq_producer;
1762 uint16_t last_flags = TXBD_L;
1763 uint16_t first_flags = TXBD_R
1764 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0);
1765
1766 /*
1767 * If we've produced enough descriptors without consuming any
1768 * we need to ask for an interrupt to reclaim some.
1769 */
1770 txq->txq_lastintr += map->dm_nsegs;
1771 if (txq->txq_lastintr >= txq->txq_threshold
1772 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1773 txq->txq_lastintr = 0;
1774 last_flags |= TXBD_I;
1775 }
1776
1777 #ifdef ETSEC_DEBUG
1778 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1779 #endif
1780 KASSERT(producer != txq->txq_last);
1781 producer->txbd_bufptr = map->dm_segs[0].ds_addr;
1782 producer->txbd_len = map->dm_segs[0].ds_len;
1783
1784 if (map->dm_nsegs > 1) {
1785 volatile struct txbd *start = producer + 1;
1786 size_t count = map->dm_nsegs - 1;
1787 for (u_int i = 1; i < map->dm_nsegs; i++) {
1788 if (__predict_false(++producer == txq->txq_last)) {
1789 producer = txq->txq_first;
1790 if (start < txq->txq_last) {
1791 pq3etsec_txq_desc_presync(sc, txq,
1792 start, txq->txq_last - start);
1793 count -= txq->txq_last - start;
1794 }
1795 start = txq->txq_first;
1796 }
1797 #ifdef ETSEC_DEBUG
1798 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1799 #endif
1800 producer->txbd_bufptr = map->dm_segs[i].ds_addr;
1801 producer->txbd_len = map->dm_segs[i].ds_len;
1802 producer->txbd_flags = TXBD_R
1803 | (producer->txbd_flags & TXBD_W)
1804 | (i == map->dm_nsegs - 1 ? last_flags : 0);
1805 #if 0
1806 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first,
1807 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr);
1808 #endif
1809 }
1810 pq3etsec_txq_desc_presync(sc, txq, start, count);
1811 } else {
1812 first_flags |= last_flags;
1813 }
1814
1815 membar_producer();
1816 txq->txq_producer->txbd_flags =
1817 first_flags | (txq->txq_producer->txbd_flags & TXBD_W);
1818 #if 0
1819 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
1820 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags,
1821 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr);
1822 #endif
1823 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1);
1824
1825 /*
1826 * Reduce free count by the number of segments we consumed.
1827 */
1828 txq->txq_free -= map->dm_nsegs;
1829 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1830 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
1831 KASSERT(producer->txbd_flags & TXBD_L);
1832 #ifdef ETSEC_DEBUG
1833 txq->txq_lmbufs[producer - txq->txq_first] = m;
1834 #endif
1835
1836 #if 0
1837 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
1838 __func__, m, m->m_pkthdr.len, map->dm_nsegs,
1839 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
1840 #endif
1841
1842 if (++producer == txq->txq_last)
1843 txq->txq_producer = txq->txq_first;
1844 else
1845 txq->txq_producer = producer;
1846 IF_ENQUEUE(&txq->txq_mbufs, m);
1847
1848 /*
1849 * Restart the transmitter.
1850 */
1851 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */
1852
1853 return true;
1854 }
1855
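/*
 * Prepend a transmit frame control block (txfcb) when checksum offload or
 * VLAN tag insertion was requested for this packet.  The FCB goes into
 * existing leading space, into the packet header mbuf's own data area, or
 * into a freshly allocated mbuf; if no mbuf can be allocated, any deferred
 * IP/TCP/UDP checksums are instead computed in software and M_HASFCB is
 * cleared.
 */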
1856 static void
1857 pq3etsec_tx_offload(
1858 struct pq3etsec_softc *sc,
1859 struct pq3etsec_txqueue *txq,
1860 struct mbuf **mp)
1861 {
1862 struct mbuf *m = *mp;
1863 u_int csum_flags = m->m_pkthdr.csum_flags;
1864 struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m);
1865
1866 KASSERT(m->m_flags & M_PKTHDR);
1867
1868 /*
  1869 	 * Let's see if we are doing any offload first.
1870 */
1871 if (csum_flags == 0 && vtag == 0) {
1872 m->m_flags &= ~M_HASFCB;
1873 return;
1874 }
1875
1876 uint16_t flags = 0;
1877 if (csum_flags & M_CSUM_IP) {
1878 flags |= TXFCB_IP
1879 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
1880 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
1881 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
1882 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
1883 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
1884 }
1885 if (vtag) {
1886 flags |= TXFCB_VLN;
1887 }
1888 if (flags == 0) {
1889 m->m_flags &= ~M_HASFCB;
1890 return;
1891 }
1892
1893 struct txfcb fcb;
1894 fcb.txfcb_flags = flags;
1895 if (csum_flags & M_CSUM_IPv4)
1896 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
1897 else
1898 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
1899 fcb.txfcb_l3os = ETHER_HDR_LEN;
1900 fcb.txfcb_phcs = 0;
1901 fcb.txfcb_vlctl = vtag ? VLAN_TAG_VALUE(vtag) & 0xffff : 0;
1902
1903 #if 0
1904 printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n",
1905 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os,
1906 fcb.txfcb_phcs, fcb.txfcb_vlctl);
1907 #endif
1908
1909 if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
1910 m->m_data -= sizeof(fcb);
1911 m->m_len += sizeof(fcb);
1912 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
1913 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
1914 m->m_data = m->m_pktdat;
1915 m->m_len += sizeof(fcb);
1916 } else {
1917 struct mbuf *mn;
1918 MGET(mn, M_DONTWAIT, m->m_type);
1919 if (mn == NULL) {
1920 if (csum_flags & M_CSUM_IP4) {
1921 #ifdef INET
1922 ip_undefer_csum(m, ETHER_HDR_LEN,
1923 csum_flags & M_CSUM_IP4);
1924 #else
1925 panic("%s: impossible M_CSUM flags %#x",
1926 device_xname(sc->sc_dev), csum_flags);
1927 #endif
1928 } else if (csum_flags & M_CSUM_IP6) {
1929 #ifdef INET6
1930 ip6_undefer_csum(m, ETHER_HDR_LEN,
1931 csum_flags & M_CSUM_IP6);
1932 #else
1933 panic("%s: impossible M_CSUM flags %#x",
1934 device_xname(sc->sc_dev), csum_flags);
1935 #endif
1936 } else if (vtag) {
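				/*
				 * Without an FCB there is nothing we can
				 * do here about a pending VLAN tag.
				 */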
1937 }
1938
1939 m->m_flags &= ~M_HASFCB;
1940 return;
1941 }
1942
1943 M_MOVE_PKTHDR(mn, m);
1944 mn->m_next = m;
1945 m = mn;
1946 MH_ALIGN(m, sizeof(fcb));
1947 m->m_len = sizeof(fcb);
1948 *mp = m;
1949 }
1950 m->m_pkthdr.len += sizeof(fcb);
1951 m->m_flags |= M_HASFCB;
1952 *mtod(m, struct txfcb *) = fcb;
1953 return;
1954 }
1955
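/*
 * Pull packets off the interface send queue (or retry the one deferred in
 * txq_next), apply any offload setup, load them into DMA maps, and hand
 * them to the transmit ring.  Returns true once the send queue is drained,
 * false if the ring or the mbuf record queue filled first.
 */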
1956 static bool
1957 pq3etsec_txq_enqueue(
1958 struct pq3etsec_softc *sc,
1959 struct pq3etsec_txqueue *txq)
1960 {
1961 for (;;) {
1962 if (IF_QFULL(&txq->txq_mbufs))
1963 return false;
1964 struct mbuf *m = txq->txq_next;
1965 if (m == NULL) {
1966 int s = splnet();
1967 IF_DEQUEUE(&sc->sc_if.if_snd, m);
1968 splx(s);
1969 if (m == NULL)
1970 return true;
1971 M_SETCTX(m, NULL);
1972 pq3etsec_tx_offload(sc, txq, &m);
1973 } else {
1974 txq->txq_next = NULL;
1975 }
1976 int error = pq3etsec_txq_map_load(sc, txq, m);
1977 if (error) {
1978 aprint_error_dev(sc->sc_dev,
1979 "discarded packet due to "
1980 "dmamap load failure: %d\n", error);
1981 m_freem(m);
1982 continue;
1983 }
1984 KASSERT(txq->txq_next == NULL);
1985 if (!pq3etsec_txq_produce(sc, txq, m)) {
1986 txq->txq_next = m;
1987 return false;
1988 }
1989 KASSERT(txq->txq_next == NULL);
1990 }
1991 }
1992
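/*
 * Walk the transmit ring from the consumer index, reclaiming descriptors
 * the controller has finished with.  At the last descriptor of each packet,
 * unload its dmamap, update the interface counters, and free the mbuf
 * chain.  Returns true if the ring was drained completely, otherwise
 * whether it has become fillable again.
 */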
1993 static bool
1994 pq3etsec_txq_consume(
1995 struct pq3etsec_softc *sc,
1996 struct pq3etsec_txqueue *txq)
1997 {
1998 struct ifnet * const ifp = &sc->sc_if;
1999 volatile struct txbd *consumer = txq->txq_consumer;
2000 size_t txfree = 0;
2001
2002 #if 0
2003 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
2004 #endif
2005 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);
2006
2007 for (;;) {
2008 if (consumer == txq->txq_producer) {
2009 txq->txq_consumer = consumer;
2010 txq->txq_free += txfree;
2011 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
2012 #if 0
2013			printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
2014 __func__, txfree, txq->txq_free - txfree, txq->txq_free);
2015 #endif
2016 KASSERT(txq->txq_lastintr == 0);
2017 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
2018 return true;
2019 }
2020 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
2021 const uint16_t txbd_flags = consumer->txbd_flags;
2022 if (txbd_flags & TXBD_R) {
2023 txq->txq_consumer = consumer;
2024 txq->txq_free += txfree;
2025 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
2026 #if 0
2027 printf("%s: freed %zu descriptors\n",
2028 __func__, txfree);
2029 #endif
2030 return pq3etsec_txq_fillable_p(sc, txq);
2031 }
2032
2033 /*
2034 * If this is the last descriptor in the chain, get the
2035		 * mbuf, unload its dmamap, and free the mbuf chain itself.
2036 */
2037 if (txbd_flags & TXBD_L) {
2038 struct mbuf *m;
2039
2040 IF_DEQUEUE(&txq->txq_mbufs, m);
2041 #ifdef ETSEC_DEBUG
2042 KASSERTMSG(
2043 m == txq->txq_lmbufs[consumer-txq->txq_first],
2044 "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
2045 __func__, consumer, consumer - txq->txq_first,
2046 txbd_flags, m,
2047 &txq->txq_lmbufs[consumer-txq->txq_first],
2048 txq->txq_lmbufs[consumer-txq->txq_first]);
2049 #endif
2050 KASSERT(m);
2051 pq3etsec_txq_map_unload(sc, txq, m);
2052 #if 0
2053 printf("%s: mbuf %p: consumed a %u byte packet\n",
2054 __func__, m, m->m_pkthdr.len);
2055 #endif
2056 if (m->m_flags & M_HASFCB)
2057 m_adj(m, sizeof(struct txfcb));
2058 ifp->if_opackets++;
2059 ifp->if_obytes += m->m_pkthdr.len;
2060 if (m->m_flags & M_MCAST)
2061 ifp->if_omcasts++;
2062 if (txbd_flags & TXBD_ERRORS)
2063 ifp->if_oerrors++;
2064 m_freem(m);
2065 #ifdef ETSEC_DEBUG
2066 txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
2067 #endif
2068 } else {
2069 #ifdef ETSEC_DEBUG
2070 KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
2071 #endif
2072 }
2073
2074 /*
2075		 * We own this descriptor again. Clear all flags except wrap.
2076 */
2077 txfree++;
2078 //consumer->txbd_flags = txbd_flags & TXBD_W;
2079
2080 /*
2081 * Wrap at the last entry!
2082 */
2083 if (txbd_flags & TXBD_W) {
2084 KASSERT(consumer + 1 == txq->txq_last);
2085 consumer = txq->txq_first;
2086 } else {
2087 consumer++;
2088 KASSERT(consumer < txq->txq_last);
2089 }
2090 }
2091 }
2092
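/*
 * Discard every packet still associated with the transmit queue, including
 * a deferred txq_next packet.  Only legal while the transmitter is disabled
 * (see the MACCFG1_TX_EN assertion below).
 */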
2093 static void
2094 pq3etsec_txq_purge(
2095 struct pq3etsec_softc *sc,
2096 struct pq3etsec_txqueue *txq)
2097 {
2098 struct mbuf *m;
2099 KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);
2100
2101 for (;;) {
2102 IF_DEQUEUE(&txq->txq_mbufs, m);
2103 if (m == NULL)
2104 break;
2105 pq3etsec_txq_map_unload(sc, txq, m);
2106 m_freem(m);
2107 }
2108 if ((m = txq->txq_next) != NULL) {
2109 txq->txq_next = NULL;
2110 pq3etsec_txq_map_unload(sc, txq, m);
2111 m_freem(m);
2112 }
2113 #ifdef ETSEC_DEBUG
2114 memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
2115 #endif
2116 }
2117
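/*
 * Return the transmit ring to its initial state: all descriptors owned by
 * software, producer and consumer indexes back at the start, and the
 * controller's transmit base pointed at the first descriptor.
 */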
2118 static void
2119 pq3etsec_txq_reset(
2120 struct pq3etsec_softc *sc,
2121 struct pq3etsec_txqueue *txq)
2122 {
2123 /*
2124	 * Sync all the descriptors.
2125 */
2126 pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
2127 txq->txq_last - txq->txq_first);
2128
2129 /*
2130 * Make sure we own all descriptors in the ring.
2131 */
2132 volatile struct txbd *txbd;
2133 for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
2134 txbd->txbd_flags = 0;
2135 }
2136
2137 /*
2138 * Last descriptor has the wrap flag.
2139 */
2140 txbd->txbd_flags = TXBD_W;
2141
2142 /*
2143	 * Reset the producer and consumer indexes.
2144 */
2145 txq->txq_consumer = txq->txq_first;
2146 txq->txq_producer = txq->txq_first;
2147 txq->txq_free = txq->txq_last - txq->txq_first - 1;
2148 txq->txq_threshold = txq->txq_free / 2;
2149 txq->txq_lastintr = 0;
2150
2151 /*
2152 * What do we want to get interrupted on?
2153 */
2154 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE;
2155
2156 /*
2157	 * Restart the transmit at the first descriptor.
2158 */
2159 etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
2160 }
2161
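/*
 * if_start handler: defer the actual transmit work to the soft interrupt.
 */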
2162 static void
2163 pq3etsec_ifstart(struct ifnet *ifp)
2164 {
2165 struct pq3etsec_softc * const sc = ifp->if_softc;
2166
2167 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2168 softint_schedule(sc->sc_soft_ih);
2169 }
2170
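/*
 * Handle transmit errors flagged by the error interrupt: reclaim what we
 * can, restart a halted transmit queue, and try to queue more work.
 */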
2171 static void
2172 pq3etsec_tx_error(
2173 struct pq3etsec_softc * const sc)
2174 {
2175 struct pq3etsec_txqueue * const txq = &sc->sc_txq;
2176
2177 pq3etsec_txq_consume(sc, txq);
2178
2179 if (pq3etsec_txq_fillable_p(sc, txq))
2180 sc->sc_if.if_flags &= ~IFF_OACTIVE;
2181 if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) {
2182 } else if (sc->sc_txerrors & IEVENT_EBERR) {
2183 }
2184
2185 if (pq3etsec_txq_active_p(sc, txq))
2186 etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
2187 if (!pq3etsec_txq_enqueue(sc, txq)) {
2188 sc->sc_ev_tx_stall.ev_count++;
2189 sc->sc_if.if_flags |= IFF_OACTIVE;
2190 }
2191
2192 sc->sc_txerrors = 0;
2193 }
2194
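/*
 * Transmit interrupt: acknowledge TXF/TXB, mask them, and let the soft
 * interrupt do the descriptor processing.
 */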
2195 int
2196 pq3etsec_tx_intr(void *arg)
2197 {
2198 struct pq3etsec_softc * const sc = arg;
2199
2200 sc->sc_ev_tx_intr.ev_count++;
2201
2202 uint32_t ievent = etsec_read(sc, IEVENT);
2203 ievent &= IEVENT_TXF|IEVENT_TXB;
2204 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2205
2206 #if 0
2207 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2208 __func__, ievent, etsec_read(sc, IMASK));
2209 #endif
2210
2211 if (ievent == 0)
2212 return 0;
2213
2214 sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB);
2215 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2216 etsec_write(sc, IMASK, sc->sc_imask);
2217 softint_schedule(sc->sc_soft_ih);
2218 return 1;
2219 }
2220
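/*
 * Receive interrupt: acknowledge RXF/RXB, mask them, and let the soft
 * interrupt do the descriptor processing.
 */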
2221 int
2222 pq3etsec_rx_intr(void *arg)
2223 {
2224 struct pq3etsec_softc * const sc = arg;
2225
2226 sc->sc_ev_rx_intr.ev_count++;
2227
2228 uint32_t ievent = etsec_read(sc, IEVENT);
2229 ievent &= IEVENT_RXF|IEVENT_RXB;
2230 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2231 if (ievent == 0)
2232 return 0;
2233
2234 #if 0
2235 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
2236 #endif
2237
2238 sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB);
2239 atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
2240 etsec_write(sc, IMASK, sc->sc_imask);
2241 softint_schedule(sc->sc_soft_ih);
2242 return 1;
2243 }
2244
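/*
 * Error and miscellaneous interrupt: loop acknowledging events, waking up
 * threads sleeping on graceful stop or MII management completion, and
 * recording the conditions the soft interrupt needs to deal with.
 */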
2245 int
2246 pq3etsec_error_intr(void *arg)
2247 {
2248 struct pq3etsec_softc * const sc = arg;
2249
2250 sc->sc_ev_error_intr.ev_count++;
2251
2252 for (int rv = 0, soft_flags = 0;; rv = 1) {
2253 uint32_t ievent = etsec_read(sc, IEVENT);
2254 ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB);
2255 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2256 if (ievent == 0) {
2257 if (soft_flags) {
2258 atomic_or_uint(&sc->sc_soft_flags, soft_flags);
2259 softint_schedule(sc->sc_soft_ih);
2260 }
2261 return rv;
2262 }
2263 #if 0
2264 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2265 __func__, ievent, etsec_read(sc, IMASK));
2266 #endif
2267
2268 if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) {
2269 sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC);
2270 etsec_write(sc, IMASK, sc->sc_imask);
2271 wakeup(sc);
2272 }
2273 if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) {
2274 sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR);
2275 etsec_write(sc, IMASK, sc->sc_imask);
2276 wakeup(&sc->sc_mii);
2277 }
2278 if (ievent & IEVENT_BSY) {
2279 soft_flags |= SOFT_RXBSY;
2280 sc->sc_imask &= ~IEVENT_BSY;
2281 etsec_write(sc, IMASK, sc->sc_imask);
2282 }
2283 if (ievent & IEVENT_TXE) {
2284 soft_flags |= SOFT_TXERROR;
2285 sc->sc_imask &= ~IEVENT_TXE;
2286 sc->sc_txerrors |= ievent;
2287 }
2288 if (ievent & IEVENT_TXC) {
2289 sc->sc_ev_tx_pause.ev_count++;
2290 }
2291 if (ievent & IEVENT_RXC) {
2292 sc->sc_ev_rx_pause.ev_count++;
2293 }
2294 if (ievent & IEVENT_DPE) {
2295 soft_flags |= SOFT_RESET;
2296 sc->sc_imask &= ~IEVENT_DPE;
2297 etsec_write(sc, IMASK, sc->sc_imask);
2298 }
2299 }
2300 }
2301
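/*
 * Soft interrupt: under sc_lock, perform the work flagged by the hard
 * interrupt handlers (reinitialization, receive-busy recovery, transmit
 * completion and refill, receive processing, transmit error handling),
 * then re-enable the interrupt sources that were masked.
 */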
2302 void
2303 pq3etsec_soft_intr(void *arg)
2304 {
2305 struct pq3etsec_softc * const sc = arg;
2306 struct ifnet * const ifp = &sc->sc_if;
2307
2308 mutex_enter(sc->sc_lock);
2309
2310 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
2311
2312 sc->sc_ev_soft_intr.ev_count++;
2313
2314 if (soft_flags & SOFT_RESET) {
2315 int s = splnet();
2316 pq3etsec_ifinit(ifp);
2317 splx(s);
2318 soft_flags = 0;
2319 }
2320
2321 if (soft_flags & SOFT_RXBSY) {
2322 struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
2323 size_t threshold = 5 * rxq->rxq_threshold / 4;
2324 if (threshold >= rxq->rxq_last - rxq->rxq_first) {
2325 threshold = rxq->rxq_last - rxq->rxq_first - 1;
2326 } else {
2327 sc->sc_imask |= IEVENT_BSY;
2328 }
2329 aprint_normal_dev(sc->sc_dev,
2330 "increasing receive buffers from %zu to %zu\n",
2331 rxq->rxq_threshold, threshold);
2332 rxq->rxq_threshold = threshold;
2333 }
2334
2335 if ((soft_flags & SOFT_TXINTR)
2336 || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
2337 /*
2338 * Let's do what we came here for. Consume transmitted
2339		 * packets off the transmit ring.
2340 */
2341 if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
2342 || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
2343 sc->sc_ev_tx_stall.ev_count++;
2344 ifp->if_flags |= IFF_OACTIVE;
2345 } else {
2346 ifp->if_flags &= ~IFF_OACTIVE;
2347 }
2348 sc->sc_imask |= IEVENT_TXF;
2349 }
2350
2351 if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) {
2352 /*
2353		 * Let's consume the received packets.
2354 */
2355 pq3etsec_rxq_consume(sc, &sc->sc_rxq);
2356 sc->sc_imask |= IEVENT_RXF;
2357 }
2358
2359 if (soft_flags & SOFT_TXERROR) {
2360 pq3etsec_tx_error(sc);
2361 sc->sc_imask |= IEVENT_TXE;
2362 }
2363
2364 if (ifp->if_flags & IFF_RUNNING) {
2365 pq3etsec_rxq_produce(sc, &sc->sc_rxq);
2366 etsec_write(sc, IMASK, sc->sc_imask);
2367 } else {
2368 KASSERT((soft_flags & SOFT_RXBSY) == 0);
2369 }
2370
2371 mutex_exit(sc->sc_lock);
2372 }
2373
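/*
 * Once-a-second callout: tick the MII, kick the soft interrupt if a reset
 * has been requested, and reschedule ourselves.
 */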
2374 static void
2375 pq3etsec_mii_tick(void *arg)
2376 {
2377 struct pq3etsec_softc * const sc = arg;
2378 mutex_enter(sc->sc_lock);
2379 callout_ack(&sc->sc_mii_callout);
2380 sc->sc_ev_mii_ticks.ev_count++;
2381 #ifdef DEBUG
2382 uint64_t now = mftb();
2383 if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
2384 aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
2385 __func__, now - sc->sc_mii_last_tick);
2386 callout_stop(&sc->sc_mii_callout);
2387 }
2388 #endif
2389 mii_tick(&sc->sc_mii);
2390 int s = splnet();
2391 if (sc->sc_soft_flags & SOFT_RESET)
2392 softint_schedule(sc->sc_soft_ih);
2393 splx(s);
2394 callout_schedule(&sc->sc_mii_callout, hz);
2395 #ifdef DEBUG
2396 sc->sc_mii_last_tick = now;
2397 #endif
2398 mutex_exit(sc->sc_lock);
2399 }
2400