1 /* $NetBSD: pq3etsec.c,v 1.23 2015/01/16 07:48:16 nonaka Exp $ */
2 /*-
3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
8 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
9 *
10 * This material is based upon work supported by the Defense Advanced Research
11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
12 * Contract No. N66001-09-C-2073.
13 * Approved for Public Release, Distribution Unlimited
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "opt_inet.h"
38 #include "opt_mpc85xx.h"
39
40 #include <sys/cdefs.h>
41
42 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.23 2015/01/16 07:48:16 nonaka Exp $");
43
44 #include <sys/param.h>
45 #include <sys/cpu.h>
46 #include <sys/device.h>
47 #include <sys/mbuf.h>
48 #include <sys/ioctl.h>
49 #include <sys/intr.h>
50 #include <sys/bus.h>
51 #include <sys/kernel.h>
52 #include <sys/kmem.h>
53 #include <sys/proc.h>
54 #include <sys/atomic.h>
55 #include <sys/callout.h>
56
57 #include <net/if.h>
58 #include <net/if_dl.h>
59 #include <net/if_ether.h>
60 #include <net/if_media.h>
61
62 #include <dev/mii/miivar.h>
63
64 #include <net/bpf.h>
65
66 #ifdef INET
67 #include <netinet/in.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/ip.h>
70 #include <netinet/in_offload.h>
71 #endif /* INET */
72 #ifdef INET6
73 #include <netinet6/in6.h>
74 #include <netinet/ip6.h>
75 #endif
76 #include <netinet6/in6_offload.h>
77
78
79 #include <powerpc/spr.h>
80 #include <powerpc/booke/spr.h>
81
82 #include <powerpc/booke/cpuvar.h>
83 #include <powerpc/booke/e500var.h>
84 #include <powerpc/booke/e500reg.h>
85 #include <powerpc/booke/etsecreg.h>
86
87 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */
88
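/*
 * Ring/buffer sizing: a transmit packet may occupy up to ETSEC_NTXSEGS
 * descriptors, receive buffers are single-segment mbuf clusters, and the
 * rx ring is kept populated with at least ETSEC_MINRXMBUFS buffers
 * (rxq_threshold).
 */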
89 #define ETSEC_MAXTXMBUFS 30
90 #define ETSEC_NTXSEGS 30
91 #define ETSEC_MAXRXMBUFS 511
92 #define ETSEC_MINRXMBUFS 32
93 #define ETSEC_NRXSEGS 1
94
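/*
 * Map the per-protocol ifnet checksum capabilities onto the RCTRL/TCTRL
 * IP and TCP/UDP checksum-enable bits they correspond to.
 */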
95 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx
96 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx\
97 |IFCAP_CSUM_UDPv4_Rx\
98 |IFCAP_CSUM_TCPv6_Rx\
99 |IFCAP_CSUM_UDPv6_Rx)
100
101 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx
102 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx\
103 |IFCAP_CSUM_UDPv4_Tx\
104 |IFCAP_CSUM_TCPv6_Tx\
105 |IFCAP_CSUM_UDPv6_Tx)
106
107 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN\
108 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN)
109
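/*
 * Local groupings of the mbuf M_CSUM_* flags, used when deciding which
 * TOE bits to set in the transmit frame control block
 * (see pq3etsec_tx_offload()).
 */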
110 #define M_CSUM_IP (M_CSUM_CIP|M_CSUM_CTU)
111 #define M_CSUM_IP6 (M_CSUM_TCPv6|M_CSUM_UDPv6)
112 #define M_CSUM_TUP (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
113 #define M_CSUM_UDP (M_CSUM_UDPv4|M_CSUM_UDPv6)
114 #define M_CSUM_IP4 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4)
115 #define M_CSUM_CIP (M_CSUM_IPv4)
116 #define M_CSUM_CTU (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
117
118 struct pq3etsec_txqueue {
119 bus_dmamap_t txq_descmap;
120 volatile struct txbd *txq_consumer;
121 volatile struct txbd *txq_producer;
122 volatile struct txbd *txq_first;
123 volatile struct txbd *txq_last;
124 struct ifqueue txq_mbufs;
125 struct mbuf *txq_next;
126 #ifdef ETSEC_DEBUG
127 struct mbuf *txq_lmbufs[512];
128 #endif
129 uint32_t txq_qmask;
130 uint32_t txq_free;
131 uint32_t txq_threshold;
132 uint32_t txq_lastintr;
133 bus_size_t txq_reg_tbase;
134 bus_dma_segment_t txq_descmap_seg;
135 };
136
137 struct pq3etsec_rxqueue {
138 bus_dmamap_t rxq_descmap;
139 volatile struct rxbd *rxq_consumer;
140 volatile struct rxbd *rxq_producer;
141 volatile struct rxbd *rxq_first;
142 volatile struct rxbd *rxq_last;
143 struct mbuf *rxq_mhead;
144 struct mbuf **rxq_mtail;
145 struct mbuf *rxq_mconsumer;
146 #ifdef ETSEC_DEBUG
147 struct mbuf *rxq_mbufs[512];
148 #endif
149 uint32_t rxq_qmask;
150 uint32_t rxq_inuse;
151 uint32_t rxq_threshold;
152 bus_size_t rxq_reg_rbase;
153 bus_size_t rxq_reg_rbptr;
154 bus_dma_segment_t rxq_descmap_seg;
155 };
156
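/*
 * A simple LIFO cache of preallocated DMA maps; dmc_maps[] is sized to
 * dmc_maxmaps entries when the cache is created.
 */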
157 struct pq3etsec_mapcache {
158 u_int dmc_nmaps;
159 u_int dmc_maxseg;
160 u_int dmc_maxmaps;
161 u_int dmc_maxmapsize;
162 bus_dmamap_t dmc_maps[0];
163 };
164
165 struct pq3etsec_softc {
166 device_t sc_dev;
167 device_t sc_mdio_dev;
168 struct ethercom sc_ec;
169 #define sc_if sc_ec.ec_if
170 struct mii_data sc_mii;
171 bus_space_tag_t sc_bst;
172 bus_space_handle_t sc_bsh;
173 bus_space_handle_t sc_mdio_bsh;
174 bus_dma_tag_t sc_dmat;
175 int sc_phy_addr;
176 prop_dictionary_t sc_intrmap;
177 uint32_t sc_intrmask;
178
179 uint32_t sc_soft_flags;
180 #define SOFT_RESET 0x0001
181 #define SOFT_RXINTR 0x0010
182 #define SOFT_RXBSY 0x0020
183 #define SOFT_TXINTR 0x0100
184 #define SOFT_TXERROR 0x0200
185
186 struct pq3etsec_txqueue sc_txq;
187 struct pq3etsec_rxqueue sc_rxq;
188 uint32_t sc_txerrors;
189 uint32_t sc_rxerrors;
190
191 size_t sc_rx_adjlen;
192
193 /*
194 * Copies of various ETSEC registers.
195 */
196 uint32_t sc_imask;
197 uint32_t sc_maccfg1;
198 uint32_t sc_maccfg2;
199 uint32_t sc_maxfrm;
200 uint32_t sc_ecntrl;
201 uint32_t sc_dmactrl;
202 uint32_t sc_macstnaddr1;
203 uint32_t sc_macstnaddr2;
204 uint32_t sc_tctrl;
205 uint32_t sc_rctrl;
206 uint32_t sc_gaddr[16];
207 uint64_t sc_macaddrs[15];
208
209 void *sc_tx_ih;
210 void *sc_rx_ih;
211 void *sc_error_ih;
212 void *sc_soft_ih;
213
214 kmutex_t *sc_lock;
215 kmutex_t *sc_hwlock;
216
217 struct evcnt sc_ev_tx_stall;
218 struct evcnt sc_ev_tx_intr;
219 struct evcnt sc_ev_rx_stall;
220 struct evcnt sc_ev_rx_intr;
221 struct evcnt sc_ev_error_intr;
222 struct evcnt sc_ev_soft_intr;
223 struct evcnt sc_ev_tx_pause;
224 struct evcnt sc_ev_rx_pause;
225 struct evcnt sc_ev_mii_ticks;
226
227 struct callout sc_mii_callout;
228 uint64_t sc_mii_last_tick;
229
230 struct ifqueue sc_rx_bufcache;
231 struct pq3etsec_mapcache *sc_rx_mapcache;
232 struct pq3etsec_mapcache *sc_tx_mapcache;
233 };
234
235 struct pq3mdio_softc {
236 device_t mdio_dev;
237
238 kmutex_t *mdio_lock;
239
240 bus_space_tag_t mdio_bst;
241 bus_space_handle_t mdio_bsh;
242 };
243
244 static int pq3etsec_match(device_t, cfdata_t, void *);
245 static void pq3etsec_attach(device_t, device_t, void *);
246
247 static int pq3mdio_match(device_t, cfdata_t, void *);
248 static void pq3mdio_attach(device_t, device_t, void *);
249
250 static void pq3etsec_ifstart(struct ifnet *);
251 static void pq3etsec_ifwatchdog(struct ifnet *);
252 static int pq3etsec_ifinit(struct ifnet *);
253 static void pq3etsec_ifstop(struct ifnet *, int);
254 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *);
255
256 static int pq3etsec_mapcache_create(struct pq3etsec_softc *,
257 struct pq3etsec_mapcache **, size_t, size_t, size_t);
258 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *,
259 struct pq3etsec_mapcache *);
260 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *,
261 struct pq3etsec_mapcache *);
262 static void pq3etsec_mapcache_put(struct pq3etsec_softc *,
263 struct pq3etsec_mapcache *, bus_dmamap_t);
264
265 static int pq3etsec_txq_attach(struct pq3etsec_softc *,
266 struct pq3etsec_txqueue *, u_int);
267 static void pq3etsec_txq_purge(struct pq3etsec_softc *,
268 struct pq3etsec_txqueue *);
269 static void pq3etsec_txq_reset(struct pq3etsec_softc *,
270 struct pq3etsec_txqueue *);
271 static bool pq3etsec_txq_consume(struct pq3etsec_softc *,
272 struct pq3etsec_txqueue *);
273 static bool pq3etsec_txq_produce(struct pq3etsec_softc *,
274 struct pq3etsec_txqueue *, struct mbuf *m);
275 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *,
276 struct pq3etsec_txqueue *);
277
278 static int pq3etsec_rxq_attach(struct pq3etsec_softc *,
279 struct pq3etsec_rxqueue *, u_int);
280 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *,
281 struct pq3etsec_rxqueue *);
282 static void pq3etsec_rxq_purge(struct pq3etsec_softc *,
283 struct pq3etsec_rxqueue *, bool);
284 static void pq3etsec_rxq_reset(struct pq3etsec_softc *,
285 struct pq3etsec_rxqueue *);
286
287 static void pq3etsec_mc_setup(struct pq3etsec_softc *);
288
289 static void pq3etsec_mii_tick(void *);
290 static int pq3etsec_rx_intr(void *);
291 static int pq3etsec_tx_intr(void *);
292 static int pq3etsec_error_intr(void *);
293 static void pq3etsec_soft_intr(void *);
294
295 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc),
296 pq3etsec_match, pq3etsec_attach, NULL, NULL);
297
298 CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc),
299 pq3mdio_match, pq3mdio_attach, NULL, NULL);
300
301 CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc),
302 pq3mdio_match, pq3mdio_attach, NULL, NULL);
303
304 static inline uint32_t
305 etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off)
306 {
307 return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off);
308 }
309
310 static inline void
311 etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data)
312 {
313 bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data);
314 }
315
316 static inline uint32_t
317 etsec_read(struct pq3etsec_softc *sc, bus_size_t off)
318 {
319 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
320 }
321
322 static int
323 pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
324 {
325 return strcmp(cf->cf_name, "mdio") == 0;
326 }
327
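/*
 * On the P1025/P1016 the MDIO device attaches under cpunode; on the other
 * parts it attaches under the tsec itself.  The ~0x80000 mask below is
 * assumed to strip the security (E) variant bit from the SVR so both
 * variants of a part match.
 */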
328 static int
329 pq3mdio_match(device_t parent, cfdata_t cf, void *aux)
330 {
331 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
332 const bool p1025_p = (svr == (SVR_P1025v1 >> 16)
333 || svr == (SVR_P1016v1 >> 16));
334
335 if (device_is_a(parent, "cpunode")) {
336 if (!p1025_p
337 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
338 return 0;
339
340 return 1;
341 }
342
343 if (device_is_a(parent, "tsec")) {
344 if (p1025_p
345 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
346 return 0;
347
348 return 1;
349 }
350
351 return 0;
352 }
353
354 static void
355 pq3mdio_attach(device_t parent, device_t self, void *aux)
356 {
357 struct pq3mdio_softc * const mdio = device_private(self);
358 struct cpunode_attach_args * const cna = aux;
359 struct cpunode_locators * const cnl = &cna->cna_locs;
360
361 mdio->mdio_dev = self;
362 mdio->mdio_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
363
364 if (device_is_a(parent, "cpunode")) {
365 struct cpunode_softc * const psc = device_private(parent);
366 psc->sc_children |= cna->cna_childmask;
367
368 mdio->mdio_bst = cna->cna_memt;
369 if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr,
370 cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) {
371 aprint_error(": error mapping registers @ %#x\n",
372 cnl->cnl_addr);
373 return;
374 }
375 } else {
376 struct pq3etsec_softc * const sc = device_private(parent);
377
378 KASSERT(device_is_a(parent, "tsec"));
379 KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE
380 || cnl->cnl_addr == ETSEC2_BASE
381 || cnl->cnl_addr == ETSEC3_BASE
382 || cnl->cnl_addr == ETSEC4_BASE,
383 "unknown tsec addr %x", cnl->cnl_addr);
384
385 mdio->mdio_bst = sc->sc_bst;
386 mdio->mdio_bsh = sc->sc_bsh;
387 }
388
389 aprint_normal("\n");
390 }
391
392 static int
393 pq3mdio_mii_readreg(device_t self, int phy, int reg)
394 {
395 struct pq3mdio_softc * const mdio = device_private(self);
396 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);
397
398 mutex_enter(mdio->mdio_lock);
399
400 etsec_mdio_write(mdio, MIIMADD,
401 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
402
403 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */
404 etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ);
405
406 while (etsec_mdio_read(mdio, MIIMIND) != 0) {
407 delay(1);
408 }
409 int data = etsec_mdio_read(mdio, MIIMSTAT);
410
411 if (miimcom == MIIMCOM_SCAN)
412 etsec_mdio_write(mdio, MIIMCOM, miimcom);
413
414 #if 0
415 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
416 __func__, phy, reg, data);
417 #endif
418 mutex_exit(mdio->mdio_lock);
419 return data;
420 }
421
422 static void
423 pq3mdio_mii_writereg(device_t self, int phy, int reg, int data)
424 {
425 struct pq3mdio_softc * const mdio = device_private(self);
426 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);
427
428 #if 0
429 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
430 __func__, phy, reg, data);
431 #endif
432
433 mutex_enter(mdio->mdio_lock);
434
435 etsec_mdio_write(mdio, MIIMADD,
436 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
437 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */
438 etsec_mdio_write(mdio, MIIMCON, data);
439
440 int timo = 1000; /* 1ms */
441 while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) {
442 delay(1);
443 }
444
445 if (miimcom == MIIMCOM_SCAN)
446 etsec_mdio_write(mdio, MIIMCOM, miimcom);
447
448 mutex_exit(mdio->mdio_lock);
449 }
450
451 static inline void
452 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data)
453 {
454 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data);
455 }
456
457 static void
458 pq3etsec_mii_statchg(struct ifnet *ifp)
459 {
460 struct pq3etsec_softc * const sc = ifp->if_softc;
461 struct mii_data * const mii = &sc->sc_mii;
462
463 uint32_t maccfg1 = sc->sc_maccfg1;
464 uint32_t maccfg2 = sc->sc_maccfg2;
465 uint32_t ecntrl = sc->sc_ecntrl;
466
467 maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW);
468 maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD);
469
470 if (sc->sc_mii.mii_media_active & IFM_FDX) {
471 maccfg2 |= MACCFG2_FD;
472 }
473
474 /*
475 * Now deal with the flow control bits.
476 */
477 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO
478 && (mii->mii_media_active & IFM_ETH_FMASK)) {
479 if (mii->mii_media_active & IFM_ETH_RXPAUSE)
480 maccfg1 |= MACCFG1_RX_FLOW;
481 if (mii->mii_media_active & IFM_ETH_TXPAUSE)
482 maccfg1 |= MACCFG1_TX_FLOW;
483 }
484
485 /*
486 * Now deal with the speed.
487 */
488 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
489 maccfg2 |= MACCFG2_IFMODE_GMII;
490 } else {
491 maccfg2 |= MACCFG2_IFMODE_MII;
492 ecntrl &= ~ECNTRL_R100M;
493 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
494 ecntrl |= ECNTRL_R100M;
495 }
496 }
497
498 /*
499 * If things are different, re-init things.
500 */
501 if (maccfg1 != sc->sc_maccfg1
502 || maccfg2 != sc->sc_maccfg2
503 || ecntrl != sc->sc_ecntrl) {
504 if (sc->sc_if.if_flags & IFF_RUNNING)
505 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
506 sc->sc_maccfg1 = maccfg1;
507 sc->sc_maccfg2 = maccfg2;
508 sc->sc_ecntrl = ecntrl;
509 }
510 }
511
512 #if 0
513 static void
514 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
515 {
516 struct pq3etsec_softc * const sc = ifp->if_softc;
517
518 mii_pollstat(&sc->sc_mii);
519 ether_mediastatus(ifp, ifmr);
520 ifmr->ifm_status = sc->sc_mii.mii_media_status;
521 ifmr->ifm_active = sc->sc_mii.mii_media_active;
522 }
523
524 static int
525 pq3etsec_mediachange(struct ifnet *ifp)
526 {
527 struct pq3etsec_softc * const sc = ifp->if_softc;
528
529 if ((ifp->if_flags & IFF_UP) == 0)
530 return 0;
531
532 int rv = mii_mediachg(&sc->sc_mii);
533 return (rv == ENXIO) ? 0 : rv;
534 }
535 #endif
536
537 static int
538 pq3etsec_match(device_t parent, cfdata_t cf, void *aux)
539 {
540
541 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
542 return 0;
543
544 return 1;
545 }
546
547 static void
548 pq3etsec_attach(device_t parent, device_t self, void *aux)
549 {
550 struct cpunode_softc * const psc = device_private(parent);
551 struct pq3etsec_softc * const sc = device_private(self);
552 struct cpunode_attach_args * const cna = aux;
553 struct cpunode_locators * const cnl = &cna->cna_locs;
554 cfdata_t cf = device_cfdata(self);
555 int error;
556
557 psc->sc_children |= cna->cna_childmask;
558 sc->sc_dev = self;
559 sc->sc_bst = cna->cna_memt;
560 sc->sc_dmat = &booke_bus_dma_tag;
561
562 /*
563 * Pull out the mdio bus and phy we are supposed to use.
564 */
565 const int mdio = cf->cf_loc[CPUNODECF_MDIO];
566 const int phy = cf->cf_loc[CPUNODECF_PHY];
567 if (mdio != CPUNODECF_MDIO_DEFAULT)
568 aprint_normal(" mdio %d", mdio);
569
570 /*
571 * See if the phy is in the config file...
572 */
573 if (phy != CPUNODECF_PHY_DEFAULT) {
574 sc->sc_phy_addr = phy;
575 } else {
576 		char prop_name[20];
577 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
578 cnl->cnl_instance);
579 sc->sc_phy_addr = board_info_get_number(prop_name);
580 }
581 if (sc->sc_phy_addr != MII_PHY_ANY)
582 aprint_normal(" phy %d", sc->sc_phy_addr);
583
584 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
585 &sc->sc_bsh);
586 if (error) {
587 aprint_error(": error mapping registers: %d\n", error);
588 return;
589 }
590
591 /*
592 	 * Assume firmware has already set the mac address and fetch it
593 * before we reinit it.
594 */
595 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2);
596 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1);
597 sc->sc_rctrl = RCTRL_DEFAULT;
598 sc->sc_ecntrl = etsec_read(sc, ECNTRL);
599 sc->sc_maccfg1 = etsec_read(sc, MACCFG1);
600 sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT;
601
602 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) {
603 size_t len;
604 const uint8_t *mac_addr =
605 board_info_get_data("tsec-mac-addr-base", &len);
606 KASSERT(len == ETHER_ADDR_LEN);
607 sc->sc_macstnaddr2 =
608 (mac_addr[1] << 24)
609 | (mac_addr[0] << 16);
610 sc->sc_macstnaddr1 =
611 ((mac_addr[5] + cnl->cnl_instance - 1) << 24)
612 | (mac_addr[4] << 16)
613 | (mac_addr[3] << 8)
614 | (mac_addr[2] << 0);
615 #if 0
616 aprint_error(": mac-address unknown\n");
617 return;
618 #endif
619 }
620
621 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
622 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
623
624 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE);
625 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc);
626
627 /* Disable interrupts */
628 etsec_write(sc, IMASK, 0);
629
630 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0);
631 if (error) {
632 aprint_error(": failed to init rxq: %d\n", error);
633 return;
634 }
635
636 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
637 if (error) {
638 aprint_error(": failed to init txq: %d\n", error);
639 return;
640 }
641
642 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache,
643 ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS);
644 if (error) {
645 aprint_error(": failed to allocate rx dmamaps: %d\n", error);
646 return;
647 }
648
649 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
650 ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS);
651 if (error) {
652 aprint_error(": failed to allocate tx dmamaps: %d\n", error);
653 return;
654 }
655
656 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP,
657 pq3etsec_tx_intr, sc);
658 if (sc->sc_tx_ih == NULL) {
659 aprint_error(": failed to establish tx interrupt: %d\n",
660 cnl->cnl_intrs[0]);
661 return;
662 }
663
664 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP,
665 pq3etsec_rx_intr, sc);
666 if (sc->sc_rx_ih == NULL) {
667 aprint_error(": failed to establish rx interrupt: %d\n",
668 cnl->cnl_intrs[1]);
669 return;
670 }
671
672 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP,
673 pq3etsec_error_intr, sc);
674 if (sc->sc_error_ih == NULL) {
675 aprint_error(": failed to establish error interrupt: %d\n",
676 cnl->cnl_intrs[2]);
677 return;
678 }
679
680 sc->sc_soft_ih = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
681 pq3etsec_soft_intr, sc);
682 if (sc->sc_soft_ih == NULL) {
683 aprint_error(": failed to establish soft interrupt\n");
684 return;
685 }
686
687 	/*
688 	 * If no mdio locator was given, attach our own mdio child; otherwise use the mdio device named in the config file.
689 	 */
690 if (mdio == CPUNODECF_MDIO_DEFAULT) {
691 aprint_normal("\n");
692 cfdata_t mdio_cf = config_search_ia(pq3mdio_find, self, NULL, cna);
693 if (mdio_cf != NULL) {
694 sc->sc_mdio_dev = config_attach(self, mdio_cf, cna, NULL);
695 }
696 } else {
697 sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio);
698 if (sc->sc_mdio_dev == NULL) {
699 aprint_error(": failed to locate mdio device\n");
700 return;
701 }
702 aprint_normal("\n");
703 }
704
705 etsec_write(sc, ATTR, ATTR_DEFAULT);
706 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT);
707
708 char enaddr[ETHER_ADDR_LEN] = {
709 [0] = sc->sc_macstnaddr2 >> 16,
710 [1] = sc->sc_macstnaddr2 >> 24,
711 [2] = sc->sc_macstnaddr1 >> 0,
712 [3] = sc->sc_macstnaddr1 >> 8,
713 [4] = sc->sc_macstnaddr1 >> 16,
714 [5] = sc->sc_macstnaddr1 >> 24,
715 };
716 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
717 ether_sprintf(enaddr));
718
719 const char * const xname = device_xname(sc->sc_dev);
720 struct ethercom * const ec = &sc->sc_ec;
721 struct ifnet * const ifp = &ec->ec_if;
722
723 ec->ec_mii = &sc->sc_mii;
724
725 sc->sc_mii.mii_ifp = ifp;
726 sc->sc_mii.mii_readreg = pq3mdio_mii_readreg;
727 sc->sc_mii.mii_writereg = pq3mdio_mii_writereg;
728 sc->sc_mii.mii_statchg = pq3etsec_mii_statchg;
729
730 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
731 ether_mediastatus);
732
733 if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) {
734 mii_attach(sc->sc_mdio_dev, &sc->sc_mii, 0xffffffff,
735 sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
736
737 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
738 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
739 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
740 } else {
741 callout_schedule(&sc->sc_mii_callout, hz);
742 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
743 }
744 } else {
745 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
746 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
747 }
748
749 ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
750 | ETHERCAP_JUMBO_MTU;
751
752 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
753 ifp->if_softc = sc;
754 ifp->if_capabilities = IFCAP_ETSEC;
755 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
756 ifp->if_ioctl = pq3etsec_ifioctl;
757 ifp->if_start = pq3etsec_ifstart;
758 ifp->if_watchdog = pq3etsec_ifwatchdog;
759 ifp->if_init = pq3etsec_ifinit;
760 ifp->if_stop = pq3etsec_ifstop;
761 IFQ_SET_READY(&ifp->if_snd);
762
763 pq3etsec_ifstop(ifp, true);
764
765 /*
766 * Attach the interface.
767 */
768 if_initialize(ifp);
769 ether_ifattach(ifp, enaddr);
770 if_register(ifp);
771
772 evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
773 NULL, xname, "rx stall");
774 evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
775 NULL, xname, "tx stall");
776 evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
777 NULL, xname, "tx intr");
778 evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
779 NULL, xname, "rx intr");
780 evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
781 NULL, xname, "error intr");
782 evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
783 NULL, xname, "soft intr");
784 evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
785 NULL, xname, "tx pause");
786 evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
787 NULL, xname, "rx pause");
788 evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
789 NULL, xname, "mii ticks");
790 }
791
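/*
 * Pack a 6-byte Ethernet address into the byte-reversed 64-bit layout
 * used by the MACSTNADDR1/2 and MACnADDR1/2 register pairs (octet 5
 * ends up in the most significant byte).
 */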
792 static uint64_t
793 pq3etsec_macaddr_create(const uint8_t *lladdr)
794 {
795 uint64_t macaddr = 0;
796
797 lladdr += ETHER_ADDR_LEN;
798 for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
799 macaddr = (macaddr << 8) | *--lladdr;
800 }
801 return macaddr << 16;
802 }
803
804 static int
805 pq3etsec_ifinit(struct ifnet *ifp)
806 {
807 struct pq3etsec_softc * const sc = ifp->if_softc;
808 int error = 0;
809
810 sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
811 if (ifp->if_mtu > ETHERMTU_JUMBO)
812 return error;
813
814 KASSERT(ifp->if_flags & IFF_UP);
815
816 /*
817 	 * Stop the interface (steps 1 to 4 in the Soft Reset and
818 	 * Reconfiguration Procedure).
819 */
820 pq3etsec_ifstop(ifp, 0);
821
822 /*
823 * If our frame size has changed (or it's our first time through)
824 * destroy the existing transmit mapcache.
825 */
826 if (sc->sc_tx_mapcache != NULL
827 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
828 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
829 sc->sc_tx_mapcache = NULL;
830 }
831
832 if (sc->sc_tx_mapcache == NULL) {
833 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
834 ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS);
835 if (error)
836 return error;
837 }
838
839 sc->sc_ev_mii_ticks.ev_count++;
840 mii_tick(&sc->sc_mii);
841
842 if (ifp->if_flags & IFF_PROMISC) {
843 sc->sc_rctrl |= RCTRL_PROM;
844 } else {
845 sc->sc_rctrl &= ~RCTRL_PROM;
846 }
847
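	/*
	 * RCTRL_PRSDEP selects how deep the receive parser goes (L2, L3 or
	 * L4); it must reach at least as deep as any receive offload we
	 * enable below.
	 */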
848 uint32_t rctrl_prsdep = 0;
849 sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP);
850 if (VLAN_ATTACHED(&sc->sc_ec)) {
851 sc->sc_rctrl |= RCTRL_VLEX;
852 rctrl_prsdep = RCTRL_PRSDEP_L2;
853 }
854 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) {
855 sc->sc_rctrl |= RCTRL_IPCSEN;
856 rctrl_prsdep = RCTRL_PRSDEP_L3;
857 }
858 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) {
859 sc->sc_rctrl |= RCTRL_TUCSEN;
860 rctrl_prsdep = RCTRL_PRSDEP_L4;
861 }
862 sc->sc_rctrl |= rctrl_prsdep;
863 #if 0
864 if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP))
865 aprint_normal_dev(sc->sc_dev,
866 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n",
867 sc->sc_rctrl,
868 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN),
869 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN),
870 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX),
871 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP));
872 #endif
873
874 sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS);
875 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */
876 sc->sc_tctrl |= TCTRL_VLINS;
877 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN)
878 sc->sc_tctrl |= TCTRL_IPCSEN;
879 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN)
880 sc->sc_tctrl |= TCTRL_TUCSEN;
881 #if 0
882 if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS))
883 aprint_normal_dev(sc->sc_dev,
884 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n",
885 sc->sc_tctrl,
886 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN),
887 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN),
888 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS));
889 #endif
890
891 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN);
892
893 const uint64_t macstnaddr =
894 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl));
895
896 sc->sc_imask = IEVENT_DPE;
897
898 	/* 5. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */
899 pq3etsec_rxq_reset(sc, &sc->sc_rxq);
900 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */
901
902 	/* 6. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */
903 pq3etsec_txq_reset(sc, &sc->sc_txq);
904
905 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */
906 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2);
907 etsec_write(sc, MAXFRM, sc->sc_maxfrm);
908 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32));
909 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0));
910 etsec_write(sc, MACCFG1, sc->sc_maccfg1);
911 etsec_write(sc, MACCFG2, sc->sc_maccfg2);
912 etsec_write(sc, ECNTRL, sc->sc_ecntrl);
913
914 /* 8. Setup group address hash table (GADDR0-GADDR15) */
915 pq3etsec_mc_setup(sc);
916
917 /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */
918 etsec_write(sc, MRBLR, MCLBYTES);
919
920 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */
921 sc->sc_dmactrl |= DMACTRL_DEFAULT;
922 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
923
924 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */
925 etsec_write(sc, TQUEUE, TQUEUE_EN0);
926 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC;
927
928 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */
929
930 /* 12. Enable receive queues in RQUEUE, */
931 etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0);
932 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC;
933
934 /* and optionally set TOE functionality in RCTRL. */
935 etsec_write(sc, RCTRL, sc->sc_rctrl);
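	/*
	 * sc_rx_adjlen is the number of bytes m_adj() strips from the front
	 * of each received packet: the configured packet alignment padding
	 * plus the receive frame control block when parsing is enabled.
	 */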
936 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL);
937 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF)
938 sc->sc_rx_adjlen += sizeof(struct rxfcb);
939
940 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */
941 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF);
942
943 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/
944 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF);
945
946 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */
947 sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS);
948 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
949
950 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */
951 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
952 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
953
954 sc->sc_soft_flags = 0;
955
956 etsec_write(sc, IMASK, sc->sc_imask);
957
958 ifp->if_flags |= IFF_RUNNING;
959
960 return error;
961 }
962
963 static void
964 pq3etsec_ifstop(struct ifnet *ifp, int disable)
965 {
966 struct pq3etsec_softc * const sc = ifp->if_softc;
967
968 KASSERT(!cpu_intr_p());
969 const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC;
970 /*
971 * Clear the GTSC and GRSC from the interrupt mask until
972 * we are ready for them. Then clear them from IEVENT,
973 * request the graceful shutdown, and then enable the
974 * GTSC and GRSC bits in the mask. This should cause the
975 * error interrupt to fire which will issue a wakeup to
976 * allow us to resume.
977 */
978
979 /*
980 * 1. Set GRS/GTS bits in DMACTRL register
981 */
982 sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS;
983 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask);
984 etsec_write(sc, IEVENT, imask_gsc_mask);
985 etsec_write(sc, DMACTRL, sc->sc_dmactrl);
986
987 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) {
988 /*
989 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set
990 */
991 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask);
992
993 u_int timo = 1000;
994 uint32_t ievent = etsec_read(sc, IEVENT);
995 while ((ievent & imask_gsc_mask) != imask_gsc_mask) {
996 if (--timo == 0) {
997 aprint_error_dev(sc->sc_dev,
998 "WARNING: "
999 "request to stop failed (IEVENT=%#x)\n",
1000 ievent);
1001 break;
1002 }
1003 delay(10);
1004 ievent = etsec_read(sc, IEVENT);
1005 }
1006 }
1007
1008 /*
1009 * Now reset the controller.
1010 *
1011 * 3. Set SOFT_RESET bit in MACCFG1 register
1012 * 4. Clear SOFT_RESET bit in MACCFG1 register
1013 */
1014 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
1015 etsec_write(sc, MACCFG1, 0);
1016 etsec_write(sc, IMASK, 0);
1017 etsec_write(sc, IEVENT, ~0);
1018 sc->sc_imask = 0;
1019 ifp->if_flags &= ~IFF_RUNNING;
1020
1021 uint32_t tbipa = etsec_read(sc, TBIPA);
1022 if (tbipa == sc->sc_phy_addr) {
1023 aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
1024 etsec_write(sc, TBIPA, 0x1f);
1025 }
1026 uint32_t miimcfg = etsec_read(sc, MIIMCFG);
1027 etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
1028 etsec_write(sc, MIIMCFG, miimcfg);
1029
1030 /*
1031 	 * Let's consume any remaining transmitted packets. And if we are
1032 * disabling the interface, purge ourselves of any untransmitted
1033 * packets. But don't consume any received packets, just drop them.
1034 * If we aren't disabling the interface, save the mbufs in the
1035 * receive queue for reuse.
1036 */
1037 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable);
1038 pq3etsec_txq_consume(sc, &sc->sc_txq);
1039 if (disable) {
1040 pq3etsec_txq_purge(sc, &sc->sc_txq);
1041 IFQ_PURGE(&ifp->if_snd);
1042 }
1043 }
1044
1045 static void
1046 pq3etsec_ifwatchdog(struct ifnet *ifp)
1047 {
1048 }
1049
1050 static void
1051 pq3etsec_mc_setup(
1052 struct pq3etsec_softc *sc)
1053 {
1054 struct ethercom * const ec = &sc->sc_ec;
1055 struct ifnet * const ifp = &sc->sc_if;
1056 struct ether_multi *enm;
1057 struct ether_multistep step;
1058 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8);
1059 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8);
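	/*
	 * With RCTRL_GHTX the hash spans all 16 registers (IGADDR0-7 and
	 * GADDR0-7, 512 bins); otherwise only GADDR0-7 (256 bins) are used
	 * and gaddr points at the second half of sc_gaddr[].
	 */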
1060
1061 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr));
1062 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
1063
1064 ifp->if_flags &= ~IFF_ALLMULTI;
1065
1066 ETHER_FIRST_MULTI(step, ec, enm);
1067 for (u_int i = 0; enm != NULL; ) {
1068 const char *addr = enm->enm_addrlo;
1069 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
1070 ifp->if_flags |= IFF_ALLMULTI;
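			/* Flood the whole hash table: 64 bytes (16 registers)
			 * when GHTX is set (crc_shift is odd), 32 bytes
			 * (8 registers) otherwise. */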
1071 memset(gaddr, 0xff, 32 << (crc_shift & 1));
1072 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
1073 break;
1074 }
1075 if ((sc->sc_rctrl & RCTRL_EMEN)
1076 && i < __arraycount(sc->sc_macaddrs)) {
1077 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr);
1078 } else {
1079 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
1080 #if 0
1081 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__,
1082 ether_sprintf(addr), crc,
1083 crc >> crc_shift,
1084 crc >> (crc_shift + 5),
1085 (crc >> crc_shift) & 31,
1086 1 << (((crc >> crc_shift) & 31) ^ 31));
1087 #endif
1088 /*
1089 * The documentation doesn't completely follow PowerPC
1090 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01
1091 * is 0x7fa32d9b. By empirical testing, the
1092 * corresponding hash bit is word 3, bit 31 (ppc bit
1093 * order). Since 3 << 31 | 31 is 0x7f, we deduce
1094 * H[0:2] selects the register while H[3:7] selects
1095 * the bit (ppc bit order).
1096 */
1097 crc >>= crc_shift;
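			/* The "^ 31" converts the PowerPC bit number (bit 0
			 * is the MSB) into a left-shift count. */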
1098 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31);
1099 }
1100 ETHER_NEXT_MULTI(step, enm);
1101 }
1102 for (u_int i = 0; i < 8; i++) {
1103 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]);
1104 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]);
1105 #if 0
1106 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8])
1107 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__,
1108 i, IGADDR(i), etsec_read(sc, IGADDR(i)),
1109 i, GADDR(i), etsec_read(sc, GADDR(i)));
1110 #endif
1111 }
1112 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) {
1113 uint64_t macaddr = sc->sc_macaddrs[i];
1114 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32));
1115 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0));
1116 #if 0
1117 if (macaddr)
1118 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__,
1119 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)),
1120 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i)));
1121 #endif
1122 }
1123 }
1124
1125 static int
1126 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
1127 {
1128 struct pq3etsec_softc *sc = ifp->if_softc;
1129 struct ifreq * const ifr = data;
1130 const int s = splnet();
1131 int error;
1132
1133 switch (cmd) {
1134 case SIOCSIFMEDIA:
1135 case SIOCGIFMEDIA:
1136 /* Flow control requires full-duplex mode. */
1137 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1138 (ifr->ifr_media & IFM_FDX) == 0)
1139 ifr->ifr_media &= ~IFM_ETH_FMASK;
1140 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1141 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1142 /* We can do both TXPAUSE and RXPAUSE. */
1143 ifr->ifr_media |=
1144 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1145 }
1146 }
1147 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1148 break;
1149
1150 default:
1151 error = ether_ioctl(ifp, cmd, data);
1152 if (error != ENETRESET)
1153 break;
1154
1155 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
1156 error = 0;
1157 if (ifp->if_flags & IFF_RUNNING)
1158 pq3etsec_mc_setup(sc);
1159 break;
1160 }
1161 error = pq3etsec_ifinit(ifp);
1162 break;
1163 }
1164
1165 splx(s);
1166 return error;
1167 }
1168
1169 static void
1170 pq3etsec_rxq_desc_presync(
1171 struct pq3etsec_softc *sc,
1172 struct pq3etsec_rxqueue *rxq,
1173 volatile struct rxbd *rxbd,
1174 size_t count)
1175 {
1176 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1177 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1178 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1179 }
1180
1181 static void
1182 pq3etsec_rxq_desc_postsync(
1183 struct pq3etsec_softc *sc,
1184 struct pq3etsec_rxqueue *rxq,
1185 volatile struct rxbd *rxbd,
1186 size_t count)
1187 {
1188 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
1189 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
1190 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1191 }
1192
1193 static void
1194 pq3etsec_txq_desc_presync(
1195 struct pq3etsec_softc *sc,
1196 struct pq3etsec_txqueue *txq,
1197 volatile struct txbd *txbd,
1198 size_t count)
1199 {
1200 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1201 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1202 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1203 }
1204
1205 static void
1206 pq3etsec_txq_desc_postsync(
1207 struct pq3etsec_softc *sc,
1208 struct pq3etsec_txqueue *txq,
1209 volatile struct txbd *txbd,
1210 size_t count)
1211 {
1212 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
1213 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
1214 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1215 }
1216
1217 static bus_dmamap_t
1218 pq3etsec_mapcache_get(
1219 struct pq3etsec_softc *sc,
1220 struct pq3etsec_mapcache *dmc)
1221 {
1222 KASSERT(dmc->dmc_nmaps > 0);
1223 KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
1224 return dmc->dmc_maps[--dmc->dmc_nmaps];
1225 }
1226
1227 static void
1228 pq3etsec_mapcache_put(
1229 struct pq3etsec_softc *sc,
1230 struct pq3etsec_mapcache *dmc,
1231 bus_dmamap_t map)
1232 {
1233 KASSERT(map != NULL);
1234 KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
1235 dmc->dmc_maps[dmc->dmc_nmaps++] = map;
1236 }
1237
1238 static void
1239 pq3etsec_mapcache_destroy(
1240 struct pq3etsec_softc *sc,
1241 struct pq3etsec_mapcache *dmc)
1242 {
1243 const size_t dmc_size =
1244 offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);
1245
1246 for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
1247 bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
1248 }
1249 kmem_intr_free(dmc, dmc_size);
1250 }
1251
1252 static int
1253 pq3etsec_mapcache_create(
1254 struct pq3etsec_softc *sc,
1255 struct pq3etsec_mapcache **dmc_p,
1256 size_t maxmaps,
1257 size_t maxmapsize,
1258 size_t maxseg)
1259 {
1260 const size_t dmc_size =
1261 offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
1262 struct pq3etsec_mapcache * const dmc =
1263 kmem_intr_zalloc(dmc_size, KM_NOSLEEP);
1264
1265 dmc->dmc_maxmaps = maxmaps;
1266 dmc->dmc_nmaps = maxmaps;
1267 dmc->dmc_maxmapsize = maxmapsize;
1268 dmc->dmc_maxseg = maxseg;
1269
1270 for (u_int i = 0; i < maxmaps; i++) {
1271 int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
1272 dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
1273 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
1274 if (error) {
1275 aprint_error_dev(sc->sc_dev,
1276 			    "failed to create dma map cache "
1277 "entry %u of %zu: %d\n",
1278 i, maxmaps, error);
1279 while (i-- > 0) {
1280 bus_dmamap_destroy(sc->sc_dmat,
1281 dmc->dmc_maps[i]);
1282 }
1283 kmem_intr_free(dmc, dmc_size);
1284 return error;
1285 }
1286 KASSERT(dmc->dmc_maps[i] != NULL);
1287 }
1288
1289 *dmc_p = dmc;
1290
1291 return 0;
1292 }
1293
1294 #if 0
1295 static void
1296 pq3etsec_dmamem_free(
1297 bus_dma_tag_t dmat,
1298 size_t map_size,
1299 bus_dma_segment_t *seg,
1300 bus_dmamap_t map,
1301 void *kvap)
1302 {
1303 bus_dmamap_destroy(dmat, map);
1304 bus_dmamem_unmap(dmat, kvap, map_size);
1305 bus_dmamem_free(dmat, seg, 1);
1306 }
1307 #endif
1308
1309 static int
1310 pq3etsec_dmamem_alloc(
1311 bus_dma_tag_t dmat,
1312 size_t map_size,
1313 bus_dma_segment_t *seg,
1314 bus_dmamap_t *map,
1315 void **kvap)
1316 {
1317 int error;
1318 int nseg;
1319
1320 *kvap = NULL;
1321 *map = NULL;
1322
1323 error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
1324 seg, 1, &nseg, 0);
1325 if (error)
1326 return error;
1327
1328 KASSERT(nseg == 1);
1329
1330 error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
1331 BUS_DMA_COHERENT);
1332 if (error == 0) {
1333 error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
1334 map);
1335 if (error == 0) {
1336 error = bus_dmamap_load(dmat, *map, *kvap, map_size,
1337 NULL, 0);
1338 if (error == 0)
1339 return 0;
1340 bus_dmamap_destroy(dmat, *map);
1341 *map = NULL;
1342 }
1343 bus_dmamem_unmap(dmat, *kvap, map_size);
1344 *kvap = NULL;
1345 }
1346 bus_dmamem_free(dmat, seg, nseg);
1347 	return error;
1348 }
1349
1350 static struct mbuf *
1351 pq3etsec_rx_buf_alloc(
1352 struct pq3etsec_softc *sc)
1353 {
1354 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
1355 if (m == NULL) {
1356 printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
1357 return NULL;
1358 }
1359 MCLGET(m, M_DONTWAIT);
1360 if ((m->m_flags & M_EXT) == 0) {
1361 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
1362 m_freem(m);
1363 return NULL;
1364 }
1365 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1366
1367 bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
1368 if (map == NULL) {
1369 printf("%s:%d: %s\n", __func__, __LINE__, "map get");
1370 m_freem(m);
1371 return NULL;
1372 }
1373 M_SETCTX(m, map);
1374 m->m_len = m->m_pkthdr.len = MCLBYTES;
1375 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1376 BUS_DMA_READ|BUS_DMA_NOWAIT);
1377 if (error) {
1378 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
1379 error);
1380 M_SETCTX(m, NULL);
1381 m_freem(m);
1382 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1383 return NULL;
1384 }
1385 KASSERT(map->dm_mapsize == MCLBYTES);
1386 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1387 BUS_DMASYNC_PREREAD);
1388
1389 return m;
1390 }
1391
1392 static void
1393 pq3etsec_rx_map_unload(
1394 struct pq3etsec_softc *sc,
1395 struct mbuf *m)
1396 {
1397 KASSERT(m);
1398 for (; m != NULL; m = m->m_next) {
1399 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1400 KASSERT(map);
1401 KASSERT(map->dm_mapsize == MCLBYTES);
1402 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
1403 BUS_DMASYNC_POSTREAD);
1404 bus_dmamap_unload(sc->sc_dmat, map);
1405 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
1406 M_SETCTX(m, NULL);
1407 }
1408 }
1409
1410 static bool
1411 pq3etsec_rxq_produce(
1412 struct pq3etsec_softc *sc,
1413 struct pq3etsec_rxqueue *rxq)
1414 {
1415 volatile struct rxbd *producer = rxq->rxq_producer;
1416 #if 0
1417 size_t inuse = rxq->rxq_inuse;
1418 #endif
1419 while (rxq->rxq_inuse < rxq->rxq_threshold) {
1420 struct mbuf *m;
1421 IF_DEQUEUE(&sc->sc_rx_bufcache, m);
1422 if (m == NULL) {
1423 m = pq3etsec_rx_buf_alloc(sc);
1424 if (m == NULL) {
1425 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
1426 break;
1427 }
1428 }
1429 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1430 KASSERT(map);
1431
1432 #ifdef ETSEC_DEBUG
1433 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
1434 rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
1435 #endif
1436
1437 /* rxbd_len is write-only by the ETSEC */
1438 producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
1439 membar_producer();
1440 producer->rxbd_flags |= RXBD_E;
1441 if (__predict_false(rxq->rxq_mhead == NULL)) {
1442 KASSERT(producer == rxq->rxq_consumer);
1443 rxq->rxq_mconsumer = m;
1444 }
1445 *rxq->rxq_mtail = m;
1446 rxq->rxq_mtail = &m->m_next;
1447 m->m_len = MCLBYTES;
1448 m->m_next = NULL;
1449 rxq->rxq_inuse++;
1450 if (++producer == rxq->rxq_last) {
1451 membar_producer();
1452 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1453 rxq->rxq_last - rxq->rxq_producer);
1454 producer = rxq->rxq_producer = rxq->rxq_first;
1455 }
1456 }
1457 if (producer != rxq->rxq_producer) {
1458 membar_producer();
1459 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
1460 producer - rxq->rxq_producer);
1461 rxq->rxq_producer = producer;
1462 }
1463 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
1464 if (qhlt) {
1465 KASSERT(qhlt & rxq->rxq_qmask);
1466 sc->sc_ev_rx_stall.ev_count++;
1467 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
1468 }
1469 #if 0
1470 aprint_normal_dev(sc->sc_dev,
1471 "%s: buffers inuse went from %zu to %zu\n",
1472 __func__, inuse, rxq->rxq_inuse);
1473 #endif
1474 return true;
1475 }
1476
1477 static bool
1478 pq3etsec_rx_offload(
1479 struct pq3etsec_softc *sc,
1480 struct mbuf *m,
1481 const struct rxfcb *fcb)
1482 {
1483 if (fcb->rxfcb_flags & RXFCB_VLN) {
1484 VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl,
1485 m_freem(m); return false);
1486 }
1487 if ((fcb->rxfcb_flags & RXFCB_IP) == 0
1488 || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0)
1489 return true;
1490 int csum_flags = 0;
1491 if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) {
1492 csum_flags |= M_CSUM_IPv4;
1493 if (fcb->rxfcb_flags & RXFCB_EIP)
1494 csum_flags |= M_CSUM_IPv4_BAD;
1495 }
1496 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
1497 int ipv_flags;
1498 if (fcb->rxfcb_flags & RXFCB_IP6)
1499 ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6;
1500 else
1501 ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4;
1502 if (fcb->rxfcb_pro == IPPROTO_TCP) {
1503 csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags;
1504 } else {
1505 csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags;
1506 }
1507 if (fcb->rxfcb_flags & RXFCB_ETU)
1508 csum_flags |= M_CSUM_TCP_UDP_BAD;
1509 }
1510
1511 m->m_pkthdr.csum_flags = csum_flags;
1512 return true;
1513 }
1514
1515 static void
1516 pq3etsec_rx_input(
1517 struct pq3etsec_softc *sc,
1518 struct mbuf *m,
1519 uint16_t rxbd_flags)
1520 {
1521 struct ifnet * const ifp = &sc->sc_if;
1522
1523 pq3etsec_rx_map_unload(sc, m);
1524
1525 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
1526 struct rxfcb fcb = *mtod(m, struct rxfcb *);
1527 if (!pq3etsec_rx_offload(sc, m, &fcb))
1528 return;
1529 }
1530 m_adj(m, sc->sc_rx_adjlen);
1531
1532 if (rxbd_flags & RXBD_M)
1533 m->m_flags |= M_PROMISC;
1534 if (rxbd_flags & RXBD_BC)
1535 m->m_flags |= M_BCAST;
1536 if (rxbd_flags & RXBD_MC)
1537 m->m_flags |= M_MCAST;
1538 m->m_flags |= M_HASFCS;
1539 m->m_pkthdr.rcvif = &sc->sc_if;
1540
1541 ifp->if_ipackets++;
1542 ifp->if_ibytes += m->m_pkthdr.len;
1543
1544 /*
1545 	 * Let's give it to the network subsystem to deal with.
1546 */
1547 int s = splnet();
1548 bpf_mtap(ifp, m);
1549 (*ifp->if_input)(ifp, m);
1550 splx(s);
1551 }
1552
1553 static void
1554 pq3etsec_rxq_consume(
1555 struct pq3etsec_softc *sc,
1556 struct pq3etsec_rxqueue *rxq)
1557 {
1558 struct ifnet * const ifp = &sc->sc_if;
1559 volatile struct rxbd *consumer = rxq->rxq_consumer;
1560 size_t rxconsumed = 0;
1561
1562 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);
1563
1564 for (;;) {
1565 if (consumer == rxq->rxq_producer) {
1566 rxq->rxq_consumer = consumer;
1567 rxq->rxq_inuse -= rxconsumed;
1568 KASSERT(rxq->rxq_inuse == 0);
1569 return;
1570 }
1571 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
1572 const uint16_t rxbd_flags = consumer->rxbd_flags;
1573 if (rxbd_flags & RXBD_E) {
1574 rxq->rxq_consumer = consumer;
1575 rxq->rxq_inuse -= rxconsumed;
1576 return;
1577 }
1578 KASSERT(rxq->rxq_mconsumer != NULL);
1579 #ifdef ETSEC_DEBUG
1580 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1581 #endif
1582 #if 0
1583 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
1584 __func__,
1585 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
1586 mtod(rxq->rxq_mconsumer, int *)[0],
1587 mtod(rxq->rxq_mconsumer, int *)[1],
1588 mtod(rxq->rxq_mconsumer, int *)[2],
1589 mtod(rxq->rxq_mconsumer, int *)[3]);
1590 #endif
1591 /*
1592 		 * We own this packet again. Clear all flags except wrap and interrupt.
1593 */
1594 rxconsumed++;
1595 consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I);
1596
1597 /*
1598 * If this descriptor has the LAST bit set and no errors,
1599 * it's a valid input packet.
1600 */
1601 if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) {
1602 size_t rxbd_len = consumer->rxbd_len;
1603 struct mbuf *m = rxq->rxq_mhead;
1604 struct mbuf *m_last = rxq->rxq_mconsumer;
1605 if ((rxq->rxq_mhead = m_last->m_next) == NULL)
1606 rxq->rxq_mtail = &rxq->rxq_mhead;
1607 rxq->rxq_mconsumer = rxq->rxq_mhead;
1608 m_last->m_next = NULL;
1609 m_last->m_len = rxbd_len & (MCLBYTES - 1);
1610 m->m_pkthdr.len = rxbd_len;
1611 pq3etsec_rx_input(sc, m, rxbd_flags);
1612 } else if (rxbd_flags & RXBD_L) {
1613 KASSERT(rxbd_flags & RXBD_ERRORS);
1614 struct mbuf *m;
1615 /*
1616 * We encountered an error, take the mbufs and add
1617 			 * them to the rx bufcache so we can reuse them.
1618 */
1619 ifp->if_ierrors++;
1620 for (m = rxq->rxq_mhead;
1621 m != rxq->rxq_mconsumer;
1622 m = m->m_next) {
1623 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1624 }
1625 m = rxq->rxq_mconsumer;
1626 if ((rxq->rxq_mhead = m->m_next) == NULL)
1627 rxq->rxq_mtail = &rxq->rxq_mhead;
1628 rxq->rxq_mconsumer = m->m_next;
1629 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1630 } else {
1631 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next;
1632 }
1633 #ifdef ETSEC_DEBUG
1634 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL;
1635 #endif
1636
1637 /*
1638 * Wrap at the last entry!
1639 */
1640 if (rxbd_flags & RXBD_W) {
1641 KASSERT(consumer + 1 == rxq->rxq_last);
1642 consumer = rxq->rxq_first;
1643 } else {
1644 consumer++;
1645 }
1646 #ifdef ETSEC_DEBUG
1647 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
1648 #endif
1649 }
1650 }
1651
1652 static void
1653 pq3etsec_rxq_purge(
1654 struct pq3etsec_softc *sc,
1655 struct pq3etsec_rxqueue *rxq,
1656 bool discard)
1657 {
1658 struct mbuf *m;
1659
1660 if ((m = rxq->rxq_mhead) != NULL) {
1661 #ifdef ETSEC_DEBUG
1662 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs));
1663 #endif
1664
1665 if (discard) {
1666 pq3etsec_rx_map_unload(sc, m);
1667 m_freem(m);
1668 } else {
1669 while (m != NULL) {
1670 struct mbuf *m0 = m->m_next;
1671 m->m_next = NULL;
1672 IF_ENQUEUE(&sc->sc_rx_bufcache, m);
1673 m = m0;
1674 }
1675 }
1676
1677 }
1678
1679 rxq->rxq_mconsumer = NULL;
1680 rxq->rxq_mhead = NULL;
1681 rxq->rxq_mtail = &rxq->rxq_mhead;
1682 rxq->rxq_inuse = 0;
1683 }
1684
1685 static void
1686 pq3etsec_rxq_reset(
1687 struct pq3etsec_softc *sc,
1688 struct pq3etsec_rxqueue *rxq)
1689 {
1690 /*
1691 * sync all the descriptors
1692 */
1693 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
1694 rxq->rxq_last - rxq->rxq_first);
1695
1696 /*
1697 * Make sure we own all descriptors in the ring.
1698 */
1699 volatile struct rxbd *rxbd;
1700 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) {
1701 rxbd->rxbd_flags = RXBD_I;
1702 }
1703
1704 /*
1705 * Last descriptor has the wrap flag.
1706 */
1707 rxbd->rxbd_flags = RXBD_W|RXBD_I;
1708
1709 /*
1710 * Reset the producer consumer indexes.
1711 */
1712 rxq->rxq_consumer = rxq->rxq_first;
1713 rxq->rxq_producer = rxq->rxq_first;
1714 rxq->rxq_inuse = 0;
1715 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
1716 rxq->rxq_threshold = ETSEC_MINRXMBUFS;
1717
1718 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY;
1719
1720 /*
1721 	 * Restart receive at the first descriptor.
1722 */
1723 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
1724 }
1725
1726 static int
1727 pq3etsec_rxq_attach(
1728 struct pq3etsec_softc *sc,
1729 struct pq3etsec_rxqueue *rxq,
1730 u_int qno)
1731 {
1732 size_t map_size = PAGE_SIZE;
1733 size_t desc_count = map_size / sizeof(struct rxbd);
1734 int error;
1735 void *descs;
1736
1737 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1738 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
1739 if (error)
1740 return error;
1741
1742 memset(descs, 0, map_size);
1743 rxq->rxq_first = descs;
1744 rxq->rxq_last = rxq->rxq_first + desc_count;
1745 rxq->rxq_consumer = descs;
1746 rxq->rxq_producer = descs;
1747
1748 pq3etsec_rxq_purge(sc, rxq, true);
1749 pq3etsec_rxq_reset(sc, rxq);
1750
1751 rxq->rxq_reg_rbase = RBASEn(qno);
1752 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);
1753
1754 return 0;
1755 }
1756
1757 static bool
1758 pq3etsec_txq_active_p(
1759 struct pq3etsec_softc * const sc,
1760 struct pq3etsec_txqueue *txq)
1761 {
1762 return !IF_IS_EMPTY(&txq->txq_mbufs);
1763 }
1764
1765 static bool
1766 pq3etsec_txq_fillable_p(
1767 struct pq3etsec_softc * const sc,
1768 struct pq3etsec_txqueue *txq)
1769 {
1770 return txq->txq_free >= txq->txq_threshold;
1771 }
1772
1773 static int
1774 pq3etsec_txq_attach(
1775 struct pq3etsec_softc *sc,
1776 struct pq3etsec_txqueue *txq,
1777 u_int qno)
1778 {
1779 size_t map_size = PAGE_SIZE;
1780 size_t desc_count = map_size / sizeof(struct txbd);
1781 int error;
1782 void *descs;
1783
1784 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
1785 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1786 if (error)
1787 return error;
1788
1789 memset(descs, 0, map_size);
1790 txq->txq_first = descs;
1791 txq->txq_last = txq->txq_first + desc_count;
1792 txq->txq_consumer = descs;
1793 txq->txq_producer = descs;
1794
1795 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS);
1796
1797 txq->txq_reg_tbase = TBASEn(qno);
1798 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno);
1799
1800 pq3etsec_txq_reset(sc, txq);
1801
1802 return 0;
1803 }
1804
1805 static int
1806 pq3etsec_txq_map_load(
1807 struct pq3etsec_softc *sc,
1808 struct pq3etsec_txqueue *txq,
1809 struct mbuf *m)
1810 {
1811 bus_dmamap_t map;
1812 int error;
1813
1814 map = M_GETCTX(m, bus_dmamap_t);
1815 if (map != NULL)
1816 return 0;
1817
1818 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache);
1819 if (map == NULL)
1820 return ENOMEM;
1821
1822 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1823 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1824 if (error)
1825 return error;
1826
1827 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
1828 BUS_DMASYNC_PREWRITE);
1829 M_SETCTX(m, map);
1830 return 0;
1831 }
1832
1833 static void
1834 pq3etsec_txq_map_unload(
1835 struct pq3etsec_softc *sc,
1836 struct pq3etsec_txqueue *txq,
1837 struct mbuf *m)
1838 {
1839 KASSERT(m);
1840 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1841 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1842 BUS_DMASYNC_POSTWRITE);
1843 bus_dmamap_unload(sc->sc_dmat, map);
1844 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map);
1845 }
1846
1847 static bool
1848 pq3etsec_txq_produce(
1849 struct pq3etsec_softc *sc,
1850 struct pq3etsec_txqueue *txq,
1851 struct mbuf *m)
1852 {
1853 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
1854
1855 if (map->dm_nsegs > txq->txq_free)
1856 return false;
1857
1858 /*
1859 * TCP Offload flag must be set in the first descriptor.
1860 */
1861 volatile struct txbd *producer = txq->txq_producer;
1862 uint16_t last_flags = TXBD_L;
1863 uint16_t first_flags = TXBD_R
1864 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0);
1865
1866 /*
1867 * If we've produced enough descriptors without consuming any
1868 * we need to ask for an interrupt to reclaim some.
1869 */
1870 txq->txq_lastintr += map->dm_nsegs;
1871 if (txq->txq_lastintr >= txq->txq_threshold
1872 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1873 txq->txq_lastintr = 0;
1874 last_flags |= TXBD_I;
1875 }
1876
1877 #ifdef ETSEC_DEBUG
1878 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1879 #endif
1880 KASSERT(producer != txq->txq_last);
1881 producer->txbd_bufptr = map->dm_segs[0].ds_addr;
1882 producer->txbd_len = map->dm_segs[0].ds_len;
1883
1884 if (map->dm_nsegs > 1) {
1885 volatile struct txbd *start = producer + 1;
1886 size_t count = map->dm_nsegs - 1;
1887 for (u_int i = 1; i < map->dm_nsegs; i++) {
1888 if (__predict_false(++producer == txq->txq_last)) {
1889 producer = txq->txq_first;
1890 if (start < txq->txq_last) {
1891 pq3etsec_txq_desc_presync(sc, txq,
1892 start, txq->txq_last - start);
1893 count -= txq->txq_last - start;
1894 }
1895 start = txq->txq_first;
1896 }
1897 #ifdef ETSEC_DEBUG
1898 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
1899 #endif
1900 producer->txbd_bufptr = map->dm_segs[i].ds_addr;
1901 producer->txbd_len = map->dm_segs[i].ds_len;
1902 producer->txbd_flags = TXBD_R
1903 | (producer->txbd_flags & TXBD_W)
1904 | (i == map->dm_nsegs - 1 ? last_flags : 0);
1905 #if 0
1906 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first,
1907 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr);
1908 #endif
1909 }
1910 pq3etsec_txq_desc_presync(sc, txq, start, count);
1911 } else {
1912 first_flags |= last_flags;
1913 }
1914
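	/*
	 * Set the ready bit on the first descriptor only now; the membar
	 * keeps this store from being reordered ahead of the stores that
	 * built the rest of the chain.
	 */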
1915 membar_producer();
1916 txq->txq_producer->txbd_flags =
1917 first_flags | (txq->txq_producer->txbd_flags & TXBD_W);
1918 #if 0
1919 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
1920 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags,
1921 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr);
1922 #endif
1923 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1);
1924
1925 /*
1926 * Reduce free count by the number of segments we consumed.
1927 */
1928 txq->txq_free -= map->dm_nsegs;
1929 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1930 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
1931 KASSERT(producer->txbd_flags & TXBD_L);
1932 #ifdef ETSEC_DEBUG
1933 txq->txq_lmbufs[producer - txq->txq_first] = m;
1934 #endif
1935
1936 #if 0
1937 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
1938 __func__, m, m->m_pkthdr.len, map->dm_nsegs,
1939 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
1940 #endif
1941
1942 if (++producer == txq->txq_last)
1943 txq->txq_producer = txq->txq_first;
1944 else
1945 txq->txq_producer = producer;
1946 IF_ENQUEUE(&txq->txq_mbufs, m);
1947
1948 /*
1949 * Restart the transmitter.
1950 */
1951 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */
1952
1953 return true;
1954 }
1955
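/*
 * If the packet needs checksum offload or VLAN tag insertion, build a
 * frame control block (FCB) describing the work, prepend it to the mbuf
 * chain and mark the packet M_HASFCB; otherwise clear M_HASFCB.
 */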
1956 static void
1957 pq3etsec_tx_offload(
1958 struct pq3etsec_softc *sc,
1959 struct pq3etsec_txqueue *txq,
1960 struct mbuf **mp)
1961 {
1962 struct mbuf *m = *mp;
1963 u_int csum_flags = m->m_pkthdr.csum_flags;
1964 struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m);
1965
1966 KASSERT(m->m_flags & M_PKTHDR);
1967
1968 /*
1969 	 * Let's see if we are doing any offload first.
1970 */
1971 if (csum_flags == 0 && vtag == 0) {
1972 m->m_flags &= ~M_HASFCB;
1973 return;
1974 }
1975
1976 uint16_t flags = 0;
1977 if (csum_flags & M_CSUM_IP) {
1978 flags |= TXFCB_IP
1979 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
1980 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
1981 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
1982 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
1983 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
1984 }
1985 if (vtag) {
1986 flags |= TXFCB_VLN;
1987 }
1988 if (flags == 0) {
1989 m->m_flags &= ~M_HASFCB;
1990 return;
1991 }
1992
1993 struct txfcb fcb;
1994 fcb.txfcb_flags = flags;
1995 if (csum_flags & M_CSUM_IPv4)
1996 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
1997 else
1998 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data);
1999 fcb.txfcb_l3os = ETHER_HDR_LEN;
2000 fcb.txfcb_phcs = 0;
2001 fcb.txfcb_vlctl = vtag ? VLAN_TAG_VALUE(vtag) & 0xffff : 0;
2002
2003 #if 0
2004 printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n",
2005 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os,
2006 fcb.txfcb_phcs, fcb.txfcb_vlctl);
2007 #endif
2008
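	/*
	 * Find room for the FCB in front of the packet: use the mbuf's
	 * leading space if it is big enough, otherwise shift the data
	 * within a non-cluster header mbuf, otherwise prepend a newly
	 * allocated mbuf.  If that allocation fails, compute any deferred
	 * checksums in software and send the packet without an FCB.
	 */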
2009 if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
2010 m->m_data -= sizeof(fcb);
2011 m->m_len += sizeof(fcb);
2012 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
2013 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
2014 m->m_data = m->m_pktdat;
2015 m->m_len += sizeof(fcb);
2016 } else {
2017 struct mbuf *mn;
2018 MGET(mn, M_DONTWAIT, m->m_type);
2019 if (mn == NULL) {
2020 if (csum_flags & M_CSUM_IP4) {
2021 #ifdef INET
2022 ip_undefer_csum(m, ETHER_HDR_LEN,
2023 csum_flags & M_CSUM_IP4);
2024 #else
2025 panic("%s: impossible M_CSUM flags %#x",
2026 device_xname(sc->sc_dev), csum_flags);
2027 #endif
2028 } else if (csum_flags & M_CSUM_IP6) {
2029 #ifdef INET6
2030 ip6_undefer_csum(m, ETHER_HDR_LEN,
2031 csum_flags & M_CSUM_IP6);
2032 #else
2033 panic("%s: impossible M_CSUM flags %#x",
2034 device_xname(sc->sc_dev), csum_flags);
2035 #endif
2036 } else if (vtag) {
2037 }
2038
2039 m->m_flags &= ~M_HASFCB;
2040 return;
2041 }
2042
2043 M_MOVE_PKTHDR(mn, m);
2044 mn->m_next = m;
2045 m = mn;
2046 MH_ALIGN(m, sizeof(fcb));
2047 m->m_len = sizeof(fcb);
2048 *mp = m;
2049 }
2050 m->m_pkthdr.len += sizeof(fcb);
2051 m->m_flags |= M_HASFCB;
2052 *mtod(m, struct txfcb *) = fcb;
2053 return;
2054 }
2055
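/*
 * Pull packets from the interface send queue (or retry the packet held
 * back in txq_next), attach any offload FCB, load them onto DMA maps and
 * hand them to the descriptor ring.  Returns true once the send queue is
 * drained, false if the ring or the mbuf queue fills up first.
 */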
2056 static bool
2057 pq3etsec_txq_enqueue(
2058 struct pq3etsec_softc *sc,
2059 struct pq3etsec_txqueue *txq)
2060 {
2061 for (;;) {
2062 if (IF_QFULL(&txq->txq_mbufs))
2063 return false;
2064 struct mbuf *m = txq->txq_next;
2065 if (m == NULL) {
2066 int s = splnet();
2067 IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
2068 splx(s);
2069 if (m == NULL)
2070 return true;
2071 M_SETCTX(m, NULL);
2072 pq3etsec_tx_offload(sc, txq, &m);
2073 } else {
2074 txq->txq_next = NULL;
2075 }
2076 int error = pq3etsec_txq_map_load(sc, txq, m);
2077 if (error) {
2078 aprint_error_dev(sc->sc_dev,
2079 "discarded packet due to "
2080 "dmamap load failure: %d\n", error);
2081 m_freem(m);
2082 continue;
2083 }
2084 KASSERT(txq->txq_next == NULL);
2085 if (!pq3etsec_txq_produce(sc, txq, m)) {
2086 txq->txq_next = m;
2087 return false;
2088 }
2089 KASSERT(txq->txq_next == NULL);
2090 }
2091 }
2092
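/*
 * Reclaim descriptors that the hardware has finished with, starting at
 * the consumer index.  On the last descriptor of each packet the mbuf is
 * unloaded from its DMA map, counted in the interface statistics and
 * freed.  Returns true if the ring was drained completely, otherwise
 * whether enough space was freed to accept more packets.
 */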
2093 static bool
2094 pq3etsec_txq_consume(
2095 struct pq3etsec_softc *sc,
2096 struct pq3etsec_txqueue *txq)
2097 {
2098 struct ifnet * const ifp = &sc->sc_if;
2099 volatile struct txbd *consumer = txq->txq_consumer;
2100 size_t txfree = 0;
2101
2102 #if 0
2103 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
2104 #endif
2105 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);
2106
2107 for (;;) {
2108 if (consumer == txq->txq_producer) {
2109 txq->txq_consumer = consumer;
2110 txq->txq_free += txfree;
2111 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
2112 #if 0
2113 		printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
2114 __func__, txfree, txq->txq_free - txfree, txq->txq_free);
2115 #endif
2116 KASSERT(txq->txq_lastintr == 0);
2117 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
2118 return true;
2119 }
2120 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
2121 const uint16_t txbd_flags = consumer->txbd_flags;
2122 if (txbd_flags & TXBD_R) {
2123 txq->txq_consumer = consumer;
2124 txq->txq_free += txfree;
2125 txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
2126 #if 0
2127 printf("%s: freed %zu descriptors\n",
2128 __func__, txfree);
2129 #endif
2130 return pq3etsec_txq_fillable_p(sc, txq);
2131 }
2132
2133 /*
2134 * If this is the last descriptor in the chain, get the
2135 * mbuf, free its dmamap, and free the mbuf chain itself.
2136 */
2137 if (txbd_flags & TXBD_L) {
2138 struct mbuf *m;
2139
2140 IF_DEQUEUE(&txq->txq_mbufs, m);
2141 #ifdef ETSEC_DEBUG
2142 KASSERTMSG(
2143 m == txq->txq_lmbufs[consumer-txq->txq_first],
2144 "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
2145 __func__, consumer, consumer - txq->txq_first,
2146 txbd_flags, m,
2147 &txq->txq_lmbufs[consumer-txq->txq_first],
2148 txq->txq_lmbufs[consumer-txq->txq_first]);
2149 #endif
2150 KASSERT(m);
2151 pq3etsec_txq_map_unload(sc, txq, m);
2152 #if 0
2153 printf("%s: mbuf %p: consumed a %u byte packet\n",
2154 __func__, m, m->m_pkthdr.len);
2155 #endif
2156 if (m->m_flags & M_HASFCB)
2157 m_adj(m, sizeof(struct txfcb));
2158 bpf_mtap(ifp, m);
2159 ifp->if_opackets++;
2160 ifp->if_obytes += m->m_pkthdr.len;
2161 if (m->m_flags & M_MCAST)
2162 ifp->if_omcasts++;
2163 if (txbd_flags & TXBD_ERRORS)
2164 ifp->if_oerrors++;
2165 m_freem(m);
2166 #ifdef ETSEC_DEBUG
2167 txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
2168 #endif
2169 } else {
2170 #ifdef ETSEC_DEBUG
2171 KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
2172 #endif
2173 }
2174
2175 		/*
2176 		 * We own this descriptor again; just count it as free.
2177 		 */
2178 txfree++;
2179 //consumer->txbd_flags = txbd_flags & TXBD_W;
2180
2181 /*
2182 * Wrap at the last entry!
2183 */
2184 if (txbd_flags & TXBD_W) {
2185 KASSERT(consumer + 1 == txq->txq_last);
2186 consumer = txq->txq_first;
2187 } else {
2188 consumer++;
2189 KASSERT(consumer < txq->txq_last);
2190 }
2191 }
2192 }
2193
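/*
 * Throw away every packet still queued for transmission, including the
 * one held back in txq_next, and return their DMA maps to the cache.
 * The transmitter must already be disabled.
 */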
2194 static void
2195 pq3etsec_txq_purge(
2196 struct pq3etsec_softc *sc,
2197 struct pq3etsec_txqueue *txq)
2198 {
2199 struct mbuf *m;
2200 KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);
2201
2202 for (;;) {
2203 IF_DEQUEUE(&txq->txq_mbufs, m);
2204 if (m == NULL)
2205 break;
2206 pq3etsec_txq_map_unload(sc, txq, m);
2207 m_freem(m);
2208 }
2209 if ((m = txq->txq_next) != NULL) {
2210 txq->txq_next = NULL;
2211 pq3etsec_txq_map_unload(sc, txq, m);
2212 m_freem(m);
2213 }
2214 #ifdef ETSEC_DEBUG
2215 memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
2216 #endif
2217 }
2218
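/*
 * Put the transmit ring back into its initial state: clear every
 * descriptor, set the wrap bit on the last one, rewind the producer and
 * consumer indexes, and point the hardware at the start of the ring.
 */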
2219 static void
2220 pq3etsec_txq_reset(
2221 struct pq3etsec_softc *sc,
2222 struct pq3etsec_txqueue *txq)
2223 {
2224 /*
2225 * sync all the descriptors
2226 */
2227 pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
2228 txq->txq_last - txq->txq_first);
2229
2230 /*
2231 * Make sure we own all descriptors in the ring.
2232 */
2233 volatile struct txbd *txbd;
2234 for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
2235 txbd->txbd_flags = 0;
2236 }
2237
2238 /*
2239 * Last descriptor has the wrap flag.
2240 */
2241 txbd->txbd_flags = TXBD_W;
2242
2243 /*
2244 	 * Reset the producer and consumer indexes.
2245 */
2246 txq->txq_consumer = txq->txq_first;
2247 txq->txq_producer = txq->txq_first;
2248 txq->txq_free = txq->txq_last - txq->txq_first - 1;
2249 txq->txq_threshold = txq->txq_free / 2;
2250 txq->txq_lastintr = 0;
2251
2252 /*
2253 * What do we want to get interrupted on?
2254 */
2255 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE;
2256
2257 /*
2258 	 * Restart the transmitter at the first descriptor.
2259 */
2260 etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
2261 }
2262
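/*
 * if_start handler.  The actual queueing work is done by the soft
 * interrupt, so just flag a pending transmit and schedule it.
 */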
2263 static void
2264 pq3etsec_ifstart(struct ifnet *ifp)
2265 {
2266 struct pq3etsec_softc * const sc = ifp->if_softc;
2267
2268 if (__predict_false((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) {
2269 return;
2270 }
2271
2272 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2273 softint_schedule(sc->sc_soft_ih);
2274 }
2275
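/*
 * Recover from a transmit error: reclaim whatever the hardware has
 * completed, clear the halt condition if the transmitter stopped with
 * work still pending, and try to queue more packets.
 */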
2276 static void
2277 pq3etsec_tx_error(
2278 struct pq3etsec_softc * const sc)
2279 {
2280 struct pq3etsec_txqueue * const txq = &sc->sc_txq;
2281
2282 pq3etsec_txq_consume(sc, txq);
2283
2284 if (pq3etsec_txq_fillable_p(sc, txq))
2285 sc->sc_if.if_flags &= ~IFF_OACTIVE;
2286 if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) {
2287 } else if (sc->sc_txerrors & IEVENT_EBERR) {
2288 }
2289
2290 if (pq3etsec_txq_active_p(sc, txq))
2291 etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
2292 if (!pq3etsec_txq_enqueue(sc, txq)) {
2293 sc->sc_ev_tx_stall.ev_count++;
2294 sc->sc_if.if_flags |= IFF_OACTIVE;
2295 }
2296
2297 sc->sc_txerrors = 0;
2298 }
2299
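/*
 * Hardware transmit interrupt.  Acknowledge the TX events, mask them,
 * and defer the descriptor reclaim to the soft interrupt.
 */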
2300 int
2301 pq3etsec_tx_intr(void *arg)
2302 {
2303 struct pq3etsec_softc * const sc = arg;
2304
2305 mutex_enter(sc->sc_hwlock);
2306
2307 sc->sc_ev_tx_intr.ev_count++;
2308
2309 uint32_t ievent = etsec_read(sc, IEVENT);
2310 ievent &= IEVENT_TXF|IEVENT_TXB;
2311 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2312
2313 #if 0
2314 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2315 __func__, ievent, etsec_read(sc, IMASK));
2316 #endif
2317
2318 if (ievent == 0) {
2319 mutex_exit(sc->sc_hwlock);
2320 return 0;
2321 }
2322
2323 sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB);
2324 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
2325 etsec_write(sc, IMASK, sc->sc_imask);
2326 softint_schedule(sc->sc_soft_ih);
2327
2328 mutex_exit(sc->sc_hwlock);
2329
2330 return 1;
2331 }
2332
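/*
 * Hardware receive interrupt.  Acknowledge the RX events, mask them,
 * and defer the packet processing to the soft interrupt.
 */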
2333 int
2334 pq3etsec_rx_intr(void *arg)
2335 {
2336 struct pq3etsec_softc * const sc = arg;
2337
2338 mutex_enter(sc->sc_hwlock);
2339
2340 sc->sc_ev_rx_intr.ev_count++;
2341
2342 uint32_t ievent = etsec_read(sc, IEVENT);
2343 ievent &= IEVENT_RXF|IEVENT_RXB;
2344 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2345 if (ievent == 0) {
2346 mutex_exit(sc->sc_hwlock);
2347 return 0;
2348 }
2349
2350 #if 0
2351 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
2352 #endif
2353
2354 sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB);
2355 atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
2356 etsec_write(sc, IMASK, sc->sc_imask);
2357 softint_schedule(sc->sc_soft_ih);
2358
2359 mutex_exit(sc->sc_hwlock);
2360
2361 return 1;
2362 }
2363
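/*
 * Interrupt handler for everything that isn't plain TX/RX traffic:
 * graceful stop completion, MII management events, receive busy,
 * transmit errors, pause frames and DPE errors.  Loops until no more
 * events are pending, deferring the heavier work to the soft interrupt.
 */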
2364 int
2365 pq3etsec_error_intr(void *arg)
2366 {
2367 struct pq3etsec_softc * const sc = arg;
2368
2369 mutex_enter(sc->sc_hwlock);
2370
2371 sc->sc_ev_error_intr.ev_count++;
2372
2373 for (int rv = 0, soft_flags = 0;; rv = 1) {
2374 uint32_t ievent = etsec_read(sc, IEVENT);
2375 ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB);
2376 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */
2377 if (ievent == 0) {
2378 if (soft_flags) {
2379 atomic_or_uint(&sc->sc_soft_flags, soft_flags);
2380 softint_schedule(sc->sc_soft_ih);
2381 }
2382 mutex_exit(sc->sc_hwlock);
2383 return rv;
2384 }
2385 #if 0
2386 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
2387 __func__, ievent, etsec_read(sc, IMASK));
2388 #endif
2389
2390 if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) {
2391 sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC);
2392 etsec_write(sc, IMASK, sc->sc_imask);
2393 wakeup(sc);
2394 }
2395 if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) {
2396 sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR);
2397 etsec_write(sc, IMASK, sc->sc_imask);
2398 wakeup(&sc->sc_mii);
2399 }
2400 if (ievent & IEVENT_BSY) {
2401 soft_flags |= SOFT_RXBSY;
2402 sc->sc_imask &= ~IEVENT_BSY;
2403 etsec_write(sc, IMASK, sc->sc_imask);
2404 }
2405 if (ievent & IEVENT_TXE) {
2406 soft_flags |= SOFT_TXERROR;
2407 sc->sc_imask &= ~IEVENT_TXE;
2408 sc->sc_txerrors |= ievent;
2409 }
2410 if (ievent & IEVENT_TXC) {
2411 sc->sc_ev_tx_pause.ev_count++;
2412 }
2413 if (ievent & IEVENT_RXC) {
2414 sc->sc_ev_rx_pause.ev_count++;
2415 }
2416 if (ievent & IEVENT_DPE) {
2417 soft_flags |= SOFT_RESET;
2418 sc->sc_imask &= ~IEVENT_DPE;
2419 etsec_write(sc, IMASK, sc->sc_imask);
2420 }
2421 }
2422 }
2423
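/*
 * Soft interrupt.  Performs the work deferred by the hard interrupt
 * handlers: reinitializing the chip after a fatal error, growing the
 * receive threshold after an RX busy, reclaiming and refilling the
 * transmit ring, consuming received packets and handling transmit
 * errors, then re-enables the interrupt sources it serviced.
 */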
2424 void
2425 pq3etsec_soft_intr(void *arg)
2426 {
2427 struct pq3etsec_softc * const sc = arg;
2428 struct ifnet * const ifp = &sc->sc_if;
2429 uint32_t imask = 0;
2430
2431 mutex_enter(sc->sc_lock);
2432
2433 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
2434
2435 sc->sc_ev_soft_intr.ev_count++;
2436
2437 if (soft_flags & SOFT_RESET) {
2438 int s = splnet();
2439 pq3etsec_ifinit(ifp);
2440 splx(s);
2441 soft_flags = 0;
2442 }
2443
2444 if (soft_flags & SOFT_RXBSY) {
2445 struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
2446 size_t threshold = 5 * rxq->rxq_threshold / 4;
2447 if (threshold >= rxq->rxq_last - rxq->rxq_first) {
2448 threshold = rxq->rxq_last - rxq->rxq_first - 1;
2449 } else {
2450 imask |= IEVENT_BSY;
2451 }
2452 aprint_normal_dev(sc->sc_dev,
2453 "increasing receive buffers from %zu to %zu\n",
2454 rxq->rxq_threshold, threshold);
2455 rxq->rxq_threshold = threshold;
2456 }
2457
2458 if ((soft_flags & SOFT_TXINTR)
2459 || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
2460 /*
2461 * Let's do what we came here for. Consume transmitted
2462 		 * packets off the transmit ring.
2463 */
2464 if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
2465 || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
2466 sc->sc_ev_tx_stall.ev_count++;
2467 ifp->if_flags |= IFF_OACTIVE;
2468 } else {
2469 ifp->if_flags &= ~IFF_OACTIVE;
2470 }
2471 imask |= IEVENT_TXF;
2472 }
2473
2474 if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) {
2475 /*
2476 		 * Let's consume any received packets.
2477 */
2478 pq3etsec_rxq_consume(sc, &sc->sc_rxq);
2479 imask |= IEVENT_RXF;
2480 }
2481
2482 if (soft_flags & SOFT_TXERROR) {
2483 pq3etsec_tx_error(sc);
2484 imask |= IEVENT_TXE;
2485 }
2486
2487 if (ifp->if_flags & IFF_RUNNING) {
2488 pq3etsec_rxq_produce(sc, &sc->sc_rxq);
2489 mutex_spin_enter(sc->sc_hwlock);
2490 sc->sc_imask |= imask;
2491 etsec_write(sc, IMASK, sc->sc_imask);
2492 mutex_spin_exit(sc->sc_hwlock);
2493 } else {
2494 KASSERT((soft_flags & SOFT_RXBSY) == 0);
2495 }
2496
2497 mutex_exit(sc->sc_lock);
2498 }
2499
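/*
 * Once-a-second callout used to drive mii_tick() and, if a reset has
 * been requested, to kick the soft interrupt.  Reschedules itself.
 */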
2500 static void
2501 pq3etsec_mii_tick(void *arg)
2502 {
2503 struct pq3etsec_softc * const sc = arg;
2504 mutex_enter(sc->sc_lock);
2505 callout_ack(&sc->sc_mii_callout);
2506 sc->sc_ev_mii_ticks.ev_count++;
2507 #ifdef DEBUG
2508 uint64_t now = mftb();
2509 if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
2510 aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
2511 __func__, now - sc->sc_mii_last_tick);
2512 callout_stop(&sc->sc_mii_callout);
2513 }
2514 #endif
2515 mii_tick(&sc->sc_mii);
2516 int s = splnet();
2517 if (sc->sc_soft_flags & SOFT_RESET)
2518 softint_schedule(sc->sc_soft_ih);
2519 splx(s);
2520 callout_schedule(&sc->sc_mii_callout, hz);
2521 #ifdef DEBUG
2522 sc->sc_mii_last_tick = now;
2523 #endif
2524 mutex_exit(sc->sc_lock);
2525 }
2526