/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE
#define GMAC_PRIVATE

#include "locators.h"
#include "opt_broadcom.h"

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.20 2012/12/25 21:17:40 matt Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/if_dl.h>

#include <net/bpf.h>

#include <dev/mii/miivar.h>

#include <arm/broadcom/bcm53xx_reg.h>
#include <arm/broadcom/bcm53xx_var.h>

//#define BCMETH_MPSAFE

#ifdef BCMETH_COUNTERS
#define	BCMETH_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
#else
#define	BCMETH_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
#endif
#define	BCMETH_EVCNT_INCR(a)	BCMETH_EVCNT_ADD((a), 1)

#define	BCMETH_MAXTXMBUFS	128
#define	BCMETH_NTXSEGS		30
#define	BCMETH_MAXRXMBUFS	255
#define	BCMETH_MINRXMBUFS	64
#define	BCMETH_NRXSEGS		1
#define	BCMETH_RINGSIZE		PAGE_SIZE

#if 1
#define	BCMETH_RCVMAGIC		0xfeedface
#endif
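
/*
 * When BCMETH_RCVMAGIC is defined, bcmeth_rx_buf_alloc() seeds the first
 * 32-bit word of every freshly loaded receive buffer with the magic value
 * before handing it to the DMA engine.  If bcmeth_rxq_consume() later finds
 * the magic still in place, the hardware never wrote a receive status word
 * for that buffer, so the mbuf is counted as an error and recycled rather
 * than trusted.
 */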

static int bcmeth_ccb_match(device_t, cfdata_t, void *);
static void bcmeth_ccb_attach(device_t, device_t, void *);

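/*
 * The transmit and receive rings below are classic producer/consumer
 * descriptor rings: the driver advances the producer index as it hands
 * buffers to the GMAC and the consumer index as completed descriptors are
 * reclaimed.  first/last bound the ring, and the *_reg_* members cache the
 * per-queue register offsets so the same code can drive any queue.
 */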
struct bcmeth_txqueue {
	bus_dmamap_t txq_descmap;
	struct gmac_txdb *txq_consumer;
	struct gmac_txdb *txq_producer;
	struct gmac_txdb *txq_first;
	struct gmac_txdb *txq_last;
	struct ifqueue txq_mbufs;
	struct mbuf *txq_next;
	size_t txq_free;
	size_t txq_threshold;
	size_t txq_lastintr;
	bus_size_t txq_reg_xmtaddrlo;
	bus_size_t txq_reg_xmtptr;
	bus_size_t txq_reg_xmtctl;
	bus_size_t txq_reg_xmtsts0;
	bus_size_t txq_reg_xmtsts1;
	bus_dma_segment_t txq_descmap_seg;
};

struct bcmeth_rxqueue {
	bus_dmamap_t rxq_descmap;
	struct gmac_rxdb *rxq_consumer;
	struct gmac_rxdb *rxq_producer;
	struct gmac_rxdb *rxq_first;
	struct gmac_rxdb *rxq_last;
	struct mbuf *rxq_mhead;
	struct mbuf **rxq_mtail;
	struct mbuf *rxq_mconsumer;
	size_t rxq_inuse;
	size_t rxq_threshold;
	bus_size_t rxq_reg_rcvaddrlo;
	bus_size_t rxq_reg_rcvptr;
	bus_size_t rxq_reg_rcvctl;
	bus_size_t rxq_reg_rcvsts0;
	bus_size_t rxq_reg_rcvsts1;
	bus_dma_segment_t rxq_descmap_seg;
};

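/*
 * A mapcache is a small LIFO pool of preallocated bus_dmamap_t's: dmc_maxmaps
 * maps are created up front, each able to hold a dmc_maxmapsize transfer in
 * at most dmc_maxseg segments, so the rx refill and tx enqueue paths never
 * have to call bus_dmamap_create() at interrupt time.
 */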
struct bcmeth_mapcache {
	u_int dmc_nmaps;
	u_int dmc_maxseg;
	u_int dmc_maxmaps;
	u_int dmc_maxmapsize;
	bus_dmamap_t dmc_maps[0];
};

struct bcmeth_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_dma_tag_t sc_dmat;
	kmutex_t *sc_lock;
	kmutex_t *sc_hwlock;
	struct ethercom sc_ec;
#define	sc_if		sc_ec.ec_if
	struct ifmedia sc_media;
	void *sc_soft_ih;
	void *sc_ih;

	struct bcmeth_rxqueue sc_rxq;
	struct bcmeth_txqueue sc_txq;

	size_t sc_rcvoffset;
	uint32_t sc_maxfrm;
	uint32_t sc_cmdcfg;
	uint32_t sc_intmask;
	uint32_t sc_rcvlazy;
	volatile uint32_t sc_soft_flags;
#define	SOFT_RXINTR		0x01
#define	SOFT_TXINTR		0x02

#ifdef BCMETH_COUNTERS
	struct evcnt sc_ev_intr;
	struct evcnt sc_ev_soft_intr;
	struct evcnt sc_ev_work;
	struct evcnt sc_ev_tx_stall;
	struct evcnt sc_ev_rx_badmagic_lo;
	struct evcnt sc_ev_rx_badmagic_hi;
#endif

	struct ifqueue sc_rx_bufcache;
	struct bcmeth_mapcache *sc_rx_mapcache;
	struct bcmeth_mapcache *sc_tx_mapcache;

	struct workqueue *sc_workq;
	struct work sc_work;

	volatile uint32_t sc_work_flags;
#define	WORK_RXINTR		0x01
#define	WORK_RXUNDERFLOW	0x02
#define	WORK_REINIT		0x04

	uint8_t sc_enaddr[ETHER_ADDR_LEN];
};

static void bcmeth_ifstart(struct ifnet *);
static void bcmeth_ifwatchdog(struct ifnet *);
static int bcmeth_ifinit(struct ifnet *);
static void bcmeth_ifstop(struct ifnet *, int);
static int bcmeth_ifioctl(struct ifnet *, u_long, void *);

static int bcmeth_mapcache_create(struct bcmeth_softc *,
    struct bcmeth_mapcache **, size_t, size_t, size_t);
static void bcmeth_mapcache_destroy(struct bcmeth_softc *,
    struct bcmeth_mapcache *);
static bus_dmamap_t bcmeth_mapcache_get(struct bcmeth_softc *,
    struct bcmeth_mapcache *);
static void bcmeth_mapcache_put(struct bcmeth_softc *,
    struct bcmeth_mapcache *, bus_dmamap_t);

static int bcmeth_txq_attach(struct bcmeth_softc *,
    struct bcmeth_txqueue *, u_int);
static void bcmeth_txq_purge(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static void bcmeth_txq_reset(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static bool bcmeth_txq_consume(struct bcmeth_softc *,
    struct bcmeth_txqueue *);
static bool bcmeth_txq_produce(struct bcmeth_softc *,
    struct bcmeth_txqueue *, struct mbuf *m);
static bool bcmeth_txq_active_p(struct bcmeth_softc *,
    struct bcmeth_txqueue *);

static int bcmeth_rxq_attach(struct bcmeth_softc *,
    struct bcmeth_rxqueue *, u_int);
static bool bcmeth_rxq_produce(struct bcmeth_softc *,
    struct bcmeth_rxqueue *);
static void bcmeth_rxq_purge(struct bcmeth_softc *,
    struct bcmeth_rxqueue *, bool);
static void bcmeth_rxq_reset(struct bcmeth_softc *,
    struct bcmeth_rxqueue *);

static int bcmeth_intr(void *);
#ifdef BCMETH_MPSAFETX
static void bcmeth_soft_txintr(struct bcmeth_softc *);
#endif
static void bcmeth_soft_intr(void *);
static void bcmeth_worker(struct work *, void *);

static int bcmeth_mediachange(struct ifnet *);
static void bcmeth_mediastatus(struct ifnet *, struct ifmediareq *);

static inline uint32_t
bcmeth_read_4(struct bcmeth_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, o);
}

static inline void
bcmeth_write_4(struct bcmeth_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, o, v);
}

CFATTACH_DECL_NEW(bcmeth_ccb, sizeof(struct bcmeth_softc),
    bcmeth_ccb_match, bcmeth_ccb_attach, NULL, NULL);

static int
bcmeth_ccb_match(device_t parent, cfdata_t cf, void *aux)
{
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;

	if (strcmp(cf->cf_name, loc->loc_name))
		return 0;

#ifdef DIAGNOSTIC
	const int port = cf->cf_loc[BCMCCBCF_PORT];
#endif
	KASSERT(port == BCMCCBCF_PORT_DEFAULT || port == loc->loc_port);

	return 1;
}

static void
bcmeth_ccb_attach(device_t parent, device_t self, void *aux)
{
	struct bcmeth_softc * const sc = device_private(self);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;
	const char * const xname = device_xname(self);
	prop_dictionary_t dict = device_properties(self);
	int error;

	sc->sc_bst = ccbaa->ccbaa_ccb_bst;
	sc->sc_dmat = ccbaa->ccbaa_dmat;
	bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
	    loc->loc_offset, loc->loc_size, &sc->sc_bsh);

	/*
	 * We need to use the coherent dma tag for the GMAC.
	 */
	sc->sc_dmat = &bcm53xx_coherent_dma_tag;

	prop_data_t eaprop = prop_dictionary_get(dict, "mac-address");
	if (eaprop == NULL) {
		uint32_t mac0 = bcmeth_read_4(sc, UNIMAC_MAC_0);
		uint32_t mac1 = bcmeth_read_4(sc, UNIMAC_MAC_1);
		if ((mac0 == 0 && mac1 == 0) || (mac1 & 1)) {
			aprint_error(": mac-address property is missing\n");
			return;
		}
		sc->sc_enaddr[0] = (mac0 >> 0) & 0xff;
		sc->sc_enaddr[1] = (mac0 >> 8) & 0xff;
		sc->sc_enaddr[2] = (mac0 >> 16) & 0xff;
		sc->sc_enaddr[3] = (mac0 >> 24) & 0xff;
		sc->sc_enaddr[4] = (mac1 >> 0) & 0xff;
		sc->sc_enaddr[5] = (mac1 >> 8) & 0xff;
	} else {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(eaprop),
		    ETHER_ADDR_LEN);
	}
	sc->sc_dev = self;
	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);

	bcmeth_write_4(sc, GMAC_INTMASK, 0);	// disable interrupts

	aprint_naive("\n");
	aprint_normal(": Gigabit Ethernet Controller\n");

	error = bcmeth_rxq_attach(sc, &sc->sc_rxq, 0);
	if (error) {
		aprint_error(": failed to init rxq: %d\n", error);
		return;
	}

	error = bcmeth_txq_attach(sc, &sc->sc_txq, 0);
	if (error) {
		aprint_error(": failed to init txq: %d\n", error);
		return;
	}

	error = bcmeth_mapcache_create(sc, &sc->sc_rx_mapcache,
	    BCMETH_MAXRXMBUFS, MCLBYTES, BCMETH_NRXSEGS);
	if (error) {
		aprint_error(": failed to allocate rx dmamaps: %d\n", error);
		return;
	}

	error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
	    BCMETH_MAXTXMBUFS, MCLBYTES, BCMETH_NTXSEGS);
	if (error) {
		aprint_error(": failed to allocate tx dmamaps: %d\n", error);
		return;
	}

	error = workqueue_create(&sc->sc_workq, xname, bcmeth_worker, sc,
	    (PRI_USER + MAXPRI_USER) / 2, IPL_NET, WQ_MPSAFE|WQ_PERCPU);
	if (error) {
		aprint_error(": failed to create workqueue: %d\n", error);
		return;
	}

	sc->sc_soft_ih = softint_establish(SOFTINT_MPSAFE | SOFTINT_NET,
	    bcmeth_soft_intr, sc);

	sc->sc_ih = intr_establish(loc->loc_intrs[0], IPL_VM, IST_LEVEL,
	    bcmeth_intr, sc);

	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt %d\n",
		    loc->loc_intrs[0]);
	} else {
		aprint_normal_dev(self, "interrupting on irq %d\n",
		    loc->loc_intrs[0]);
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Since each port is plugged into the switch/flow-accelerator,
	 * we hard code it at GigE full-duplex with flow control enabled.
	 */
	int ifmedia = IFM_ETHER|IFM_1000_T|IFM_FDX;
	//ifmedia |= IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE;
	ifmedia_init(&sc->sc_media, IFM_IMASK, bcmeth_mediachange,
	    bcmeth_mediastatus);
	ifmedia_add(&sc->sc_media, ifmedia, 0, NULL);
	ifmedia_set(&sc->sc_media, ifmedia);

	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_baudrate = IF_Mbps(1000);
	ifp->if_capabilities = 0;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef BCMETH_MPSAFE
	ifp->if_flags2 = IFF2_MPSAFE;
#endif
	ifp->if_ioctl = bcmeth_ifioctl;
	ifp->if_start = bcmeth_ifstart;
	ifp->if_watchdog = bcmeth_ifwatchdog;
	ifp->if_init = bcmeth_ifinit;
	ifp->if_stop = bcmeth_ifstop;
	IFQ_SET_READY(&ifp->if_snd);

	bcmeth_ifstop(ifp, true);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

#ifdef BCMETH_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "intr");
	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "soft intr");
	evcnt_attach_dynamic(&sc->sc_ev_work, EVCNT_TYPE_MISC,
	    NULL, xname, "work items");
	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "tx stalls");
	evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_lo, EVCNT_TYPE_MISC,
	    NULL, xname, "rx badmagic lo");
	evcnt_attach_dynamic(&sc->sc_ev_rx_badmagic_hi, EVCNT_TYPE_MISC,
	    NULL, xname, "rx badmagic hi");
#endif
}

static int
bcmeth_mediachange(struct ifnet *ifp)
{
	//struct bcmeth_softc * const sc = ifp->if_softc;
	return 0;
}

static void
bcmeth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifm)
{
	//struct bcmeth_softc * const sc = ifp->if_softc;

	ifm->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifm->ifm_active = IFM_ETHER | IFM_FDX | IFM_1000_T;
}

static uint64_t
bcmeth_macaddr_create(const uint8_t *enaddr)
{
	return (enaddr[3] << 0)			// UNIMAC_MAC_0
	    | (enaddr[2] << 8)			// UNIMAC_MAC_0
	    | (enaddr[1] << 16)			// UNIMAC_MAC_0
	    | ((uint64_t)enaddr[0] << 24)	// UNIMAC_MAC_0
	    | ((uint64_t)enaddr[5] << 32)	// UNIMAC_MAC_1
	    | ((uint64_t)enaddr[4] << 40);	// UNIMAC_MAC_1
}
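
/*
 * Illustrative example (not from the original source): for the address
 * 00:01:02:03:04:05 the packing above yields the 48-bit value
 * 0x0405'00010203, so the 32-bit register writes in bcmeth_ifinit() store
 * UNIMAC_MAC_0 = 0x00010203 and UNIMAC_MAC_1 = 0x00000405.
 */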

static int
bcmeth_ifinit(struct ifnet *ifp)
{
	struct bcmeth_softc * const sc = ifp->if_softc;
	int error = 0;

	sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
	if (ifp->if_mtu > ETHERMTU_JUMBO)
		return error;

	KASSERT(ifp->if_flags & IFF_UP);

	/*
	 * Stop the interface
	 */
	bcmeth_ifstop(ifp, 0);

	/*
	 * Reserve enough space at the front so that we can insert a
	 * max-sized link header and a VLAN tag.  Also make sure we have
	 * enough room for the rcvsts field.
	 */
	KASSERT(ALIGN(max_linkhdr) == max_linkhdr);
	KASSERTMSG(max_linkhdr > sizeof(struct ether_header), "%u > %zu",
	    max_linkhdr, sizeof(struct ether_header));
	sc->sc_rcvoffset = max_linkhdr + 4 - sizeof(struct ether_header);
	if (sc->sc_rcvoffset <= 4)
		sc->sc_rcvoffset += 4;
	KASSERT((sc->sc_rcvoffset & 3) == 2);
	KASSERT(sc->sc_rcvoffset <= __SHIFTOUT(RCVCTL_RCVOFFSET, RCVCTL_RCVOFFSET));
	KASSERT(sc->sc_rcvoffset >= 6);
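
	/*
	 * Worked example (illustrative): with max_linkhdr = 16 and a
	 * 14-byte struct ether_header, sc_rcvoffset = 16 + 4 - 14 = 6,
	 * which satisfies every assertion above ((6 & 3) == 2, 6 >= 6),
	 * and 6 + 14 = 20 puts the IP header on a 4-byte boundary.
	 */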

	/*
	 * If our frame size has changed (or it's our first time through)
	 * destroy the existing transmit mapcache.
	 */
	if (sc->sc_tx_mapcache != NULL
	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
		bcmeth_mapcache_destroy(sc, sc->sc_tx_mapcache);
		sc->sc_tx_mapcache = NULL;
	}

	if (sc->sc_tx_mapcache == NULL) {
		error = bcmeth_mapcache_create(sc, &sc->sc_tx_mapcache,
		    BCMETH_MAXTXMBUFS, sc->sc_maxfrm, BCMETH_NTXSEGS);
		if (error)
			return error;
	}

	sc->sc_cmdcfg = NO_LENGTH_CHECK | PAUSE_IGNORE
	    | __SHIFTIN(ETH_SPEED_1000, ETH_SPEED)
	    | RX_ENA | TX_ENA;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_cmdcfg |= PROMISC_EN;
	} else {
		sc->sc_cmdcfg &= ~PROMISC_EN;
	}

	const uint64_t macstnaddr =
	    bcmeth_macaddr_create(CLLADDR(ifp->if_sadl));

	sc->sc_intmask = DESCPROTOERR|DATAERR|DESCERR;

	/* 5. Load RCVADDR_LO with new pointer */
	bcmeth_rxq_reset(sc, &sc->sc_rxq);

	bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
	    __SHIFTIN(sc->sc_rcvoffset, RCVCTL_RCVOFFSET)
	    | RCVCTL_PARITY_DIS
	    | RCVCTL_OFLOW_CONTINUE
	    | __SHIFTIN(3, RCVCTL_BURSTLEN));

	/* 6. Load XMTADDR_LO with new pointer */
	bcmeth_txq_reset(sc, &sc->sc_txq);

	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl, XMTCTL_DMA_ACT_INDEX
	    | XMTCTL_PARITY_DIS
	    | __SHIFTIN(3, XMTCTL_BURSTLEN));

	/* 7. Setup other UNIMAC registers */
	bcmeth_write_4(sc, UNIMAC_FRAME_LEN, sc->sc_maxfrm);
	bcmeth_write_4(sc, UNIMAC_MAC_0, (uint32_t)(macstnaddr >> 0));
	bcmeth_write_4(sc, UNIMAC_MAC_1, (uint32_t)(macstnaddr >> 32));
	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, sc->sc_cmdcfg);

	uint32_t devctl = bcmeth_read_4(sc, GMAC_DEVCONTROL);
	devctl |= RGMII_LINK_STATUS_SEL | NWAY_AUTO_POLL_EN | TXARB_STRICT_MODE;
	devctl &= ~FLOW_CTRL_MODE;
	devctl &= ~MIB_RD_RESET_EN;
	devctl &= ~RXQ_OVERFLOW_CTRL_SEL;
	devctl &= ~CPU_FLOW_CTRL_ON;
	bcmeth_write_4(sc, GMAC_DEVCONTROL, devctl);

	/* Setup lazy receive (at most 1ms). */
	sc->sc_rcvlazy = __SHIFTIN(4, INTRCVLAZY_FRAMECOUNT)
	    | __SHIFTIN(125000000 / 1000, INTRCVLAZY_TIMEOUT);
	bcmeth_write_4(sc, GMAC_INTRCVLAZY, sc->sc_rcvlazy);
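	/*
	 * With a frame count of 4 and a timeout of 125000000/1000 = 125000
	 * core clocks, the receive interrupt is deferred until 4 frames
	 * have accumulated or 1ms (at the 125MHz GMAC clock) has elapsed,
	 * whichever comes first.
	 */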

	/*
	 * 11. Enable transmit queues in TQUEUE, and ensure that the
	 * transmit scheduling mode is correctly set in TCTRL.
	 */
	sc->sc_intmask |= XMTINT_0|XMTUF;
	bcmeth_write_4(sc, sc->sc_txq.txq_reg_xmtctl,
	    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl) | XMTCTL_ENABLE);

	/* 12. Enable receive queues in RQUEUE, */
	sc->sc_intmask |= RCVINT|RCVDESCUF|RCVFIFOOF;
	bcmeth_write_4(sc, sc->sc_rxq.rxq_reg_rcvctl,
	    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl) | RCVCTL_ENABLE);

	bcmeth_rxq_produce(sc, &sc->sc_rxq);	/* fill with rx buffers */

#if 0
	aprint_normal_dev(sc->sc_dev,
	    "devctl=%#x ucmdcfg=%#x xmtctl=%#x rcvctl=%#x\n",
	    devctl, sc->sc_cmdcfg,
	    bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtctl),
	    bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvctl));
#endif

	sc->sc_soft_flags = 0;

	bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);

	ifp->if_flags |= IFF_RUNNING;

	return error;
}

static void
bcmeth_ifstop(struct ifnet *ifp, int disable)
{
	struct bcmeth_softc * const sc = ifp->if_softc;
	struct bcmeth_txqueue * const txq = &sc->sc_txq;
	struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;

	KASSERT(!cpu_intr_p());

	sc->sc_soft_flags = 0;
	sc->sc_work_flags = 0;

	/* Disable Rx processing */
	bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
	    bcmeth_read_4(sc, rxq->rxq_reg_rcvctl) & ~RCVCTL_ENABLE);

	/* Disable Tx processing */
	bcmeth_write_4(sc, txq->txq_reg_xmtctl,
	    bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE);

	/* Disable all interrupts */
	bcmeth_write_4(sc, GMAC_INTMASK, 0);

	for (;;) {
		uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
		uint32_t rx0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
		if (__SHIFTOUT(tx0, XMTSTATE) == XMTSTATE_DIS
		    && __SHIFTOUT(rx0, RCVSTATE) == RCVSTATE_DIS)
			break;
		delay(50);
	}
	/*
	 * Now reset the controller.
	 *
	 * 3. Set SW_RESET bit in UNIMAC_COMMAND_CONFIG register
	 * 4. Clear SW_RESET bit in UNIMAC_COMMAND_CONFIG register
	 */
	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, SW_RESET);
	bcmeth_write_4(sc, GMAC_INTSTATUS, ~0);
	sc->sc_intmask = 0;
	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * Let's consume any remaining transmitted packets.  And if we are
	 * disabling the interface, purge ourselves of any untransmitted
	 * packets.  But don't consume any received packets, just drop them.
	 * If we aren't disabling the interface, save the mbufs in the
	 * receive queue for reuse.
	 */
	bcmeth_rxq_purge(sc, &sc->sc_rxq, disable);
	bcmeth_txq_consume(sc, &sc->sc_txq);
	if (disable) {
		bcmeth_txq_purge(sc, &sc->sc_txq);
		IF_PURGE(&ifp->if_snd);
	}

	bcmeth_write_4(sc, UNIMAC_COMMAND_CONFIG, 0);
}

static void
bcmeth_ifwatchdog(struct ifnet *ifp)
{
}

static int
bcmeth_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct bcmeth_softc *sc = ifp->if_softc;
	struct ifreq * const ifr = data;
	const int s = splnet();
	int error;

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;

		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			error = 0;
			break;
		}
		error = bcmeth_ifinit(ifp);
		break;
	}

	splx(s);
	return error;
}

static void
bcmeth_rxq_desc_presync(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	struct gmac_rxdb *rxdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
	    BUS_DMASYNC_PREWRITE);
}

static void
bcmeth_rxq_desc_postsync(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	struct gmac_rxdb *rxdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxdb - rxq->rxq_first) * sizeof(*rxdb), count * sizeof(*rxdb),
	    BUS_DMASYNC_POSTWRITE);
}

static void
bcmeth_txq_desc_presync(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct gmac_txdb *txdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
	    BUS_DMASYNC_PREWRITE);
}

static void
bcmeth_txq_desc_postsync(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct gmac_txdb *txdb,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
	    BUS_DMASYNC_POSTWRITE);
}

static bus_dmamap_t
bcmeth_mapcache_get(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc)
{
	KASSERT(dmc->dmc_nmaps > 0);
	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
	return dmc->dmc_maps[--dmc->dmc_nmaps];
}

static void
bcmeth_mapcache_put(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc,
	bus_dmamap_t map)
{
	KASSERT(map != NULL);
	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
}
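
/*
 * Typical mapcache usage, a sketch of the pattern used by
 * bcmeth_rx_buf_alloc()/bcmeth_rx_map_unload() below:
 *
 *	bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
 *	bus_dmamap_load_mbuf(sc->sc_dmat, map, m, ...);
 *	... bus_dmamap_sync() before and after the DMA ...
 *	bus_dmamap_unload(sc->sc_dmat, map);
 *	bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
 */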

static void
bcmeth_mapcache_destroy(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache *dmc)
{
	const size_t dmc_size =
	    offsetof(struct bcmeth_mapcache, dmc_maps[dmc->dmc_maxmaps]);

	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
	}
	kmem_intr_free(dmc, dmc_size);
}

static int
bcmeth_mapcache_create(
	struct bcmeth_softc *sc,
	struct bcmeth_mapcache **dmc_p,
	size_t maxmaps,
	size_t maxmapsize,
	size_t maxseg)
{
	const size_t dmc_size =
	    offsetof(struct bcmeth_mapcache, dmc_maps[maxmaps]);
	struct bcmeth_mapcache * const dmc =
	    kmem_intr_zalloc(dmc_size, KM_NOSLEEP);

	if (dmc == NULL)
		return ENOMEM;

	dmc->dmc_maxmaps = maxmaps;
	dmc->dmc_nmaps = maxmaps;
	dmc->dmc_maxmapsize = maxmapsize;
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_intr_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
bcmeth_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
bcmeth_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, 2*PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 0);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

static struct mbuf *
bcmeth_rx_buf_alloc(
	struct bcmeth_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = bcmeth_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(((map->_dm_flags ^ sc->sc_dmat->_ranges[0].dr_flags) & _BUS_DMAMAP_COHERENT) == 0);
	KASSERT(map->dm_mapsize == MCLBYTES);
#ifdef BCMETH_RCVMAGIC
	*mtod(m, uint32_t *) = BCMETH_RCVMAGIC;
	bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
	    map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
#else
	bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
	    BUS_DMASYNC_PREREAD);
#endif

	return m;
}

static void
bcmeth_rx_map_unload(
	struct bcmeth_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);
		bcmeth_mapcache_put(sc, sc->sc_rx_mapcache, map);
		M_SETCTX(m, NULL);
	}
}

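/*
 * Refill the receive ring: pull mbufs from the rx bufcache (or allocate
 * fresh clusters) until rxq_inuse reaches rxq_threshold, store each
 * buffer's DMA address in the next free descriptor, sync the descriptors
 * (in two pieces when the ring wraps), and finally write RCVPTR so the
 * hardware sees the new buffers.
 */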
static bool
bcmeth_rxq_produce(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq)
{
	struct gmac_rxdb *producer = rxq->rxq_producer;
	bool produced = false;

	while (rxq->rxq_inuse < rxq->rxq_threshold) {
		struct mbuf *m;
		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
		if (m == NULL) {
			m = bcmeth_rx_buf_alloc(sc);
			if (m == NULL) {
				printf("%s: bcmeth_rx_buf_alloc failed\n", __func__);
				break;
			}
		}
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);

		producer->rxdb_buflen = MCLBYTES;
		producer->rxdb_addrlo = map->dm_segs[0].ds_addr;
		producer->rxdb_flags &= RXDB_FLAG_ET;
		*rxq->rxq_mtail = m;
		rxq->rxq_mtail = &m->m_next;
		m->m_len = MCLBYTES;
		m->m_next = NULL;
		rxq->rxq_inuse++;
		if (++producer == rxq->rxq_last) {
			membar_producer();
			bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    rxq->rxq_last - rxq->rxq_producer);
			producer = rxq->rxq_producer = rxq->rxq_first;
		}
		produced = true;
	}
	if (produced) {
		membar_producer();
		if (producer != rxq->rxq_producer) {
			bcmeth_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    producer - rxq->rxq_producer);
			rxq->rxq_producer = producer;
		}
		bcmeth_write_4(sc, rxq->rxq_reg_rcvptr,
		    rxq->rxq_descmap->dm_segs[0].ds_addr
		    + ((uintptr_t)producer & RCVPTR));
	}
	return true;
}

static void
bcmeth_rx_input(
	struct bcmeth_softc *sc,
	struct mbuf *m,
	uint32_t rxdb_flags)
{
	struct ifnet * const ifp = &sc->sc_if;

	bcmeth_rx_map_unload(sc, m);

	m_adj(m, sc->sc_rcvoffset);

	switch (__SHIFTOUT(rxdb_flags, RXSTS_PKTTYPE)) {
	case RXSTS_PKTTYPE_UC:
		break;
	case RXSTS_PKTTYPE_MC:
		m->m_flags |= M_MCAST;
		break;
	case RXSTS_PKTTYPE_BC:
		m->m_flags |= M_BCAST|M_MCAST;
		break;
	default:
		if (sc->sc_cmdcfg & PROMISC_EN)
			m->m_flags |= M_PROMISC;
		break;
	}
	m->m_pkthdr.rcvif = ifp;

	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;

	/*
	 * Let's give it to the network subsystem to deal with.
	 */
#ifdef BCMETH_MPSAFE
	mutex_exit(sc->sc_lock);
	(*ifp->if_input)(ifp, m);
	mutex_enter(sc->sc_lock);
#else
	int s = splnet();
	bpf_mtap(ifp, m);
	(*ifp->if_input)(ifp, m);
	splx(s);
#endif
}

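/*
 * Consume completed receive descriptors: walk from the consumer index
 * toward the hardware's current descriptor (RCV_CURRDSCR), read the
 * receive status word from the head of each frame's first buffer, gather
 * multi-descriptor frames into one mbuf chain, recycle errored buffers
 * through the rx bufcache, and hand good frames to bcmeth_rx_input().
 * At most 'atmost' frames are processed per call.
 */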
static bool
bcmeth_rxq_consume(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	size_t atmost)
{
	struct ifnet * const ifp = &sc->sc_if;
	struct gmac_rxdb *consumer = rxq->rxq_consumer;
	size_t rxconsumed = 0;
	bool didconsume = false;

	while (atmost-- > 0) {
		if (consumer == rxq->rxq_producer) {
			KASSERT(rxq->rxq_inuse == 0);
			break;
		}

		uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
		uint32_t currdscr = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
		if (consumer == rxq->rxq_first + currdscr) {
			break;
		}
		bcmeth_rxq_desc_postsync(sc, rxq, consumer, 1);

		/*
		 * We own this packet again.  Copy the rxsts word from it.
		 */
		rxconsumed++;
		didconsume = true;
		uint32_t rxsts;
		KASSERT(rxq->rxq_mhead != NULL);
		bus_dmamap_t map = M_GETCTX(rxq->rxq_mhead, bus_dmamap_t);
		bus_dmamap_sync(sc->sc_dmat, map, 0, arm_dcache_align,
		    BUS_DMASYNC_POSTREAD);
		memcpy(&rxsts, rxq->rxq_mhead->m_data, 4);
#if 0
		KASSERTMSG(rxsts != BCMETH_RCVMAGIC, "currdscr=%u consumer=%zd",
		    currdscr, consumer - rxq->rxq_first);
#endif

		/*
		 * Get the count of descriptors.  Fetch the correct number
		 * of mbufs.
		 */
#ifdef BCMETH_RCVMAGIC
		size_t desc_count = rxsts != BCMETH_RCVMAGIC
		    ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1;
#else
		size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
#endif
		struct mbuf *m = rxq->rxq_mhead;
		struct mbuf *m_last = m;
		for (size_t i = 1; i < desc_count; i++) {
			if (++consumer == rxq->rxq_last) {
				consumer = rxq->rxq_first;
			}
			KASSERTMSG(consumer != rxq->rxq_first + currdscr,
			    "i=%zu rxsts=%#x desc_count=%zu currdscr=%u consumer=%zd",
			    i, rxsts, desc_count, currdscr,
			    consumer - rxq->rxq_first);
			m_last = m_last->m_next;
		}

		/*
		 * Now remove it/them from the list of enqueued mbufs.
		 */
		if ((rxq->rxq_mhead = m_last->m_next) == NULL)
			rxq->rxq_mtail = &rxq->rxq_mhead;
		m_last->m_next = NULL;

#ifdef BCMETH_RCVMAGIC
		if (rxsts == BCMETH_RCVMAGIC) {
			ifp->if_ierrors++;
			if ((m->m_ext.ext_paddr >> 28) == 8) {
				BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_lo);
			} else {
				BCMETH_EVCNT_INCR(sc->sc_ev_rx_badmagic_hi);
			}
			IF_ENQUEUE(&sc->sc_rx_bufcache, m);
		} else
#endif /* BCMETH_RCVMAGIC */
		if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
			aprint_error_dev(sc->sc_dev, "[%zu]: count=%zu rxsts=%#x\n",
			    consumer - rxq->rxq_first, desc_count, rxsts);
			/*
			 * We encountered an error, take the mbufs and add them
			 * to the rx bufcache so we can quickly reuse them.
			 */
			ifp->if_ierrors++;
			do {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			} while (m);
		} else {
			uint32_t framelen = __SHIFTOUT(rxsts, RXSTS_FRAMELEN);
			framelen += sc->sc_rcvoffset;
			m->m_pkthdr.len = framelen;
			if (desc_count == 1) {
				KASSERT(framelen <= MCLBYTES);
				m->m_len = framelen;
			} else {
				m_last->m_len = framelen & (MCLBYTES - 1);
			}

#ifdef BCMETH_MPSAFE
			/*
			 * Wrap at the last entry!
			 */
			if (++consumer == rxq->rxq_last) {
				KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
				rxq->rxq_consumer = rxq->rxq_first;
			} else {
				rxq->rxq_consumer = consumer;
			}
			rxq->rxq_inuse -= rxconsumed;
#endif /* BCMETH_MPSAFE */

			/*
			 * Receive the packet (which releases our lock)
			 */
			bcmeth_rx_input(sc, m, rxsts);

#ifdef BCMETH_MPSAFE
			/*
			 * Since we had to give up our lock, we need to
			 * refresh these.
			 */
			consumer = rxq->rxq_consumer;
			rxconsumed = 0;
			continue;
#endif /* BCMETH_MPSAFE */
		}

		/*
		 * Wrap at the last entry!
		 */
		if (++consumer == rxq->rxq_last) {
			KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
			consumer = rxq->rxq_first;
		}
	}

	/*
	 * Update queue info.
	 */
	rxq->rxq_consumer = consumer;
	rxq->rxq_inuse -= rxconsumed;

	/*
	 * Did we consume anything?
	 */
	return didconsume;
}

static void
bcmeth_rxq_purge(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	bool discard)
{
	struct mbuf *m;

	if ((m = rxq->rxq_mhead) != NULL) {
		if (discard) {
			bcmeth_rx_map_unload(sc, m);
			m_freem(m);
		} else {
			while (m != NULL) {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			}
		}
	}

	rxq->rxq_mhead = NULL;
	rxq->rxq_mtail = &rxq->rxq_mhead;
	rxq->rxq_inuse = 0;
}

static void
bcmeth_rxq_reset(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq)
{
	/*
	 * sync all the descriptors
	 */
	bcmeth_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
	    rxq->rxq_last - rxq->rxq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	struct gmac_rxdb *rxdb;
	for (rxdb = rxq->rxq_first; rxdb < rxq->rxq_last - 1; rxdb++) {
		rxdb->rxdb_flags = RXDB_FLAG_IC;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	rxdb->rxdb_flags = RXDB_FLAG_ET|RXDB_FLAG_IC;

	/*
	 * Reset the producer/consumer indexes.
	 */
	rxq->rxq_consumer = rxq->rxq_first;
	rxq->rxq_producer = rxq->rxq_first;
	rxq->rxq_inuse = 0;
	if (rxq->rxq_threshold < BCMETH_MINRXMBUFS)
		rxq->rxq_threshold = BCMETH_MINRXMBUFS;

	sc->sc_intmask |= RCVINT|RCVFIFOOF|RCVDESCUF;

	/*
	 * Restart the receiver at the first descriptor
	 */
	bcmeth_write_4(sc, rxq->rxq_reg_rcvaddrlo,
	    rxq->rxq_descmap->dm_segs[0].ds_addr);
}

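/*
 * Each descriptor ring lives in one BCMETH_RINGSIZE (page-sized)
 * allocation, so desc_count is PAGE_SIZE divided by the descriptor size;
 * the KASSERTs below expect 256 or 512 entries (16-byte descriptors on
 * 4KB or 8KB pages, respectively).
 */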
static int
bcmeth_rxq_attach(
	struct bcmeth_softc *sc,
	struct bcmeth_rxqueue *rxq,
	u_int qno)
{
	size_t desc_count = BCMETH_RINGSIZE / sizeof(rxq->rxq_first[0]);
	int error;
	void *descs;

	KASSERT(desc_count == 256 || desc_count == 512);

	error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
	    &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, BCMETH_RINGSIZE);
	rxq->rxq_first = descs;
	rxq->rxq_last = rxq->rxq_first + desc_count;
	rxq->rxq_consumer = descs;
	rxq->rxq_producer = descs;

	bcmeth_rxq_purge(sc, rxq, true);
	bcmeth_rxq_reset(sc, rxq);

	rxq->rxq_reg_rcvaddrlo = GMAC_RCVADDR_LOW;
	rxq->rxq_reg_rcvctl = GMAC_RCVCONTROL;
	rxq->rxq_reg_rcvptr = GMAC_RCVPTR;
	rxq->rxq_reg_rcvsts0 = GMAC_RCVSTATUS0;
	rxq->rxq_reg_rcvsts1 = GMAC_RCVSTATUS1;

	return 0;
}

static bool
bcmeth_txq_active_p(
	struct bcmeth_softc * const sc,
	struct bcmeth_txqueue *txq)
{
	return !IF_IS_EMPTY(&txq->txq_mbufs);
}

static bool
bcmeth_txq_fillable_p(
	struct bcmeth_softc * const sc,
	struct bcmeth_txqueue *txq)
{
	return txq->txq_free >= txq->txq_threshold;
}

static int
bcmeth_txq_attach(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	u_int qno)
{
	size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]);
	int error;
	void *descs;

	KASSERT(desc_count == 256 || desc_count == 512);

	error = bcmeth_dmamem_alloc(sc->sc_dmat, BCMETH_RINGSIZE,
	    &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, BCMETH_RINGSIZE);
	txq->txq_first = descs;
	txq->txq_last = txq->txq_first + desc_count;
	txq->txq_consumer = descs;
	txq->txq_producer = descs;

	IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS);

	txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW;
	txq->txq_reg_xmtctl = GMAC_XMTCONTROL;
	txq->txq_reg_xmtptr = GMAC_XMTPTR;
	txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0;
	txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1;

	bcmeth_txq_reset(sc, txq);

	return 0;
}

static int
bcmeth_txq_map_load(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map;
	int error;

	map = M_GETCTX(m, bus_dmamap_t);
	if (map != NULL)
		return 0;

	map = bcmeth_mapcache_get(sc, sc->sc_tx_mapcache);
	if (map == NULL)
		return ENOMEM;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error) {
		/* Return the unused map to the cache so it isn't leaked. */
		bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	M_SETCTX(m, map);
	return 0;
}

static void
bcmeth_txq_map_unload(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	KASSERT(m);
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, map);
	bcmeth_mapcache_put(sc, sc->sc_tx_mapcache, map);
}

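/*
 * Post one packet on the transmit ring.  The first descriptor is tagged
 * TXDB_FLAG_SF and the last TXDB_FLAG_EF; once txq_threshold descriptors
 * have been produced without requesting an interrupt (or the mbuf queue
 * is about to fill), TXDB_FLAG_IC is also set on the final descriptor so
 * a completion interrupt lets bcmeth_txq_consume() reclaim ring space.
 */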
static bool
bcmeth_txq_produce(
	struct bcmeth_softc *sc,
	struct bcmeth_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);

	if (map->dm_nsegs > txq->txq_free)
		return false;

	/*
	 * TCP Offload flag must be set in the first descriptor.
	 */
	struct gmac_txdb *producer = txq->txq_producer;
	uint32_t first_flags = TXDB_FLAG_SF;
	uint32_t last_flags = TXDB_FLAG_EF;

	/*
	 * If we've produced enough descriptors without consuming any
	 * we need to ask for an interrupt to reclaim some.
	 */
	txq->txq_lastintr += map->dm_nsegs;
	if (txq->txq_lastintr >= txq->txq_threshold
	    || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
		txq->txq_lastintr = 0;
		last_flags |= TXDB_FLAG_IC;
	}

	KASSERT(producer != txq->txq_last);

	struct gmac_txdb *start = producer;
	size_t count = map->dm_nsegs;
	producer->txdb_flags |= first_flags;
	producer->txdb_addrlo = map->dm_segs[0].ds_addr;
	producer->txdb_buflen = map->dm_segs[0].ds_len;
	for (u_int i = 1; i < map->dm_nsegs; i++) {
#if 0
		printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
		    producer->txdb_flags, producer->txdb_buflen,
		    producer->txdb_addrlo, producer->txdb_addrhi);
#endif
		if (__predict_false(++producer == txq->txq_last)) {
			bcmeth_txq_desc_presync(sc, txq, start,
			    txq->txq_last - start);
			count -= txq->txq_last - start;
			producer = txq->txq_first;
			start = txq->txq_first;
		}
		producer->txdb_addrlo = map->dm_segs[i].ds_addr;
		producer->txdb_buflen = map->dm_segs[i].ds_len;
	}
	producer->txdb_flags |= last_flags;
#if 0
	printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
	    producer->txdb_flags, producer->txdb_buflen,
	    producer->txdb_addrlo, producer->txdb_addrhi);
#endif
	if (count)
		bcmeth_txq_desc_presync(sc, txq, start, count);

	/*
	 * Reduce free count by the number of segments we consumed.
	 */
	txq->txq_free -= map->dm_nsegs;
	KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
	KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txdb_flags & TXDB_FLAG_EF) == 0);
	KASSERT(producer->txdb_flags & TXDB_FLAG_EF);

#if 0
	printf("%s: mbuf %p: produced a %u byte packet in %u segments (%zd..%zd)\n",
	    __func__, m, m->m_pkthdr.len, map->dm_nsegs,
	    txq->txq_producer - txq->txq_first, producer - txq->txq_first);
#endif

	if (producer + 1 == txq->txq_last)
		txq->txq_producer = txq->txq_first;
	else
		txq->txq_producer = producer + 1;
	IF_ENQUEUE(&txq->txq_mbufs, m);

	/*
	 * Let the transmitter know there's more to do
	 */
	bcmeth_write_4(sc, txq->txq_reg_xmtptr,
	    txq->txq_descmap->dm_segs[0].ds_addr
	    + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR));

	return true;
}

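/*
 * Consolidate an mbuf chain into fewer mbufs before transmit (used when
 * IFF_LINK2 is set on the interface).  The in-place compaction of leading
 * small mbufs is deliberately disabled by the "&& false" below, so in
 * practice the function always takes the copy path: allocate a fresh
 * packet header (and cluster if needed), m_copydata() the whole packet
 * into it, and free the original chain.
 */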
static struct mbuf *
bcmeth_copy_packet(struct mbuf *m)
{
	struct mbuf *mext = NULL;
	size_t misalignment = 0;
	size_t hlen = 0;

	for (mext = m; mext != NULL; mext = mext->m_next) {
		if (mext->m_flags & M_EXT) {
			misalignment = mtod(mext, vaddr_t) & arm_dcache_align_mask;
			break;
		}
		hlen += mext->m_len;
	}

	struct mbuf *n = m->m_next;
	if (m != mext && hlen + misalignment <= MHLEN && false) {
		KASSERT(m->m_pktdat <= m->m_data && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]);
		size_t oldoff = m->m_data - m->m_pktdat;
		size_t off;
		if (mext == NULL) {
			off = (oldoff + hlen > MHLEN) ? 0 : oldoff;
		} else {
			off = MHLEN - (hlen + misalignment);
		}
		KASSERT(off + hlen + misalignment <= MHLEN);
		if (((oldoff ^ off) & arm_dcache_align_mask) != 0 || off < oldoff) {
			memmove(&m->m_pktdat[off], m->m_data, m->m_len);
			m->m_data = &m->m_pktdat[off];
		}
		m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]);
		m->m_len = hlen;
		m->m_next = mext;
		while (n != mext) {
			n = m_free(n);
		}
		return m;
	}

	struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
	if (m0 == NULL) {
		return NULL;
	}
	M_COPY_PKTHDR(m0, m);
	MCLAIM(m0, m->m_owner);
	if (m0->m_pkthdr.len > MHLEN) {
		MCLGET(m0, M_DONTWAIT);
		if ((m0->m_flags & M_EXT) == 0) {
			m_freem(m0);
			return NULL;
		}
	}
	m0->m_len = m->m_pkthdr.len;
	m_copydata(m, 0, m0->m_len, mtod(m0, void *));
	m_freem(m);
	return m0;
}
1514 1.16 matt
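/*
 * Drain the interface send queue into the transmit ring.  Returns
 * false if the ring (or the queued-mbuf limit) filled before the send
 * queue emptied, true otherwise.  A packet that cannot be handed to
 * the hardware yet is parked in txq_next and retried on the next call.
 */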
1515 1.2 matt static bool
1516 1.2 matt bcmeth_txq_enqueue(
1517 1.2 matt struct bcmeth_softc *sc,
1518 1.2 matt struct bcmeth_txqueue *txq)
1519 1.2 matt {
1520 1.2 matt for (;;) {
1521 1.2 matt if (IF_QFULL(&txq->txq_mbufs))
1522 1.2 matt return false;
1523 1.2 matt struct mbuf *m = txq->txq_next;
1524 1.2 matt if (m == NULL) {
1525 1.2 matt int s = splnet();
1526 1.2 matt IF_DEQUEUE(&sc->sc_if.if_snd, m);
1527 1.2 matt splx(s);
1528 1.2 matt if (m == NULL)
1529 1.2 matt return true;
1530 1.2 matt M_SETCTX(m, NULL);
1531 1.2 matt } else {
1532 1.2 matt txq->txq_next = NULL;
1533 1.2 matt }
1534 1.15 matt /*
1535 1.15 matt * If LINK2 is set and this packet uses multiple mbufs,
1536 1.15 matt * consolidate it into a single mbuf.
1537 1.15 matt */
1538 1.15 matt if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) {
1539 1.16 matt struct mbuf *m0 = bcmeth_copy_packet(m);
1540 1.15 matt if (m0 == NULL) {
1541 1.15 matt txq->txq_next = m;
1542 1.15 matt return true;
1543 1.15 matt }
1544 1.15 matt m = m0;
1545 1.15 matt }
1546 1.2 matt int error = bcmeth_txq_map_load(sc, txq, m);
1547 1.2 matt if (error) {
1548 1.2 matt aprint_error_dev(sc->sc_dev,
1549 1.2 matt "discarded packet due to "
1550 1.2 matt "dmamap load failure: %d\n", error);
1551 1.2 matt m_freem(m);
1552 1.2 matt continue;
1553 1.2 matt }
1554 1.2 matt KASSERT(txq->txq_next == NULL);
1555 1.2 matt if (!bcmeth_txq_produce(sc, txq, m)) {
1556 1.2 matt txq->txq_next = m;
1557 1.2 matt return false;
1558 1.2 matt }
1559 1.2 matt KASSERT(txq->txq_next == NULL);
1560 1.2 matt }
1561 1.2 matt }
1562 1.2 matt
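/*
 * Reclaim descriptors the hardware has finished with, unloading the
 * dmamaps and freeing the mbuf chains of completed packets.  Returns
 * true if the ring is empty or fillable again, false if it is still
 * too full to accept more packets.
 */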
1563 1.2 matt static bool
1564 1.2 matt bcmeth_txq_consume(
1565 1.2 matt struct bcmeth_softc *sc,
1566 1.2 matt struct bcmeth_txqueue *txq)
1567 1.2 matt {
1568 1.2 matt struct ifnet * const ifp = &sc->sc_if;
1569 1.2 matt struct gmac_txdb *consumer = txq->txq_consumer;
1570 1.2 matt size_t txfree = 0;
1571 1.2 matt
1572 1.2 matt #if 0
1573 1.2 matt printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
1574 1.2 matt #endif
1575 1.2 matt
1576 1.2 matt for (;;) {
1577 1.2 matt if (consumer == txq->txq_producer) {
1578 1.2 matt txq->txq_consumer = consumer;
1579 1.2 matt txq->txq_free += txfree;
1580 1.2 matt txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
1581 1.2 matt #if 0
1582 1.5 matt printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
1583 1.2 matt __func__, txfree, txq->txq_free - txfree, txq->txq_free);
1584 1.2 matt #endif
1585 1.2 matt KASSERT(txq->txq_lastintr == 0);
1586 1.2 matt KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
1587 1.2 matt return true;
1588 1.2 matt }
1589 1.2 matt bcmeth_txq_desc_postsync(sc, txq, consumer, 1);
1590 1.2 matt uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
1591 1.2 matt if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) {
1592 1.2 matt txq->txq_consumer = consumer;
1593 1.2 matt txq->txq_free += txfree;
1594 1.2 matt txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
1595 1.2 matt #if 0
1596 1.2 matt printf("%s: freed %zu descriptors\n",
1597 1.2 matt __func__, txfree);
1598 1.2 matt #endif
1599 1.2 matt return bcmeth_txq_fillable_p(sc, txq);
1600 1.2 matt }
1601 1.2 matt
1602 1.2 matt /*
1603 1.2 matt * If this is the last descriptor in the chain, get the
1604 1.2 matt * mbuf, free its dmamap, and free the mbuf chain itself.
1605 1.2 matt */
1606 1.2 matt const uint32_t txdb_flags = consumer->txdb_flags;
1607 1.2 matt if (txdb_flags & TXDB_FLAG_EF) {
1608 1.2 matt struct mbuf *m;
1609 1.2 matt
1610 1.2 matt IF_DEQUEUE(&txq->txq_mbufs, m);
1611 1.2 matt KASSERT(m);
1612 1.2 matt bcmeth_txq_map_unload(sc, txq, m);
1613 1.2 matt #if 0
1614 1.2 matt printf("%s: mbuf %p: consumed a %u byte packet\n",
1615 1.2 matt __func__, m, m->m_pkthdr.len);
1616 1.2 matt #endif
1617 1.10 matt bpf_mtap(ifp, m);
1618 1.2 matt ifp->if_opackets++;
1619 1.2 matt ifp->if_obytes += m->m_pkthdr.len;
1620 1.2 matt if (m->m_flags & M_MCAST)
1621 1.2 matt ifp->if_omcasts++;
1622 1.2 matt m_freem(m);
1623 1.2 matt }
1624 1.2 matt
1625 1.2 matt /*
1626 1.2 matt * We own this packet again. Clear all flags except wrap.
1627 1.2 matt */
1628 1.2 matt txfree++;
1629 1.2 matt
1630 1.2 matt /*
1631 1.2 matt * Wrap at the last entry!
1632 1.2 matt */
1633 1.2 matt if (txdb_flags & TXDB_FLAG_ET) {
1634 1.2 matt consumer->txdb_flags = TXDB_FLAG_ET;
1635 1.2 matt KASSERT(consumer + 1 == txq->txq_last);
1636 1.2 matt consumer = txq->txq_first;
1637 1.2 matt } else {
1638 1.2 matt consumer->txdb_flags = 0;
1639 1.2 matt consumer++;
1640 1.2 matt KASSERT(consumer < txq->txq_last);
1641 1.2 matt }
1642 1.2 matt }
1643 1.2 matt }
1644 1.2 matt
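/*
 * Discard every packet still queued on the transmit ring, along with
 * any packet parked in txq_next.  Only legal while the transmitter is
 * disabled, as the KASSERT below verifies.
 */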
1645 1.2 matt static void
1646 1.2 matt bcmeth_txq_purge(
1647 1.2 matt struct bcmeth_softc *sc,
1648 1.2 matt struct bcmeth_txqueue *txq)
1649 1.2 matt {
1650 1.2 matt struct mbuf *m;
1651 1.2 matt KASSERT((bcmeth_read_4(sc, UNIMAC_COMMAND_CONFIG) & TX_ENA) == 0);
1652 1.2 matt
1653 1.2 matt for (;;) {
1654 1.2 matt IF_DEQUEUE(&txq->txq_mbufs, m);
1655 1.2 matt if (m == NULL)
1656 1.2 matt break;
1657 1.2 matt bcmeth_txq_map_unload(sc, txq, m);
1658 1.2 matt m_freem(m);
1659 1.2 matt }
1660 1.2 matt if ((m = txq->txq_next) != NULL) {
1661 1.2 matt txq->txq_next = NULL;
1662 1.2 matt bcmeth_txq_map_unload(sc, txq, m);
1663 1.2 matt m_freem(m);
1664 1.2 matt }
1665 1.2 matt }
1666 1.2 matt
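/*
 * Return the transmit ring to its initial state: reclaim ownership of
 * every descriptor, mark the wrap entry, rewind the producer and
 * consumer indexes, and point the hardware back at the start of the
 * ring.
 */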
1667 1.2 matt static void
1668 1.2 matt bcmeth_txq_reset(
1669 1.2 matt struct bcmeth_softc *sc,
1670 1.2 matt struct bcmeth_txqueue *txq)
1671 1.2 matt {
1672 1.2 matt /*
1673 1.2 matt * sync all the descriptors
1674 1.2 matt */
1675 1.2 matt bcmeth_txq_desc_postsync(sc, txq, txq->txq_first,
1676 1.2 matt txq->txq_last - txq->txq_first);
1677 1.2 matt
1678 1.2 matt /*
1679 1.2 matt * Make sure we own all descriptors in the ring.
1680 1.2 matt */
1681 1.2 matt struct gmac_txdb *txdb;
1682 1.2 matt for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) {
1683 1.2 matt txdb->txdb_flags = 0;
1684 1.2 matt }
1685 1.2 matt
1686 1.2 matt /*
1687 1.2 matt * Last descriptor has the wrap flag.
1688 1.2 matt */
1689 1.2 matt txdb->txdb_flags = TXDB_FLAG_ET;
1690 1.2 matt
	/*
	 * Reset the producer and consumer indexes.
	 */
1694 1.2 matt txq->txq_consumer = txq->txq_first;
1695 1.2 matt txq->txq_producer = txq->txq_first;
1696 1.2 matt txq->txq_free = txq->txq_last - txq->txq_first - 1;
1697 1.2 matt txq->txq_threshold = txq->txq_free / 2;
1698 1.2 matt txq->txq_lastintr = 0;
1699 1.2 matt
1700 1.2 matt /*
1701 1.2 matt * What do we want to get interrupted on?
1702 1.2 matt */
1703 1.2 matt sc->sc_intmask |= XMTINT_0 | XMTUF;
1704 1.2 matt
	/*
	 * Restart the transmitter at the first descriptor.
	 */
1708 1.2 matt bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo,
1709 1.2 matt txq->txq_descmap->dm_segs->ds_addr);
1710 1.2 matt }
1711 1.2 matt
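/*
 * if_start callback.  From hard interrupt context we defer the send
 * to the soft interrupt; with BCMETH_MPSAFETX defined, any other
 * context does the send directly via bcmeth_soft_txintr().
 */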
1712 1.2 matt static void
1713 1.2 matt bcmeth_ifstart(struct ifnet *ifp)
1714 1.2 matt {
1715 1.2 matt struct bcmeth_softc * const sc = ifp->if_softc;
1716 1.2 matt
1717 1.16 matt if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
1718 1.16 matt return;
1719 1.16 matt }
1720 1.16 matt
1721 1.16 matt #ifdef BCMETH_MPSAFETX
1722 1.16 matt if (cpu_intr_p()) {
1723 1.16 matt #endif
1724 1.16 matt atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
1725 1.16 matt softint_schedule(sc->sc_soft_ih);
1726 1.16 matt #ifdef BCMETH_MPSAFETX
1727 1.16 matt } else {
		/*
		 * We are already in a softint or some other thread
		 * context, so borrow the current thread to do the send
		 * and save ourselves the overhead of scheduling a fast
		 * soft interrupt.
		 */
1733 1.16 matt bcmeth_soft_txintr(sc);
1734 1.16 matt }
1735 1.16 matt #endif
1736 1.2 matt }
1737 1.2 matt
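/*
 * Hard interrupt handler.  Acknowledge the sources that fired and
 * mask them, then defer the real work: light receive loads and
 * transmit completions go to the soft interrupt, while heavy receive
 * loads, descriptor underflows, and fatal errors (which force a
 * reinit) go to the workqueue.
 */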
1738 1.2 matt int
1739 1.1 matt bcmeth_intr(void *arg)
1740 1.1 matt {
1741 1.1 matt struct bcmeth_softc * const sc = arg;
1742 1.2 matt uint32_t soft_flags = 0;
1743 1.8 matt uint32_t work_flags = 0;
1744 1.1 matt int rv = 0;
1745 1.1 matt
1746 1.1 matt mutex_enter(sc->sc_hwlock);
1747 1.1 matt
1748 1.15 matt uint32_t intmask = sc->sc_intmask;
1749 1.18 matt BCMETH_EVCNT_INCR(sc->sc_ev_intr);
1750 1.2 matt
1751 1.2 matt for (;;) {
1752 1.2 matt uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
1753 1.15 matt intstatus &= intmask;
1754 1.2 matt bcmeth_write_4(sc, GMAC_INTSTATUS, intstatus); /* write 1 to clear */
1755 1.2 matt if (intstatus == 0) {
1756 1.2 matt break;
1757 1.2 matt }
1758 1.2 matt #if 0
1759 1.8 matt aprint_normal_dev(sc->sc_dev, "%s: intstatus=%#x intmask=%#x\n",
1760 1.8 matt __func__, intstatus, bcmeth_read_4(sc, GMAC_INTMASK));
1761 1.2 matt #endif
1762 1.2 matt if (intstatus & RCVINT) {
1763 1.8 matt struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
1764 1.15 matt intmask &= ~RCVINT;
1765 1.8 matt
1766 1.8 matt uint32_t rcvsts0 = bcmeth_read_4(sc, rxq->rxq_reg_rcvsts0);
1767 1.8 matt uint32_t descs = __SHIFTOUT(rcvsts0, RCV_CURRDSCR);
1768 1.8 matt if (descs < rxq->rxq_consumer - rxq->rxq_first) {
				/*
				 * The current descriptor wrapped past the
				 * end of the ring, so add the descriptors
				 * between the consumer and the end.
				 */
1773 1.8 matt descs += rxq->rxq_last - rxq->rxq_consumer;
1774 1.8 matt } else {
1775 1.8 matt descs -= rxq->rxq_consumer - rxq->rxq_first;
1776 1.8 matt }
			/*
			 * If the lazy-receive timeout fired (fewer than
			 * framecount descriptors are pending), we can't be
			 * hogging the CPU, so use softints.  If we hit the
			 * frame count, we might be hogging, so let the
			 * workqueue deal with the packets.
			 */
1782 1.8 matt const uint32_t framecount = __SHIFTOUT(sc->sc_rcvlazy, INTRCVLAZY_FRAMECOUNT);
1783 1.9 matt if (descs < framecount
1784 1.9 matt || (curcpu()->ci_curlwp->l_flag & LW_IDLE)) {
1785 1.8 matt soft_flags |= SOFT_RXINTR;
1786 1.8 matt } else {
1787 1.8 matt work_flags |= WORK_RXINTR;
1788 1.8 matt }
1789 1.2 matt }
1790 1.2 matt
1791 1.2 matt if (intstatus & XMTINT_0) {
1792 1.15 matt intmask &= ~XMTINT_0;
1793 1.2 matt soft_flags |= SOFT_TXINTR;
1794 1.2 matt }
1795 1.2 matt
1796 1.2 matt if (intstatus & RCVDESCUF) {
1797 1.15 matt intmask &= ~RCVDESCUF;
1798 1.8 matt work_flags |= WORK_RXUNDERFLOW;
1799 1.2 matt }
1800 1.2 matt
1801 1.15 matt intstatus &= intmask;
1802 1.2 matt if (intstatus) {
1803 1.10 matt aprint_error_dev(sc->sc_dev,
1804 1.10 matt "intr: intstatus=%#x\n", intstatus);
1805 1.10 matt aprint_error_dev(sc->sc_dev,
1806 1.10 matt "rcvbase=%p/%#lx rcvptr=%#x rcvsts=%#x/%#x\n",
1807 1.10 matt sc->sc_rxq.rxq_first,
1808 1.10 matt sc->sc_rxq.rxq_descmap->dm_segs[0].ds_addr,
1809 1.10 matt bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvptr),
1810 1.10 matt bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts0),
1811 1.10 matt bcmeth_read_4(sc, sc->sc_rxq.rxq_reg_rcvsts1));
1812 1.10 matt aprint_error_dev(sc->sc_dev,
1813 1.10 matt "xmtbase=%p/%#lx xmtptr=%#x xmtsts=%#x/%#x\n",
1814 1.10 matt sc->sc_txq.txq_first,
1815 1.10 matt sc->sc_txq.txq_descmap->dm_segs[0].ds_addr,
1816 1.10 matt bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtptr),
1817 1.10 matt bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts0),
1818 1.10 matt bcmeth_read_4(sc, sc->sc_txq.txq_reg_xmtsts1));
1819 1.15 matt intmask &= ~intstatus;
1820 1.8 matt work_flags |= WORK_REINIT;
1821 1.2 matt break;
1822 1.2 matt }
1823 1.2 matt }
1824 1.2 matt
	if (intmask != sc->sc_intmask) {
		sc->sc_intmask = intmask;
		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
	}
1828 1.8 matt
1829 1.8 matt if (work_flags) {
1830 1.8 matt if (sc->sc_work_flags == 0) {
1831 1.8 matt workqueue_enqueue(sc->sc_workq, &sc->sc_work, NULL);
1832 1.8 matt }
1833 1.8 matt atomic_or_32(&sc->sc_work_flags, work_flags);
1834 1.8 matt rv = 1;
1835 1.8 matt }
1836 1.8 matt
1837 1.2 matt if (soft_flags) {
1838 1.8 matt if (sc->sc_soft_flags == 0) {
1839 1.8 matt softint_schedule(sc->sc_soft_ih);
1840 1.8 matt }
1841 1.8 matt atomic_or_32(&sc->sc_soft_flags, soft_flags);
1842 1.2 matt rv = 1;
1843 1.2 matt }
1844 1.1 matt
1845 1.1 matt mutex_exit(sc->sc_hwlock);
1846 1.1 matt
1847 1.1 matt return rv;
1848 1.1 matt }
1849 1.2 matt
1850 1.16 matt #ifdef BCMETH_MPSAFETX
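/*
 * MP-safe transmit worker: does the same transmit-side processing as
 * bcmeth_soft_intr() but is called directly from bcmeth_ifstart() so
 * a send need not bounce through the soft interrupt.
 */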
1851 1.16 matt void
1852 1.16 matt bcmeth_soft_txintr(struct bcmeth_softc *sc)
1853 1.16 matt {
1854 1.16 matt mutex_enter(sc->sc_lock);
	/*
	 * Let's do what we came here for.  Consume transmitted
	 * packets off the transmit ring.
	 */
1859 1.16 matt if (!bcmeth_txq_consume(sc, &sc->sc_txq)
1860 1.16 matt || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
1861 1.18 matt BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
1862 1.16 matt sc->sc_if.if_flags |= IFF_OACTIVE;
1863 1.16 matt } else {
1864 1.16 matt sc->sc_if.if_flags &= ~IFF_OACTIVE;
1865 1.16 matt }
1866 1.16 matt if (sc->sc_if.if_flags & IFF_RUNNING) {
1867 1.16 matt mutex_spin_enter(sc->sc_hwlock);
1868 1.16 matt sc->sc_intmask |= XMTINT_0;
1869 1.16 matt bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1870 1.16 matt mutex_spin_exit(sc->sc_hwlock);
1871 1.16 matt }
1872 1.16 matt mutex_exit(sc->sc_lock);
1873 1.16 matt }
1874 1.16 matt #endif /* BCMETH_MPSAFETX */
1875 1.16 matt
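/*
 * Soft interrupt handler: reclaim transmit completions, refill the
 * transmit ring from the send queue, consume received packets, and
 * re-enable the interrupt sources that bcmeth_intr() masked.
 */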
1876 1.2 matt void
1877 1.2 matt bcmeth_soft_intr(void *arg)
1878 1.2 matt {
1879 1.2 matt struct bcmeth_softc * const sc = arg;
1880 1.2 matt struct ifnet * const ifp = &sc->sc_if;
1881 1.15 matt uint32_t intmask = 0;
1882 1.2 matt
1883 1.2 matt mutex_enter(sc->sc_lock);
1884 1.2 matt
1885 1.2 matt u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);
1886 1.2 matt
1887 1.18 matt BCMETH_EVCNT_INCR(sc->sc_ev_soft_intr);
1888 1.2 matt
1889 1.8 matt if ((soft_flags & SOFT_TXINTR)
1890 1.8 matt || bcmeth_txq_active_p(sc, &sc->sc_txq)) {
		/*
		 * Let's do what we came here for.  Consume transmitted
		 * packets off the transmit ring.
		 */
1895 1.8 matt if (!bcmeth_txq_consume(sc, &sc->sc_txq)
1896 1.8 matt || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
1897 1.18 matt BCMETH_EVCNT_INCR(sc->sc_ev_tx_stall);
1898 1.8 matt ifp->if_flags |= IFF_OACTIVE;
1899 1.8 matt } else {
1900 1.8 matt ifp->if_flags &= ~IFF_OACTIVE;
1901 1.8 matt }
1902 1.15 matt intmask |= XMTINT_0;
1903 1.8 matt }
1904 1.8 matt
1905 1.8 matt if (soft_flags & SOFT_RXINTR) {
		/*
		 * Consume received packets.
		 */
1909 1.20 matt while (bcmeth_rxq_consume(sc, &sc->sc_rxq,
1910 1.20 matt sc->sc_rxq.rxq_threshold / 4)) {
			/*
			 * We've consumed our quota (a quarter of the
			 * receive threshold) and still have more to do.
			 * Refill the ring and keep going.
			 */
1915 1.20 matt bcmeth_rxq_produce(sc, &sc->sc_rxq);
1916 1.20 matt }
1917 1.15 matt intmask |= RCVINT;
1918 1.8 matt }
1919 1.8 matt
1920 1.8 matt if (ifp->if_flags & IFF_RUNNING) {
1921 1.8 matt bcmeth_rxq_produce(sc, &sc->sc_rxq);
1922 1.14 matt mutex_spin_enter(sc->sc_hwlock);
1923 1.15 matt sc->sc_intmask |= intmask;
1924 1.8 matt bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1925 1.14 matt mutex_spin_exit(sc->sc_hwlock);
1926 1.8 matt }
1927 1.8 matt
1928 1.8 matt mutex_exit(sc->sc_lock);
1929 1.8 matt }
1930 1.8 matt
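/*
 * Workqueue handler for work too heavy or slow for a soft interrupt:
 * full reinitialization after a fatal error, growing the receive
 * threshold after a descriptor underflow, and bulk receive
 * processing.
 */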
1931 1.8 matt void
1932 1.8 matt bcmeth_worker(struct work *wk, void *arg)
1933 1.8 matt {
1934 1.8 matt struct bcmeth_softc * const sc = arg;
1935 1.8 matt struct ifnet * const ifp = &sc->sc_if;
1936 1.15 matt uint32_t intmask = 0;
1937 1.8 matt
1938 1.8 matt mutex_enter(sc->sc_lock);
1939 1.8 matt
1940 1.18 matt BCMETH_EVCNT_INCR(sc->sc_ev_work);
1941 1.8 matt
1942 1.8 matt uint32_t work_flags = atomic_swap_32(&sc->sc_work_flags, 0);
1943 1.8 matt if (work_flags & WORK_REINIT) {
1944 1.2 matt int s = splnet();
1945 1.8 matt sc->sc_soft_flags = 0;
1946 1.2 matt bcmeth_ifinit(ifp);
1947 1.2 matt splx(s);
1948 1.8 matt work_flags &= ~WORK_RXUNDERFLOW;
1949 1.2 matt }
1950 1.2 matt
1951 1.8 matt if (work_flags & WORK_RXUNDERFLOW) {
1952 1.2 matt struct bcmeth_rxqueue * const rxq = &sc->sc_rxq;
1953 1.2 matt size_t threshold = 5 * rxq->rxq_threshold / 4;
1954 1.2 matt if (threshold >= rxq->rxq_last - rxq->rxq_first) {
1955 1.2 matt threshold = rxq->rxq_last - rxq->rxq_first - 1;
1956 1.2 matt } else {
1957 1.15 matt intmask |= RCVDESCUF;
1958 1.2 matt }
1959 1.2 matt aprint_normal_dev(sc->sc_dev,
1960 1.2 matt "increasing receive buffers from %zu to %zu\n",
1961 1.2 matt rxq->rxq_threshold, threshold);
1962 1.2 matt rxq->rxq_threshold = threshold;
1963 1.2 matt }
1964 1.2 matt
1965 1.8 matt if (work_flags & WORK_RXINTR) {
		/*
		 * Consume received packets.
		 */
1969 1.20 matt while (bcmeth_rxq_consume(sc, &sc->sc_rxq,
1970 1.20 matt sc->sc_rxq.rxq_threshold / 4)) {
			/*
			 * We've consumed our quota (a quarter of the
			 * receive threshold) and still have more to do.
			 * Refill the ring and keep going.
			 */
1975 1.20 matt bcmeth_rxq_produce(sc, &sc->sc_rxq);
1976 1.20 matt }
1977 1.15 matt intmask |= RCVINT;
1978 1.2 matt }
1979 1.2 matt
1980 1.2 matt if (ifp->if_flags & IFF_RUNNING) {
1981 1.2 matt bcmeth_rxq_produce(sc, &sc->sc_rxq);
1982 1.16 matt #if 0
1983 1.16 matt uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
1984 1.16 matt if (intstatus & RCVINT) {
1985 1.16 matt bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT);
1986 1.16 matt work_flags |= WORK_RXINTR;
1987 1.16 matt continue;
1988 1.16 matt }
1989 1.16 matt #endif
1990 1.14 matt mutex_spin_enter(sc->sc_hwlock);
1991 1.15 matt sc->sc_intmask |= intmask;
1992 1.2 matt bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
1993 1.14 matt mutex_spin_exit(sc->sc_hwlock);
1994 1.2 matt }
1995 1.2 matt
1996 1.2 matt mutex_exit(sc->sc_lock);
1997 1.2 matt }