/*	$NetBSD: gemini_gmac.c,v 1.1 2008/12/14 01:57:02 matt Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "locators.h"
#include <sys/param.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <net/if.h>
#include <net/if_ether.h>

#include <machine/bus.h>

#include <arm/gemini/gemini_reg.h>
#include <arm/gemini/gemini_obiovar.h>
#include <arm/gemini/gemini_gmacvar.h>
#include <arm/gemini/gemini_gpiovar.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>

#include <sys/gpio.h>

__KERNEL_RCSID(0, "$NetBSD: gemini_gmac.c,v 1.1 2008/12/14 01:57:02 matt Exp $");

#define SWFREEQ_DESCS   256     /* one page worth */
#define HWFREEQ_DESCS   256     /* one page worth */

static int geminigmac_match(device_t, cfdata_t, void *);
static void geminigmac_attach(device_t, device_t, void *);
static int geminigmac_find(device_t, cfdata_t, const int *, void *);
static int geminigmac_print(void *aux, const char *name);

static int geminigmac_mii_readreg(device_t, int, int);
static void geminigmac_mii_writereg(device_t, int, int, int);

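/*
 * The MII management bus is bit-banged over two GPIO pins: GPIO_MDIO
 * carries both MDO and MDI (MDTOPHY switches its direction), and
 * GPIO_MDCLK carries MDC.  The MD* bits below are the logical signals
 * exchanged with mii_bitbang(9) via geminigmac_mii_bitbang_ops.
 */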
#define GPIO_MDIO       21
#define GPIO_MDCLK      22

#define MDIN            __BIT(3)
#define MDOUT           __BIT(2)
#define MDCLK           __BIT(1)
#define MDTOPHY         __BIT(0)

CFATTACH_DECL_NEW(geminigmac, sizeof(struct gmac_softc),
    geminigmac_match, geminigmac_attach, NULL, NULL);

extern struct cfdriver geminigmac_cd;
extern struct cfdriver geminigpio_cd;

void
gmac_intr_update(struct gmac_softc *sc)
{
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK,
            ~sc->sc_int_enabled[0]);
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK,
            ~sc->sc_int_enabled[1]);
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK,
            ~sc->sc_int_enabled[2]);
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK,
            ~sc->sc_int_enabled[3]);
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
            ~sc->sc_int_enabled[4]);
}

static void
gmac_init(struct gmac_softc *sc)
{
        gmac_hwqmem_t *hqm;

        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_SKBSIZE,
            SKB_SIZE_SET(PAGE_SIZE, MCLBYTES));

        sc->sc_int_select[0] = INT0_GMAC1;
        sc->sc_int_select[1] = INT1_GMAC1;
        sc->sc_int_select[2] = INT2_GMAC1;
        sc->sc_int_select[3] = INT3_GMAC1;
        sc->sc_int_select[4] = INT4_GMAC1;

        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_SELECT, INT0_GMAC1);
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_SELECT, INT1_GMAC1);
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_SELECT, INT2_GMAC1);
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_SELECT, INT3_GMAC1);
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_SELECT, INT4_GMAC1);

        gmac_intr_update(sc);

        /*
         * Allocate the cache for receive dmamaps.
         */
        sc->sc_rxmaps = gmac_mapcache_create(sc->sc_dmat, MAX_RXMAPS,
            MCLBYTES, 1);
        KASSERT(sc->sc_rxmaps != NULL);

        /*
         * Allocate the memory for the sw (receive) free queue.
         */
        hqm = gmac_hwqmem_create(sc->sc_rxmaps, SWFREEQ_DESCS, 1,
            HQM_PRODUCER|HQM_RX);
        sc->sc_swfreeq = gmac_hwqueue_create(hqm, sc->sc_iot, sc->sc_ioh,
            GMAC_SWFREEQ_RWPTR, GMAC_SWFREEQ_BASE, 0);
        KASSERT(sc->sc_swfreeq != NULL);

        /*
         * Allocate the cache for transmit dmamaps.
         */
        sc->sc_txmaps = gmac_mapcache_create(sc->sc_dmat, MAX_TXMAPS,
            ETHERMTU_JUMBO + ETHER_HDR_LEN, 16);
        KASSERT(sc->sc_txmaps != NULL);

        /*
         * Allocate the memory for the hw (transmit) free queue.
         */
        hqm = gmac_hwqmem_create(sc->sc_rxmaps, HWFREEQ_DESCS, 1,
            HQM_CONSUMER|HQM_TX);
        sc->sc_hwfreeq = gmac_hwqueue_create(hqm, sc->sc_iot, sc->sc_ioh,
            GMAC_HWFREEQ_RWPTR, GMAC_HWFREEQ_BASE, 0);
        KASSERT(sc->sc_hwfreeq != NULL);
}

int
geminigmac_match(device_t parent, cfdata_t cf, void *aux)
{
        struct obio_attach_args *obio = aux;

        if (obio->obio_addr != GEMINI_GMAC_BASE)
                return 0;

        return 1;
}

void
geminigmac_attach(device_t parent, device_t self, void *aux)
{
        struct gmac_softc *sc = device_private(self);
        struct obio_attach_args *obio = aux;
        struct gmac_attach_args gma;
        cfdata_t cf;
        uint32_t v;
        int error;

        sc->sc_dev = self;
        sc->sc_iot = obio->obio_iot;
        sc->sc_dmat = obio->obio_dmat;
        sc->sc_gpio_dev = geminigpio_cd.cd_devs[0];
        sc->sc_gpio_mdclk = GPIO_MDCLK;
        sc->sc_gpio_mdout = GPIO_MDIO;
        sc->sc_gpio_mdin = GPIO_MDIO;
        KASSERT(sc->sc_gpio_dev != NULL);

        error = bus_space_map(sc->sc_iot, obio->obio_addr, obio->obio_size, 0,
            &sc->sc_ioh);
        if (error) {
                aprint_error(": error mapping registers: %d\n", error);
                return;
        }

        v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, 0);
        aprint_normal(": devid %d rev %d\n", GMAC_TOE_DEVID(v),
            GMAC_TOE_REVID(v));
        aprint_naive("\n");

        mutex_init(&sc->sc_mdiolock, MUTEX_DEFAULT, IPL_NET);

        /*
         * Initialize the GPIO pins.
         */
        geminigpio_pin_ctl(sc->sc_gpio_dev, sc->sc_gpio_mdclk, GPIO_PIN_OUTPUT);
        geminigpio_pin_ctl(sc->sc_gpio_dev, sc->sc_gpio_mdout, GPIO_PIN_OUTPUT);
        if (sc->sc_gpio_mdout != sc->sc_gpio_mdin)
                geminigpio_pin_ctl(sc->sc_gpio_dev, sc->sc_gpio_mdin,
                    GPIO_PIN_INPUT);

        /*
         * Set the MDIO GPIO pins to a known state.
         */
        geminigpio_pin_write(sc->sc_gpio_dev, sc->sc_gpio_mdclk, 0);
        geminigpio_pin_write(sc->sc_gpio_dev, sc->sc_gpio_mdout, 0);
        sc->sc_mdiobits = MDCLK;

        gmac_init(sc);

        gma.gma_iot = sc->sc_iot;
        gma.gma_ioh = sc->sc_ioh;
        gma.gma_dmat = sc->sc_dmat;

        gma.gma_mii_readreg = geminigmac_mii_readreg;
        gma.gma_mii_writereg = geminigmac_mii_writereg;

        gma.gma_port = 0;
        gma.gma_phy = -1;
        gma.gma_intr = 1;

        cf = config_search_ia(geminigmac_find, sc->sc_dev,
            geminigmac_cd.cd_name, &gma);
        if (cf != NULL)
                config_attach(sc->sc_dev, cf, &gma, geminigmac_print);

        gma.gma_port = 1;
        gma.gma_phy = -1;
        gma.gma_intr = 2;

        cf = config_search_ia(geminigmac_find, sc->sc_dev,
            geminigmac_cd.cd_name, &gma);
        if (cf != NULL)
                config_attach(sc->sc_dev, cf, &gma, geminigmac_print);
}

static int
geminigmac_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
        struct gmac_attach_args * const gma = aux;

        if (gma->gma_port != cf->cf_loc[GEMINIGMACCF_PORT])
                return 0;
        if (gma->gma_intr != cf->cf_loc[GEMINIGMACCF_INTR])
                return 0;

        gma->gma_phy = cf->cf_loc[GEMINIGMACCF_PHY];
        gma->gma_intr = cf->cf_loc[GEMINIGMACCF_INTR];

        return config_match(parent, cf, gma);
}

static int
geminigmac_print(void *aux, const char *name)
{
        struct gmac_attach_args * const gma = aux;

        aprint_normal(" port %d", gma->gma_port);
        aprint_normal(" phy %d", gma->gma_phy);
        aprint_normal(" intr %d", gma->gma_intr);

        return UNCONF;
}

static uint32_t
gemini_gmac_gpio_read(device_t dv)
{
        struct gmac_softc * const sc = device_private(dv);
        int value = geminigpio_pin_read(sc->sc_gpio_dev, GPIO_MDIO);

        KASSERT((sc->sc_mdiobits & MDTOPHY) == 0);

        return value ? MDIN : 0;
}

static void
gemini_gmac_gpio_write(device_t dv, uint32_t bits)
{
        struct gmac_softc * const sc = device_private(dv);

        if ((sc->sc_mdiobits ^ bits) & MDTOPHY) {
                int flags = (bits & MDTOPHY) ? GPIO_PIN_OUTPUT : GPIO_PIN_INPUT;
                geminigpio_pin_ctl(sc->sc_gpio_dev, GPIO_MDIO, flags);
        }

        if ((sc->sc_mdiobits ^ bits) & MDOUT) {
                int flags = ((bits & MDOUT) != 0);
                geminigpio_pin_write(sc->sc_gpio_dev, GPIO_MDIO, flags);
        }

        if ((sc->sc_mdiobits ^ bits) & MDCLK) {
                int flags = ((bits & MDCLK) != 0);
                geminigpio_pin_write(sc->sc_gpio_dev, GPIO_MDCLK, flags);
        }

        sc->sc_mdiobits = bits;
}

static const struct mii_bitbang_ops geminigmac_mii_bitbang_ops = {
        .mbo_read = gemini_gmac_gpio_read,
        .mbo_write = gemini_gmac_gpio_write,
        .mbo_bits[MII_BIT_MDO] = MDOUT,
        .mbo_bits[MII_BIT_MDI] = MDIN,
        .mbo_bits[MII_BIT_MDC] = MDCLK,
        .mbo_bits[MII_BIT_DIR_HOST_PHY] = MDTOPHY,
};

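/*
 * MDIO accessors handed to both child MACs via the attach args.  The
 * two ports share the single bit-banged management bus, so every access
 * is serialized with sc_mdiolock.
 */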
int
geminigmac_mii_readreg(device_t dv, int phy, int reg)
{
        device_t parent = device_parent(dv);
        struct gmac_softc * const sc = device_private(parent);
        int rv;

        mutex_enter(&sc->sc_mdiolock);
        rv = mii_bitbang_readreg(parent, &geminigmac_mii_bitbang_ops, phy, reg);
        mutex_exit(&sc->sc_mdiolock);

        return rv;
}

void
geminigmac_mii_writereg(device_t dv, int phy, int reg, int val)
{
        device_t parent = device_parent(dv);
        struct gmac_softc * const sc = device_private(parent);

        mutex_enter(&sc->sc_mdiolock);
        mii_bitbang_writereg(parent, &geminigmac_mii_bitbang_ops, phy, reg, val);
        mutex_exit(&sc->sc_mdiolock);
}

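/*
 * Dmamap caches: each cache is a fixed-size LIFO stack of preallocated
 * bus_dma(9) maps so the receive and transmit paths can recycle maps
 * instead of calling bus_dmamap_create/destroy per packet.
 */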
gmac_mapcache_t *
gmac_mapcache_create(bus_dma_tag_t dmat, size_t maxmaps, bus_size_t mapsize,
    int nsegs)
{
        gmac_mapcache_t *mc;

        mc = kmem_zalloc(offsetof(gmac_mapcache_t, mc_maps[maxmaps]),
            KM_SLEEP);
        if (mc == NULL)
                return NULL;

        mc->mc_max = maxmaps;
        mc->mc_dmat = dmat;
        mc->mc_mapsize = mapsize;
        mc->mc_nsegs = nsegs;
        return mc;
}

void
gmac_mapcache_destroy(gmac_mapcache_t **mc_p)
{
        gmac_mapcache_t *mc = *mc_p;

        if (mc == NULL)
                return;

        KASSERT(mc->mc_used == 0);
        while (mc->mc_free-- > 0) {
                KASSERT(mc->mc_maps[mc->mc_free] != NULL);
                bus_dmamap_destroy(mc->mc_dmat, mc->mc_maps[mc->mc_free]);
                mc->mc_maps[mc->mc_free] = NULL;
        }

        kmem_free(mc, offsetof(gmac_mapcache_t, mc_maps[mc->mc_max]));
        *mc_p = NULL;
}

int
gmac_mapcache_fill(gmac_mapcache_t *mc, size_t limit)
{
        int error;

        KASSERT(limit <= mc->mc_max);
        printf("gmac_mapcache_fill(%p): limit=%zu used=%zu free=%zu\n",
            mc, limit, mc->mc_used, mc->mc_free);

        for (error = 0; mc->mc_free + mc->mc_used < limit; mc->mc_free++) {
                KASSERT(mc->mc_maps[mc->mc_free] == NULL);
                error = bus_dmamap_create(mc->mc_dmat, mc->mc_mapsize,
                    mc->mc_nsegs, mc->mc_mapsize, 0,
                    BUS_DMA_ALLOCNOW|BUS_DMA_WAITOK,
                    &mc->mc_maps[mc->mc_free]);
                if (error)
                        break;
        }
        printf("gmac_mapcache_fill(%p): limit=%zu used=%zu free=%zu\n",
            mc, limit, mc->mc_used, mc->mc_free);

        return error;
}

bus_dmamap_t
gmac_mapcache_get(gmac_mapcache_t *mc)
{
        bus_dmamap_t map;

        KASSERT(mc != NULL);

        if (mc->mc_free == 0) {
                int error;
                if (mc->mc_used == mc->mc_max)
                        return NULL;
                error = bus_dmamap_create(mc->mc_dmat, mc->mc_mapsize,
                    mc->mc_nsegs, mc->mc_mapsize, 0,
                    BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT,
                    &map);
                if (error)
                        return NULL;
                KASSERT(mc->mc_maps[mc->mc_free] == NULL);
        } else {
                KASSERT(mc->mc_free <= mc->mc_max);
                map = mc->mc_maps[--mc->mc_free];
                mc->mc_maps[mc->mc_free] = NULL;
        }
        mc->mc_used++;
        KASSERT(map != NULL);

        return map;
}

void
gmac_mapcache_put(gmac_mapcache_t *mc, bus_dmamap_t map)
{
        KASSERT(mc->mc_free + mc->mc_used < mc->mc_max);
        KASSERT(mc->mc_maps[mc->mc_free] == NULL);

        mc->mc_maps[mc->mc_free++] = map;
        mc->mc_used--;
}

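/*
 * Hardware queue helpers.  Each queue is a power-of-two ring of
 * gmac_desc_t descriptors (four 32-bit words each) in DMA-able memory.
 * The queue's read/write pointer register packs the read pointer into
 * bits 4..15 and the write pointer into bits 20..31; hwq_rptr and
 * hwq_wptr shadow those fields.
 */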
gmac_desc_t *
gmac_hwqueue_desc(gmac_hwqueue_t *hwq, size_t i)
{
        i += hwq->hwq_wptr;
        if (i >= hwq->hwq_size)
                i -= hwq->hwq_size;
        return hwq->hwq_base + i;
}

void
gmac_hwqueue_sync(gmac_hwqueue_t *hwq)
{
        gmac_hwqmem_t * const hqm = hwq->hwq_hqm;
        uint32_t v = bus_space_read_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0);
        uint16_t old_rptr = hwq->hwq_rptr;

        KASSERT(hqm->hqm_flags & HQM_PRODUCER);

        hwq->hwq_rptr = (uint16_t)(v >> 4) & 0xfff;
        hwq->hwq_wptr = (uint16_t)(v >> 20) & 0xfff;

        if (old_rptr == hwq->hwq_rptr)
                return;

        hwq->hwq_free += hwq->hwq_rptr - old_rptr;
        if (__predict_false(old_rptr > hwq->hwq_rptr)) {
                bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
                    sizeof(gmac_desc_t [hwq->hwq_qoff + old_rptr]),
                    sizeof(gmac_desc_t [hwq->hwq_size - old_rptr]),
                    BUS_DMASYNC_POSTREAD);
                hwq->hwq_free += hwq->hwq_size;
                old_rptr = 0;
        }
        if (old_rptr < hwq->hwq_rptr) {
                bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
                    sizeof(gmac_desc_t [hwq->hwq_qoff + old_rptr]),
                    sizeof(gmac_desc_t [hwq->hwq_qoff + hwq->hwq_rptr]),
                    BUS_DMASYNC_POSTREAD);
        }
}

void
gmac_hwqueue_produce(gmac_hwqueue_t *hwq, size_t count)
{
        gmac_hwqmem_t * const hqm = hwq->hwq_hqm;

        KASSERT(count < hwq->hwq_free);
        KASSERT(hqm->hqm_flags & HQM_PRODUCER);

        hwq->hwq_free -= count;
        if (hwq->hwq_wptr + count >= hwq->hwq_size) {
                bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
                    sizeof(gmac_desc_t [hwq->hwq_qoff + hwq->hwq_wptr]),
                    sizeof(gmac_desc_t [hwq->hwq_size - hwq->hwq_wptr]),
                    BUS_DMASYNC_PREREAD);
                count -= hwq->hwq_size - hwq->hwq_wptr;
                hwq->hwq_wptr = 0;
        }
        if (count > 0) {
                bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
                    sizeof(gmac_desc_t [hwq->hwq_qoff + hwq->hwq_wptr]),
                    sizeof(gmac_desc_t [hwq->hwq_wptr + count]),
                    BUS_DMASYNC_PREREAD);
                hwq->hwq_wptr += count;
        }

        /*
         * Tell the h/w we've produced a few more descriptors.
         */
        bus_space_write_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0,
            (hwq->hwq_wptr << 20) | (hwq->hwq_rptr << 4));
}

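/*
 * The hardware has handed back a receive buffer.  Find the matching
 * mbuf on the software free queue by its DMA address, unload its
 * dmamap, and append it to the frame being reassembled; when the
 * descriptor marked EOF arrives, pass the completed packet to if_input.
 */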
static void
gmac_hwqueue_rxconsume(gmac_hwqueue_t *hwq, const gmac_desc_t *d)
{
        gmac_hwqmem_t * const hqm = hwq->hwq_hqm;
        struct ifnet * const ifp = hwq->hwq_ifp;
        size_t buflen = d->d_desc0 & 0xffff;
        bus_dmamap_t map;
        struct mbuf *m, *last_m, **mp;

        KASSERT(ifp != NULL);

        /*
         * First we have to find this mbuf in the software free queue
         * (the producer of the mbufs) and remove it.
         */
        for (mp = &hwq->hwq_producer->hwq_ifq.ifq_head, last_m = NULL;
             (m = *mp) != NULL;
             last_m = m, mp = &m->m_nextpkt) {
                map = M_GETCTX(m, bus_dmamap_t);
                KASSERT(map->dm_nsegs == 1);
                if (d->d_bufaddr == map->dm_segs->ds_addr) {
                        *mp = m->m_nextpkt;
                        if (hwq->hwq_producer->hwq_ifq.ifq_tail == m)
                                hwq->hwq_producer->hwq_ifq.ifq_tail = last_m;
                        hwq->hwq_producer->hwq_ifq.ifq_len--;
                        break;
                }
        }
        KASSERT(m != NULL);

        m->m_len = buflen;
        if (d->d_desc3 & DESC3_SOF) {
                buflen += 2;            /* account for the pad */
                m->m_pkthdr.len = (d->d_desc1 & 0xffff) - ETHER_CRC_LEN;
        }

        map = M_GETCTX(m, bus_dmamap_t);

        /*
         * Sync the buffer contents, unload the dmamap, and save it away.
         */
        bus_dmamap_sync(hqm->hqm_dmat, map, 0, buflen, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(hqm->hqm_dmat, map);
        M_SETCTX(m, NULL);
        gmac_mapcache_put(hqm->hqm_mc, map);

        /*
         * Now we build our new packet chain by tacking this on the end.
         */
        *hwq->hwq_mp = m;
        if ((d->d_desc3 & DESC3_EOF) == 0) {
                /*
                 * Not the last fragment, so make sure the next one gets
                 * appended in the right place.
                 */
                hwq->hwq_mp = &m->m_next;
                return;
        }

        /*
         * We have a complete frame, let's try to deliver it.
         */
        m->m_len -= ETHER_CRC_LEN;      /* remove the CRC from the end */

        /*
         * Now get the whole chain.
         */
        m = hwq->hwq_rxmbuf;
        m->m_pkthdr.rcvif = ifp;
        ifp->if_ipackets++;
        ifp->if_ibytes += m->m_pkthdr.len;
        switch (DESC0_RXSTS_GET(d->d_desc0)) {
        case DESC0_RXSTS_GOOD:
        case DESC0_RXSTS_LONG:
                m->m_data += 2;
                KASSERT(m_length(m) == m->m_pkthdr.len);
#if NBPFILTER > 0
                if (ifp->if_bpf)
                        bpf_mtap(ifp, m);
#endif
                (*ifp->if_input)(ifp, m);
                break;
        default:
                ifp->if_ierrors++;
                m_freem(m);
                break;
        }
        hwq->hwq_rxmbuf = NULL;
        hwq->hwq_mp = &hwq->hwq_rxmbuf;
}

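/*
 * A transmit buffer has completed.  Find the owning transmit queue by
 * matching the buffer address against the last DMA segment of each
 * queue's head mbuf, then unload the dmamap, update the interface
 * counters, and free the mbuf.
 */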
static void
gmac_hwqueue_txconsume(gmac_hwqueue_t *hwq, const gmac_desc_t *d)
{
        gmac_hwqmem_t *hqm;
        gmac_hwqueue_t *txhwq;
        struct ifnet *ifp;
        bus_dmamap_t map = NULL;
        struct mbuf *m = NULL;

        SLIST_FOREACH(txhwq, &hwq->hwq_producers, hwq_link) {
                if ((m = txhwq->hwq_ifq.ifq_head) == NULL)
                        continue;
                map = M_GETCTX(m, bus_dmamap_t);
                if (d->d_bufaddr == map->dm_segs[map->dm_nsegs-1].ds_addr) {
                        if ((txhwq->hwq_ifq.ifq_head = m->m_nextpkt) == NULL)
                                txhwq->hwq_ifq.ifq_tail = NULL;
                        txhwq->hwq_ifq.ifq_len--;
                        break;
                }
        }
        KASSERT(txhwq != NULL);
        KASSERT(m != NULL);

        hqm = txhwq->hwq_hqm;
        bus_dmamap_sync(hqm->hqm_dmat, map, 0, map->dm_mapsize,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(hqm->hqm_dmat, map);
        M_SETCTX(m, NULL);
        gmac_mapcache_put(hqm->hqm_mc, map);

        ifp = txhwq->hwq_ifp;
        ifp->if_opackets++;
        ifp->if_obytes += m->m_pkthdr.len;

#if NBPFILTER > 0
        if (ifp->if_bpf)
                bpf_mtap(ifp, m);
#endif
        m_freem(m);
}

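/*
 * Drain the descriptors the hardware has written to this queue,
 * dispatching each one to the receive or transmit completion handler
 * above.
 */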
void
gmac_hwqueue_consume(gmac_hwqueue_t *hwq)
{
        gmac_hwqmem_t * const hqm = hwq->hwq_hqm;

        KASSERT((hqm->hqm_flags & HQM_PRODUCER) == 0);

        for (;;) {
                gmac_desc_t d;
                uint32_t v;
                uint16_t rptr, wptr;

                v = bus_space_read_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0);
                rptr = (v >> 4) & 0xfff;
                wptr = (v >> 20) & 0xfff;
                for (; rptr != ((wptr - 1) & (hwq->hwq_size - 1));
                     rptr = (rptr + 1) & (hwq->hwq_size - 1)) {
                        bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
                            sizeof(gmac_desc_t [hwq->hwq_qoff + rptr]),
                            sizeof(gmac_desc_t),
                            BUS_DMASYNC_POSTWRITE);
                        d.d_desc0 = le32toh(hwq->hwq_base[rptr].d_desc0);
                        d.d_desc1 = le32toh(hwq->hwq_base[rptr].d_desc1);
                        d.d_bufaddr = le32toh(hwq->hwq_base[rptr].d_bufaddr);
                        d.d_desc3 = le32toh(hwq->hwq_base[rptr].d_desc3);
                        bus_dmamap_sync(hqm->hqm_dmat, hqm->hqm_dmamap,
                            sizeof(gmac_desc_t [hwq->hwq_qoff + rptr]),
                            sizeof(gmac_desc_t),
                            BUS_DMASYNC_PREWRITE);

                        if ((hqm->hqm_flags & HQM_TX) == 0) {
                                gmac_hwqueue_rxconsume(hwq, &d);
                        } else if (d.d_desc3 & DESC3_EOF) {
                                gmac_hwqueue_txconsume(hwq, &d);
                        }
                }
                hwq->hwq_rptr = rptr;
                hwq->hwq_wptr = wptr;
        }
}

void
gmac_hwqmem_destroy(gmac_hwqmem_t *hqm)
{
        if (hqm->hqm_nsegs) {
                if (hqm->hqm_base) {
                        if (hqm->hqm_dmamap) {
                                if (hqm->hqm_dmamap->dm_mapsize) {
                                        bus_dmamap_unload(hqm->hqm_dmat,
                                            hqm->hqm_dmamap);
                                }
                                bus_dmamap_destroy(hqm->hqm_dmat,
                                    hqm->hqm_dmamap);
                        }
                        bus_dmamem_unmap(hqm->hqm_dmat, hqm->hqm_base,
                            hqm->hqm_memsize);
                }
                bus_dmamem_free(hqm->hqm_dmat, hqm->hqm_segs, hqm->hqm_nsegs);
        }

        kmem_free(hqm, sizeof(*hqm));
}

gmac_hwqmem_t *
gmac_hwqmem_create(gmac_mapcache_t *mc, size_t ndesc, size_t nqueue, int flags)
{
        gmac_hwqmem_t *hqm;
        int error;

        KASSERT(ndesc > 0 && ndesc <= 2048);
        KASSERT((ndesc & (ndesc - 1)) == 0);

        hqm = kmem_zalloc(sizeof(*hqm), KM_SLEEP);
        if (hqm == NULL)
                return NULL;

        hqm->hqm_memsize = nqueue * sizeof(gmac_desc_t [ndesc]);
        hqm->hqm_mc = mc;
        hqm->hqm_dmat = mc->mc_dmat;
        hqm->hqm_ndesc = ndesc;
        hqm->hqm_nqueue = nqueue;
        hqm->hqm_flags = flags;

        error = bus_dmamem_alloc(hqm->hqm_dmat, hqm->hqm_memsize, 0, 0,
            hqm->hqm_segs, 1, &hqm->hqm_nsegs, BUS_DMA_WAITOK);
        if (error)
                goto failed;
        error = bus_dmamem_map(hqm->hqm_dmat, hqm->hqm_segs, hqm->hqm_nsegs,
            hqm->hqm_memsize, (void **)&hqm->hqm_base, BUS_DMA_WAITOK);
        if (error)
                goto failed;
        error = bus_dmamap_create(hqm->hqm_dmat, hqm->hqm_memsize,
            hqm->hqm_nsegs, 0, 0, BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW,
            &hqm->hqm_dmamap);
        if (error)
                goto failed;
        error = bus_dmamap_load(hqm->hqm_dmat, hqm->hqm_dmamap, hqm->hqm_base,
            hqm->hqm_memsize, NULL, BUS_DMA_WAITOK
            | (flags & HQM_PRODUCER ? BUS_DMA_READ : BUS_DMA_WRITE));
        if (error)
                goto failed;

        return hqm;

failed:
        gmac_hwqmem_destroy(hqm);
        return NULL;
}

void
gmac_hwqueue_destroy(gmac_hwqueue_t *hwq)
{
        gmac_hwqmem_t * const hqm = hwq->hwq_hqm;

        KASSERT(hqm->hqm_refs & hwq->hwq_ref);
        hqm->hqm_refs &= ~hwq->hwq_ref;
        for (;;) {
                struct mbuf *m;
                bus_dmamap_t map;

                IF_DEQUEUE(&hwq->hwq_ifq, m);
                if (m == NULL)
                        break;
                map = M_GETCTX(m, bus_dmamap_t);
                bus_dmamap_unload(hqm->hqm_dmat, map);
                gmac_mapcache_put(hqm->hqm_mc, map);
                m_freem(m);
        }
        kmem_free(hwq, sizeof(*hwq));
}

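/*
 * Carve queue `qno' out of the descriptor memory and seed its software
 * state from the current read/write pointer register.  The base
 * register written for queue 0 appears to encode the ring's physical
 * address in the upper bits and, in the low bits, ffs(ndesc) + 3,
 * i.e. log2 of the ring size in bytes for 16-byte descriptors.
 */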
gmac_hwqueue_t *
gmac_hwqueue_create(gmac_hwqmem_t *hqm,
    bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t qrwptr, bus_size_t qbase,
    size_t qno)
{
        const size_t log2_memsize = ffs(hqm->hqm_ndesc) + 3;
        gmac_hwqueue_t *hwq;
        uint32_t v;

        KASSERT(qno < hqm->hqm_nqueue);
        KASSERT((hqm->hqm_refs & (1 << qno)) == 0);

        hwq = kmem_zalloc(sizeof(*hwq), KM_SLEEP);
        if (hwq == NULL)
                return NULL;

        hwq->hwq_size = hqm->hqm_ndesc;

        hwq->hwq_iot = iot;
        bus_space_subregion(iot, ioh, qrwptr, sizeof(uint32_t),
            &hwq->hwq_qrwptr_ioh);

        hwq->hwq_hqm = hqm;
        hwq->hwq_ref = 1 << qno;
        hqm->hqm_refs |= hwq->hwq_ref;
        hwq->hwq_qoff = hqm->hqm_ndesc * qno;
        hwq->hwq_base = hqm->hqm_base + hwq->hwq_qoff;

        v = bus_space_read_4(hwq->hwq_iot, hwq->hwq_qrwptr_ioh, 0);
        hwq->hwq_rptr = (v >> 4) & 0xfff;
        hwq->hwq_wptr = (v >> 20) & 0xfff;

        if (qno == 0) {
                bus_space_write_4(hwq->hwq_iot, ioh, qbase,
                    hqm->hqm_dmamap->dm_segs[0].ds_addr | (log2_memsize));
        }

        hwq->hwq_free = hwq->hwq_size - 1;
        hwq->hwq_ifq.ifq_maxlen = hwq->hwq_free;

        return hwq;
}