if_gmc.c revision 1.6.18.1 1 1.6.18.1 bouyer /* $NetBSD: if_gmc.c,v 1.6.18.1 2017/04/21 16:53:23 bouyer Exp $ */
2 1.1 matt /*-
3 1.1 matt * Copyright (c) 2008 The NetBSD Foundation, Inc.
4 1.1 matt * All rights reserved.
5 1.1 matt *
6 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
7 1.1 matt * by Matt Thomas <matt (at) 3am-software.com>
8 1.1 matt *
9 1.1 matt * Redistribution and use in source and binary forms, with or without
10 1.1 matt * modification, are permitted provided that the following conditions
11 1.1 matt * are met:
12 1.1 matt * 1. Redistributions of source code must retain the above copyright
13 1.1 matt * notice, this list of conditions and the following disclaimer.
14 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
15 1.1 matt * notice, this list of conditions and the following disclaimer in the
16 1.1 matt * documentation and/or other materials provided with the distribution.
17 1.1 matt *
18 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
29 1.1 matt */
30 1.1 matt
31 1.1 matt #include <sys/param.h>
32 1.1 matt #include <sys/callout.h>
33 1.1 matt #include <sys/device.h>
34 1.1 matt #include <sys/ioctl.h>
35 1.1 matt #include <sys/kernel.h>
36 1.1 matt #include <sys/kmem.h>
37 1.1 matt #include <sys/mbuf.h>
38 1.1 matt
39 1.4 dyoung #include <sys/bus.h>
40 1.1 matt #include <machine/intr.h>
41 1.1 matt
42 1.1 matt #include <arm/gemini/gemini_reg.h>
43 1.1 matt #include <arm/gemini/gemini_gmacreg.h>
44 1.1 matt #include <arm/gemini/gemini_gmacvar.h>
45 1.1 matt
46 1.1 matt #include <net/if.h>
47 1.1 matt #include <net/if_ether.h>
48 1.1 matt #include <net/if_dl.h>
49 1.1 matt
50 1.6.18.1 bouyer __KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.6.18.1 2017/04/21 16:53:23 bouyer Exp $");
51 1.1 matt
/* Maximum number of DMA segments for a single transmit mbuf chain. */
#define MAX_TXSEG 32

/*
 * Per-port software state for one GMAC ethernet port.  Two gmc ports
 * share a single parent gmac core (sc_psc), including the software
 * free queue and the interrupt lines.
 */
struct gmc_softc {
	device_t sc_dev;		/* generic device glue */
	struct gmac_softc *sc_psc;	/* parent (shared GMAC core) softc */
	struct gmc_softc *sc_sibling;	/* the other port, if attached */
	bus_dma_tag_t sc_dmat;		/* DMA tag (inherited from parent) */
	bus_space_tag_t sc_iot;		/* register space tag */
	bus_space_handle_t sc_ioh;	/* whole-core register handle */
	bus_space_handle_t sc_dma_ioh;	/* this port's DMA register subregion */
	bus_space_handle_t sc_gmac_ioh;	/* this port's MAC register subregion */
	struct ethercom sc_ec;		/* ethernet common state (has ifnet) */
	struct mii_data sc_mii;		/* MII/media state */
	void *sc_ih;			/* interrupt handle (port0 only) */
	bool sc_port1;			/* true if this is port 1 */
	uint8_t sc_phy;			/* PHY address on the MII bus */
	gmac_hwqueue_t *sc_rxq;		/* default receive queue */
	gmac_hwqueue_t *sc_txq[6];	/* software transmit queues */
	callout_t sc_mii_ch;		/* one-second MII tick callout */

	/* Shadow copies of hardware registers (avoid redundant writes). */
	uint32_t sc_gmac_status;
	uint32_t sc_gmac_sta_add[3];
	uint32_t sc_gmac_mcast_filter[2];
	uint32_t sc_gmac_rx_filter;
	uint32_t sc_gmac_config[2];
	uint32_t sc_dmavr;

	uint32_t sc_int_mask[5];	/* bits belonging to this port */
	uint32_t sc_int_enabled[5];	/* bits this port has enabled */
};

/* Convenience alias: the embedded ifnet inside the ethercom. */
#define sc_if sc_ec.ec_if
84 1.1 matt
85 1.1 matt static bool
86 1.1 matt gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
87 1.1 matt {
88 1.1 matt bus_dmamap_t map;
89 1.3 matt uint32_t desc0, desc1, desc3;
90 1.1 matt struct mbuf *last_m, *m0;
91 1.1 matt size_t count, i;
92 1.1 matt int error;
93 1.1 matt gmac_desc_t *d;
94 1.1 matt
95 1.2 matt KASSERT(hwq != NULL);
96 1.2 matt
97 1.1 matt map = gmac_mapcache_get(hwq->hwq_hqm->hqm_mc);
98 1.1 matt if (map == NULL)
99 1.1 matt return false;
100 1.1 matt
101 1.1 matt for (last_m = NULL, m0 = m, count = 0;
102 1.1 matt m0 != NULL;
103 1.1 matt last_m = m0, m0 = m0->m_next) {
104 1.1 matt vaddr_t addr = (uintptr_t)m0->m_data;
105 1.1 matt if (m0->m_len == 0)
106 1.1 matt continue;
107 1.1 matt if (addr & 1) {
108 1.1 matt if (last_m != NULL && M_TRAILINGSPACE(last_m) > 0) {
109 1.1 matt last_m->m_data[last_m->m_len++] = *m->m_data++;
110 1.1 matt m->m_len--;
111 1.1 matt } else if (M_TRAILINGSPACE(m0) > 0) {
112 1.1 matt memmove(m0->m_data + 1, m0->m_data, m0->m_len);
113 1.1 matt m0->m_data++;
114 1.1 matt } else if (M_LEADINGSPACE(m0) > 0) {
115 1.1 matt memmove(m0->m_data - 1, m0->m_data, m0->m_len);
116 1.1 matt m0->m_data--;
117 1.1 matt } else {
118 1.2 matt panic("gmc_txqueue: odd addr %p", m0->m_data);
119 1.1 matt }
120 1.1 matt }
121 1.1 matt count += ((addr & PGOFSET) + m->m_len + PGOFSET) >> PGSHIFT;
122 1.1 matt }
123 1.1 matt
124 1.2 matt gmac_hwqueue_sync(hwq);
125 1.1 matt if (hwq->hwq_free <= count) {
126 1.2 matt gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
127 1.2 matt return false;
128 1.1 matt }
129 1.1 matt
130 1.1 matt error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
131 1.2 matt BUS_DMA_WRITE|BUS_DMA_NOWAIT);
132 1.1 matt if (error) {
133 1.1 matt aprint_error_dev(sc->sc_dev, "ifstart: load failed: %d\n",
134 1.1 matt error);
135 1.1 matt gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
136 1.1 matt m_freem(m);
137 1.1 matt sc->sc_if.if_oerrors++;
138 1.1 matt return true;
139 1.1 matt }
140 1.1 matt KASSERT(map->dm_nsegs > 0);
141 1.1 matt
142 1.1 matt /*
143 1.1 matt * Sync the mbuf contents to memory/cache.
144 1.1 matt */
145 1.1 matt bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
146 1.2 matt BUS_DMASYNC_PREWRITE);
147 1.1 matt
148 1.1 matt /*
149 1.1 matt * Now we need to load the descriptors...
150 1.1 matt */
151 1.3 matt desc0 = map->dm_nsegs << 16;
152 1.1 matt desc1 = m->m_pkthdr.len;
153 1.1 matt desc3 = DESC3_SOF;
154 1.1 matt i = 0;
155 1.2 matt d = NULL;
156 1.1 matt do {
157 1.3 matt #if 0
158 1.2 matt if (i > 0)
159 1.3 matt aprint_debug_dev(sc->sc_dev,
160 1.2 matt "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
161 1.2 matt i-1, d, d->d_desc0, d->d_desc1,
162 1.2 matt d->d_bufaddr, d->d_desc3);
163 1.3 matt #endif
164 1.1 matt d = gmac_hwqueue_desc(hwq, i);
165 1.1 matt KASSERT(map->dm_segs[i].ds_len > 0);
166 1.1 matt KASSERT((map->dm_segs[i].ds_addr & 1) == 0);
167 1.3 matt d->d_desc0 = htole32(map->dm_segs[i].ds_len | desc0);
168 1.2 matt d->d_desc1 = htole32(desc1);
169 1.2 matt d->d_bufaddr = htole32(map->dm_segs[i].ds_addr);
170 1.2 matt d->d_desc3 = htole32(desc3);
171 1.2 matt desc3 = 0;
172 1.1 matt } while (++i < map->dm_nsegs);
173 1.1 matt
174 1.2 matt d->d_desc3 |= htole32(DESC3_EOF|DESC3_EOFIE);
175 1.3 matt #if 0
176 1.3 matt aprint_debug_dev(sc->sc_dev,
177 1.2 matt "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
178 1.2 matt i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
179 1.3 matt #endif
180 1.1 matt M_SETCTX(m, map);
181 1.1 matt IF_ENQUEUE(&hwq->hwq_ifq, m);
182 1.1 matt /*
183 1.1 matt * Last descriptor has been marked. Give them to the h/w.
184 1.1 matt * This will sync for us.
185 1.1 matt */
186 1.1 matt gmac_hwqueue_produce(hwq, map->dm_nsegs);
187 1.3 matt #if 0
188 1.3 matt aprint_debug_dev(sc->sc_dev,
189 1.2 matt "gmac_txqueue: *%zu@%p=%#x/%#x/%#x/%#x\n",
190 1.2 matt i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
191 1.3 matt #endif
192 1.1 matt return true;
193 1.1 matt }
194 1.1 matt
195 1.1 matt static void
196 1.1 matt gmc_filter_change(struct gmc_softc *sc)
197 1.1 matt {
198 1.1 matt struct ether_multi *enm;
199 1.1 matt struct ether_multistep step;
200 1.1 matt uint32_t mhash[2];
201 1.1 matt uint32_t new0, new1, new2;
202 1.1 matt const char * const eaddr = CLLADDR(sc->sc_if.if_sadl);
203 1.1 matt
204 1.1 matt new0 = eaddr[0] | ((eaddr[1] | (eaddr[2] | (eaddr[3] << 8)) << 8) << 8);
205 1.1 matt new1 = eaddr[4] | (eaddr[5] << 8);
206 1.1 matt new2 = 0;
207 1.1 matt if (sc->sc_gmac_sta_add[0] != new0
208 1.1 matt || sc->sc_gmac_sta_add[1] != new1
209 1.1 matt || sc->sc_gmac_sta_add[2] != new2) {
210 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD0,
211 1.1 matt new0);
212 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD1,
213 1.1 matt new1);
214 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD2,
215 1.1 matt new2);
216 1.1 matt sc->sc_gmac_sta_add[0] = new0;
217 1.1 matt sc->sc_gmac_sta_add[1] = new1;
218 1.1 matt sc->sc_gmac_sta_add[2] = new2;
219 1.1 matt }
220 1.1 matt
221 1.1 matt mhash[0] = 0;
222 1.1 matt mhash[1] = 0;
223 1.1 matt ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
224 1.1 matt while (enm != NULL) {
225 1.1 matt size_t i;
226 1.1 matt if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
227 1.1 matt mhash[0] = mhash[1] = 0xffffffff;
228 1.1 matt break;
229 1.1 matt }
230 1.1 matt i = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
231 1.2 matt mhash[(i >> 5) & 1] |= 1 << (i & 31);
232 1.1 matt ETHER_NEXT_MULTI(step, enm);
233 1.1 matt }
234 1.1 matt
235 1.1 matt if (sc->sc_gmac_mcast_filter[0] != mhash[0]
236 1.1 matt || sc->sc_gmac_mcast_filter[1] != mhash[1]) {
237 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
238 1.1 matt GMAC_MCAST_FILTER0, mhash[0]);
239 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
240 1.1 matt GMAC_MCAST_FILTER1, mhash[1]);
241 1.1 matt sc->sc_gmac_mcast_filter[0] = mhash[0];
242 1.1 matt sc->sc_gmac_mcast_filter[1] = mhash[1];
243 1.1 matt }
244 1.1 matt
245 1.1 matt new0 = sc->sc_gmac_rx_filter & ~RXFILTER_PROMISC;
246 1.1 matt new0 |= RXFILTER_BROADCAST | RXFILTER_UNICAST | RXFILTER_MULTICAST;
247 1.1 matt if (sc->sc_if.if_flags & IFF_PROMISC)
248 1.1 matt new0 |= RXFILTER_PROMISC;
249 1.1 matt
250 1.1 matt if (new0 != sc->sc_gmac_rx_filter) {
251 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_RX_FILTER,
252 1.1 matt new0);
253 1.1 matt sc->sc_gmac_rx_filter = new0;
254 1.1 matt }
255 1.1 matt }
256 1.1 matt
257 1.1 matt static void
258 1.1 matt gmc_mii_tick(void *arg)
259 1.1 matt {
260 1.1 matt struct gmc_softc * const sc = arg;
261 1.2 matt struct gmac_softc * const psc = sc->sc_psc;
262 1.1 matt int s = splnet();
263 1.1 matt
264 1.2 matt /*
265 1.2 matt * If we had to increase the number of receive mbufs due to fifo
266 1.2 matt * overflows, we need a way to decrease them. So every second we
267 1.2 matt * recieve less than or equal to MIN_RXMAPS packets, we decrement
268 1.2 matt * swfree_min until it returns to MIN_RXMAPS.
269 1.2 matt */
270 1.2 matt if (psc->sc_rxpkts_per_sec <= MIN_RXMAPS
271 1.3 matt && psc->sc_swfree_min > MIN_RXMAPS) {
272 1.2 matt psc->sc_swfree_min--;
273 1.3 matt gmac_swfree_min_update(psc);
274 1.3 matt }
275 1.2 matt /*
276 1.2 matt * If only one GMAC is running or this is port0, reset the count.
277 1.2 matt */
278 1.2 matt if (psc->sc_running != 3 || !sc->sc_port1)
279 1.2 matt psc->sc_rxpkts_per_sec = 0;
280 1.2 matt
281 1.1 matt mii_tick(&sc->sc_mii);
282 1.1 matt if (sc->sc_if.if_flags & IFF_RUNNING)
283 1.1 matt callout_schedule(&sc->sc_mii_ch, hz);
284 1.1 matt
285 1.1 matt splx(s);
286 1.1 matt }
287 1.1 matt
288 1.1 matt static int
289 1.1 matt gmc_mediachange(struct ifnet *ifp)
290 1.1 matt {
291 1.1 matt struct gmc_softc * const sc = ifp->if_softc;
292 1.1 matt
293 1.1 matt if ((ifp->if_flags & IFF_UP) == 0)
294 1.1 matt return 0;
295 1.1 matt
296 1.1 matt return mii_mediachg(&sc->sc_mii);
297 1.1 matt }
298 1.1 matt
299 1.1 matt static void
300 1.1 matt gmc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
301 1.1 matt {
302 1.1 matt struct gmc_softc * const sc = ifp->if_softc;
303 1.1 matt
304 1.1 matt mii_pollstat(&sc->sc_mii);
305 1.1 matt ifmr->ifm_status = sc->sc_mii.mii_media_status;
306 1.1 matt ifmr->ifm_active = sc->sc_mii.mii_media_active;
307 1.1 matt }
308 1.1 matt
309 1.1 matt static void
310 1.5 matt gmc_mii_statchg(struct ifnet *ifp)
311 1.1 matt {
312 1.5 matt struct gmc_softc * const sc = ifp->if_softc;
313 1.1 matt uint32_t gmac_status;
314 1.1 matt
315 1.1 matt gmac_status = sc->sc_gmac_status;
316 1.3 matt
317 1.2 matt gmac_status &= ~STATUS_PHYMODE_MASK;
318 1.3 matt gmac_status |= STATUS_PHYMODE_RGMII_A;
319 1.3 matt
320 1.1 matt gmac_status &= ~STATUS_SPEED_MASK;
321 1.1 matt if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T) {
322 1.1 matt gmac_status |= STATUS_SPEED_1000M;
323 1.1 matt } else if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
324 1.1 matt gmac_status |= STATUS_SPEED_100M;
325 1.1 matt } else {
326 1.1 matt gmac_status |= STATUS_SPEED_10M;
327 1.1 matt }
328 1.1 matt
329 1.1 matt if (sc->sc_mii.mii_media_active & IFM_FDX)
330 1.1 matt gmac_status |= STATUS_DUPLEX_FULL;
331 1.1 matt else
332 1.1 matt gmac_status &= ~STATUS_DUPLEX_FULL;
333 1.1 matt
334 1.3 matt if (sc->sc_mii.mii_media_status & IFM_ACTIVE)
335 1.1 matt gmac_status |= STATUS_LINK_ON;
336 1.1 matt else
337 1.1 matt gmac_status &= ~STATUS_LINK_ON;
338 1.1 matt
339 1.1 matt if (sc->sc_gmac_status != gmac_status) {
340 1.3 matt aprint_debug_dev(sc->sc_dev,
341 1.2 matt "status change old=%#x new=%#x active=%#x\n",
342 1.2 matt sc->sc_gmac_status, gmac_status,
343 1.2 matt sc->sc_mii.mii_media_active);
344 1.1 matt sc->sc_gmac_status = gmac_status;
345 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STATUS,
346 1.1 matt sc->sc_gmac_status);
347 1.1 matt }
348 1.3 matt
349 1.3 matt (*sc->sc_mii.mii_writereg)(sc->sc_dev, sc->sc_phy, 0x0018, 0x0041);
350 1.1 matt }
351 1.1 matt
352 1.1 matt static int
353 1.1 matt gmc_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
354 1.1 matt {
355 1.1 matt struct gmc_softc * const sc = ifp->if_softc;
356 1.1 matt struct ifreq * const ifr = data;
357 1.1 matt int s;
358 1.1 matt int error;
359 1.1 matt s = splnet();
360 1.1 matt
361 1.1 matt switch (cmd) {
362 1.1 matt case SIOCSIFMEDIA:
363 1.1 matt case SIOCGIFMEDIA:
364 1.1 matt error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
365 1.1 matt break;
366 1.1 matt default:
367 1.1 matt error = ether_ioctl(ifp, cmd, data);
368 1.1 matt if (error == ENETRESET) {
369 1.1 matt if (ifp->if_flags & IFF_RUNNING) {
370 1.1 matt /*
371 1.1 matt * If the interface is running, we have to
372 1.1 matt * update its multicast filter.
373 1.1 matt */
374 1.1 matt gmc_filter_change(sc);
375 1.1 matt }
376 1.1 matt error = 0;
377 1.1 matt }
378 1.1 matt }
379 1.1 matt
380 1.1 matt splx(s);
381 1.1 matt return error;
382 1.1 matt }
383 1.1 matt
384 1.1 matt static void
385 1.1 matt gmc_ifstart(struct ifnet *ifp)
386 1.1 matt {
387 1.1 matt struct gmc_softc * const sc = ifp->if_softc;
388 1.1 matt
389 1.3 matt #if 0
390 1.3 matt if ((sc->sc_gmac_status & STATUS_LINK_ON) == 0)
391 1.3 matt return;
392 1.3 matt #endif
393 1.3 matt if ((ifp->if_flags & IFF_RUNNING) == 0)
394 1.2 matt return;
395 1.2 matt
396 1.1 matt for (;;) {
397 1.1 matt struct mbuf *m;
398 1.1 matt IF_DEQUEUE(&ifp->if_snd, m);
399 1.1 matt if (m == NULL)
400 1.1 matt break;
401 1.1 matt if (!gmc_txqueue(sc, sc->sc_txq[0], m)) {
402 1.1 matt IF_PREPEND(&ifp->if_snd, m);
403 1.1 matt ifp->if_flags |= IFF_OACTIVE;
404 1.1 matt break;
405 1.1 matt }
406 1.1 matt }
407 1.1 matt }
408 1.1 matt
/*
 * Stop this port: withdraw its contribution from the parent's shared
 * interrupt-enable state and, if it was the last running port, quiesce
 * the software free queue interrupt as well.  Ordering here matters:
 * sc_running is cleared first so the "last port" and re-routing tests
 * below see the post-stop state.
 */
static void
gmc_ifstop(struct ifnet *ifp, int disable)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct gmac_softc * const psc = sc->sc_psc;

	/* Bit 0 = port0, bit 1 = port1 in the parent's running mask. */
	psc->sc_running &= ~(sc->sc_port1 ? 2 : 1);
	/* Remove the interrupt bits this port had enabled. */
	psc->sc_int_enabled[0] &= ~sc->sc_int_enabled[0];
	psc->sc_int_enabled[1] &= ~sc->sc_int_enabled[1];
	psc->sc_int_enabled[2] &= ~sc->sc_int_enabled[2];
	psc->sc_int_enabled[3] &= ~sc->sc_int_enabled[3];
	/*
	 * INT4_SW_FREEQ_EMPTY is shared between ports, so it is ORed
	 * back into the mask here and only dropped below once no port
	 * is running.  (Precedence: the ~ binds before the |.)
	 */
	psc->sc_int_enabled[4] &= ~sc->sc_int_enabled[4] | INT4_SW_FREEQ_EMPTY;
	if (psc->sc_running == 0) {
		/* Last port down: the free-queue interrupt can go too. */
		psc->sc_int_enabled[4] &= ~INT4_SW_FREEQ_EMPTY;
		KASSERT(psc->sc_int_enabled[0] == 0);
		KASSERT(psc->sc_int_enabled[1] == 0);
		KASSERT(psc->sc_int_enabled[2] == 0);
		KASSERT(psc->sc_int_enabled[3] == 0);
		KASSERT(psc->sc_int_enabled[4] == 0);
	} else if (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0)
	    == sc->sc_port1) {
		/*
		 * The free-queue interrupt was routed to this (now
		 * stopped) port; re-route it to the still-running one.
		 */
		psc->sc_int_select[4] &= ~INT4_SW_FREEQ_EMPTY;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
		    psc->sc_int_select[4]);
	}
	gmac_intr_update(psc);
	if (disable) {
#if 0
		if (psc->sc_running == 0) {
			gmac_mapcache_destroy(&psc->sc_txmaps);
			gmac_mapcache_destroy(&psc->sc_rxmaps);
		}
#endif
	}
}
444 1.1 matt
/*
 * Bring the interface up: create the receive and transmit hardware
 * queues on first use, program the RX filter, DMA and MAC config
 * registers (shadow-compared to avoid redundant writes), prime the
 * software free queue, route/enable interrupts, and start the MII
 * tick.  Returns 0 on success or ENOMEM after tearing down via
 * gmc_ifstop().
 */
static int
gmc_ifinit(struct ifnet *ifp)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct gmac_softc * const psc = sc->sc_psc;
	uint32_t new, mask;

	/* Make sure the shared map caches have their minimum populations. */
	gmac_mapcache_fill(psc->sc_rxmaps, MIN_RXMAPS);
	gmac_mapcache_fill(psc->sc_txmaps, MIN_TXMAPS);

	/* Lazily create this port's default receive queue. */
	if (sc->sc_rxq == NULL) {
		gmac_hwqmem_t *hqm;
		hqm = gmac_hwqmem_create(psc->sc_rxmaps, 16, /*RXQ_NDESCS,*/ 1,
		    HQM_CONSUMER|HQM_RX);
		sc->sc_rxq = gmac_hwqueue_create(hqm, sc->sc_iot,
		    sc->sc_ioh, GMAC_DEF_RXQn_RWPTR(sc->sc_port1),
		    GMAC_DEF_RXQn_BASE(sc->sc_port1), 0);
		if (sc->sc_rxq == NULL) {
			gmac_hwqmem_destroy(hqm);
			goto failed;
		}
		sc->sc_rxq->hwq_ifp = ifp;
		sc->sc_rxq->hwq_producer = psc->sc_swfreeq;
	}

	/* Lazily create the six software transmit queues (shared memory). */
	if (sc->sc_txq[0] == NULL) {
		gmac_hwqueue_t *hwq, *last_hwq;
		gmac_hwqmem_t *hqm;
		size_t i;

		hqm = gmac_hwqmem_create(psc->sc_txmaps, TXQ_NDESCS, 6,
		    HQM_PRODUCER|HQM_TX);
		KASSERT(hqm != NULL);
		for (i = 0; i < __arraycount(sc->sc_txq); i++) {
			sc->sc_txq[i] = gmac_hwqueue_create(hqm, sc->sc_iot,
			    sc->sc_dma_ioh, GMAC_SW_TX_Qn_RWPTR(i),
			    GMAC_SW_TX_Q_BASE, i);
			if (sc->sc_txq[i] == NULL) {
				/* hqm is owned by txq[0] once created. */
				if (i == 0)
					gmac_hwqmem_destroy(hqm);
				goto failed;
			}
			sc->sc_txq[i]->hwq_ifp = ifp;

			/*
			 * Keep the hardware free queue's producer list
			 * sorted by queue offset; insert this queue in
			 * its place.
			 */
			last_hwq = NULL;
			SLIST_FOREACH(hwq, &psc->sc_hwfreeq->hwq_producers,
			    hwq_link) {
				if (sc->sc_txq[i]->hwq_qoff < hwq->hwq_qoff)
					break;
				last_hwq = hwq;
			}
			if (last_hwq == NULL)
				SLIST_INSERT_HEAD(
				    &psc->sc_hwfreeq->hwq_producers,
				    sc->sc_txq[i], hwq_link);
			else
				SLIST_INSERT_AFTER(last_hwq, sc->sc_txq[i],
				    hwq_link);
		}
	}

	gmc_filter_change(sc);

	/* Program DMA config, preserving bits outside our mask. */
	mask = DMAVR_LOOPBACK|DMAVR_DROP_SMALL_ACK|DMAVR_EXTRABYTES_MASK
	    |DMAVR_RXBURSTSIZE_MASK|DMAVR_RXBUSWIDTH_MASK
	    |DMAVR_TXBURSTSIZE_MASK|DMAVR_TXBUSWIDTH_MASK;
	new = DMAVR_RXDMA_ENABLE|DMAVR_TXDMA_ENABLE
	    |DMAVR_EXTRABYTES(2)
	    |DMAVR_RXBURSTSIZE(DMAVR_BURSTSIZE_32W)
	    |DMAVR_RXBUSWIDTH(DMAVR_BUSWIDTH_32BITS)
	    |DMAVR_TXBURSTSIZE(DMAVR_BURSTSIZE_32W)
	    |DMAVR_TXBUSWIDTH(DMAVR_BUSWIDTH_32BITS);
	new |= sc->sc_dmavr & ~mask;
	if (sc->sc_dmavr != new) {
		sc->sc_dmavr = new;
		bus_space_write_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR,
		    sc->sc_dmavr);
		aprint_debug_dev(sc->sc_dev, "gmc_ifinit: dmavr=%#x/%#x\n",
		    sc->sc_dmavr,
		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR));
	}

	/* Program MAC config 0 the same way. */
	mask = CONFIG0_MAXLEN_MASK|CONFIG0_TX_DISABLE|CONFIG0_RX_DISABLE
	    |CONFIG0_LOOPBACK|/*CONFIG0_SIM_TEST|*/CONFIG0_INVERSE_RXC_RGMII
	    |CONFIG0_RGMII_INBAND_STATUS_ENABLE;
	new = CONFIG0_MAXLEN(CONFIG0_MAXLEN_1536)|CONFIG0_R_LATCHED_MMII;
	new |= (sc->sc_gmac_config[0] & ~mask);
	if (sc->sc_gmac_config[0] != new) {
		sc->sc_gmac_config[0] = new;
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0,
		    sc->sc_gmac_config[0]);
		aprint_debug_dev(sc->sc_dev, "gmc_ifinit: config0=%#x/%#x\n",
		    sc->sc_gmac_config[0],
		    bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0));
	}

	/* Prime the software free queue with receive buffers. */
	psc->sc_rxpkts_per_sec +=
	    gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);

	/*
	 * If we will be the only active interface, make sure the sw freeq
	 * interrupt gets routed to us.
	 */
	if (psc->sc_running == 0
	    && (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0) != sc->sc_port1)) {
		psc->sc_int_select[4] ^= INT4_SW_FREEQ_EMPTY;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
		    psc->sc_int_select[4]);
	}

	/* Record which interrupts this port enables, then merge into parent. */
	sc->sc_int_enabled[0] = sc->sc_int_mask[0]
	    & (INT0_TXDERR|INT0_TXPERR|INT0_RXDERR|INT0_RXPERR|INT0_SWTXQ_EOF);
	sc->sc_int_enabled[1] = sc->sc_int_mask[1] & INT1_DEF_RXQ_EOF;
	sc->sc_int_enabled[4] = INT4_SW_FREEQ_EMPTY | (sc->sc_int_mask[4]
	    & (INT4_TX_FAIL|INT4_MIB_HEMIWRAP|INT4_RX_FIFO_OVRN
	       |INT4_RGMII_STSCHG));

	psc->sc_int_enabled[0] |= sc->sc_int_enabled[0];
	psc->sc_int_enabled[1] |= sc->sc_int_enabled[1];
	psc->sc_int_enabled[4] |= sc->sc_int_enabled[4];

	gmac_intr_update(psc);

	/* Kick the MII state machine once if we were not running before. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		mii_tick(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	psc->sc_running |= (sc->sc_port1 ? 2 : 1);

	callout_schedule(&sc->sc_mii_ch, hz);

	return 0;

failed:
	gmc_ifstop(ifp, true);
	return ENOMEM;
}
581 1.1 matt
582 1.1 matt static int
583 1.1 matt gmc_intr(void *arg)
584 1.1 matt {
585 1.1 matt struct gmc_softc * const sc = arg;
586 1.1 matt uint32_t int0_status, int1_status, int4_status;
587 1.1 matt uint32_t status;
588 1.1 matt bool do_ifstart = false;
589 1.1 matt int rv = 0;
590 1.1 matt
591 1.3 matt aprint_debug_dev(sc->sc_dev, "gmac_intr: entry\n");
592 1.3 matt
593 1.1 matt int0_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
594 1.1 matt GMAC_INT0_STATUS);
595 1.1 matt int1_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
596 1.1 matt GMAC_INT1_STATUS);
597 1.1 matt int4_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
598 1.1 matt GMAC_INT4_STATUS);
599 1.1 matt
600 1.3 matt aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
601 1.2 matt int0_status, int1_status,
602 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
603 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
604 1.2 matt int4_status);
605 1.2 matt
606 1.3 matt #if 0
607 1.3 matt aprint_debug_dev(sc->sc_dev, "gmac_intr: mask=%#x/%#x/%#x/%#x/%#x\n",
608 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK),
609 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK),
610 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK),
611 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK),
612 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK));
613 1.3 matt #endif
614 1.2 matt
615 1.1 matt status = int0_status & sc->sc_int_mask[0];
616 1.1 matt if (status & (INT0_TXDERR|INT0_TXPERR)) {
617 1.1 matt aprint_error_dev(sc->sc_dev,
618 1.2 matt "transmit%s%s error: %#x %08x bufaddr %#x\n",
619 1.1 matt status & INT0_TXDERR ? " data" : "",
620 1.1 matt status & INT0_TXPERR ? " protocol" : "",
621 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
622 1.3 matt GMAC_DMA_TX_CUR_DESC),
623 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
624 1.3 matt GMAC_SW_TX_Q0_RWPTR),
625 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
626 1.3 matt GMAC_DMA_TX_DESC2));
627 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
628 1.3 matt status & (INT0_TXDERR|INT0_TXPERR));
629 1.3 matt Debugger();
630 1.1 matt }
631 1.1 matt if (status & (INT0_RXDERR|INT0_RXPERR)) {
632 1.1 matt aprint_error_dev(sc->sc_dev,
633 1.3 matt "receive%s%s error: %#x %#x=%#x/%#x/%#x/%#x\n",
634 1.3 matt status & INT0_RXDERR ? " data" : "",
635 1.3 matt status & INT0_RXPERR ? " protocol" : "",
636 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
637 1.3 matt GMAC_DMA_RX_CUR_DESC),
638 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh,
639 1.3 matt GMAC_SWFREEQ_RWPTR),
640 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
641 1.3 matt GMAC_DMA_RX_DESC0),
642 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
643 1.3 matt GMAC_DMA_RX_DESC1),
644 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
645 1.3 matt GMAC_DMA_RX_DESC2),
646 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
647 1.3 matt GMAC_DMA_RX_DESC3));
648 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
649 1.3 matt status & (INT0_RXDERR|INT0_RXPERR));
650 1.2 matt Debugger();
651 1.1 matt }
652 1.1 matt if (status & INT0_SWTXQ_EOF) {
653 1.1 matt status &= INT0_SWTXQ_EOF;
654 1.1 matt for (int i = 0; status && i < __arraycount(sc->sc_txq); i++) {
655 1.1 matt if (status & INT0_SWTXQn_EOF(i)) {
656 1.1 matt gmac_hwqueue_sync(sc->sc_txq[i]);
657 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh,
658 1.3 matt GMAC_INT0_STATUS,
659 1.3 matt sc->sc_int_mask[0] & (INT0_SWTXQn_EOF(i)|INT0_SWTXQn_FIN(i)));
660 1.1 matt status &= ~INT0_SWTXQn_EOF(i);
661 1.1 matt }
662 1.1 matt }
663 1.1 matt do_ifstart = true;
664 1.1 matt rv = 1;
665 1.1 matt }
666 1.1 matt
667 1.3 matt if (int4_status & INT4_SW_FREEQ_EMPTY) {
668 1.3 matt struct gmac_softc * const psc = sc->sc_psc;
669 1.3 matt psc->sc_rxpkts_per_sec +=
670 1.3 matt gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);
671 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS,
672 1.3 matt status & INT4_SW_FREEQ_EMPTY);
673 1.1 matt rv = 1;
674 1.1 matt }
675 1.1 matt
676 1.3 matt status = int1_status & sc->sc_int_mask[1];
677 1.3 matt if (status & INT1_DEF_RXQ_EOF) {
678 1.3 matt struct gmac_softc * const psc = sc->sc_psc;
679 1.3 matt psc->sc_rxpkts_per_sec +=
680 1.3 matt gmac_hwqueue_consume(sc->sc_rxq, psc->sc_swfree_min);
681 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS,
682 1.3 matt status & INT1_DEF_RXQ_EOF);
683 1.1 matt rv = 1;
684 1.1 matt }
685 1.2 matt
686 1.1 matt status = int4_status & sc->sc_int_enabled[4];
687 1.1 matt if (status & INT4_TX_FAIL) {
688 1.1 matt }
689 1.1 matt if (status & INT4_MIB_HEMIWRAP) {
690 1.1 matt }
691 1.1 matt if (status & INT4_RX_XON) {
692 1.1 matt }
693 1.1 matt if (status & INT4_RX_XOFF) {
694 1.1 matt }
695 1.1 matt if (status & INT4_TX_XON) {
696 1.1 matt }
697 1.1 matt if (status & INT4_TX_XOFF) {
698 1.1 matt }
699 1.1 matt if (status & INT4_RX_FIFO_OVRN) {
700 1.3 matt #if 0
701 1.3 matt if (sc->sc_psc->sc_swfree_min < MAX_RXMAPS) {
702 1.2 matt sc->sc_psc->sc_swfree_min++;
703 1.3 matt gmac_swfree_min_update(psc);
704 1.3 matt }
705 1.3 matt #endif
706 1.1 matt sc->sc_if.if_ierrors++;
707 1.1 matt }
708 1.1 matt if (status & INT4_RGMII_STSCHG) {
709 1.6 msaitoh mii_pollstat(&sc->sc_mii);
710 1.1 matt }
711 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS, status);
712 1.1 matt
713 1.1 matt if (do_ifstart)
714 1.6.18.1 bouyer if_schedule_deferred_start(&sc->sc_if);
715 1.1 matt
716 1.3 matt aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
717 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS),
718 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS),
719 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
720 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
721 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS));
722 1.3 matt aprint_debug_dev(sc->sc_dev, "gmac_intr: exit rv=%d\n", rv);
723 1.1 matt return rv;
724 1.1 matt }
725 1.1 matt
726 1.1 matt static int
727 1.1 matt gmc_match(device_t parent, cfdata_t cf, void *aux)
728 1.1 matt {
729 1.1 matt struct gmac_softc *psc = device_private(parent);
730 1.1 matt struct gmac_attach_args *gma = aux;
731 1.1 matt
732 1.1 matt if ((unsigned int)gma->gma_phy > 31)
733 1.1 matt return 0;
734 1.1 matt if ((unsigned int)gma->gma_port > 1)
735 1.1 matt return 0;
736 1.1 matt if (gma->gma_intr < 1 || gma->gma_intr > 2)
737 1.1 matt return 0;
738 1.1 matt
739 1.1 matt if (psc->sc_ports & (1 << gma->gma_port))
740 1.1 matt return 0;
741 1.1 matt
742 1.1 matt return 1;
743 1.1 matt }
744 1.1 matt
/*
 * Autoconf attach: claim the port on the parent core, carve out this
 * port's DMA and MAC register subregions, set up the ifnet/ethercom/
 * MII state, snapshot the current hardware register values into the
 * softc shadows, and (for port0 only) establish the interrupt handler.
 *
 * NOTE(review): the ethernet addresses are hard-coded placeholders --
 * presumably meant to be replaced by addresses from board config or
 * EEPROM; confirm before relying on them.
 */
static void
gmc_attach(device_t parent, device_t self, void *aux)
{
	struct gmac_softc * const psc = device_private(parent);
	struct gmc_softc * const sc = device_private(self);
	struct gmac_attach_args *gma = aux;
	struct ifnet * const ifp = &sc->sc_if;
	static const char eaddrs[2][6] = {
		"\x0\x52\xc3\x11\x22\x33",
		"\x0\x52\xc3\x44\x55\x66",
	};

	/* Claim this port in the parent so gmc_match rejects a second try. */
	psc->sc_ports |= 1 << gma->gma_port;
	sc->sc_port1 = (gma->gma_port == 1);
	sc->sc_phy = gma->gma_phy;

	sc->sc_dev = self;
	sc->sc_psc = psc;
	sc->sc_iot = psc->sc_iot;
	sc->sc_ioh = psc->sc_ioh;
	sc->sc_dmat = psc->sc_dmat;

	/* Per-port DMA and MAC register windows within the core. */
	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    GMAC_PORTn_DMA_OFFSET(gma->gma_port), GMAC_PORTn_DMA_SIZE,
	    &sc->sc_dma_ioh);
	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    GMAC_PORTn_GMAC_OFFSET(gma->gma_port), GMAC_PORTn_GMAC_SIZE,
	    &sc->sc_gmac_ioh);
	aprint_normal("\n");
	aprint_naive("\n");

	/* Wire up the ifnet callbacks. */
	strlcpy(ifp->if_xname, device_xname(self), sizeof(ifp->if_xname));
	ifp->if_flags = IFF_SIMPLEX|IFF_MULTICAST|IFF_BROADCAST;
	ifp->if_softc = sc;
	ifp->if_ioctl = gmc_ifioctl;
	ifp->if_stop = gmc_ifstop;
	ifp->if_start = gmc_ifstart;
	ifp->if_init = gmc_ifinit;

	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
	sc->sc_ec.ec_mii = &sc->sc_mii;

	/* MII register accessors are supplied by the parent core. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_statchg = gmc_mii_statchg;
	sc->sc_mii.mii_readreg = gma->gma_mii_readreg;
	sc->sc_mii.mii_writereg = gma->gma_mii_writereg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, gmc_mediachange,
	    gmc_mediastatus);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, eaddrs[gma->gma_port]);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
	    gma->gma_phy, MII_OFFSET_ANY, 0);

	/* Fall back to "none" media if no PHY was found. */
	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		// ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
	}

	/* Seed the softc register shadows from the current hardware state. */
	sc->sc_gmac_status = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STATUS);
	sc->sc_gmac_sta_add[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD0);
	sc->sc_gmac_sta_add[1] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD1);
	sc->sc_gmac_sta_add[2] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD2);
	sc->sc_gmac_mcast_filter[0] = bus_space_read_4(sc->sc_iot,
	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER0);
	sc->sc_gmac_mcast_filter[1] = bus_space_read_4(sc->sc_iot,
	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER1);
	sc->sc_gmac_rx_filter = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_RX_FILTER);
	sc->sc_gmac_config[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_CONFIG0);
	sc->sc_dmavr = bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR);

	/* sc->sc_int_enabled is already zeroed */
	sc->sc_int_mask[0] = (sc->sc_port1 ? INT0_GMAC1 : INT0_GMAC0);
	sc->sc_int_mask[1] = (sc->sc_port1 ? INT1_GMAC1 : INT1_GMAC0);
	sc->sc_int_mask[2] = (sc->sc_port1 ? INT2_GMAC1 : INT2_GMAC0);
	sc->sc_int_mask[3] = (sc->sc_port1 ? INT3_GMAC1 : INT3_GMAC0);
	sc->sc_int_mask[4] = (sc->sc_port1 ? INT4_GMAC1 : INT4_GMAC0);

	/* Only port0 establishes the (shared) interrupt handler. */
	if (!sc->sc_port1) {
		sc->sc_ih = intr_establish(gma->gma_intr, IPL_NET, IST_LEVEL_HIGH,
		    gmc_intr, sc);
		KASSERT(sc->sc_ih != NULL);
	}

	callout_init(&sc->sc_mii_ch, 0);
	callout_setfunc(&sc->sc_mii_ch, gmc_mii_tick, sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(CLLADDR(sc->sc_if.if_sadl)));
}
848 1.1 matt
/* Autoconfiguration glue: register the gmc driver (no detach/activate). */
CFATTACH_DECL_NEW(gmc, sizeof(struct gmc_softc),
    gmc_match, gmc_attach, NULL, NULL);
851