if_gmc.c revision 1.11

1 1.11 msaitoh /* $NetBSD: if_gmc.c,v 1.11 2019/05/28 07:41:46 msaitoh Exp $ */
2 1.1 matt /*-
3 1.1 matt * Copyright (c) 2008 The NetBSD Foundation, Inc.
4 1.1 matt * All rights reserved.
5 1.1 matt *
6 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
7 1.1 matt * by Matt Thomas <matt@3am-software.com>
8 1.1 matt *
9 1.1 matt * Redistribution and use in source and binary forms, with or without
10 1.1 matt * modification, are permitted provided that the following conditions
11 1.1 matt * are met:
12 1.1 matt * 1. Redistributions of source code must retain the above copyright
13 1.1 matt * notice, this list of conditions and the following disclaimer.
14 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
15 1.1 matt * notice, this list of conditions and the following disclaimer in the
16 1.1 matt * documentation and/or other materials provided with the distribution.
17 1.1 matt *
18 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
29 1.1 matt */
30 1.1 matt
31 1.1 matt #include <sys/param.h>
32 1.1 matt #include <sys/callout.h>
33 1.1 matt #include <sys/device.h>
34 1.1 matt #include <sys/ioctl.h>
35 1.1 matt #include <sys/kernel.h>
36 1.1 matt #include <sys/kmem.h>
37 1.1 matt #include <sys/mbuf.h>
38 1.1 matt
39 1.4 dyoung #include <sys/bus.h>
40 1.1 matt #include <machine/intr.h>
41 1.1 matt
42 1.1 matt #include <arm/gemini/gemini_reg.h>
43 1.1 matt #include <arm/gemini/gemini_gmacreg.h>
44 1.1 matt #include <arm/gemini/gemini_gmacvar.h>
45 1.1 matt
46 1.1 matt #include <net/if.h>
47 1.1 matt #include <net/if_ether.h>
48 1.1 matt #include <net/if_dl.h>
49 1.1 matt
50 1.11 msaitoh __KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.11 2019/05/28 07:41:46 msaitoh Exp $");
51 1.1 matt
52 1.1 matt #define MAX_TXSEG 32
53 1.1 matt
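/*
 * Per-port software state.  sc_psc points at the shared GMAC core; the
 * sc_gmac_* and sc_dmavr fields shadow the corresponding hardware
 * registers so that only changed values are rewritten.
 */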
54 1.1 matt struct gmc_softc {
55 1.1 matt device_t sc_dev;
56 1.1 matt struct gmac_softc *sc_psc;
57 1.1 matt struct gmc_softc *sc_sibling;
58 1.1 matt bus_dma_tag_t sc_dmat;
59 1.1 matt bus_space_tag_t sc_iot;
60 1.1 matt bus_space_handle_t sc_ioh;
61 1.1 matt bus_space_handle_t sc_dma_ioh;
62 1.1 matt bus_space_handle_t sc_gmac_ioh;
63 1.1 matt struct ethercom sc_ec;
64 1.1 matt struct mii_data sc_mii;
65 1.1 matt void *sc_ih;
66 1.1 matt bool sc_port1;
67 1.3 matt uint8_t sc_phy;
68 1.1 matt gmac_hwqueue_t *sc_rxq;
69 1.1 matt gmac_hwqueue_t *sc_txq[6];
70 1.1 matt callout_t sc_mii_ch;
71 1.1 matt
72 1.1 matt uint32_t sc_gmac_status;
73 1.1 matt uint32_t sc_gmac_sta_add[3];
74 1.1 matt uint32_t sc_gmac_mcast_filter[2];
75 1.1 matt uint32_t sc_gmac_rx_filter;
76 1.1 matt uint32_t sc_gmac_config[2];
77 1.1 matt uint32_t sc_dmavr;
78 1.1 matt
79 1.1 matt uint32_t sc_int_mask[5];
80 1.1 matt uint32_t sc_int_enabled[5];
81 1.1 matt };
82 1.1 matt
83 1.1 matt #define sc_if sc_ec.ec_if
84 1.1 matt
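/*
 * Queue one mbuf chain on the given hardware transmit queue.  The driver
 * requires transmit buffers to start on an even address (presumably a
 * restriction of the DMA engine), so odd-aligned mbuf data is shifted by
 * one byte first.  Returns false if no DMA map or descriptor space is
 * available so the caller can retry later.
 */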
85 1.1 matt static bool
86 1.1 matt gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
87 1.1 matt {
88 1.1 matt bus_dmamap_t map;
89 1.3 matt uint32_t desc0, desc1, desc3;
90 1.1 matt struct mbuf *last_m, *m0;
91 1.1 matt size_t count, i;
92 1.1 matt int error;
93 1.1 matt gmac_desc_t *d;
94 1.1 matt
95 1.2 matt KASSERT(hwq != NULL);
96 1.2 matt
97 1.1 matt map = gmac_mapcache_get(hwq->hwq_hqm->hqm_mc);
98 1.1 matt if (map == NULL)
99 1.1 matt return false;
100 1.1 matt
101 1.1 matt for (last_m = NULL, m0 = m, count = 0;
102 1.1 matt m0 != NULL;
103 1.1 matt last_m = m0, m0 = m0->m_next) {
104 1.1 matt vaddr_t addr = (uintptr_t)m0->m_data;
105 1.1 matt if (m0->m_len == 0)
106 1.1 matt continue;
107 1.1 matt if (addr & 1) {
108 1.1 matt if (last_m != NULL && M_TRAILINGSPACE(last_m) > 0) {
109 1.1 matt last_m->m_data[last_m->m_len++] = *m0->m_data++;
110 1.1 matt m0->m_len--;
111 1.1 matt } else if (M_TRAILINGSPACE(m0) > 0) {
112 1.1 matt memmove(m0->m_data + 1, m0->m_data, m0->m_len);
113 1.1 matt m0->m_data++;
114 1.1 matt } else if (M_LEADINGSPACE(m0) > 0) {
115 1.1 matt memmove(m0->m_data - 1, m0->m_data, m0->m_len);
116 1.1 matt m0->m_data--;
117 1.1 matt } else {
118 1.2 matt panic("gmc_txqueue: odd addr %p", m0->m_data);
119 1.1 matt }
120 1.1 matt }
121 1.1 matt count += ((addr & PGOFSET) + m0->m_len + PGOFSET) >> PGSHIFT;
122 1.1 matt }
123 1.1 matt
124 1.2 matt gmac_hwqueue_sync(hwq);
125 1.1 matt if (hwq->hwq_free <= count) {
126 1.2 matt gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
127 1.2 matt return false;
128 1.1 matt }
129 1.1 matt
130 1.1 matt error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
131 1.9 msaitoh BUS_DMA_WRITE | BUS_DMA_NOWAIT);
132 1.1 matt if (error) {
133 1.1 matt aprint_error_dev(sc->sc_dev, "ifstart: load failed: %d\n",
134 1.1 matt error);
135 1.1 matt gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
136 1.1 matt m_freem(m);
137 1.1 matt sc->sc_if.if_oerrors++;
138 1.1 matt return true;
139 1.1 matt }
140 1.1 matt KASSERT(map->dm_nsegs > 0);
141 1.1 matt
142 1.1 matt /*
143 1.1 matt * Sync the mbuf contents to memory/cache.
144 1.1 matt */
145 1.1 matt bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
146 1.2 matt BUS_DMASYNC_PREWRITE);
147 1.1 matt
148 1.1 matt /*
149 1.1 matt * Now we need to load the descriptors...
150 1.1 matt */
151 1.3 matt desc0 = map->dm_nsegs << 16;
152 1.1 matt desc1 = m->m_pkthdr.len;
153 1.1 matt desc3 = DESC3_SOF;
154 1.1 matt i = 0;
155 1.2 matt d = NULL;
156 1.1 matt do {
157 1.3 matt #if 0
158 1.2 matt if (i > 0)
159 1.3 matt aprint_debug_dev(sc->sc_dev,
160 1.2 matt "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
161 1.2 matt i-1, d, d->d_desc0, d->d_desc1,
162 1.2 matt d->d_bufaddr, d->d_desc3);
163 1.3 matt #endif
164 1.1 matt d = gmac_hwqueue_desc(hwq, i);
165 1.1 matt KASSERT(map->dm_segs[i].ds_len > 0);
166 1.1 matt KASSERT((map->dm_segs[i].ds_addr & 1) == 0);
167 1.3 matt d->d_desc0 = htole32(map->dm_segs[i].ds_len | desc0);
168 1.2 matt d->d_desc1 = htole32(desc1);
169 1.2 matt d->d_bufaddr = htole32(map->dm_segs[i].ds_addr);
170 1.2 matt d->d_desc3 = htole32(desc3);
171 1.2 matt desc3 = 0;
172 1.1 matt } while (++i < map->dm_nsegs);
173 1.1 matt
174 1.9 msaitoh d->d_desc3 |= htole32(DESC3_EOF | DESC3_EOFIE);
175 1.3 matt #if 0
176 1.3 matt aprint_debug_dev(sc->sc_dev,
177 1.2 matt "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
178 1.2 matt i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
179 1.3 matt #endif
180 1.1 matt M_SETCTX(m, map);
181 1.1 matt IF_ENQUEUE(&hwq->hwq_ifq, m);
182 1.1 matt /*
183 1.1 matt * Last descriptor has been marked. Give them to the h/w.
184 1.1 matt * This will sync for us.
185 1.1 matt */
186 1.1 matt gmac_hwqueue_produce(hwq, map->dm_nsegs);
187 1.3 matt #if 0
188 1.3 matt aprint_debug_dev(sc->sc_dev,
189 1.2 matt "gmac_txqueue: *%zu@%p=%#x/%#x/%#x/%#x\n",
190 1.2 matt i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
191 1.3 matt #endif
192 1.1 matt return true;
193 1.1 matt }
194 1.1 matt
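/*
 * Program the station address, multicast hash filter and RX filter
 * registers from the current interface state, rewriting only the
 * registers whose cached values have changed.
 */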
195 1.1 matt static void
196 1.1 matt gmc_filter_change(struct gmc_softc *sc)
197 1.1 matt {
198 1.11 msaitoh struct ethercom *ec = &sc->sc_ec;
199 1.1 matt struct ether_multi *enm;
200 1.1 matt struct ether_multistep step;
201 1.1 matt uint32_t mhash[2];
202 1.1 matt uint32_t new0, new1, new2;
203 1.1 matt const char * const eaddr = CLLADDR(sc->sc_if.if_sadl);
204 1.1 matt
205 1.1 matt new0 = eaddr[0] | ((eaddr[1] | (eaddr[2] | (eaddr[3] << 8)) << 8) << 8);
206 1.1 matt new1 = eaddr[4] | (eaddr[5] << 8);
207 1.1 matt new2 = 0;
208 1.1 matt if (sc->sc_gmac_sta_add[0] != new0
209 1.1 matt || sc->sc_gmac_sta_add[1] != new1
210 1.1 matt || sc->sc_gmac_sta_add[2] != new2) {
211 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD0,
212 1.1 matt new0);
213 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD1,
214 1.1 matt new1);
215 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD2,
216 1.1 matt new2);
217 1.1 matt sc->sc_gmac_sta_add[0] = new0;
218 1.1 matt sc->sc_gmac_sta_add[1] = new1;
219 1.1 matt sc->sc_gmac_sta_add[2] = new2;
220 1.1 matt }
221 1.1 matt
222 1.1 matt mhash[0] = 0;
223 1.1 matt mhash[1] = 0;
224 1.11 msaitoh ETHER_LOCK(ec);
225 1.11 msaitoh ETHER_FIRST_MULTI(step, ec, enm);
226 1.1 matt while (enm != NULL) {
227 1.1 matt size_t i;
228 1.1 matt if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
229 1.1 matt mhash[0] = mhash[1] = 0xffffffff;
230 1.1 matt break;
231 1.1 matt }
232 1.1 matt i = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
233 1.2 matt mhash[(i >> 5) & 1] |= 1 << (i & 31);
234 1.1 matt ETHER_NEXT_MULTI(step, enm);
235 1.1 matt }
236 1.11 msaitoh ETHER_UNLOCK(ec);
237 1.1 matt
238 1.1 matt if (sc->sc_gmac_mcast_filter[0] != mhash[0]
239 1.1 matt || sc->sc_gmac_mcast_filter[1] != mhash[1]) {
240 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
241 1.1 matt GMAC_MCAST_FILTER0, mhash[0]);
242 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
243 1.1 matt GMAC_MCAST_FILTER1, mhash[1]);
244 1.1 matt sc->sc_gmac_mcast_filter[0] = mhash[0];
245 1.1 matt sc->sc_gmac_mcast_filter[1] = mhash[1];
246 1.1 matt }
247 1.1 matt
248 1.1 matt new0 = sc->sc_gmac_rx_filter & ~RXFILTER_PROMISC;
249 1.1 matt new0 |= RXFILTER_BROADCAST | RXFILTER_UNICAST | RXFILTER_MULTICAST;
250 1.1 matt if (sc->sc_if.if_flags & IFF_PROMISC)
251 1.1 matt new0 |= RXFILTER_PROMISC;
252 1.1 matt
253 1.1 matt if (new0 != sc->sc_gmac_rx_filter) {
254 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_RX_FILTER,
255 1.1 matt new0);
256 1.1 matt sc->sc_gmac_rx_filter = new0;
257 1.1 matt }
258 1.1 matt }
259 1.1 matt
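/*
 * Once-a-second MII timer: let the receive-mbuf low-water mark drift
 * back down to MIN_RXMAPS while traffic is light, then poll the PHY and
 * reschedule for as long as the interface is running.
 */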
260 1.1 matt static void
261 1.1 matt gmc_mii_tick(void *arg)
262 1.1 matt {
263 1.1 matt struct gmc_softc * const sc = arg;
264 1.2 matt struct gmac_softc * const psc = sc->sc_psc;
265 1.1 matt int s = splnet();
266 1.1 matt
267 1.2 matt /*
268 1.2 matt * If we had to increase the number of receive mbufs due to FIFO
269 1.2 matt * overflows, we need a way to decrease them. So for every second in
270 1.2 matt * which we receive MIN_RXMAPS packets or fewer, we decrement
271 1.2 matt * swfree_min until it returns to MIN_RXMAPS.
272 1.2 matt */
273 1.2 matt if (psc->sc_rxpkts_per_sec <= MIN_RXMAPS
274 1.3 matt && psc->sc_swfree_min > MIN_RXMAPS) {
275 1.2 matt psc->sc_swfree_min--;
276 1.3 matt gmac_swfree_min_update(psc);
277 1.3 matt }
278 1.2 matt /*
279 1.2 matt * If only one GMAC is running or this is port0, reset the count.
280 1.2 matt */
281 1.2 matt if (psc->sc_running != 3 || !sc->sc_port1)
282 1.2 matt psc->sc_rxpkts_per_sec = 0;
283 1.2 matt
284 1.1 matt mii_tick(&sc->sc_mii);
285 1.1 matt if (sc->sc_if.if_flags & IFF_RUNNING)
286 1.1 matt callout_schedule(&sc->sc_mii_ch, hz);
287 1.1 matt
288 1.1 matt splx(s);
289 1.1 matt }
290 1.1 matt
291 1.1 matt static int
292 1.1 matt gmc_mediachange(struct ifnet *ifp)
293 1.1 matt {
294 1.1 matt struct gmc_softc * const sc = ifp->if_softc;
295 1.1 matt
296 1.1 matt if ((ifp->if_flags & IFF_UP) == 0)
297 1.1 matt return 0;
298 1.1 matt
299 1.1 matt return mii_mediachg(&sc->sc_mii);
300 1.1 matt }
301 1.1 matt
302 1.1 matt static void
303 1.1 matt gmc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
304 1.1 matt {
305 1.1 matt struct gmc_softc * const sc = ifp->if_softc;
306 1.1 matt
307 1.1 matt mii_pollstat(&sc->sc_mii);
308 1.1 matt ifmr->ifm_status = sc->sc_mii.mii_media_status;
309 1.1 matt ifmr->ifm_active = sc->sc_mii.mii_media_active;
310 1.1 matt }
311 1.1 matt
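/*
 * MII status change callback: mirror the negotiated speed, duplex and
 * link state into the GMAC status register.
 */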
312 1.1 matt static void
313 1.5 matt gmc_mii_statchg(struct ifnet *ifp)
314 1.1 matt {
315 1.5 matt struct gmc_softc * const sc = ifp->if_softc;
316 1.1 matt uint32_t gmac_status;
317 1.9 msaitoh
318 1.1 matt gmac_status = sc->sc_gmac_status;
319 1.3 matt
320 1.2 matt gmac_status &= ~STATUS_PHYMODE_MASK;
321 1.3 matt gmac_status |= STATUS_PHYMODE_RGMII_A;
322 1.3 matt
323 1.1 matt gmac_status &= ~STATUS_SPEED_MASK;
324 1.1 matt if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T) {
325 1.1 matt gmac_status |= STATUS_SPEED_1000M;
326 1.1 matt } else if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
327 1.1 matt gmac_status |= STATUS_SPEED_100M;
328 1.1 matt } else {
329 1.1 matt gmac_status |= STATUS_SPEED_10M;
330 1.1 matt }
331 1.1 matt
332 1.10 msaitoh if (sc->sc_mii.mii_media_active & IFM_FDX)
333 1.1 matt gmac_status |= STATUS_DUPLEX_FULL;
334 1.1 matt else
335 1.1 matt gmac_status &= ~STATUS_DUPLEX_FULL;
336 1.1 matt
337 1.10 msaitoh if (sc->sc_mii.mii_media_status & IFM_ACTIVE)
338 1.1 matt gmac_status |= STATUS_LINK_ON;
339 1.1 matt else
340 1.1 matt gmac_status &= ~STATUS_LINK_ON;
341 1.1 matt
342 1.1 matt if (sc->sc_gmac_status != gmac_status) {
343 1.3 matt aprint_debug_dev(sc->sc_dev,
344 1.2 matt "status change old=%#x new=%#x active=%#x\n",
345 1.2 matt sc->sc_gmac_status, gmac_status,
346 1.2 matt sc->sc_mii.mii_media_active);
347 1.1 matt sc->sc_gmac_status = gmac_status;
348 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STATUS,
349 1.1 matt sc->sc_gmac_status);
350 1.1 matt }
351 1.3 matt
352 1.3 matt (*sc->sc_mii.mii_writereg)(sc->sc_dev, sc->sc_phy, 0x0018, 0x0041);
353 1.1 matt }
354 1.1 matt
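/*
 * Standard ioctl handler; on ENETRESET the RX filter is reprogrammed if
 * the interface is running.
 */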
355 1.1 matt static int
356 1.1 matt gmc_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
357 1.1 matt {
358 1.1 matt struct gmc_softc * const sc = ifp->if_softc;
359 1.1 matt int s;
360 1.1 matt int error;
361 1.1 matt s = splnet();
362 1.1 matt
363 1.1 matt switch (cmd) {
364 1.1 matt default:
365 1.1 matt error = ether_ioctl(ifp, cmd, data);
366 1.1 matt if (error == ENETRESET) {
367 1.1 matt if (ifp->if_flags & IFF_RUNNING) {
368 1.1 matt /*
369 1.9 msaitoh * If the interface is running, we have to
370 1.1 matt * update its multicast filter.
371 1.1 matt */
372 1.1 matt gmc_filter_change(sc);
373 1.1 matt }
374 1.1 matt error = 0;
375 1.1 matt }
376 1.1 matt }
377 1.1 matt
378 1.1 matt splx(s);
379 1.1 matt return error;
380 1.1 matt }
381 1.1 matt
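/*
 * Drain the interface send queue into hardware TX queue 0.  If a packet
 * cannot be queued it is put back and IFF_OACTIVE is set until the
 * interrupt handler reclaims descriptors.
 */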
382 1.1 matt static void
383 1.1 matt gmc_ifstart(struct ifnet *ifp)
384 1.1 matt {
385 1.1 matt struct gmc_softc * const sc = ifp->if_softc;
386 1.1 matt
387 1.3 matt #if 0
388 1.3 matt if ((sc->sc_gmac_status & STATUS_LINK_ON) == 0)
389 1.3 matt return;
390 1.3 matt #endif
391 1.3 matt if ((ifp->if_flags & IFF_RUNNING) == 0)
392 1.2 matt return;
393 1.2 matt
394 1.1 matt for (;;) {
395 1.1 matt struct mbuf *m;
396 1.1 matt IF_DEQUEUE(&ifp->if_snd, m);
397 1.1 matt if (m == NULL)
398 1.1 matt break;
399 1.1 matt if (!gmc_txqueue(sc, sc->sc_txq[0], m)) {
400 1.1 matt IF_PREPEND(&ifp->if_snd, m);
401 1.1 matt ifp->if_flags |= IFF_OACTIVE;
402 1.1 matt break;
403 1.1 matt }
404 1.1 matt }
405 1.1 matt }
406 1.1 matt
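/*
 * Stop this port: withdraw its interrupt enables from the shared GMAC
 * core and, if no port remains running, the software free-queue
 * interrupt as well.
 */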
407 1.1 matt static void
408 1.1 matt gmc_ifstop(struct ifnet *ifp, int disable)
409 1.1 matt {
410 1.1 matt struct gmc_softc * const sc = ifp->if_softc;
411 1.1 matt struct gmac_softc * const psc = sc->sc_psc;
412 1.1 matt
413 1.1 matt psc->sc_running &= ~(sc->sc_port1 ? 2 : 1);
414 1.1 matt psc->sc_int_enabled[0] &= ~sc->sc_int_enabled[0];
415 1.1 matt psc->sc_int_enabled[1] &= ~sc->sc_int_enabled[1];
416 1.1 matt psc->sc_int_enabled[2] &= ~sc->sc_int_enabled[2];
417 1.1 matt psc->sc_int_enabled[3] &= ~sc->sc_int_enabled[3];
418 1.1 matt psc->sc_int_enabled[4] &= ~sc->sc_int_enabled[4] | INT4_SW_FREEQ_EMPTY;
419 1.1 matt if (psc->sc_running == 0) {
420 1.1 matt psc->sc_int_enabled[4] &= ~INT4_SW_FREEQ_EMPTY;
421 1.1 matt KASSERT(psc->sc_int_enabled[0] == 0);
422 1.1 matt KASSERT(psc->sc_int_enabled[1] == 0);
423 1.1 matt KASSERT(psc->sc_int_enabled[2] == 0);
424 1.1 matt KASSERT(psc->sc_int_enabled[3] == 0);
425 1.1 matt KASSERT(psc->sc_int_enabled[4] == 0);
426 1.1 matt } else if (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0)
427 1.1 matt == sc->sc_port1) {
428 1.1 matt psc->sc_int_select[4] &= ~INT4_SW_FREEQ_EMPTY;
429 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
430 1.1 matt psc->sc_int_select[4]);
431 1.1 matt }
432 1.1 matt gmac_intr_update(psc);
433 1.1 matt if (disable) {
434 1.1 matt #if 0
435 1.1 matt if (psc->sc_running == 0) {
436 1.1 matt gmac_mapcache_destroy(&psc->sc_txmaps);
437 1.1 matt gmac_mapcache_destroy(&psc->sc_rxmaps);
438 1.1 matt }
439 1.1 matt #endif
440 1.1 matt }
441 1.1 matt }
442 1.1 matt
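/*
 * Bring the port up: create the RX and TX hardware queues on first use,
 * program the DMA and MAC configuration registers, route the software
 * free-queue interrupt to this port if it is the only one running, and
 * enable the port's interrupts.
 */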
443 1.1 matt static int
444 1.1 matt gmc_ifinit(struct ifnet *ifp)
445 1.1 matt {
446 1.1 matt struct gmc_softc * const sc = ifp->if_softc;
447 1.1 matt struct gmac_softc * const psc = sc->sc_psc;
448 1.1 matt uint32_t new, mask;
449 1.1 matt
450 1.1 matt gmac_mapcache_fill(psc->sc_rxmaps, MIN_RXMAPS);
451 1.1 matt gmac_mapcache_fill(psc->sc_txmaps, MIN_TXMAPS);
452 1.1 matt
453 1.1 matt if (sc->sc_rxq == NULL) {
454 1.1 matt gmac_hwqmem_t *hqm;
455 1.3 matt hqm = gmac_hwqmem_create(psc->sc_rxmaps, 16, /*RXQ_NDESCS,*/ 1,
456 1.9 msaitoh HQM_CONSUMER | HQM_RX);
457 1.1 matt sc->sc_rxq = gmac_hwqueue_create(hqm, sc->sc_iot,
458 1.2 matt sc->sc_ioh, GMAC_DEF_RXQn_RWPTR(sc->sc_port1),
459 1.2 matt GMAC_DEF_RXQn_BASE(sc->sc_port1), 0);
460 1.1 matt if (sc->sc_rxq == NULL) {
461 1.1 matt gmac_hwqmem_destroy(hqm);
462 1.1 matt goto failed;
463 1.1 matt }
464 1.1 matt sc->sc_rxq->hwq_ifp = ifp;
465 1.1 matt sc->sc_rxq->hwq_producer = psc->sc_swfreeq;
466 1.1 matt }
467 1.1 matt
468 1.2 matt if (sc->sc_txq[0] == NULL) {
469 1.1 matt gmac_hwqueue_t *hwq, *last_hwq;
470 1.1 matt gmac_hwqmem_t *hqm;
471 1.1 matt size_t i;
472 1.1 matt
473 1.1 matt hqm = gmac_hwqmem_create(psc->sc_txmaps, TXQ_NDESCS, 6,
474 1.9 msaitoh HQM_PRODUCER | HQM_TX);
475 1.2 matt KASSERT(hqm != NULL);
476 1.1 matt for (i = 0; i < __arraycount(sc->sc_txq); i++) {
477 1.1 matt sc->sc_txq[i] = gmac_hwqueue_create(hqm, sc->sc_iot,
478 1.1 matt sc->sc_dma_ioh, GMAC_SW_TX_Qn_RWPTR(i),
479 1.1 matt GMAC_SW_TX_Q_BASE, i);
480 1.1 matt if (sc->sc_txq[i] == NULL) {
481 1.1 matt if (i == 0)
482 1.1 matt gmac_hwqmem_destroy(hqm);
483 1.1 matt goto failed;
484 1.1 matt }
485 1.1 matt sc->sc_txq[i]->hwq_ifp = ifp;
486 1.1 matt
487 1.1 matt last_hwq = NULL;
488 1.1 matt SLIST_FOREACH(hwq, &psc->sc_hwfreeq->hwq_producers,
489 1.1 matt hwq_link) {
490 1.1 matt if (sc->sc_txq[i]->hwq_qoff < hwq->hwq_qoff)
491 1.1 matt break;
492 1.1 matt last_hwq = hwq;
493 1.1 matt }
494 1.1 matt if (last_hwq == NULL)
495 1.1 matt SLIST_INSERT_HEAD(
496 1.1 matt &psc->sc_hwfreeq->hwq_producers,
497 1.1 matt sc->sc_txq[i], hwq_link);
498 1.1 matt else
499 1.1 matt SLIST_INSERT_AFTER(last_hwq, sc->sc_txq[i],
500 1.1 matt hwq_link);
501 1.1 matt }
502 1.1 matt }
503 1.1 matt
504 1.1 matt gmc_filter_change(sc);
505 1.1 matt
506 1.9 msaitoh mask = DMAVR_LOOPBACK | DMAVR_DROP_SMALL_ACK | DMAVR_EXTRABYTES_MASK
507 1.9 msaitoh | DMAVR_RXBURSTSIZE_MASK | DMAVR_RXBUSWIDTH_MASK
508 1.9 msaitoh | DMAVR_TXBURSTSIZE_MASK | DMAVR_TXBUSWIDTH_MASK;
509 1.9 msaitoh new = DMAVR_RXDMA_ENABLE | DMAVR_TXDMA_ENABLE
510 1.9 msaitoh | DMAVR_EXTRABYTES(2)
511 1.9 msaitoh | DMAVR_RXBURSTSIZE(DMAVR_BURSTSIZE_32W)
512 1.9 msaitoh | DMAVR_RXBUSWIDTH(DMAVR_BUSWIDTH_32BITS)
513 1.9 msaitoh | DMAVR_TXBURSTSIZE(DMAVR_BURSTSIZE_32W)
514 1.9 msaitoh | DMAVR_TXBUSWIDTH(DMAVR_BUSWIDTH_32BITS);
515 1.1 matt new |= sc->sc_dmavr & ~mask;
516 1.1 matt if (sc->sc_dmavr != new) {
517 1.1 matt sc->sc_dmavr = new;
518 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR,
519 1.1 matt sc->sc_dmavr);
520 1.3 matt aprint_debug_dev(sc->sc_dev, "gmc_ifinit: dmavr=%#x/%#x\n",
521 1.2 matt sc->sc_dmavr,
522 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR));
523 1.1 matt }
524 1.1 matt
525 1.9 msaitoh mask = CONFIG0_MAXLEN_MASK | CONFIG0_TX_DISABLE | CONFIG0_RX_DISABLE
526 1.9 msaitoh | CONFIG0_LOOPBACK |/*CONFIG0_SIM_TEST|*/CONFIG0_INVERSE_RXC_RGMII
527 1.9 msaitoh | CONFIG0_RGMII_INBAND_STATUS_ENABLE;
528 1.9 msaitoh new = CONFIG0_MAXLEN(CONFIG0_MAXLEN_1536) | CONFIG0_R_LATCHED_MMII;
529 1.1 matt new |= (sc->sc_gmac_config[0] & ~mask);
530 1.1 matt if (sc->sc_gmac_config[0] != new) {
531 1.1 matt sc->sc_gmac_config[0] = new;
532 1.2 matt bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0,
533 1.1 matt sc->sc_gmac_config[0]);
534 1.3 matt aprint_debug_dev(sc->sc_dev, "gmc_ifinit: config0=%#x/%#x\n",
535 1.2 matt sc->sc_gmac_config[0],
536 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0));
537 1.1 matt }
538 1.1 matt
539 1.3 matt psc->sc_rxpkts_per_sec +=
540 1.3 matt gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);
541 1.1 matt
542 1.1 matt /*
543 1.1 matt * If we will be the only active interface, make sure the sw freeq
544 1.1 matt * interrupt gets routed to us.
545 1.1 matt */
546 1.1 matt if (psc->sc_running == 0
547 1.1 matt && (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0) != sc->sc_port1)) {
548 1.1 matt psc->sc_int_select[4] ^= INT4_SW_FREEQ_EMPTY;
549 1.1 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
550 1.1 matt psc->sc_int_select[4]);
551 1.1 matt }
552 1.1 matt sc->sc_int_enabled[0] = sc->sc_int_mask[0]
553 1.1 matt & (INT0_TXDERR|INT0_TXPERR|INT0_RXDERR|INT0_RXPERR|INT0_SWTXQ_EOF);
554 1.1 matt sc->sc_int_enabled[1] = sc->sc_int_mask[1] & INT1_DEF_RXQ_EOF;
555 1.1 matt sc->sc_int_enabled[4] = INT4_SW_FREEQ_EMPTY | (sc->sc_int_mask[4]
556 1.9 msaitoh & (INT4_TX_FAIL | INT4_MIB_HEMIWRAP | INT4_RX_FIFO_OVRN
557 1.9 msaitoh | INT4_RGMII_STSCHG));
558 1.1 matt
559 1.1 matt psc->sc_int_enabled[0] |= sc->sc_int_enabled[0];
560 1.1 matt psc->sc_int_enabled[1] |= sc->sc_int_enabled[1];
561 1.1 matt psc->sc_int_enabled[4] |= sc->sc_int_enabled[4];
562 1.1 matt
563 1.1 matt gmac_intr_update(psc);
564 1.1 matt
565 1.1 matt if ((ifp->if_flags & IFF_RUNNING) == 0)
566 1.1 matt mii_tick(&sc->sc_mii);
567 1.1 matt
568 1.1 matt ifp->if_flags |= IFF_RUNNING;
569 1.1 matt psc->sc_running |= (sc->sc_port1 ? 2 : 1);
570 1.1 matt
571 1.1 matt callout_schedule(&sc->sc_mii_ch, hz);
572 1.9 msaitoh
573 1.1 matt return 0;
574 1.1 matt
575 1.1 matt failed:
576 1.1 matt gmc_ifstop(ifp, true);
577 1.1 matt return ENOMEM;
578 1.1 matt }
579 1.1 matt
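/*
 * Interrupt handler (shared by both ports): report DMA errors, reclaim
 * completed transmit descriptors, replenish the software free queue,
 * consume the default receive queue and kick the transmit path when
 * descriptors have been freed.
 */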
580 1.1 matt static int
581 1.1 matt gmc_intr(void *arg)
582 1.1 matt {
583 1.1 matt struct gmc_softc * const sc = arg;
584 1.1 matt uint32_t int0_status, int1_status, int4_status;
585 1.1 matt uint32_t status;
586 1.1 matt bool do_ifstart = false;
587 1.1 matt int rv = 0;
588 1.1 matt
589 1.3 matt aprint_debug_dev(sc->sc_dev, "gmac_intr: entry\n");
590 1.3 matt
591 1.1 matt int0_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
592 1.1 matt GMAC_INT0_STATUS);
593 1.1 matt int1_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
594 1.1 matt GMAC_INT1_STATUS);
595 1.1 matt int4_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
596 1.1 matt GMAC_INT4_STATUS);
597 1.1 matt
598 1.3 matt aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
599 1.2 matt int0_status, int1_status,
600 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
601 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
602 1.2 matt int4_status);
603 1.2 matt
604 1.3 matt #if 0
605 1.3 matt aprint_debug_dev(sc->sc_dev, "gmac_intr: mask=%#x/%#x/%#x/%#x/%#x\n",
606 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK),
607 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK),
608 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK),
609 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK),
610 1.2 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK));
611 1.3 matt #endif
612 1.2 matt
613 1.1 matt status = int0_status & sc->sc_int_mask[0];
614 1.9 msaitoh if (status & (INT0_TXDERR | INT0_TXPERR)) {
615 1.1 matt aprint_error_dev(sc->sc_dev,
616 1.2 matt "transmit%s%s error: %#x %08x bufaddr %#x\n",
617 1.1 matt status & INT0_TXDERR ? " data" : "",
618 1.1 matt status & INT0_TXPERR ? " protocol" : "",
619 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
620 1.3 matt GMAC_DMA_TX_CUR_DESC),
621 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
622 1.3 matt GMAC_SW_TX_Q0_RWPTR),
623 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
624 1.3 matt GMAC_DMA_TX_DESC2));
625 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
626 1.9 msaitoh status & (INT0_TXDERR | INT0_TXPERR));
627 1.3 matt Debugger();
628 1.1 matt }
629 1.9 msaitoh if (status & (INT0_RXDERR | INT0_RXPERR)) {
630 1.1 matt aprint_error_dev(sc->sc_dev,
631 1.3 matt "receive%s%s error: %#x %#x=%#x/%#x/%#x/%#x\n",
632 1.3 matt status & INT0_RXDERR ? " data" : "",
633 1.3 matt status & INT0_RXPERR ? " protocol" : "",
634 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
635 1.3 matt GMAC_DMA_RX_CUR_DESC),
636 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh,
637 1.3 matt GMAC_SWFREEQ_RWPTR),
638 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
639 1.3 matt GMAC_DMA_RX_DESC0),
640 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
641 1.3 matt GMAC_DMA_RX_DESC1),
642 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
643 1.3 matt GMAC_DMA_RX_DESC2),
644 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
645 1.3 matt GMAC_DMA_RX_DESC3));
646 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
647 1.9 msaitoh status & (INT0_RXDERR | INT0_RXPERR));
648 1.2 matt Debugger();
649 1.1 matt }
650 1.1 matt if (status & INT0_SWTXQ_EOF) {
651 1.1 matt status &= INT0_SWTXQ_EOF;
652 1.1 matt for (int i = 0; status && i < __arraycount(sc->sc_txq); i++) {
653 1.1 matt if (status & INT0_SWTXQn_EOF(i)) {
654 1.1 matt gmac_hwqueue_sync(sc->sc_txq[i]);
655 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh,
656 1.3 matt GMAC_INT0_STATUS,
657 1.9 msaitoh sc->sc_int_mask[0] & (INT0_SWTXQn_EOF(i)
658 1.9 msaitoh | INT0_SWTXQn_FIN(i)));
659 1.1 matt status &= ~INT0_SWTXQn_EOF(i);
660 1.1 matt }
661 1.1 matt }
662 1.1 matt do_ifstart = true;
663 1.1 matt rv = 1;
664 1.1 matt }
665 1.1 matt
666 1.3 matt if (int4_status & INT4_SW_FREEQ_EMPTY) {
667 1.3 matt struct gmac_softc * const psc = sc->sc_psc;
668 1.3 matt psc->sc_rxpkts_per_sec +=
669 1.3 matt gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);
670 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS,
671 1.3 matt int4_status & INT4_SW_FREEQ_EMPTY);
672 1.1 matt rv = 1;
673 1.1 matt }
674 1.1 matt
675 1.3 matt status = int1_status & sc->sc_int_mask[1];
676 1.3 matt if (status & INT1_DEF_RXQ_EOF) {
677 1.3 matt struct gmac_softc * const psc = sc->sc_psc;
678 1.3 matt psc->sc_rxpkts_per_sec +=
679 1.3 matt gmac_hwqueue_consume(sc->sc_rxq, psc->sc_swfree_min);
680 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS,
681 1.3 matt status & INT1_DEF_RXQ_EOF);
682 1.1 matt rv = 1;
683 1.1 matt }
684 1.2 matt
685 1.1 matt status = int4_status & sc->sc_int_enabled[4];
686 1.1 matt if (status & INT4_TX_FAIL) {
687 1.1 matt }
688 1.1 matt if (status & INT4_MIB_HEMIWRAP) {
689 1.1 matt }
690 1.1 matt if (status & INT4_RX_XON) {
691 1.1 matt }
692 1.1 matt if (status & INT4_RX_XOFF) {
693 1.1 matt }
694 1.1 matt if (status & INT4_TX_XON) {
695 1.1 matt }
696 1.1 matt if (status & INT4_TX_XOFF) {
697 1.1 matt }
698 1.1 matt if (status & INT4_RX_FIFO_OVRN) {
699 1.3 matt #if 0
700 1.3 matt if (sc->sc_psc->sc_swfree_min < MAX_RXMAPS) {
701 1.2 matt sc->sc_psc->sc_swfree_min++;
702 1.3 matt gmac_swfree_min_update(psc);
703 1.3 matt }
704 1.3 matt #endif
705 1.1 matt sc->sc_if.if_ierrors++;
706 1.1 matt }
707 1.1 matt if (status & INT4_RGMII_STSCHG) {
708 1.6 msaitoh mii_pollstat(&sc->sc_mii);
709 1.1 matt }
710 1.3 matt bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS, status);
711 1.1 matt
712 1.1 matt if (do_ifstart)
713 1.7 ozaki if_schedule_deferred_start(&sc->sc_if);
714 1.1 matt
715 1.3 matt aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
716 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS),
717 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS),
718 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
719 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
720 1.3 matt bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS));
721 1.3 matt aprint_debug_dev(sc->sc_dev, "gmac_intr: exit rv=%d\n", rv);
722 1.1 matt return rv;
723 1.1 matt }
724 1.1 matt
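/*
 * Match one instance per GMAC port, sanity-checking the PHY address,
 * port number and interrupt supplied by the parent gmac device.
 */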
725 1.1 matt static int
726 1.1 matt gmc_match(device_t parent, cfdata_t cf, void *aux)
727 1.1 matt {
728 1.1 matt struct gmac_softc *psc = device_private(parent);
729 1.1 matt struct gmac_attach_args *gma = aux;
730 1.1 matt
731 1.1 matt if ((unsigned int)gma->gma_phy > 31)
732 1.1 matt return 0;
733 1.1 matt if ((unsigned int)gma->gma_port > 1)
734 1.1 matt return 0;
735 1.1 matt if (gma->gma_intr < 1 || gma->gma_intr > 2)
736 1.1 matt return 0;
737 1.1 matt
738 1.1 matt if (psc->sc_ports & (1 << gma->gma_port))
739 1.1 matt return 0;
740 1.1 matt
741 1.1 matt return 1;
742 1.1 matt }
743 1.1 matt
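/*
 * Attach a port: map its DMA and MAC register subregions, hook up the
 * ifnet, ethercom and MII, and snapshot the registers the driver caches.
 * The Ethernet addresses are hard-coded placeholders, and only port 0
 * establishes the interrupt handler here.
 */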
744 1.1 matt static void
745 1.1 matt gmc_attach(device_t parent, device_t self, void *aux)
746 1.1 matt {
747 1.1 matt struct gmac_softc * const psc = device_private(parent);
748 1.1 matt struct gmc_softc * const sc = device_private(self);
749 1.1 matt struct gmac_attach_args *gma = aux;
750 1.1 matt struct ifnet * const ifp = &sc->sc_if;
751 1.9 msaitoh struct mii_data * const mii = &sc->sc_mii;
752 1.1 matt static const char eaddrs[2][6] = {
753 1.1 matt "\x0\x52\xc3\x11\x22\x33",
754 1.1 matt "\x0\x52\xc3\x44\x55\x66",
755 1.1 matt };
756 1.1 matt
757 1.1 matt psc->sc_ports |= 1 << gma->gma_port;
758 1.1 matt sc->sc_port1 = (gma->gma_port == 1);
759 1.3 matt sc->sc_phy = gma->gma_phy;
760 1.1 matt
761 1.1 matt sc->sc_dev = self;
762 1.1 matt sc->sc_psc = psc;
763 1.1 matt sc->sc_iot = psc->sc_iot;
764 1.1 matt sc->sc_ioh = psc->sc_ioh;
765 1.1 matt sc->sc_dmat = psc->sc_dmat;
766 1.1 matt
767 1.9 msaitoh bus_space_subregion(sc->sc_iot, sc->sc_ioh,
768 1.1 matt GMAC_PORTn_DMA_OFFSET(gma->gma_port), GMAC_PORTn_DMA_SIZE,
769 1.1 matt &sc->sc_dma_ioh);
770 1.9 msaitoh bus_space_subregion(sc->sc_iot, sc->sc_ioh,
771 1.1 matt GMAC_PORTn_GMAC_OFFSET(gma->gma_port), GMAC_PORTn_GMAC_SIZE,
772 1.1 matt &sc->sc_gmac_ioh);
773 1.1 matt aprint_normal("\n");
774 1.1 matt aprint_naive("\n");
775 1.1 matt
776 1.1 matt strlcpy(ifp->if_xname, device_xname(self), sizeof(ifp->if_xname));
777 1.9 msaitoh ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
778 1.1 matt ifp->if_softc = sc;
779 1.1 matt ifp->if_ioctl = gmc_ifioctl;
780 1.1 matt ifp->if_stop = gmc_ifstop;
781 1.1 matt ifp->if_start = gmc_ifstart;
782 1.1 matt ifp->if_init = gmc_ifinit;
783 1.1 matt
784 1.1 matt IFQ_SET_READY(&ifp->if_snd);
785 1.1 matt
786 1.1 matt sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
787 1.9 msaitoh sc->sc_ec.ec_mii = mii;
788 1.1 matt
789 1.9 msaitoh mii->mii_ifp = ifp;
790 1.9 msaitoh mii->mii_statchg = gmc_mii_statchg;
791 1.9 msaitoh mii->mii_readreg = gma->gma_mii_readreg;
792 1.9 msaitoh mii->mii_writereg = gma->gma_mii_writereg;
793 1.1 matt
794 1.9 msaitoh ifmedia_init(&mii->mii_media, 0, gmc_mediachange, gmc_mediastatus);
795 1.1 matt
796 1.1 matt if_attach(ifp);
797 1.7 ozaki if_deferred_start_init(ifp, NULL);
798 1.1 matt ether_ifattach(ifp, eaddrs[gma->gma_port]);
799 1.9 msaitoh mii_attach(sc->sc_dev, mii, 0xffffffff,
800 1.1 matt gma->gma_phy, MII_OFFSET_ANY, 0);
801 1.1 matt
802 1.9 msaitoh if (LIST_EMPTY(&mii->mii_phys)) {
803 1.9 msaitoh ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
804 1.9 msaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
805 1.1 matt } else {
806 1.9 msaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
807 1.9 msaitoh // ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX);
808 1.1 matt }
809 1.1 matt
810 1.1 matt sc->sc_gmac_status = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
811 1.1 matt GMAC_STATUS);
812 1.1 matt sc->sc_gmac_sta_add[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
813 1.1 matt GMAC_STA_ADD0);
814 1.1 matt sc->sc_gmac_sta_add[1] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
815 1.1 matt GMAC_STA_ADD1);
816 1.1 matt sc->sc_gmac_sta_add[2] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
817 1.1 matt GMAC_STA_ADD2);
818 1.1 matt sc->sc_gmac_mcast_filter[0] = bus_space_read_4(sc->sc_iot,
819 1.1 matt sc->sc_gmac_ioh, GMAC_MCAST_FILTER0);
820 1.1 matt sc->sc_gmac_mcast_filter[1] = bus_space_read_4(sc->sc_iot,
821 1.1 matt sc->sc_gmac_ioh, GMAC_MCAST_FILTER1);
822 1.1 matt sc->sc_gmac_rx_filter = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
823 1.1 matt GMAC_RX_FILTER);
824 1.1 matt sc->sc_gmac_config[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
825 1.1 matt GMAC_CONFIG0);
826 1.1 matt sc->sc_dmavr = bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR);
827 1.1 matt
828 1.1 matt /* sc->sc_int_enabled is already zeroed */
829 1.1 matt sc->sc_int_mask[0] = (sc->sc_port1 ? INT0_GMAC1 : INT0_GMAC0);
830 1.1 matt sc->sc_int_mask[1] = (sc->sc_port1 ? INT1_GMAC1 : INT1_GMAC0);
831 1.1 matt sc->sc_int_mask[2] = (sc->sc_port1 ? INT2_GMAC1 : INT2_GMAC0);
832 1.1 matt sc->sc_int_mask[3] = (sc->sc_port1 ? INT3_GMAC1 : INT3_GMAC0);
833 1.1 matt sc->sc_int_mask[4] = (sc->sc_port1 ? INT4_GMAC1 : INT4_GMAC0);
834 1.1 matt
835 1.2 matt if (!sc->sc_port1) {
836 1.1 matt sc->sc_ih = intr_establish(gma->gma_intr, IPL_NET, IST_LEVEL_HIGH,
837 1.1 matt gmc_intr, sc);
838 1.1 matt KASSERT(sc->sc_ih != NULL);
839 1.2 matt }
840 1.1 matt
841 1.1 matt callout_init(&sc->sc_mii_ch, 0);
842 1.1 matt callout_setfunc(&sc->sc_mii_ch, gmc_mii_tick, sc);
843 1.1 matt
844 1.1 matt aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
845 1.1 matt ether_sprintf(CLLADDR(sc->sc_if.if_sadl)));
846 1.1 matt }
847 1.1 matt
848 1.1 matt CFATTACH_DECL_NEW(gmc, sizeof(struct gmc_softc),
849 1.1 matt gmc_match, gmc_attach, NULL, NULL);