/*	$NetBSD: if_mc.c,v 1.8.2.3 2004/09/21 13:18:19 skrll Exp $	*/

/*-
 * Copyright (c) 1997 David Huang <khym@bga.com>
 * All rights reserved.
 *
 * Portions of this code are based on code by Denton Gentry <denny1@home.com>
 * and Yanagisawa Takeshi <yanagisw@aa.ap.titech.ac.jp>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Bus attachment and DMA routines for the mc driver (onboard AMD
 * Am79C940 MACE ethernet found on some Power Macintosh models).
 * Uses DBDMA channels for transfers to and from the MACE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mc.c,v 1.8.2.3 2004/09/21 13:18:19 skrll Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <uvm/uvm_extern.h>

#include <dev/ofw/openfirm.h>

#include <machine/pio.h>
#include <machine/bus.h>
#include <machine/autoconf.h>

#include <macppc/dev/am79c950reg.h>
#include <macppc/dev/if_mcvar.h>

#define MC_BUFSIZE 0x800
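/*
 * Each receive DMA slot is MC_BUFSIZE (2KB) bytes.  mc_reset_rxdma()
 * below programs MC_RXDMABUFS DBDMA IN commands, one per slot, each
 * transferring at most ETHERMTU + 22 bytes (frame data plus the
 * trailing MACE receive status), so a received frame always fits
 * within its slot of the MC_NPAGES * PAGE_SIZE receive buffer.
 */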

hide int	mc_match __P((struct device *, struct cfdata *, void *));
hide void	mc_attach __P((struct device *, struct device *, void *));
hide void	mc_init __P((struct mc_softc *sc));
hide void	mc_putpacket __P((struct mc_softc *sc, u_int len));
hide int	mc_dmaintr __P((void *arg));
hide void	mc_reset_rxdma __P((struct mc_softc *sc));
hide void	mc_reset_txdma __P((struct mc_softc *sc));
hide void	mc_select_utp __P((struct mc_softc *sc));
hide void	mc_select_aui __P((struct mc_softc *sc));
hide int	mc_mediachange __P((struct mc_softc *sc));
hide void	mc_mediastatus __P((struct mc_softc *sc, struct ifmediareq *));

int mc_supmedia[] = {
	IFM_ETHER | IFM_10_T,
	IFM_ETHER | IFM_10_5,
	/*IFM_ETHER | IFM_AUTO,*/
};

#define N_SUPMEDIA (sizeof(mc_supmedia) / sizeof(mc_supmedia[0]))

CFATTACH_DECL(mc, sizeof(struct mc_softc),
    mc_match, mc_attach, NULL, NULL);

hide int
mc_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct confargs *ca = aux;

	if (strcmp(ca->ca_name, "mace") != 0)
		return 0;

	/* requires 6 regs */
	if (ca->ca_nreg / sizeof(int) != 6)
		return 0;

	/* requires 3 intrs */
	if (ca->ca_nintr / sizeof(int) != 3)
		return 0;

	return 1;
}

hide void
mc_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct confargs *ca = aux;
	struct mc_softc *sc = (struct mc_softc *)self;
	u_int8_t myaddr[ETHER_ADDR_LEN];
	u_int *reg;

	sc->sc_node = ca->ca_node;

	reg = ca->ca_reg;
	reg[0] += ca->ca_baseaddr;
	reg[2] += ca->ca_baseaddr;
	reg[4] += ca->ca_baseaddr;

	sc->sc_txdma = mapiodev(reg[2], reg[3]);
	sc->sc_rxdma = mapiodev(reg[4], reg[5]);
	bus_space_map(sc->sc_regt, reg[0], reg[1], 0, &sc->sc_regh);
	/* XXX sc_regt is uninitialized */
	sc->sc_tail = 0;
	sc->sc_txdmacmd = dbdma_alloc(sizeof(dbdma_command_t) * 2);
	sc->sc_rxdmacmd = (void *)dbdma_alloc(sizeof(dbdma_command_t) * 8);
	memset(sc->sc_txdmacmd, 0, sizeof(dbdma_command_t) * 2);
	memset(sc->sc_rxdmacmd, 0, sizeof(dbdma_command_t) * 8);

	printf(": irq %d,%d,%d",
	    ca->ca_intr[0], ca->ca_intr[1], ca->ca_intr[2]);

	if (OF_getprop(sc->sc_node, "local-mac-address", myaddr, 6) != 6) {
		printf(": failed to get MAC address.\n");
		return;
	}

	/* allocate memory for transmit buffer and mark it non-cacheable */
	sc->sc_txbuf = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);
	sc->sc_txbuf_phys = kvtop(sc->sc_txbuf);
	memset(sc->sc_txbuf, 0, PAGE_SIZE);

	/*
	 * allocate memory for receive buffer and mark it non-cacheable
	 * XXX This should use the bus_dma interface, since the buffer
	 * needs to be physically contiguous.  However, it seems that
	 * at least on my system, malloc() does allocate contiguous
	 * memory.  If it's not, suggest reducing the number of buffers
	 * to 2, which will fit in one 4K page.
	 */
	sc->sc_rxbuf = malloc(MC_NPAGES * PAGE_SIZE, M_DEVBUF, M_WAITOK);
	sc->sc_rxbuf_phys = kvtop(sc->sc_rxbuf);
	memset(sc->sc_rxbuf, 0, MC_NPAGES * PAGE_SIZE);
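	/*
	 * A minimal sketch of the bus_dma approach suggested in the
	 * comment above, assuming a bus_dma_tag_t "dmat" were available
	 * from the attach arguments (it is not fetched by this driver);
	 * illustrative only, not what the code below does:
	 *
	 *	bus_dma_segment_t seg;
	 *	int rseg;
	 *
	 *	if (bus_dmamem_alloc(dmat, MC_NPAGES * PAGE_SIZE, PAGE_SIZE,
	 *	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT) == 0 &&
	 *	    bus_dmamem_map(dmat, &seg, rseg, MC_NPAGES * PAGE_SIZE,
	 *	    (caddr_t *)&sc->sc_rxbuf,
	 *	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT) == 0)
	 *		sc->sc_rxbuf_phys = seg.ds_addr;
	 *
	 * Asking for a single segment guarantees physical contiguity,
	 * which malloc() does not.
	 */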

	if ((int)sc->sc_txbuf & PGOFSET)
		printf("txbuf is not page-aligned\n");
	if ((int)sc->sc_rxbuf & PGOFSET)
		printf("rxbuf is not page-aligned\n");

	sc->sc_bus_init = mc_init;
	sc->sc_putpacket = mc_putpacket;

	/* disable receive DMA */
	dbdma_reset(sc->sc_rxdma);

	/* disable transmit DMA */
	dbdma_reset(sc->sc_txdma);

	/* install interrupt handlers */
	/*intr_establish(ca->ca_intr[1], IST_LEVEL, IPL_NET, mc_dmaintr, sc);*/
	intr_establish(ca->ca_intr[2], IST_LEVEL, IPL_NET, mc_dmaintr, sc);
	intr_establish(ca->ca_intr[0], IST_LEVEL, IPL_NET, mcintr, sc);

	sc->sc_biucc = XMTSP_64;
	sc->sc_fifocc = XMTFW_16 | RCVFW_64 | XMTFWU | RCVFWU |
	    XMTBRST | RCVBRST;
	/*sc->sc_plscc = PORTSEL_10BT;*/
	sc->sc_plscc = PORTSEL_GPSI | ENPLSIO;

	/* mcsetup() returns nonzero if something fails */
	if (mcsetup(sc, myaddr)) {
		printf("mcsetup failed\n");
		return;
	}

#ifdef NOTYET
	sc->sc_mediachange = mc_mediachange;
	sc->sc_mediastatus = mc_mediastatus;
	sc->sc_supmedia = mc_supmedia;
	sc->sc_nsupmedia = N_SUPMEDIA;
	sc->sc_defaultmedia = IFM_ETHER | IFM_10_T;
#endif
}

/* Bus-specific initialization */
hide void
mc_init(sc)
	struct mc_softc *sc;
{
	mc_reset_rxdma(sc);
	mc_reset_txdma(sc);
}

hide void
mc_putpacket(sc, len)
	struct mc_softc *sc;
	u_int len;
{
	dbdma_command_t *cmd = sc->sc_txdmacmd;

	DBDMA_BUILD(cmd, DBDMA_CMD_OUT_LAST, 0, len, sc->sc_txbuf_phys,
	    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

	dbdma_start(sc->sc_txdma, sc->sc_txdmacmd);
}

/*
 * Interrupt handler for MACE receive DMA completion.  Walks the
 * receive DBDMA ring starting at sc_tail, hands each completed
 * frame to mc_rint(), then re-arms the descriptor and restarts
 * the channel.
 */
int
mc_dmaintr(arg)
	void *arg;
{
	struct mc_softc *sc = arg;
	int status, offset, statoff;
	int datalen, resid;
	int i, n;
	dbdma_command_t *cmd;

	/* We've received some packets from the MACE */

	/* Loop through, processing each of the packets */
	i = sc->sc_tail;
	for (n = 0; n < MC_RXDMABUFS; n++, i++) {
		if (i == MC_RXDMABUFS)
			i = 0;

		cmd = &sc->sc_rxdmacmd[i];
		/* flushcache(cmd, sizeof(dbdma_command_t)); */
		status = dbdma_ld16(&cmd->d_status);
		resid = dbdma_ld16(&cmd->d_resid);

		/*if ((status & D_ACTIVE) == 0)*/
		if ((status & 0x40) == 0)
			continue;

#if 1
		if (dbdma_ld16(&cmd->d_count) != ETHERMTU + 22)
			printf("bad d_count\n");
#endif

		datalen = dbdma_ld16(&cmd->d_count) - resid;
		datalen -= 4;	/* 4 == status bytes */

		if (datalen < 4 + sizeof(struct ether_header)) {
			printf("short packet len=%d\n", datalen);
			/* continue; */
			goto next;
		}

		offset = i * MC_BUFSIZE;
		statoff = offset + datalen;

		DBDMA_BUILD_CMD(cmd, DBDMA_CMD_STOP, 0, 0, 0, 0);
		__asm __volatile("eieio");

		/* flushcache(sc->sc_rxbuf + offset, datalen + 4); */

		/*
		 * The MACE appends four receive status bytes to the
		 * frame data; copy them out for the MI receive path.
		 */
		sc->sc_rxframe.rx_rcvcnt = sc->sc_rxbuf[statoff + 0];
		sc->sc_rxframe.rx_rcvsts = sc->sc_rxbuf[statoff + 1];
		sc->sc_rxframe.rx_rntpc = sc->sc_rxbuf[statoff + 2];
		sc->sc_rxframe.rx_rcvcc = sc->sc_rxbuf[statoff + 3];
		sc->sc_rxframe.rx_frame = sc->sc_rxbuf + offset;

		mc_rint(sc);

next:
		DBDMA_BUILD_CMD(cmd, DBDMA_CMD_IN_LAST, 0, DBDMA_INT_ALWAYS,
		    DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
		__asm __volatile("eieio");
		cmd->d_status = 0;
		cmd->d_resid = 0;
		sc->sc_tail = i + 1;
	}

	dbdma_continue(sc->sc_rxdma);

	return 1;
}

hide void
mc_reset_rxdma(sc)
	struct mc_softc *sc;
{
	dbdma_command_t *cmd = sc->sc_rxdmacmd;
	dbdma_regmap_t *dmareg = sc->sc_rxdma;
	int i;
	u_int8_t maccc;

	/* Disable receiver, reset the DMA channels */
	maccc = NIC_GET(sc, MACE_MACCC);
	NIC_PUT(sc, MACE_MACCC, maccc & ~ENRCV);

	dbdma_reset(dmareg);

	for (i = 0; i < MC_RXDMABUFS; i++) {
		DBDMA_BUILD(cmd, DBDMA_CMD_IN_LAST, 0, ETHERMTU + 22,
		    sc->sc_rxbuf_phys + MC_BUFSIZE * i, DBDMA_INT_ALWAYS,
		    DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
		cmd++;
	}

	DBDMA_BUILD(cmd, DBDMA_CMD_NOP, 0, 0, 0,
	    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_ALWAYS);
	dbdma_st32(&cmd->d_cmddep, kvtop((caddr_t)sc->sc_rxdmacmd));
	cmd++;

	dbdma_start(dmareg, sc->sc_rxdmacmd);

	sc->sc_tail = 0;

	/* Reenable receiver, reenable DMA */
	NIC_PUT(sc, MACE_MACCC, maccc);
}

hide void
mc_reset_txdma(sc)
	struct mc_softc *sc;
{
	dbdma_command_t *cmd = sc->sc_txdmacmd;
	dbdma_regmap_t *dmareg = sc->sc_txdma;
	u_int8_t maccc;

	/* disable transmitter */
	maccc = NIC_GET(sc, MACE_MACCC);
	NIC_PUT(sc, MACE_MACCC, maccc & ~ENXMT);

	dbdma_reset(dmareg);

	DBDMA_BUILD(cmd, DBDMA_CMD_OUT_LAST, 0, 0, sc->sc_txbuf_phys,
	    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
	cmd++;
	DBDMA_BUILD(cmd, DBDMA_CMD_STOP, 0, 0, 0,
	    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

	out32rb(&dmareg->d_cmdptrhi, 0);
	out32rb(&dmareg->d_cmdptrlo, kvtop((caddr_t)sc->sc_txdmacmd));

	/* restore old value */
	NIC_PUT(sc, MACE_MACCC, maccc);
}

void
mc_select_utp(sc)
	struct mc_softc *sc;
{
	sc->sc_plscc = PORTSEL_GPSI | ENPLSIO;
}

void
mc_select_aui(sc)
	struct mc_softc *sc;
{
	sc->sc_plscc = PORTSEL_AUI;
}

int
mc_mediachange(sc)
	struct mc_softc *sc;
{
	struct ifmedia *ifm = &sc->sc_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {

	case IFM_10_T:
		mc_select_utp(sc);
		break;

	case IFM_10_5:
		mc_select_aui(sc);
		break;

	default:
		return EINVAL;
	}

	return 0;
}

void
mc_mediastatus(sc, ifmr)
	struct mc_softc *sc;
	struct ifmediareq *ifmr;
{
	if (sc->sc_plscc == PORTSEL_AUI)
		ifmr->ifm_active = IFM_ETHER | IFM_10_5;
	else
		ifmr->ifm_active = IFM_ETHER | IFM_10_T;
}