/*	$NetBSD: if_sk.c,v 1.87.2.2 2020/04/08 14:08:09 martin Exp $	*/
2
3 /*-
4 * Copyright (c) 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* $OpenBSD: if_sk.c,v 1.116 2006/06/22 23:06:03 brad Exp $ */
30
31 /*
32 * Copyright (c) 1997, 1998, 1999, 2000
33 * Bill Paul <wpaul (at) ctr.columbia.edu>. All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgement:
45 * This product includes software developed by Bill Paul.
46 * 4. Neither the name of the author nor the names of any co-contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
54 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
55 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
56 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
57 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
58 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
59 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
60 * THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
63 */
64
65 /*
66 * Copyright (c) 2003 Nathan L. Binkert <binkertn (at) umich.edu>
67 *
68 * Permission to use, copy, modify, and distribute this software for any
69 * purpose with or without fee is hereby granted, provided that the above
70 * copyright notice and this permission notice appear in all copies.
71 *
72 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
73 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
74 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
75 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
76 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
77 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
78 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
79 */
80
81 /*
82 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
83 * the SK-984x series adapters, both single port and dual port.
84 * References:
85 * The XaQti XMAC II datasheet,
86 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
87 * The SysKonnect GEnesis manual, http://www.syskonnect.com
88 *
89 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
90 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
91 * convenience to others until Vitesse corrects this problem:
92 *
93 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
94 *
95 * Written by Bill Paul <wpaul (at) ee.columbia.edu>
96 * Department of Electrical Engineering
97 * Columbia University, New York City
98 */
99
100 /*
101 * The SysKonnect gigabit ethernet adapters consist of two main
102 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
103 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
104 * components and a PHY while the GEnesis controller provides a PCI
105 * interface with DMA support. Each card may have between 512K and
106 * 2MB of SRAM on board depending on the configuration.
107 *
108 * The SysKonnect GEnesis controller can have either one or two XMAC
109 * chips connected to it, allowing single or dual port NIC configurations.
110 * SysKonnect has the distinction of being the only vendor on the market
111 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
112 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
113 * XMAC registers. This driver takes advantage of these features to allow
114 * both XMACs to operate as independent interfaces.
115 */
116
117 #include <sys/cdefs.h>
118 __KERNEL_RCSID(0, "$NetBSD: if_sk.c,v 1.87.2.2 2020/04/08 14:08:09 martin Exp $");
119
120 #include <sys/param.h>
121 #include <sys/systm.h>
122 #include <sys/sockio.h>
123 #include <sys/mbuf.h>
124 #include <sys/malloc.h>
125 #include <sys/mutex.h>
126 #include <sys/kernel.h>
127 #include <sys/socket.h>
128 #include <sys/device.h>
129 #include <sys/queue.h>
130 #include <sys/callout.h>
131 #include <sys/sysctl.h>
132 #include <sys/endian.h>
133
134 #include <net/if.h>
135 #include <net/if_dl.h>
136 #include <net/if_types.h>
137
138 #include <net/if_media.h>
139
140 #include <net/bpf.h>
141 #include <sys/rndsource.h>
142
143 #include <dev/mii/mii.h>
144 #include <dev/mii/miivar.h>
145 #include <dev/mii/brgphyreg.h>
146
147 #include <dev/pci/pcireg.h>
148 #include <dev/pci/pcivar.h>
149 #include <dev/pci/pcidevs.h>
150
151 /* #define SK_USEIOSPACE */
152
153 #include <dev/pci/if_skreg.h>
154 #include <dev/pci/if_skvar.h>
155
156 static int skc_probe(device_t, cfdata_t, void *);
157 static void skc_attach(device_t, device_t, void *);
158 static int sk_probe(device_t, cfdata_t, void *);
159 static void sk_attach(device_t, device_t, void *);
160 static int skcprint(void *, const char *);
161 static int sk_intr(void *);
162 static void sk_intr_bcom(struct sk_if_softc *);
163 static void sk_intr_xmac(struct sk_if_softc *);
164 static void sk_intr_yukon(struct sk_if_softc *);
165 static void sk_rxeof(struct sk_if_softc *);
166 static void sk_txeof(struct sk_if_softc *);
167 static int sk_encap(struct sk_if_softc *, struct mbuf *, uint32_t *);
168 static void sk_start(struct ifnet *);
169 static int sk_ioctl(struct ifnet *, u_long, void *);
170 static int sk_init(struct ifnet *);
171 static void sk_unreset_xmac(struct sk_if_softc *);
172 static void sk_init_xmac(struct sk_if_softc *);
173 static void sk_unreset_yukon(struct sk_if_softc *);
174 static void sk_init_yukon(struct sk_if_softc *);
175 static void sk_stop(struct ifnet *, int);
176 static void sk_watchdog(struct ifnet *);
177 static int sk_ifmedia_upd(struct ifnet *);
178 static void sk_reset(struct sk_softc *);
179 static int sk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t);
180 static int sk_alloc_jumbo_mem(struct sk_if_softc *);
181 static void *sk_jalloc(struct sk_if_softc *);
182 static void sk_jfree(struct mbuf *, void *, size_t, void *);
183 static int sk_init_rx_ring(struct sk_if_softc *);
184 static int sk_init_tx_ring(struct sk_if_softc *);
185 static uint8_t sk_vpd_readbyte(struct sk_softc *, int);
186 static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int);
187 static void sk_vpd_read(struct sk_softc *);
188
189 static void sk_update_int_mod(struct sk_softc *);
190
191 static int sk_xmac_miibus_readreg(device_t, int, int, uint16_t *);
192 static int sk_xmac_miibus_writereg(device_t, int, int, uint16_t);
193 static void sk_xmac_miibus_statchg(struct ifnet *);
194
195 static int sk_marv_miibus_readreg(device_t, int, int, uint16_t *);
196 static int sk_marv_miibus_writereg(device_t, int, int, uint16_t);
197 static void sk_marv_miibus_statchg(struct ifnet *);
198
199 static uint32_t sk_xmac_hash(void *);
200 static uint32_t sk_yukon_hash(void *);
201 static void sk_setfilt(struct sk_if_softc *, void *, int);
202 static void sk_setmulti(struct sk_if_softc *);
203 static void sk_tick(void *);
204
205 static bool skc_suspend(device_t, const pmf_qual_t *);
206 static bool skc_resume(device_t, const pmf_qual_t *);
207 static bool sk_resume(device_t dv, const pmf_qual_t *);
208
209 /* #define SK_DEBUG 2 */
210 #ifdef SK_DEBUG
211 #define DPRINTF(x) if (skdebug) printf x
212 #define DPRINTFN(n, x) if (skdebug >= (n)) printf x
213 int skdebug = SK_DEBUG;
214
215 static void sk_dump_txdesc(struct sk_tx_desc *, int);
216 static void sk_dump_mbuf(struct mbuf *);
217 static void sk_dump_bytes(const char *, int);
218 #else
219 #define DPRINTF(x)
220 #define DPRINTFN(n, x)
221 #endif
222
223 static int sk_sysctl_handler(SYSCTLFN_PROTO);
224 static int sk_root_num;
225
226 /* supported device vendors */
227 /* PCI_PRODUCT_DLINK_DGE560T_2 might belong in if_msk instead */
/* PCI vendor/product pairs handled by this driver; table is scanned by
 * the match routine and terminated by the all-zero sentinel entry. */
static const struct sk_product {
	pci_vendor_id_t		sk_vendor;
	pci_product_id_t	sk_product;
} sk_products[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C940, },
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE530T, },
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560T_2, },
	{ PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1064, },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SKNET_GE, },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9821v2, },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_SKNET, },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_BELKIN, },
	{ 0, 0, }	/* sentinel */
};
242
243 #define SK_LINKSYS_EG1032_SUBID 0x00151737
244
245 static inline uint32_t
246 sk_win_read_4(struct sk_softc *sc, uint32_t reg)
247 {
248 #ifdef SK_USEIOSPACE
249 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
250 return CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg));
251 #else
252 return CSR_READ_4(sc, reg);
253 #endif
254 }
255
256 static inline uint16_t
257 sk_win_read_2(struct sk_softc *sc, uint32_t reg)
258 {
259 #ifdef SK_USEIOSPACE
260 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
261 return CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg));
262 #else
263 return CSR_READ_2(sc, reg);
264 #endif
265 }
266
267 static inline uint8_t
268 sk_win_read_1(struct sk_softc *sc, uint32_t reg)
269 {
270 #ifdef SK_USEIOSPACE
271 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
272 return CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg));
273 #else
274 return CSR_READ_1(sc, reg);
275 #endif
276 }
277
/*
 * Write a 32-bit chip register.  In I/O-space mode the target window must
 * be selected through SK_RAP before the write; in memory-mapped mode the
 * register is written directly.
 */
static inline void
sk_win_write_4(struct sk_softc *sc, uint32_t reg, uint32_t x)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), x);
#else
	CSR_WRITE_4(sc, reg, x);
#endif
}
288
/* Write a 16-bit chip register (windowed in I/O-space mode, see above). */
static inline void
sk_win_write_2(struct sk_softc *sc, uint32_t reg, uint16_t x)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), x);
#else
	CSR_WRITE_2(sc, reg, x);
#endif
}
299
/* Write an 8-bit chip register (windowed in I/O-space mode, see above). */
static inline void
sk_win_write_1(struct sk_softc *sc, uint32_t reg, uint8_t x)
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), x);
#else
	CSR_WRITE_1(sc, reg, x);
#endif
}
310
311 /*
312 * The VPD EEPROM contains Vital Product Data, as suggested in
313 * the PCI 2.1 specification. The VPD data is separared into areas
314 * denoted by resource IDs. The SysKonnect VPD contains an ID string
315 * resource (the name of the adapter), a read-only area resource
316 * containing various key/data fields and a read/write area which
317 * can be used to store asset management information or log messages.
318 * We read the ID string and read-only into buffers attached to
319 * the controller softc structure for later use. At the moment,
320 * we only use the ID string during sk_attach().
321 */
/*
 * Read one byte from the VPD EEPROM at byte offset `addr'.
 *
 * Latches the address into the VPD address register, then polls (up to
 * SK_TIMEOUT iterations, 1us apart) for the completion flag before
 * reading the data register.
 *
 * NOTE(review): on timeout this returns 0, which is indistinguishable
 * from a genuine 0 data byte -- callers get no error indication.
 */
static uint8_t
sk_vpd_readbyte(struct sk_softc *sc, int addr)
{
	int i;

	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (sk_win_read_2(sc,
		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
			break;
	}

	if (i == SK_TIMEOUT)
		return 0;

	return sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA));
}
340
341 static void
342 sk_vpd_read_res(struct sk_softc *sc, struct vpd_res *res, int addr)
343 {
344 int i;
345 uint8_t *ptr;
346
347 ptr = (uint8_t *)res;
348 for (i = 0; i < sizeof(struct vpd_res); i++)
349 ptr[i] = sk_vpd_readbyte(sc, i + addr);
350 }
351
/*
 * Read the VPD EEPROM: the leading ID-string resource (product name)
 * and the read-only data resource, storing both in malloc'd buffers
 * hung off the controller softc.  Any previously read copies are freed
 * first, so this is safe to call more than once.
 *
 * On an unexpected resource tag this prints an error and returns with
 * the corresponding softc pointer left NULL.
 *
 * NOTE(review): both mallocs use M_NOWAIT yet panic on failure; a
 * graceful failure path would be preferable, but callers' expectations
 * are not visible here -- confirm before changing.
 */
static void
sk_vpd_read(struct sk_softc *sc)
{
	int pos = 0, i;
	struct vpd_res res;

	/* Drop any stale copies from an earlier read. */
	if (sc->sk_vpd_prodname != NULL)
		free(sc->sk_vpd_prodname, M_DEVBUF);
	if (sc->sk_vpd_readonly != NULL)
		free(sc->sk_vpd_readonly, M_DEVBUF);
	sc->sk_vpd_prodname = NULL;
	sc->sk_vpd_readonly = NULL;

	/* First resource must be the ID string (product name). */
	sk_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		aprint_error_dev(sc->sk_dev,
		    "bad VPD resource id: expected %x got %x\n",
		    VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	/* +1 for the NUL terminator appended below. */
	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_prodname == NULL)
		panic("sk_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
	sc->sk_vpd_prodname[i] = '\0';
	pos += i;

	/* Second resource must be the read-only key/data area. */
	sk_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		aprint_error_dev(sc->sk_dev,
		    "bad VPD resource id: expected %x got %x\n",
		    VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_readonly == NULL)
		panic("sk_vpd_read");
	for (i = 0; i < res.vr_len ; i++)
		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
}
399
/*
 * MII read method for XMAC-attached PHYs.
 *
 * Latches phy/reg into XM_PHY_ADDR, performs a throwaway read of
 * XM_PHY_DATA to start the cycle, and -- for external (non-XMAC) PHYs --
 * polls XM_MMUCMD for PHYDATARDY before fetching the result.
 *
 * Returns 0 on success with *val filled in, -1 for an invalid PHY
 * address on the internal XMAC PHY, or ETIMEDOUT if the data-ready bit
 * never appears.
 */
static int
sk_xmac_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct sk_if_softc *sc_if = device_private(dev);
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));

	/* The internal XMAC PHY only answers at address 0. */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return -1;

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);	/* dummy read starts the cycle */
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			aprint_error_dev(sc_if->sk_dev,
			    "phy failed to come ready\n");
			return ETIMEDOUT;
		}
	}
	DELAY(1);
	*val = SK_XM_READ_2(sc_if, XM_PHY_DATA);
	return 0;
}
431
/*
 * MII write method for XMAC-attached PHYs.
 *
 * Waits for the MII interface to go non-busy, writes the data register,
 * then waits for the write to complete.  Returns 0 on success or
 * ETIMEDOUT if either busy-wait expires.
 *
 * NOTE(review): the first poll loop spins without a DELAY() while the
 * second delays 1us per iteration, so the two SK_TIMEOUT windows cover
 * very different real-time spans -- confirm whether that is intended.
 */
static int
sk_xmac_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct sk_if_softc *sc_if = device_private(dev);
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		aprint_error_dev(sc_if->sk_dev, "phy failed to come ready\n");
		return ETIMEDOUT;
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		aprint_error_dev(sc_if->sk_dev, "phy write timed out\n");
		return ETIMEDOUT;
	}

	return 0;
}
465
466 static void
467 sk_xmac_miibus_statchg(struct ifnet *ifp)
468 {
469 struct sk_if_softc *sc_if = ifp->if_softc;
470 struct mii_data *mii = &sc_if->sk_mii;
471
472 DPRINTFN(9, ("sk_xmac_miibus_statchg\n"));
473
474 /*
475 * If this is a GMII PHY, manually set the XMAC's
476 * duplex mode accordingly.
477 */
478 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
479 if ((mii->mii_media_active & IFM_FDX) != 0)
480 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
481 else
482 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
483 }
484 }
485
/*
 * MII read method for the Marvell (Yukon) internal GMAC.
 *
 * Issues a read command through the SMI control register and polls for
 * READ_VALID before fetching the data register.  Returns 0 on success
 * with *val filled in, -1 for PHY addresses/types this MAC does not
 * serve, or ETIMEDOUT if the valid bit never appears.
 */
static int
sk_marv_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct sk_if_softc *sc_if = device_private(dev);
	uint16_t data;
	int i;

	/* Only PHY 0 on a Marvell copper/fiber PHY is reachable here. */
	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
		    phy, reg));
		return -1;
	}

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		data = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (data & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		aprint_error_dev(sc_if->sk_dev, "phy failed to come ready\n");
		return ETIMEDOUT;
	}

	DPRINTFN(9, ("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	*val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#hx\n",
	    phy, reg, *val));

	return 0;
}
526
527 static int
528 sk_marv_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
529 {
530 struct sk_if_softc *sc_if = device_private(dev);
531 int i;
532
533 DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#hx\n",
534 phy, reg, val));
535
536 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
537 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
538 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
539
540 for (i = 0; i < SK_TIMEOUT; i++) {
541 DELAY(1);
542 if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
543 break;
544 }
545
546 if (i == SK_TIMEOUT) {
547 printf("%s: phy write timed out\n",
548 device_xname(sc_if->sk_dev));
549 return ETIMEDOUT;
550 }
551
552 return 0;
553 }
554
/*
 * MII status-change callback for Marvell (Yukon) ports.  The GMAC picks
 * up the new link parameters itself; this only traces the GPCR register
 * when SK_DEBUG is enabled (expands to nothing otherwise).
 */
static void
sk_marv_miibus_statchg(struct ifnet *ifp)
{
	DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n",
	    SK_YU_READ_2(((struct sk_if_softc *)ifp->if_softc),
	    YUKON_GPCR)));
}
562
563 static uint32_t
564 sk_xmac_hash(void *addr)
565 {
566 uint32_t crc;
567
568 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
569 crc = ~crc & ((1<< SK_HASH_BITS) - 1);
570 DPRINTFN(2,("multicast hash for %s is %x\n", ether_sprintf(addr),crc));
571 return crc;
572 }
573
574 static uint32_t
575 sk_yukon_hash(void *addr)
576 {
577 uint32_t crc;
578
579 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
580 crc &= ((1 << SK_HASH_BITS) - 1);
581 DPRINTFN(2,("multicast hash for %s is %x\n", ether_sprintf(addr),crc));
582 return crc;
583 }
584
/*
 * Program one perfect-filter slot of the XMAC receive filter with a
 * 6-byte MAC address, written as three 16-bit register stores.
 *
 * NOTE(review): the address bytes are accessed through uint16_t casts,
 * which assumes 2-byte alignment of the caller's buffer and host
 * byte-order matching what the XMAC expects -- confirm on strict-
 * alignment platforms.
 */
static void
sk_setfilt(struct sk_if_softc *sc_if, void *addrv, int slot)
{
	char *addr = addrv;
	int base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(uint16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(uint16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(uint16_t *)(&addr[4]));
}
595
/*
 * (Re)program the multicast filter for one port.
 *
 * Clears all existing perfect-filter slots / hash registers, then walks
 * the interface's multicast list: on GEnesis the first XM_RXFILT_MAX-1
 * groups go into perfect-filter slots (slot 0 is skipped; it holds the
 * station address elsewhere) and the rest into the 64-bit hash table;
 * Yukon uses the hash table only.  A multicast *range* entry (addrlo !=
 * addrhi) cannot be expressed in hardware, so the code falls back to
 * accepting all multicast via the `allmulti' label with every hash bit
 * set.  Runs under ETHER_LOCK while traversing the list.
 */
static void
sk_setmulti(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp= &sc_if->sk_ethercom.ec_if;
	uint32_t hashes[2] = { 0, 0 };
	int h = 0, i;
	struct ethercom *ec = &sc_if->sk_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t dummy[] = { 0, 0, 0, 0, 0, 0 };

	/* First, zot all the existing filters. */
	switch (sc->sk_type) {
	case SK_GENESIS:
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (void *)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept all multicast: set every hash bit. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;	/* perfect-filter slots start at 1 */
		/* First find the tail of the list. */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				/* Address range: hardware can't filter it. */
				ifp->if_flags |= IFF_ALLMULTI;
				ETHER_UNLOCK(ec);
				goto allmulti;
			}
			DPRINTFN(2,("multicast address %s\n",
			    ether_sprintf(enm->enm_addrlo)));
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if, enm->enm_addrlo, i);
				i++;
			}
			else {
				switch (sc->sk_type) {
				case SK_GENESIS:
					h = sk_xmac_hash(enm->enm_addrlo);
					break;
				case SK_YUKON:
				case SK_YUKON_LITE:
				case SK_YUKON_LP:
					h = sk_yukon_hash(enm->enm_addrlo);
					break;
				}
				if (h < 32)
					hashes[0] |= (1 << h);
				else
					hashes[1] |= (1 << (h - 32));
			}

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	/* Push the computed hash table into the MAC. */
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH |
		    XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}
}
694
695 static int
696 sk_init_rx_ring(struct sk_if_softc *sc_if)
697 {
698 struct sk_chain_data *cd = &sc_if->sk_cdata;
699 struct sk_ring_data *rd = sc_if->sk_rdata;
700 int i;
701
702 memset((char *)rd->sk_rx_ring, 0,
703 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
704
705 for (i = 0; i < SK_RX_RING_CNT; i++) {
706 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
707 if (i == (SK_RX_RING_CNT - 1)) {
708 cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[0];
709 rd->sk_rx_ring[i].sk_next =
710 htole32(SK_RX_RING_ADDR(sc_if, 0));
711 } else {
712 cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[i + 1];
713 rd->sk_rx_ring[i].sk_next =
714 htole32(SK_RX_RING_ADDR(sc_if, i+1));
715 }
716 }
717
718 for (i = 0; i < SK_RX_RING_CNT; i++) {
719 if (sk_newbuf(sc_if, i, NULL,
720 sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
721 aprint_error_dev(sc_if->sk_dev,
722 "failed alloc of %dth mbuf\n", i);
723 return ENOBUFS;
724 }
725 }
726 sc_if->sk_cdata.sk_rx_prod = 0;
727 sc_if->sk_cdata.sk_rx_cons = 0;
728
729 return 0;
730 }
731
732 static int
733 sk_init_tx_ring(struct sk_if_softc *sc_if)
734 {
735 struct sk_chain_data *cd = &sc_if->sk_cdata;
736 struct sk_ring_data *rd = sc_if->sk_rdata;
737 int i;
738
739 memset(sc_if->sk_rdata->sk_tx_ring, 0,
740 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
741
742 for (i = 0; i < SK_TX_RING_CNT; i++) {
743 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
744 if (i == (SK_TX_RING_CNT - 1)) {
745 cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[0];
746 rd->sk_tx_ring[i].sk_next =
747 htole32(SK_TX_RING_ADDR(sc_if, 0));
748 } else {
749 cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[i + 1];
750 rd->sk_tx_ring[i].sk_next =
751 htole32(SK_TX_RING_ADDR(sc_if, i+1));
752 }
753 }
754
755 sc_if->sk_cdata.sk_tx_prod = 0;
756 sc_if->sk_cdata.sk_tx_cons = 0;
757 sc_if->sk_cdata.sk_tx_cnt = 0;
758
759 SK_CDTXSYNC(sc_if, 0, SK_TX_RING_CNT,
760 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
761
762 return 0;
763 }
764
/*
 * Stock receive-ring slot `i' with a buffer.
 *
 * If `m' is NULL a new mbuf header is allocated and backed with a
 * buffer from the driver's private jumbo pool (freed via sk_jfree);
 * otherwise the caller's mbuf is recycled after resetting its length
 * and data pointer.  The descriptor's DMA address is computed as the
 * jumbo map's base address plus the buffer's offset within the jumbo
 * region.  Returns 0 on success, ENOBUFS on allocation failure.
 */
static int
sk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
	  bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct sk_chain *c;
	struct sk_rx_desc *r;

	if (m == NULL) {
		void *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			aprint_error_dev(sc_if->sk_dev,
			    "no memory for rx list -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc_if->sk_ethercom.ec_if.if_xname));
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		MEXTADD(m_new, buf, SK_JLEN, 0, sk_jfree, sc_if);

	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	/* Shift payload so the IP header lands on a natural boundary. */
	m_adj(m_new, ETHER_ALIGN);

	c = &sc_if->sk_cdata.sk_rx_chain[i];
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	/* DMA address = jumbo region base + offset of this buffer in it. */
	r->sk_data_lo = htole32(dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data
	    - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf)));
	r->sk_ctl = htole32(SK_JLEN | SK_RXSTAT);

	SK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return 0;
}
820
821 /*
822 * Memory management for jumbo frames.
823 */
824
/*
 * Allocate the per-port jumbo buffer pool: one contiguous SK_JMEM DMA
 * region, mapped into the kernel, loaded into a single DMA map, then
 * carved into SK_JSLOTS buffers of SK_JLEN bytes each tracked by the
 * free/in-use entry lists.
 *
 * Cleanup on failure is staged through `state': each successful setup
 * step bumps `state', and the error switch falls through from the
 * highest reached step to tear everything back down.
 *
 * NOTE(review): slot 0's entry is placed on the *in-use* list (`if (i)')
 * rather than the free list, so buffer 0 is never handed out --
 * presumably reserved deliberately; confirm before changing.
 *
 * Returns 0 on success, ENOBUFS on any allocation failure.
 */
static int
sk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct sk_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmatag, SK_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sk_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, SK_JMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sk_dev,
		    "can't map dma buffers (%d bytes)\n",
		    SK_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmatag, SK_JMEM, 1, SK_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
		aprint_error_dev(sc->sk_dev, "can't create dma map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
	    kva, SK_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sk_dev, "can't load dma map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc_if->sk_cdata.sk_jumbo_buf = (void *)kva;
	DPRINTFN(1,("sk_jumbo_buf = %p\n", sc_if->sk_cdata.sk_jumbo_buf));

	LIST_INIT(&sc_if->sk_jfree_listhead);
	LIST_INIT(&sc_if->sk_jinuse_listhead);
	mutex_init(&sc_if->sk_jpool_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < SK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			aprint_error_dev(sc->sk_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		if (i)
			LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
			    entry, jpool_entries);
		else
			LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead,
			    entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind from the highest completed setup step downward. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->sc_dmatag, kva, SK_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}
925
926 /*
927 * Allocate a jumbo buffer.
928 */
929 static void *
930 sk_jalloc(struct sk_if_softc *sc_if)
931 {
932 struct sk_jpool_entry *entry;
933
934 mutex_enter(&sc_if->sk_jpool_mtx);
935 entry = LIST_FIRST(&sc_if->sk_jfree_listhead);
936
937 if (entry == NULL) {
938 mutex_exit(&sc_if->sk_jpool_mtx);
939 return NULL;
940 }
941
942 LIST_REMOVE(entry, jpool_entries);
943 LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
944 mutex_exit(&sc_if->sk_jpool_mtx);
945 return sc_if->sk_cdata.sk_jslots[entry->slot];
946 }
947
948 /*
949 * Release a jumbo buffer.
950 */
/*
 * Release a jumbo buffer (mbuf external-storage free callback, as
 * registered via MEXTADD in sk_newbuf).
 *
 * Computes the buffer's slot index from its offset within the jumbo
 * region, then recycles an entry structure from the in-use list onto
 * the free list with that slot number.  Note the entry taken is simply
 * the head of the in-use list -- entries are interchangeable; only the
 * slot field matters once reassigned.  Panics on a buffer outside the
 * pool or a pool accounting mismatch.
 */
static void
sk_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct sk_jpool_entry *entry;
	struct sk_if_softc *sc;
	int i;

	/* Extract the softc struct pointer. */
	sc = (struct sk_if_softc *)arg;

	if (sc == NULL)
		panic("sk_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((vaddr_t)buf
	     - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	mutex_enter(&sc->sk_jpool_mtx);
	entry = LIST_FIRST(&sc->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);
	mutex_exit(&sc->sk_jpool_mtx);

	/* Finish freeing the mbuf that carried this buffer, if any. */
	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
}
984
985 /*
986 * Set media options.
987 */
988 static int
989 sk_ifmedia_upd(struct ifnet *ifp)
990 {
991 struct sk_if_softc *sc_if = ifp->if_softc;
992 int rc;
993
994 (void) sk_init(ifp);
995 if ((rc = mii_mediachg(&sc_if->sk_mii)) == ENXIO)
996 return 0;
997 return rc;
998 }
999
1000 static void
1001 sk_promisc(struct sk_if_softc *sc_if, int on)
1002 {
1003 struct sk_softc *sc = sc_if->sk_softc;
1004 switch (sc->sk_type) {
1005 case SK_GENESIS:
1006 if (on)
1007 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
1008 else
1009 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
1010 break;
1011 case SK_YUKON:
1012 case SK_YUKON_LITE:
1013 case SK_YUKON_LP:
1014 if (on)
1015 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
1016 YU_RCR_UFLEN | YU_RCR_MUFLEN);
1017 else
1018 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
1019 YU_RCR_UFLEN | YU_RCR_MUFLEN);
1020 break;
1021 default:
1022 aprint_error_dev(sc_if->sk_dev, "Can't set promisc for %d\n",
1023 sc->sk_type);
1024 break;
1025 }
1026 }
1027
/*
 * Interface ioctl handler, run at splnet.
 *
 * SIOCSIFFLAGS drives a small state machine on IFF_UP/IFF_RUNNING:
 * down+running stops the port, up+stopped starts it, and up+running
 * either just reprograms promiscuous mode and the multicast filter
 * (when IFF_PROMISC is the only changed flag) or does a full reinit.
 * Everything else is handed to ether_ioctl(); an ENETRESET result for
 * add/del-multicast triggers a filter reload if the port is running.
 */
static int
sk_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	int s, error = 0;

	/* DPRINTFN(2, ("sk_ioctl\n")); */

	s = splnet();

	switch (command) {

	case SIOCSIFFLAGS:
		DPRINTFN(2, ("sk_ioctl IFFLAGS\n"));
		if ((error = ifioctl_common(ifp, command, data)) != 0)
			break;
		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
		case IFF_RUNNING:
			/* Interface marked down while still running: stop. */
			sk_stop(ifp, 1);
			break;
		case IFF_UP:
			/* Interface marked up but not running: start. */
			sk_init(ifp);
			break;
		case IFF_UP | IFF_RUNNING:
			/* Only the promisc bit changed: avoid a full reinit. */
			if ((ifp->if_flags ^ sc_if->sk_if_flags) == IFF_PROMISC) {
				sk_promisc(sc_if, ifp->if_flags & IFF_PROMISC);
				sk_setmulti(sc_if);
			} else
				sk_init(ifp);
			break;
		}
		/* Remember the flags for the next delta computation. */
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;

	default:
		DPRINTFN(2, ("sk_ioctl ETHER\n"));
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			break;

		error = 0;

		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/* Multicast list changed: reload the filter. */
			sk_setmulti(sc_if);
			DPRINTFN(2, ("sk_ioctl setmulti called\n"));
		}
		break;
	}

	splx(s);
	return error;
}
1082
/*
 * Program the hardware interrupt moderation timer from the value in
 * sc->sk_int_mod (microseconds) and (re)start it.
 */
static void
sk_update_int_mod(struct sk_softc *sc)
{
	uint32_t imtimer_ticks;

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	/* Select the tick rate for this chip generation. */
	switch (sc->sk_type) {
	case SK_GENESIS:
		imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	case SK_YUKON_EC:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	/*
	 * NOTE(review): imtimer_ticks appears unused here, but the
	 * SK_IM_USECS() macro below presumably expands to a reference
	 * to it -- confirm in if_skreg.h before cleaning it up.
	 */
	aprint_verbose_dev(sc->sk_dev, "interrupt moderation is %d us\n",
	    sc->sk_int_mod);
	/* Load the timeout, choose which interrupts get moderated, start. */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF | SK_ISR_TX2_S_EOF |
	    SK_ISR_RX1_EOF | SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
	/* Any pending moderation update has now been applied. */
	sc->sk_int_mod_pending = 0;
}
1115
1116 /*
1117 * Lookup: Check the PCI vendor and device, and return a pointer to
1118 * The structure if the IDs match against our list.
1119 */
1120
1121 static const struct sk_product *
1122 sk_lookup(const struct pci_attach_args *pa)
1123 {
1124 const struct sk_product *psk;
1125
1126 for ( psk = &sk_products[0]; psk->sk_vendor != 0; psk++ ) {
1127 if (PCI_VENDOR(pa->pa_id) == psk->sk_vendor &&
1128 PCI_PRODUCT(pa->pa_id) == psk->sk_product)
1129 return psk;
1130 }
1131 return NULL;
1132 }
1133
1134 /*
1135 * Probe for a SysKonnect GEnesis chip.
1136 */
1137
1138 static int
1139 skc_probe(device_t parent, cfdata_t match, void *aux)
1140 {
1141 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1142 const struct sk_product *psk;
1143 pcireg_t subid;
1144
1145 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1146
1147 /* special-case Linksys EG1032, since rev 3 uses re(4) */
1148 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_LINKSYS &&
1149 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LINKSYS_EG1032 &&
1150 subid == SK_LINKSYS_EG1032_SUBID)
1151 return 1;
1152
1153 if ((psk = sk_lookup(pa))) {
1154 return 1;
1155 }
1156 return 0;
1157 }
1158
1159 /*
1160 * Force the GEnesis into reset, then bring it out of reset.
1161 */
1162 static void
1163 sk_reset(struct sk_softc *sc)
1164 {
1165 DPRINTFN(2, ("sk_reset\n"));
1166
1167 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1168 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1169 if (SK_YUKON_FAMILY(sc->sk_type))
1170 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1171
1172 DELAY(1000);
1173 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1174 DELAY(2);
1175 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1176 if (SK_YUKON_FAMILY(sc->sk_type))
1177 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1178
1179 DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
1180 DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
1181 CSR_READ_2(sc, SK_LINK_CTRL)));
1182
1183 if (sc->sk_type == SK_GENESIS) {
1184 /* Configure packet arbiter */
1185 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1186 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1187 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1188 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1189 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1190 }
1191
1192 /* Enable RAM interface */
1193 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1194
1195 sk_update_int_mod(sc);
1196 }
1197
1198 static int
1199 sk_probe(device_t parent, cfdata_t match, void *aux)
1200 {
1201 struct skc_attach_args *sa = aux;
1202
1203 if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
1204 return 0;
1205
1206 return 1;
1207 }
1208
1209 /*
1210 * Each XMAC chip is attached as a separate logical IP interface.
1211 * Single port cards will have only one logical interface of course.
1212 */
1213 static void
1214 sk_attach(device_t parent, device_t self, void *aux)
1215 {
1216 struct sk_if_softc *sc_if = device_private(self);
1217 struct mii_data *mii = &sc_if->sk_mii;
1218 struct sk_softc *sc = device_private(parent);
1219 struct skc_attach_args *sa = aux;
1220 struct sk_txmap_entry *entry;
1221 struct ifnet *ifp;
1222 bus_dma_segment_t seg;
1223 bus_dmamap_t dmamap;
1224 prop_data_t data;
1225 void *kva;
1226 int i, rseg;
1227 int mii_flags = 0;
1228
1229 aprint_naive("\n");
1230
1231 sc_if->sk_dev = self;
1232 sc_if->sk_port = sa->skc_port;
1233 sc_if->sk_softc = sc;
1234 sc->sk_if[sa->skc_port] = sc_if;
1235
1236 if (sa->skc_port == SK_PORT_A)
1237 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1238 if (sa->skc_port == SK_PORT_B)
1239 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1240
1241 DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));
1242
1243 /*
1244 * Get station address for this interface. Note that
1245 * dual port cards actually come with three station
1246 * addresses: one for each port, plus an extra. The
1247 * extra one is used by the SysKonnect driver software
1248 * as a 'virtual' station address for when both ports
1249 * are operating in failover mode. Currently we don't
1250 * use this extra address.
1251 */
1252 data = prop_dictionary_get(device_properties(self), "mac-address");
1253 if (data != NULL) {
1254 /*
1255 * Try to get the station address from device properties
1256 * first, in case the ROM is missing.
1257 */
1258 KASSERT(prop_object_type(data) == PROP_TYPE_DATA);
1259 KASSERT(prop_data_size(data) == ETHER_ADDR_LEN);
1260 memcpy(sc_if->sk_enaddr, prop_data_data_nocopy(data),
1261 ETHER_ADDR_LEN);
1262 } else
1263 for (i = 0; i < ETHER_ADDR_LEN; i++)
1264 sc_if->sk_enaddr[i] = sk_win_read_1(sc,
1265 SK_MAC0_0 + (sa->skc_port * 8) + i);
1266
1267 aprint_normal(": Ethernet address %s\n",
1268 ether_sprintf(sc_if->sk_enaddr));
1269
1270 /*
1271 * Set up RAM buffer addresses. The NIC will have a certain
1272 * amount of SRAM on it, somewhere between 512K and 2MB. We
1273 * need to divide this up a) between the transmitter and
1274 * receiver and b) between the two XMACs, if this is a
1275 * dual port NIC. Our algorithm is to divide up the memory
1276 * evenly so that everyone gets a fair share.
1277 */
1278 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1279 uint32_t chunk, val;
1280
1281 chunk = sc->sk_ramsize / 2;
1282 val = sc->sk_rboff / sizeof(uint64_t);
1283 sc_if->sk_rx_ramstart = val;
1284 val += (chunk / sizeof(uint64_t));
1285 sc_if->sk_rx_ramend = val - 1;
1286 sc_if->sk_tx_ramstart = val;
1287 val += (chunk / sizeof(uint64_t));
1288 sc_if->sk_tx_ramend = val - 1;
1289 } else {
1290 uint32_t chunk, val;
1291
1292 chunk = sc->sk_ramsize / 4;
1293 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1294 sizeof(uint64_t);
1295 sc_if->sk_rx_ramstart = val;
1296 val += (chunk / sizeof(uint64_t));
1297 sc_if->sk_rx_ramend = val - 1;
1298 sc_if->sk_tx_ramstart = val;
1299 val += (chunk / sizeof(uint64_t));
1300 sc_if->sk_tx_ramend = val - 1;
1301 }
1302
1303 DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
1304 " tx_ramstart=%#x tx_ramend=%#x\n",
1305 sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
1306 sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
1307
1308 /* Read and save PHY type and set PHY address */
1309 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1310 switch (sc_if->sk_phytype) {
1311 case SK_PHYTYPE_XMAC:
1312 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1313 break;
1314 case SK_PHYTYPE_BCOM:
1315 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1316 break;
1317 case SK_PHYTYPE_MARV_COPPER:
1318 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1319 break;
1320 default:
1321 aprint_error_dev(sc->sk_dev, "unsupported PHY type: %d\n",
1322 sc_if->sk_phytype);
1323 return;
1324 }
1325
1326 /* Allocate the descriptor queues. */
1327 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct sk_ring_data),
1328 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1329 aprint_error_dev(sc->sk_dev, "can't alloc rx buffers\n");
1330 goto fail;
1331 }
1332 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
1333 sizeof(struct sk_ring_data), &kva, BUS_DMA_NOWAIT)) {
1334 aprint_error_dev(sc_if->sk_dev,
1335 "can't map dma buffers (%lu bytes)\n",
1336 (u_long) sizeof(struct sk_ring_data));
1337 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1338 goto fail;
1339 }
1340 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct sk_ring_data), 1,
1341 sizeof(struct sk_ring_data), 0, BUS_DMA_NOWAIT,
1342 &sc_if->sk_ring_map)) {
1343 aprint_error_dev(sc_if->sk_dev, "can't create dma map\n");
1344 bus_dmamem_unmap(sc->sc_dmatag, kva,
1345 sizeof(struct sk_ring_data));
1346 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1347 goto fail;
1348 }
1349 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
1350 sizeof(struct sk_ring_data), NULL, BUS_DMA_NOWAIT)) {
1351 aprint_error_dev(sc_if->sk_dev, "can't load dma map\n");
1352 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
1353 bus_dmamem_unmap(sc->sc_dmatag, kva,
1354 sizeof(struct sk_ring_data));
1355 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1356 goto fail;
1357 }
1358
1359 for (i = 0; i < SK_RX_RING_CNT; i++)
1360 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
1361
1362 SIMPLEQ_INIT(&sc_if->sk_txmap_head);
1363 for (i = 0; i < SK_TX_RING_CNT; i++) {
1364 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
1365
1366 if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
1367 SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap)) {
1368 aprint_error_dev(sc_if->sk_dev,
1369 "Can't create TX dmamap\n");
1370 bus_dmamap_unload(sc->sc_dmatag, sc_if->sk_ring_map);
1371 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
1372 bus_dmamem_unmap(sc->sc_dmatag, kva,
1373 sizeof(struct sk_ring_data));
1374 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1375 goto fail;
1376 }
1377
1378 entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
1379 if (!entry) {
1380 aprint_error_dev(sc_if->sk_dev,
1381 "Can't alloc txmap entry\n");
1382 bus_dmamap_destroy(sc->sc_dmatag, dmamap);
1383 bus_dmamap_unload(sc->sc_dmatag, sc_if->sk_ring_map);
1384 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
1385 bus_dmamem_unmap(sc->sc_dmatag, kva,
1386 sizeof(struct sk_ring_data));
1387 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1388 goto fail;
1389 }
1390 entry->dmamap = dmamap;
1391 SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
1392 }
1393
1394 sc_if->sk_rdata = (struct sk_ring_data *)kva;
1395 memset(sc_if->sk_rdata, 0, sizeof(struct sk_ring_data));
1396
1397 ifp = &sc_if->sk_ethercom.ec_if;
1398 /* Try to allocate memory for jumbo buffers. */
1399 if (sk_alloc_jumbo_mem(sc_if)) {
1400 aprint_error("%s: jumbo buffer allocation failed\n",
1401 ifp->if_xname);
1402 goto fail;
1403 }
1404 sc_if->sk_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU
1405 | ETHERCAP_JUMBO_MTU;
1406
1407 ifp->if_softc = sc_if;
1408 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1409 ifp->if_ioctl = sk_ioctl;
1410 ifp->if_start = sk_start;
1411 ifp->if_stop = sk_stop;
1412 ifp->if_init = sk_init;
1413 ifp->if_watchdog = sk_watchdog;
1414 ifp->if_capabilities = 0;
1415 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1416 IFQ_SET_READY(&ifp->if_snd);
1417 strlcpy(ifp->if_xname, device_xname(sc_if->sk_dev), IFNAMSIZ);
1418
1419 /*
1420 * Do miibus setup.
1421 */
1422 switch (sc->sk_type) {
1423 case SK_GENESIS:
1424 sk_unreset_xmac(sc_if);
1425 break;
1426 case SK_YUKON:
1427 case SK_YUKON_LITE:
1428 case SK_YUKON_LP:
1429 sk_unreset_yukon(sc_if);
1430 break;
1431 default:
1432 aprint_error_dev(sc->sk_dev, "unknown device type %d\n",
1433 sc->sk_type);
1434 goto fail;
1435 }
1436
1437 DPRINTFN(2, ("sk_attach: 1\n"));
1438
1439 mii->mii_ifp = ifp;
1440 switch (sc->sk_type) {
1441 case SK_GENESIS:
1442 mii->mii_readreg = sk_xmac_miibus_readreg;
1443 mii->mii_writereg = sk_xmac_miibus_writereg;
1444 mii->mii_statchg = sk_xmac_miibus_statchg;
1445 break;
1446 case SK_YUKON:
1447 case SK_YUKON_LITE:
1448 case SK_YUKON_LP:
1449 mii->mii_readreg = sk_marv_miibus_readreg;
1450 mii->mii_writereg = sk_marv_miibus_writereg;
1451 mii->mii_statchg = sk_marv_miibus_statchg;
1452 mii_flags = MIIF_DOPAUSE;
1453 break;
1454 }
1455
1456 sc_if->sk_ethercom.ec_mii = mii;
1457 ifmedia_init(&mii->mii_media, 0, sk_ifmedia_upd, ether_mediastatus);
1458 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
1459 MII_OFFSET_ANY, mii_flags);
1460 if (LIST_EMPTY(&mii->mii_phys)) {
1461 aprint_error_dev(sc_if->sk_dev, "no PHY found!\n");
1462 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
1463 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
1464 } else
1465 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
1466
1467 callout_init(&sc_if->sk_tick_ch, 0);
1468 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
1469
1470 DPRINTFN(2, ("sk_attach: 1\n"));
1471
1472 /*
1473 * Call MI attach routines.
1474 */
1475 if_attach(ifp);
1476 if_deferred_start_init(ifp, NULL);
1477
1478 ether_ifattach(ifp, sc_if->sk_enaddr);
1479
1480 if (sc->rnd_attached++ == 0) {
1481 rnd_attach_source(&sc->rnd_source, device_xname(sc->sk_dev),
1482 RND_TYPE_NET, RND_FLAG_DEFAULT);
1483 }
1484
1485 if (pmf_device_register(self, NULL, sk_resume))
1486 pmf_class_network_register(self, ifp);
1487 else
1488 aprint_error_dev(self, "couldn't establish power handler\n");
1489
1490 DPRINTFN(2, ("sk_attach: end\n"));
1491
1492 return;
1493
1494 fail:
1495 sc->sk_if[sa->skc_port] = NULL;
1496 }
1497
1498 static int
1499 skcprint(void *aux, const char *pnp)
1500 {
1501 struct skc_attach_args *sa = aux;
1502
1503 if (pnp)
1504 aprint_normal("sk port %c at %s",
1505 (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
1506 else
1507 aprint_normal(" port %c",
1508 (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
1509 return UNCONF;
1510 }
1511
1512 /*
1513 * Attach the interface. Allocate softc structures, do ifmedia
1514 * setup and ethernet/BPF attach.
1515 */
1516 static void
1517 skc_attach(device_t parent, device_t self, void *aux)
1518 {
1519 struct sk_softc *sc = device_private(self);
1520 struct pci_attach_args *pa = aux;
1521 struct skc_attach_args skca;
1522 pci_chipset_tag_t pc = pa->pa_pc;
1523 #ifndef SK_USEIOSPACE
1524 pcireg_t memtype;
1525 #endif
1526 pci_intr_handle_t ih;
1527 const char *intrstr = NULL;
1528 bus_addr_t iobase;
1529 bus_size_t iosize;
1530 int rc, sk_nodenum;
1531 uint32_t command;
1532 const char *revstr;
1533 const struct sysctlnode *node;
1534 char intrbuf[PCI_INTRSTR_LEN];
1535
1536 sc->sk_dev = self;
1537 aprint_naive("\n");
1538
1539 DPRINTFN(2, ("begin skc_attach\n"));
1540
1541 /*
1542 * Handle power management nonsense.
1543 */
1544 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;
1545
1546 if (command == 0x01) {
1547 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
1548 if (command & SK_PSTATE_MASK) {
1549 uint32_t xiobase, membase, irq;
1550
1551 /* Save important PCI config data. */
1552 xiobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
1553 membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
1554 irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);
1555
1556 /* Reset the power state. */
1557 aprint_normal_dev(sc->sk_dev,
1558 "chip is in D%d power mode -- setting to D0\n",
1559 command & SK_PSTATE_MASK);
1560 command &= 0xFFFFFFFC;
1561 pci_conf_write(pc, pa->pa_tag,
1562 SK_PCI_PWRMGMTCTRL, command);
1563
1564 /* Restore PCI config data. */
1565 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, xiobase);
1566 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
1567 pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
1568 }
1569 }
1570
1571 /*
1572 * The firmware might have configured the interface to revert the
1573 * byte order in all descriptors. Make that undone.
1574 */
1575 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_OURREG2);
1576 if (command & SK_REG2_REV_DESC)
1577 pci_conf_write(pc, pa->pa_tag, SK_PCI_OURREG2,
1578 command & ~SK_REG2_REV_DESC);
1579
1580 /*
1581 * Map control/status registers.
1582 */
1583 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1584 command |= PCI_COMMAND_IO_ENABLE |
1585 PCI_COMMAND_MEM_ENABLE |
1586 PCI_COMMAND_MASTER_ENABLE;
1587 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
1588 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1589
1590 #ifdef SK_USEIOSPACE
1591 if (!(command & PCI_COMMAND_IO_ENABLE)) {
1592 aprint_error(": failed to enable I/O ports!\n");
1593 return;
1594 }
1595 /*
1596 * Map control/status registers.
1597 */
1598 if (pci_mapreg_map(pa, SK_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
1599 &sc->sk_btag, &sc->sk_bhandle,
1600 &iobase, &iosize)) {
1601 aprint_error(": can't find i/o space\n");
1602 return;
1603 }
1604 #else
1605 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
1606 aprint_error(": failed to enable memory mapping!\n");
1607 return;
1608 }
1609 memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
1610 switch (memtype) {
1611 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1612 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1613 if (pci_mapreg_map(pa, SK_PCI_LOMEM,
1614 memtype, 0, &sc->sk_btag, &sc->sk_bhandle,
1615 &iobase, &iosize) == 0)
1616 break;
1617 /* FALLTHROUGH */
1618 default:
1619 aprint_error_dev(sc->sk_dev, "can't find mem space\n");
1620 return;
1621 }
1622
1623 DPRINTFN(2, ("skc_attach: iobase=%#" PRIxPADDR ", iosize=%zx\n",
1624 iobase, iosize));
1625 #endif
1626 sc->sc_dmatag = pa->pa_dmat;
1627
1628 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1629 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);
1630
1631 /* bail out here if chip is not recognized */
1632 if ( sc->sk_type != SK_GENESIS && ! SK_YUKON_FAMILY(sc->sk_type)) {
1633 aprint_error_dev(sc->sk_dev, "unknown chip type\n");
1634 goto fail;
1635 }
1636 if (SK_IS_YUKON2(sc)) {
1637 aprint_error_dev(sc->sk_dev,
1638 "Does not support Yukon2--try msk(4).\n");
1639 goto fail;
1640 }
1641 DPRINTFN(2, ("skc_attach: allocate interrupt\n"));
1642
1643 /* Allocate interrupt */
1644 if (pci_intr_map(pa, &ih)) {
1645 aprint_error(": couldn't map interrupt\n");
1646 goto fail;
1647 }
1648
1649 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1650 sc->sk_intrhand = pci_intr_establish_xname(pc, ih, IPL_NET, sk_intr,
1651 sc, device_xname(sc->sk_dev));
1652 if (sc->sk_intrhand == NULL) {
1653 aprint_error(": couldn't establish interrupt");
1654 if (intrstr != NULL)
1655 aprint_error(" at %s", intrstr);
1656 aprint_error("\n");
1657 goto fail;
1658 }
1659 aprint_normal(": %s\n", intrstr);
1660
1661 /* Reset the adapter. */
1662 sk_reset(sc);
1663
1664 /* Read and save vital product data from EEPROM. */
1665 sk_vpd_read(sc);
1666
1667 if (sc->sk_type == SK_GENESIS) {
1668 uint8_t val = sk_win_read_1(sc, SK_EPROM0);
1669 /* Read and save RAM size and RAMbuffer offset */
1670 switch (val) {
1671 case SK_RAMSIZE_512K_64:
1672 sc->sk_ramsize = 0x80000;
1673 sc->sk_rboff = SK_RBOFF_0;
1674 break;
1675 case SK_RAMSIZE_1024K_64:
1676 sc->sk_ramsize = 0x100000;
1677 sc->sk_rboff = SK_RBOFF_80000;
1678 break;
1679 case SK_RAMSIZE_1024K_128:
1680 sc->sk_ramsize = 0x100000;
1681 sc->sk_rboff = SK_RBOFF_0;
1682 break;
1683 case SK_RAMSIZE_2048K_128:
1684 sc->sk_ramsize = 0x200000;
1685 sc->sk_rboff = SK_RBOFF_0;
1686 break;
1687 default:
1688 aprint_error_dev(sc->sk_dev, "unknown ram size: %d\n",
1689 val);
1690 goto fail_1;
1691 break;
1692 }
1693
1694 DPRINTFN(2, ("skc_attach: ramsize=%d(%dk), rboff=%d\n",
1695 sc->sk_ramsize, sc->sk_ramsize / 1024,
1696 sc->sk_rboff));
1697 } else {
1698 uint8_t val = sk_win_read_1(sc, SK_EPROM0);
1699 sc->sk_ramsize = ( val == 0 ) ? 0x20000 : (( val * 4 )*1024);
1700 sc->sk_rboff = SK_RBOFF_0;
1701
1702 DPRINTFN(2, ("skc_attach: ramsize=%dk (%d), rboff=%d\n",
1703 sc->sk_ramsize / 1024, sc->sk_ramsize,
1704 sc->sk_rboff));
1705 }
1706
1707 /* Read and save physical media type */
1708 switch (sk_win_read_1(sc, SK_PMDTYPE)) {
1709 case SK_PMD_1000BASESX:
1710 sc->sk_pmd = IFM_1000_SX;
1711 break;
1712 case SK_PMD_1000BASELX:
1713 sc->sk_pmd = IFM_1000_LX;
1714 break;
1715 case SK_PMD_1000BASECX:
1716 sc->sk_pmd = IFM_1000_CX;
1717 break;
1718 case SK_PMD_1000BASETX:
1719 case SK_PMD_1000BASETX_ALT:
1720 sc->sk_pmd = IFM_1000_T;
1721 break;
1722 default:
1723 aprint_error_dev(sc->sk_dev, "unknown media type: 0x%x\n",
1724 sk_win_read_1(sc, SK_PMDTYPE));
1725 goto fail_1;
1726 }
1727
1728 /* determine whether to name it with vpd or just make it up */
1729 /* Marvell Yukon VPD's can freqently be bogus */
1730
1731 switch (pa->pa_id) {
1732 case PCI_ID_CODE(PCI_VENDOR_SCHNEIDERKOCH,
1733 PCI_PRODUCT_SCHNEIDERKOCH_SKNET_GE):
1734 case PCI_PRODUCT_SCHNEIDERKOCH_SK9821v2:
1735 case PCI_PRODUCT_3COM_3C940:
1736 case PCI_PRODUCT_DLINK_DGE530T:
1737 case PCI_PRODUCT_DLINK_DGE560T:
1738 case PCI_PRODUCT_DLINK_DGE560T_2:
1739 case PCI_PRODUCT_LINKSYS_EG1032:
1740 case PCI_PRODUCT_LINKSYS_EG1064:
1741 case PCI_ID_CODE(PCI_VENDOR_SCHNEIDERKOCH,
1742 PCI_PRODUCT_SCHNEIDERKOCH_SK9821v2):
1743 case PCI_ID_CODE(PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C940):
1744 case PCI_ID_CODE(PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE530T):
1745 case PCI_ID_CODE(PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560T):
1746 case PCI_ID_CODE(PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560T_2):
1747 case PCI_ID_CODE(PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032):
1748 case PCI_ID_CODE(PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1064):
1749 sc->sk_name = sc->sk_vpd_prodname;
1750 break;
1751 case PCI_ID_CODE(PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_SKNET):
1752 /* whoops yukon vpd prodname bears no resemblance to reality */
1753 switch (sc->sk_type) {
1754 case SK_GENESIS:
1755 sc->sk_name = sc->sk_vpd_prodname;
1756 break;
1757 case SK_YUKON:
1758 sc->sk_name = "Marvell Yukon Gigabit Ethernet";
1759 break;
1760 case SK_YUKON_LITE:
1761 sc->sk_name = "Marvell Yukon Lite Gigabit Ethernet";
1762 break;
1763 case SK_YUKON_LP:
1764 sc->sk_name = "Marvell Yukon LP Gigabit Ethernet";
1765 break;
1766 default:
1767 sc->sk_name = "Marvell Yukon (Unknown) Gigabit Ethernet";
1768 }
1769
1770 /* Yukon Lite Rev A0 needs special test, from sk98lin driver */
1771
1772 if ( sc->sk_type == SK_YUKON ) {
1773 uint32_t flashaddr;
1774 uint8_t testbyte;
1775
1776 flashaddr = sk_win_read_4(sc, SK_EP_ADDR);
1777
1778 /* test Flash-Address Register */
1779 sk_win_write_1(sc, SK_EP_ADDR+3, 0xff);
1780 testbyte = sk_win_read_1(sc, SK_EP_ADDR+3);
1781
1782 if (testbyte != 0) {
1783 /* this is yukon lite Rev. A0 */
1784 sc->sk_type = SK_YUKON_LITE;
1785 sc->sk_rev = SK_YUKON_LITE_REV_A0;
1786 /* restore Flash-Address Register */
1787 sk_win_write_4(sc, SK_EP_ADDR, flashaddr);
1788 }
1789 }
1790 break;
1791 case PCI_ID_CODE(PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_BELKIN):
1792 sc->sk_name = sc->sk_vpd_prodname;
1793 break;
1794 default:
1795 sc->sk_name = "Unknown Marvell";
1796 }
1797
1798
1799 if ( sc->sk_type == SK_YUKON_LITE ) {
1800 switch (sc->sk_rev) {
1801 case SK_YUKON_LITE_REV_A0:
1802 revstr = "A0";
1803 break;
1804 case SK_YUKON_LITE_REV_A1:
1805 revstr = "A1";
1806 break;
1807 case SK_YUKON_LITE_REV_A3:
1808 revstr = "A3";
1809 break;
1810 default:
1811 revstr = "";
1812 }
1813 } else {
1814 revstr = "";
1815 }
1816
1817 /* Announce the product name. */
1818 aprint_normal_dev(sc->sk_dev, "%s rev. %s(0x%x)\n",
1819 sc->sk_name, revstr, sc->sk_rev);
1820
1821 skca.skc_port = SK_PORT_A;
1822 (void)config_found(sc->sk_dev, &skca, skcprint);
1823
1824 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1825 skca.skc_port = SK_PORT_B;
1826 (void)config_found(sc->sk_dev, &skca, skcprint);
1827 }
1828
1829 /* Turn on the 'driver is loaded' LED. */
1830 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1831
1832 /* skc sysctl setup */
1833
1834 sc->sk_int_mod = SK_IM_DEFAULT;
1835 sc->sk_int_mod_pending = 0;
1836
1837 if ((rc = sysctl_createv(&sc->sk_clog, 0, NULL, &node,
1838 0, CTLTYPE_NODE, device_xname(sc->sk_dev),
1839 SYSCTL_DESCR("skc per-controller controls"),
1840 NULL, 0, NULL, 0, CTL_HW, sk_root_num, CTL_CREATE,
1841 CTL_EOL)) != 0) {
1842 aprint_normal_dev(sc->sk_dev, "couldn't create sysctl node\n");
1843 goto fail_1;
1844 }
1845
1846 sk_nodenum = node->sysctl_num;
1847
1848 /* interrupt moderation time in usecs */
1849 if ((rc = sysctl_createv(&sc->sk_clog, 0, NULL, &node,
1850 CTLFLAG_READWRITE,
1851 CTLTYPE_INT, "int_mod",
1852 SYSCTL_DESCR("sk interrupt moderation timer"),
1853 sk_sysctl_handler, 0, (void *)sc,
1854 0, CTL_HW, sk_root_num, sk_nodenum, CTL_CREATE,
1855 CTL_EOL)) != 0) {
1856 aprint_normal_dev(sc->sk_dev,
1857 "couldn't create int_mod sysctl node\n");
1858 goto fail_1;
1859 }
1860
1861 if (!pmf_device_register(self, skc_suspend, skc_resume))
1862 aprint_error_dev(self, "couldn't establish power handler\n");
1863
1864 return;
1865
1866 fail_1:
1867 pci_intr_disestablish(pc, sc->sk_intrhand);
1868 fail:
1869 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, iosize);
1870 }
1871
1872 static int
1873 sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, uint32_t *txidx)
1874 {
1875 struct sk_softc *sc = sc_if->sk_softc;
1876 struct sk_tx_desc *f = NULL;
1877 uint32_t frag, cur, cnt = 0, sk_ctl;
1878 int i;
1879 struct sk_txmap_entry *entry;
1880 bus_dmamap_t txmap;
1881
1882 DPRINTFN(3, ("sk_encap\n"));
1883
1884 entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
1885 if (entry == NULL) {
1886 DPRINTFN(3, ("sk_encap: no txmap available\n"));
1887 return ENOBUFS;
1888 }
1889 txmap = entry->dmamap;
1890
1891 cur = frag = *txidx;
1892
1893 #ifdef SK_DEBUG
1894 if (skdebug >= 3)
1895 sk_dump_mbuf(m_head);
1896 #endif
1897
1898 /*
1899 * Start packing the mbufs in this chain into
1900 * the fragment pointers. Stop when we run out
1901 * of fragments or hit the end of the mbuf chain.
1902 */
1903 if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
1904 BUS_DMA_NOWAIT)) {
1905 DPRINTFN(1, ("sk_encap: dmamap failed\n"));
1906 return ENOBUFS;
1907 }
1908
1909 DPRINTFN(3, ("sk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));
1910
1911 /* Sync the DMA map. */
1912 bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
1913 BUS_DMASYNC_PREWRITE);
1914
1915 for (i = 0; i < txmap->dm_nsegs; i++) {
1916 if ((SK_TX_RING_CNT - (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) {
1917 DPRINTFN(1, ("sk_encap: too few descriptors free\n"));
1918 return ENOBUFS;
1919 }
1920 f = &sc_if->sk_rdata->sk_tx_ring[frag];
1921 f->sk_data_lo = htole32(txmap->dm_segs[i].ds_addr);
1922 sk_ctl = txmap->dm_segs[i].ds_len | SK_OPCODE_DEFAULT;
1923 if (cnt == 0)
1924 sk_ctl |= SK_TXCTL_FIRSTFRAG;
1925 else
1926 sk_ctl |= SK_TXCTL_OWN;
1927 f->sk_ctl = htole32(sk_ctl);
1928 cur = frag;
1929 SK_INC(frag, SK_TX_RING_CNT);
1930 cnt++;
1931 }
1932
1933 sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1934 SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
1935
1936 sc_if->sk_cdata.sk_tx_map[cur] = entry;
1937 sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1938 htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
1939
1940 /* Sync descriptors before handing to chip */
1941 SK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
1942 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1943
1944 sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |=
1945 htole32(SK_TXCTL_OWN);
1946
1947 /* Sync first descriptor to hand it off */
1948 SK_CDTXSYNC(sc_if, *txidx, 1,
1949 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1950
1951 sc_if->sk_cdata.sk_tx_cnt += cnt;
1952
1953 #ifdef SK_DEBUG
1954 if (skdebug >= 3) {
1955 struct sk_tx_desc *desc;
1956 uint32_t idx;
1957 for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) {
1958 desc = &sc_if->sk_rdata->sk_tx_ring[idx];
1959 sk_dump_txdesc(desc, idx);
1960 }
1961 }
1962 #endif
1963
1964 *txidx = frag;
1965
1966 DPRINTFN(3, ("sk_encap: completed successfully\n"));
1967
1968 return 0;
1969 }
1970
1971 static void
1972 sk_start(struct ifnet *ifp)
1973 {
1974 struct sk_if_softc *sc_if = ifp->if_softc;
1975 struct sk_softc *sc = sc_if->sk_softc;
1976 struct mbuf *m_head = NULL;
1977 uint32_t idx = sc_if->sk_cdata.sk_tx_prod;
1978 int pkts = 0;
1979
1980 DPRINTFN(3, ("sk_start (idx %d, tx_chain[idx] %p)\n", idx,
1981 sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf));
1982
1983 while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1984 IFQ_POLL(&ifp->if_snd, m_head);
1985 if (m_head == NULL)
1986 break;
1987
1988 /*
1989 * Pack the data into the transmit ring. If we
1990 * don't have room, set the OACTIVE flag and wait
1991 * for the NIC to drain the ring.
1992 */
1993 if (sk_encap(sc_if, m_head, &idx)) {
1994 ifp->if_flags |= IFF_OACTIVE;
1995 break;
1996 }
1997
1998 /* now we are committed to transmit the packet */
1999 IFQ_DEQUEUE(&ifp->if_snd, m_head);
2000 pkts++;
2001
2002 /*
2003 * If there's a BPF listener, bounce a copy of this frame
2004 * to him.
2005 */
2006 bpf_mtap(ifp, m_head, BPF_D_OUT);
2007 }
2008 if (pkts == 0)
2009 return;
2010
2011 /* Transmit */
2012 if (idx != sc_if->sk_cdata.sk_tx_prod) {
2013 sc_if->sk_cdata.sk_tx_prod = idx;
2014 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2015
2016 /* Set a timeout in case the chip goes out to lunch. */
2017 ifp->if_timer = 5;
2018 }
2019 }
2020
2021
2022 static void
2023 sk_watchdog(struct ifnet *ifp)
2024 {
2025 struct sk_if_softc *sc_if = ifp->if_softc;
2026
2027 /*
2028 * Reclaim first as there is a possibility of losing Tx completion
2029 * interrupts.
2030 */
2031 sk_txeof(sc_if);
2032 if (sc_if->sk_cdata.sk_tx_cnt != 0) {
2033 aprint_error_dev(sc_if->sk_dev, "watchdog timeout\n");
2034
2035 if_statinc(ifp, if_oerrors);
2036
2037 sk_init(ifp);
2038 }
2039 }
2040
#if 0	/* XXX XXX XXX UNUSED */
/*
 * Shutdown hook: stop the interface, turn off the status LED and
 * reset the controller.  Compiled out; kept for reference only.
 */
static void
sk_shutdown(void *v)
{
	struct sk_if_softc	*sc_if = (struct sk_if_softc *)v;
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->sk_ethercom.ec_if;

	DPRINTFN(2, ("sk_shutdown\n"));
	sk_stop(ifp, 1);

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	sk_reset(sc);
}
#endif
2062
/*
 * Receive completion: walk the rx ring from sk_rx_prod, handing
 * completed frames to the network stack and replenishing the ring
 * with fresh jumbo buffers.
 */
static void
sk_rxeof(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
	struct mbuf *m;
	struct sk_chain *cur_rx;
	struct sk_rx_desc *cur_desc;
	int i, cur, total_len = 0;
	uint32_t rxstat, sk_ctl;
	bus_dmamap_t dmamap;

	i = sc_if->sk_cdata.sk_rx_prod;

	DPRINTFN(3, ("sk_rxeof %d\n", i));

	for (;;) {
		cur = i;

		/* Sync the descriptor */
		SK_CDRXSYNC(sc_if, cur,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* OWN still set means the chip hasn't filled it yet. */
		sk_ctl = le32toh(sc_if->sk_rdata->sk_rx_ring[cur].sk_ctl);
		if (sk_ctl & SK_RXCTL_OWN) {
			/* Invalidate the descriptor -- it's not ready yet */
			SK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_PREREAD);
			sc_if->sk_cdata.sk_rx_prod = i;
			break;
		}

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
		cur_desc = &sc_if->sk_rdata->sk_rx_ring[cur];
		dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;

		/* Make the received data visible to the CPU. */
		bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
		    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		rxstat = le32toh(cur_desc->sk_xmac_rxstat);
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(le32toh(cur_desc->sk_ctl));

		sc_if->sk_cdata.sk_rx_map[cur] = 0;

		SK_INC(i, SK_RX_RING_CNT);

		/* Bad frame: recycle the buffer back into the ring. */
		if (rxstat & XM_RXSTAT_ERRFRAME) {
			if_statinc(ifp, if_ierrors);
			sk_newbuf(sc_if, cur, m, dmamap);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) {
			struct mbuf *m0;
			/* Include ETHER_ALIGN slop, trimmed below. */
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp);
			sk_newbuf(sc_if, cur, m, dmamap);
			if (m0 == NULL) {
				aprint_error_dev(sc_if->sk_dev, "no receive "
				    "buffers available -- packet dropped!\n");
				if_statinc(ifp, if_ierrors);
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			/* Got a fresh buffer; hand the old one up as-is. */
			m_set_rcvif(m, ifp);
			m->m_pkthdr.len = m->m_len = total_len;
		}

		/* pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
}
2144
/*
 * Transmit-completion handler: reclaim descriptors the chip has
 * finished with (SK_TXCTL_OWN clear), free the transmitted mbufs,
 * return their DMA maps to the free list, and clear IFF_OACTIVE
 * once enough ring slots are available again.
 */
static void
sk_txeof(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct sk_tx_desc *cur_tx;
	struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
	uint32_t idx, sk_ctl;
	struct sk_txmap_entry *entry;

	DPRINTFN(3, ("sk_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while (idx != sc_if->sk_cdata.sk_tx_prod) {
		SK_CDTXSYNC(sc_if, idx, 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		sk_ctl = le32toh(cur_tx->sk_ctl);
#ifdef SK_DEBUG
		if (skdebug >= 3)
			sk_dump_txdesc(cur_tx, idx);
#endif
		if (sk_ctl & SK_TXCTL_OWN) {
			/* Chip still owns this one; re-arm and stop here. */
			SK_CDTXSYNC(sc_if, idx, 1, BUS_DMASYNC_PREREAD);
			break;
		}
		/* Count a packet only on its last fragment. */
		if (sk_ctl & SK_TXCTL_LASTFRAG)
			if_statinc(ifp, if_opackets);
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			entry = sc_if->sk_cdata.sk_tx_map[idx];

			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			/* Return the DMA map to the free list for reuse. */
			SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
			    link);
			sc_if->sk_cdata.sk_tx_map[idx] = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}
	if (sc_if->sk_cdata.sk_tx_cnt == 0)
		ifp->if_timer = 0;
	else /* nudge chip to keep tx ring moving */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Allow new transmissions once at least two slots are free. */
	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}
2204
/*
 * One-second callout used while the link is down.  For BCOM PHYs the
 * work is delegated to sk_intr_bcom(); for XMAC-internal PHYs the
 * link-sync GPIO pin is polled as recommended by SysKonnect, and
 * the callout re-arms itself until link-up is confirmed.
 */
static void
sk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
	int i;

	DPRINTFN(3, ("sk_tick\n"));

	if (!(ifp->if_flags & IFF_UP))
		return;

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	if (i != 3) {
		/* Link not stable yet: poll again in one second. */
		callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	/* Reading ISR clears any pending XMAC interrupt. */
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	if (ifp->if_link_state != LINK_STATE_UP)
		callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
	else
		callout_stop(&sc_if->sk_tick_ch);
}
2249
/*
 * Service an interrupt from a Broadcom PHY attached to the XMAC:
 * ack the PHY interrupt, track link-state changes (updating the
 * link LED and sk_link), and restart autonegotiation handling via
 * the MII layer.  TX/RX are paused around the PHY access.
 */
static void
sk_intr_bcom(struct sk_if_softc *sc_if)
{
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
	uint16_t status;


	DPRINTFN(3, ("sk_intr_bcom\n"));

	/* Pause the MAC while we poke at the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB | XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	sk_xmac_miibus_readreg(sc_if->sk_dev,
	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR, &status);

	if (!(ifp->if_flags & IFF_RUNNING)) {
		/* Interface is down: just reinit the MAC and bail. */
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG | BRGPHY_ISR_AN_PR)) {
		uint16_t lstat;
		sk_xmac_miibus_readreg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS, &lstat);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link went down: renegotiate. */
			(void)mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link came up: mask further PHY interrupts. */
			sk_xmac_miibus_writereg(sc_if->sk_dev,
			    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON | SK_LINKLED_LINKSYNC_OFF |
			    SK_LINKLED_BLINK_OFF);
			mii_pollstat(mii);
		} else {
			/* Autoneg still in progress: poll again later. */
			mii_tick(mii);
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}
	}

	/* Resume MAC TX/RX. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB | XM_MMUCMD_RX_ENB);
}
2303
/*
 * Service an XMAC interrupt: on internal-PHY boards, GP0/autoneg
 * events re-arm the sk_tick link poller; TX underrun and RX overrun
 * are handled by flushing the corresponding FIFO.
 */
static void
sk_intr_xmac(struct sk_if_softc *sc_if)
{
	/* Reading ISR also acks the pending XMAC interrupts. */
	uint16_t status = SK_XM_READ_2(sc_if, XM_ISR);

	DPRINTFN(3, ("sk_intr_xmac\n"));

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick confirms stable link. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
}
2328
/*
 * Service a Yukon GMAC interrupt.  Reading SK_GMAC_ISR acks the
 * interrupt; the value itself is only kept (and printed) under
 * SK_DEBUG, hence the unusual #ifdef around the assignment.
 */
static void
sk_intr_yukon(struct sk_if_softc *sc_if)
{
#ifdef SK_DEBUG
	int status;

	status =
#endif
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(3, ("sk_intr_yukon status=%#x\n", status));
}
2341
/*
 * Main interrupt handler, shared by both ports of the controller.
 * Loops reading the interrupt source register until no enabled bits
 * remain, dispatching RX EOF, TX EOF, MAC and external-register
 * (BCOM PHY) events per port.  Returns non-zero if the interrupt
 * was ours (for the shared-interrupt framework).
 */
static int
sk_intr(void *xsc)
{
	struct sk_softc *sc = xsc;
	struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet *ifp0 = NULL, *ifp1 = NULL;
	uint32_t status;
	int claimed = 0;

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->sk_ethercom.ec_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->sk_ethercom.ec_if;

	for (;;) {
		status = CSR_READ_4(sc, SK_ISSR);
		DPRINTFN(3, ("sk_intr: status=%#x\n", status));

		/* Stop once no enabled interrupt bit remains asserted. */
		if (!(status & sc->sk_intrmask))
			break;

		claimed = 1;

		/* Handle receive interrupts first. */
		if (sc_if0 && (status & SK_ISR_RX1_EOF)) {
			sk_rxeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
			    SK_RXBMU_CLR_IRQ_EOF | SK_RXBMU_RX_START);
		}
		if (sc_if1 && (status & SK_ISR_RX2_EOF)) {
			sk_rxeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
			    SK_RXBMU_CLR_IRQ_EOF | SK_RXBMU_RX_START);
		}

		/* Then transmit interrupts. */
		if (sc_if0 && (status & SK_ISR_TX1_S_EOF)) {
			sk_txeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
			    SK_TXBMU_CLR_IRQ_EOF);
		}
		if (sc_if1 && (status & SK_ISR_TX2_S_EOF)) {
			sk_txeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
			    SK_TXBMU_CLR_IRQ_EOF);
		}

		/* Then MAC interrupts. */
		if (sc_if0 && (status & SK_ISR_MAC1) &&
		    (ifp0->if_flags & IFF_RUNNING)) {
			if (sc->sk_type == SK_GENESIS)
				sk_intr_xmac(sc_if0);
			else
				sk_intr_yukon(sc_if0);
		}

		if (sc_if1 && (status & SK_ISR_MAC2) &&
		    (ifp1->if_flags & IFF_RUNNING)) {
			if (sc->sk_type == SK_GENESIS)
				sk_intr_xmac(sc_if1);
			else
				sk_intr_yukon(sc_if1);

		}

		/* External-register interrupt: BCOM PHY on either port. */
		if (status & SK_ISR_EXTERNAL_REG) {
			if (sc_if0 != NULL &&
			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if0);

			if (sc_if1 != NULL &&
			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if1);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Kick the transmit path in case packets queued up meanwhile. */
	if (ifp0 != NULL)
		if_schedule_deferred_start(ifp0);
	if (ifp1 != NULL)
		if_schedule_deferred_start(ifp1);

	KASSERT(sc->rnd_attached > 0);
	rnd_add_uint32(&sc->rnd_source, status);

	/* Apply a sysctl-requested interrupt-moderation change. */
	if (sc->sk_int_mod_pending)
		sk_update_int_mod(sc);

	return claimed;
}
2434
/*
 * Bring the XMAC out of reset and, for boards with an external
 * Broadcom PHY (1000baseTX via GMII), release the PHY reset and
 * apply vendor-provided register workarounds for early BCM5400
 * silicon.
 */
static void
sk_unreset_xmac(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	/*
	 * BCM5400 workaround table: (register, value) pairs supplied by
	 * the vendor; their meaning is undocumented (see comment below).
	 */
	static const struct sk_bcom_hack bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	DPRINTFN(1, ("sk_unreset_xmac\n"));

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int i = 0;
		uint32_t val;
		uint16_t phyval;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0 | SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2 | SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Soft-reset the PHY, then mask most PHY interrupts. */
		sk_xmac_miibus_writereg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, MII_BMCR, BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		sk_xmac_miibus_readreg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, 0x03, &phyval);
		if (phyval == 0x6041) {
			while (bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if->sk_dev,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}
}
2502
/*
 * Initialize the XMAC for operation: program the station address,
 * promiscuous/broadcast filtering, frame handling modes, interrupt
 * masks, the multicast filter, and the MAC arbiter timings that
 * depend on the XMAC II revision.
 */
static void
sk_init_xmac(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;

	sk_unreset_xmac(sc_if);

	/*
	 * Set station address.
	 * NOTE(review): these casts assume sk_enaddr is 16-bit aligned
	 * and that the XMAC wants the bytes in host 16-bit word order --
	 * presumably fine on the platforms this driver runs on; confirm
	 * before changing.
	 */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(uint16_t *)(&sc_if->sk_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(uint16_t *)(&sc_if->sk_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(uint16_t *)(&sc_if->sk_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_PROMISC)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	else
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);

	if (ifp->if_flags & IFF_BROADCAST)
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	else
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transfering frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES |
	    XM_MODE_RX_GIANTS | XM_MODE_RX_RUNTS | XM_MODE_RX_CRCERRS |
	    XM_MODE_RX_INRANGELEN);

	/* Accept oversized frames only when the MTU requires it. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		/* External PHY: mask all XMAC interrupts. */
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter (timing values depend on chip rev) */
	switch (sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET | SK_MACARBCTL_FASTOE_OFF);

	/* Assume link up until sk_tick/sk_intr_bcom learns otherwise. */
	sc_if->sk_link = 1;
}
2607
/*
 * Release the Yukon GMAC and GPHY from reset, selecting fiber or
 * copper PHY mode from the board's PMD type.  On Yukon Lite rev A3+
 * the PHY additionally hangs off GPIO bit 9 and must be released
 * there first.
 */
static void
sk_unreset_yukon(struct sk_if_softc *sc_if)
{
	uint32_t /*mac, */phy;
	struct sk_softc *sc;

	DPRINTFN(1, ("sk_unreset_yukon: start: sk_csr=%#x\n",
		     CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	sc = sc_if->sk_softc;
	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/* Take PHY out of reset. */
		sk_win_write_4(sc, SK_GPIO,
			(sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9)
			& ~SK_GPIO_DAT9);
	}

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);

	DPRINTFN(6, ("sk_init_yukon: 1\n"));

	/* Pulse the GMAC reset: set, clear, set again with delays. */
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);


	DPRINTFN(6, ("sk_init_yukon: 2\n"));

	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	/* Select fiber vs. copper mode from the board's media type. */
	switch (sc_if->sk_softc->sk_pmd) {
	case IFM_1000_SX:
	case IFM_1000_LX:
		phy |= SK_GPHY_FIBER;
		break;

	case IFM_1000_CX:
	case IFM_1000_T:
		phy |= SK_GPHY_COPPER;
		break;
	}

	DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));

	/* Release the GPHY, then the GMAC, with the chosen PHY config. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
		     SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));
}
2666
/*
 * Initialize the Yukon GMAC: unreset it, clear the MIB counters,
 * program receive/transmit/serial mode registers, load the station
 * address (unicast and second source address), set the multicast
 * filter, and enable the RX/TX MAC FIFOs.
 */
static void
sk_init_yukon(struct sk_if_softc *sc_if)
{
	uint16_t reg;
	int i;

	DPRINTFN(1, ("sk_init_yukon: start\n"));
	sk_unreset_yukon(sc_if);

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("sk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("sk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("sk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("sk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("sk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_UFLEN | YU_RCR_MUFLEN |
		      YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("sk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a));

	/* serial mode register */
	DPRINTFN(6, ("sk_init_yukon: 9\n"));
	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
		      YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO |
		      YU_SMR_IPG_DATA(0x1e));

	DPRINTFN(6, ("sk_init_yukon: 10\n"));
	/* Setup Yukon's address: three 16-bit words, LSB first. */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      sc_if->sk_enaddr[i * 2] |
			      sc_if->sk_enaddr[i * 2 + 1] << 8);
	}

	/* Source Address 2 comes from the per-port MAC address window. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set multicast filter */
	DPRINTFN(6, ("sk_init_yukon: 11\n"));
	sk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("sk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

	DPRINTFN(6, ("sk_init_yukon: end\n"));
}
2746
2747 /*
2748 * Note that to properly initialize any part of the GEnesis chip,
2749 * you first have to take it out of reset mode.
2750 */
/*
 * ifnet if_init hook: bring the interface up.  Stops any current
 * activity, initializes the MAC (XMAC or Yukon), MAC FIFOs, transmit
 * arbiter, RAM buffers and BMUs, allocates the RX/TX descriptor
 * rings, programs interrupt moderation and masks, then starts the
 * receiver.  Returns 0 on success or an errno (e.g. ENOBUFS when
 * the rings cannot be populated).  Runs at splnet.
 */
static int
sk_init(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	struct mii_data *mii = &sc_if->sk_mii;
	int rc = 0, s;
	uint32_t imr, imtimer_ticks;

	DPRINTFN(1, ("sk_init\n"));

	s = splnet();

	/* Already up: nothing to do. */
	if (ifp->if_flags & IFF_RUNNING) {
		splx(s);
		return 0;
	}

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(ifp, 0);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			SK_TXLEDCTL_COUNTER_START);
	}

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	/* ENXIO from mii_mediachg just means "no PHY"; not fatal. */
	if ((rc = mii_mediachg(mii)) == ENXIO)
		rc = 0;
	else if (rc != 0)
		goto out;

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON | SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    SK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    SK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		aprint_error_dev(sc_if->sk_dev, "initialization failed: no "
		    "memory for rx buffers\n");
		sk_stop(ifp, 0);
		splx(s);
		return ENOBUFS;
	}

	if (sk_init_tx_ring(sc_if) == ENOBUFS) {
		aprint_error_dev(sc_if->sk_dev, "initialization failed: no "
		    "memory for tx buffers\n");
		sk_stop(ifp, 0);
		splx(s);
		return ENOBUFS;
	}

	/* Set interrupt moderation if changed via sysctl. */
	switch (sc->sk_type) {
	case SK_GENESIS:
		imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	case SK_YUKON_EC:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
	if (imr != SK_IM_USECS(sc->sk_int_mod)) {
		sk_win_write_4(sc, SK_IMTIMERINIT,
		    SK_IM_USECS(sc->sk_int_mod));
		aprint_verbose_dev(sc->sk_dev,
		    "interrupt moderation is %d us\n", sc->sk_int_mod);
	}

	/* Configure interrupt handling */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	/* Always listen for the BCOM PHY (external register) interrupt. */
	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	if (sc->sk_type == SK_GENESIS) {
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
		    XM_MMUCMD_TX_ENB | XM_MMUCMD_RX_ENB);
	}

	if (SK_YUKON_FAMILY(sc->sk_type)) {
		uint16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
#if 0
		/* XXX disable 100Mbps and full duplex mode? */
		reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_EN);
#endif
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}


	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	/* Start the link-state poller. */
	callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);

out:
	splx(s);
	return rc;
}
2923
/*
 * ifnet if_stop hook: halt the interface.  Stops the link poller,
 * puts the PHY/MAC back into reset, takes the RAM buffers and BMUs
 * offline, turns off the LEDs, masks this port's interrupts, and
 * frees every mbuf still sitting in the RX/TX rings.
 */
static void
sk_stop(struct ifnet *ifp, int disable)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	int i;

	DPRINTFN(1, ("sk_stop\n"));

	callout_stop(&sc_if->sk_tick_ch);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		uint32_t val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/*
	 * Turn off various components of this interface.
	 * NOTE(review): the XM_* accesses below touch XMAC registers even
	 * on Yukon chips -- presumably harmless there, but confirm before
	 * reworking this path.
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL,
			      SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET |SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Ack and mask all XMAC interrupts. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
3002
3003 /* Power Management Framework */
3004
/*
 * PMF suspend hook for the controller: only turns off the "driver
 * is loaded" LED; skc_resume() performs the full chip reset on wake.
 */
static bool
skc_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct sk_softc *sc = device_private(dv);

	DPRINTFN(2, ("skc_suspend\n"));

	/* Turn off the driver is loaded LED */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	return true;
}
3017
/*
 * PMF resume hook for the controller: reset the chip and relight
 * the "driver is loaded" LED.
 */
static bool
skc_resume(device_t dv, const pmf_qual_t *qual)
{
	struct sk_softc *sc = device_private(dv);

	DPRINTFN(2, ("skc_resume\n"));

	sk_reset(sc);
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	return true;
}
3030
/*
 * PMF resume hook for a port.
 * NOTE(review): this calls sk_init_yukon() unconditionally -- it is
 * presumably only registered for Yukon-family ports, since a GEnesis
 * port would need sk_init_xmac() instead; confirm at the registration
 * site.
 */
static bool
sk_resume(device_t dv, const pmf_qual_t *qual)
{
	struct sk_if_softc *sc_if = device_private(dv);

	sk_init_yukon(sc_if);
	return true;
}
3039
/*
 * Autoconfiguration glue: "skc" attaches the shared controller,
 * "sk" attaches each MAC port as a child device.
 */
CFATTACH_DECL_NEW(skc, sizeof(struct sk_softc),
    skc_probe, skc_attach, NULL, NULL);

CFATTACH_DECL_NEW(sk, sizeof(struct sk_if_softc),
    sk_probe, sk_attach, NULL, NULL);
3045
3046 #ifdef SK_DEBUG
3047 static void
3048 sk_dump_txdesc(struct sk_tx_desc *desc, int idx)
3049 {
3050 #define DESC_PRINT(X) \
3051 if (X) \
3052 printf("txdesc[%d]." #X "=%#x\n", \
3053 idx, X);
3054
3055 DESC_PRINT(le32toh(desc->sk_ctl));
3056 DESC_PRINT(le32toh(desc->sk_next));
3057 DESC_PRINT(le32toh(desc->sk_data_lo));
3058 DESC_PRINT(le32toh(desc->sk_data_hi));
3059 DESC_PRINT(le32toh(desc->sk_xmac_txstat));
3060 DESC_PRINT(le16toh(desc->sk_rsvd0));
3061 DESC_PRINT(le16toh(desc->sk_csum_startval));
3062 DESC_PRINT(le16toh(desc->sk_csum_startpos));
3063 DESC_PRINT(le16toh(desc->sk_csum_writepos));
3064 DESC_PRINT(le16toh(desc->sk_rsvd1));
3065 #undef PRINT
3066 }
3067
/*
 * Debug helper: hex+ASCII dump of a buffer, 16 bytes per output row.
 * Rows are padded so the ASCII column always lines up.
 */
static void
sk_dump_bytes(const char *data, int len)
{
	int row, col, nbytes;

	for (row = 0; row < len; row += 16) {
		/* Offset of this row within the buffer. */
		printf("%08x ", row);

		nbytes = len - row;
		if (nbytes > 16)
			nbytes = 16;

		/* Hex column, with an extra gap after the 8th byte. */
		for (col = 0; col < nbytes; col++) {
			printf("%02x ", data[row + col] & 0xff);
			if ((col & 0xf) == 7 && col > 0)
				printf(" ");
		}

		/* Pad short final rows so ASCII stays aligned. */
		for (col = nbytes; col < 16; col++)
			printf(" ");
		printf(" ");

		/* ASCII column: printable characters only. */
		for (col = 0; col < nbytes; col++) {
			int ch = data[row + col] & 0xff;
			printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
		}

		printf("\n");

		if (nbytes < 16)
			break;
	}
}
3099
3100 static void
3101 sk_dump_mbuf(struct mbuf *m)
3102 {
3103 int count = m->m_pkthdr.len;
3104
3105 printf("m=%p, m->m_pkthdr.len=%d\n", m, m->m_pkthdr.len);
3106
3107 while (count > 0 && m) {
3108 printf("m=%p, m->m_data=%p, m->m_len=%d\n",
3109 m, m->m_data, m->m_len);
3110 sk_dump_bytes(mtod(m, char *), m->m_len);
3111
3112 count -= m->m_len;
3113 m = m->m_next;
3114 }
3115 }
3116 #endif
3117
/*
 * sysctl handler for the per-controller interrupt-moderation knob.
 * Validates the new value against [SK_IM_MIN, SK_IM_MAX], stores it
 * in the softc, and marks it pending; sk_intr() applies it to the
 * hardware on the next interrupt.
 */
static int
sk_sysctl_handler(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;
	struct sk_softc *sc;

	node = *rnode;
	sc = node.sysctl_data;
	t = sc->sk_int_mod;
	/* Let sysctl_lookup read/write the copy, not the live value. */
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* Read-only access or error: nothing further to do. */
	if (error || newp == NULL)
		return error;

	if (t < SK_IM_MIN || t > SK_IM_MAX)
		return EINVAL;

	/* update the softc with sysctl-changed value, and mark
	   for hardware update */
	sc->sk_int_mod = t;
	sc->sk_int_mod_pending = 1;
	return 0;
}
3142
3143 /*
3144 * Set up sysctl(3) MIB, hw.sk.* - Individual controllers will be
3145 * set up in skc_attach()
3146 */
3147 SYSCTL_SETUP(sysctl_sk, "sysctl sk subtree setup")
3148 {
3149 int rc;
3150 const struct sysctlnode *node;
3151
3152 if ((rc = sysctl_createv(clog, 0, NULL, &node,
3153 0, CTLTYPE_NODE, "sk",
3154 SYSCTL_DESCR("sk interface controls"),
3155 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
3156 goto err;
3157 }
3158
3159 sk_root_num = node->sysctl_num;
3160 return;
3161
3162 err:
3163 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
3164 }
3165