/* $NetBSD: if_msk.c,v 1.10.6.1 2007/11/06 23:29:02 matt Exp $ */
2 /* $OpenBSD: if_msk.c,v 1.42 2007/01/17 02:43:02 krw Exp $ */
3
4 /*
5 * Copyright (c) 1997, 1998, 1999, 2000
6 * Bill Paul <wpaul (at) ctr.columbia.edu>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Bill Paul.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
36 */
37
38 /*
39 * Copyright (c) 2003 Nathan L. Binkert <binkertn (at) umich.edu>
40 *
41 * Permission to use, copy, modify, and distribute this software for any
42 * purpose with or without fee is hereby granted, provided that the above
43 * copyright notice and this permission notice appear in all copies.
44 *
45 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
46 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
47 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
48 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
49 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
50 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
51 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
52 */
53
54 #include <sys/cdefs.h>
55
56 #include "bpfilter.h"
57 #include "rnd.h"
58
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/sockio.h>
62 #include <sys/mbuf.h>
63 #include <sys/malloc.h>
64 #include <sys/kernel.h>
65 #include <sys/socket.h>
66 #include <sys/device.h>
67 #include <sys/queue.h>
68 #include <sys/callout.h>
69 #include <sys/sysctl.h>
70 #include <sys/endian.h>
71 #ifdef __NetBSD__
72 #define letoh16 htole16
73 #define letoh32 htole32
74 #endif
75
76 #include <net/if.h>
77 #include <net/if_dl.h>
78 #include <net/if_types.h>
79
80 #include <net/if_media.h>
81
82 #if NBPFILTER > 0
83 #include <net/bpf.h>
84 #endif
85 #if NRND > 0
86 #include <sys/rnd.h>
87 #endif
88
89 #include <dev/mii/mii.h>
90 #include <dev/mii/miivar.h>
91 #include <dev/mii/brgphyreg.h>
92
93 #include <dev/pci/pcireg.h>
94 #include <dev/pci/pcivar.h>
95 #include <dev/pci/pcidevs.h>
96
97 #include <dev/pci/if_skreg.h>
98 #include <dev/pci/if_mskvar.h>
99
100 int mskc_probe(struct device *, struct cfdata *, void *);
101 void mskc_attach(struct device *, struct device *self, void *aux);
102 void mskc_shutdown(void *);
103 int msk_probe(struct device *, struct cfdata *, void *);
104 void msk_attach(struct device *, struct device *self, void *aux);
105 int mskcprint(void *, const char *);
106 int msk_intr(void *);
107 void msk_intr_yukon(struct sk_if_softc *);
108 __inline int msk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
109 void msk_rxeof(struct sk_if_softc *, u_int16_t, u_int32_t);
110 void msk_txeof(struct sk_if_softc *, int);
111 int msk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
112 void msk_start(struct ifnet *);
113 int msk_ioctl(struct ifnet *, u_long, void *);
114 int msk_init(struct ifnet *);
115 void msk_init_yukon(struct sk_if_softc *);
116 void msk_stop(struct ifnet *, int);
117 void msk_watchdog(struct ifnet *);
118 int msk_ifmedia_upd(struct ifnet *);
119 void msk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
120 void msk_reset(struct sk_softc *);
121 int msk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t);
122 int msk_alloc_jumbo_mem(struct sk_if_softc *);
123 void *msk_jalloc(struct sk_if_softc *);
124 void msk_jfree(struct mbuf *, void *, size_t, void *);
125 int msk_init_rx_ring(struct sk_if_softc *);
126 int msk_init_tx_ring(struct sk_if_softc *);
127
128 void msk_update_int_mod(struct sk_softc *);
129
130 int msk_miibus_readreg(struct device *, int, int);
131 void msk_miibus_writereg(struct device *, int, int, int);
132 void msk_miibus_statchg(struct device *);
133
134 void msk_setfilt(struct sk_if_softc *, void *, int);
135 void msk_setmulti(struct sk_if_softc *);
136 void msk_setpromisc(struct sk_if_softc *);
137 void msk_tick(void *);
138
139 /* #define MSK_DEBUG 1 */
140 #ifdef MSK_DEBUG
141 #define DPRINTF(x) if (mskdebug) printf x
142 #define DPRINTFN(n,x) if (mskdebug >= (n)) printf x
143 int mskdebug = MSK_DEBUG;
144
145 void msk_dump_txdesc(struct msk_tx_desc *, int);
146 void msk_dump_mbuf(struct mbuf *);
147 void msk_dump_bytes(const char *, int);
148 #else
149 #define DPRINTF(x)
150 #define DPRINTFN(n,x)
151 #endif
152
153 static int msk_sysctl_handler(SYSCTLFN_PROTO);
154 static int msk_root_num;
155
156 /* supported device vendors */
157 static const struct msk_product {
158 pci_vendor_id_t msk_vendor;
159 pci_product_id_t msk_product;
160 } msk_products[] = {
161 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE550SX },
162 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560SX },
163 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560T },
164 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_1 },
165 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C032 },
166 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C033 },
167 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C034 },
168 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C036 },
169 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C042 },
170 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_CO55 },
171 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8035 },
172 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8036 },
173 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8038 },
174 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8039 },
175 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8050 },
176 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8052 },
177 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8053 },
178 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8055 },
179 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8056 },
180 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8021CU },
181 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8021X },
182 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8022CU },
183 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8022X },
184 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8061CU },
185 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8061X },
186 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8062CU },
187 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8062X },
188 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9SXX },
189 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9E21 }
190 };
191
192 static inline u_int32_t
193 sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
194 {
195 return CSR_READ_4(sc, reg);
196 }
197
198 static inline u_int16_t
199 sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
200 {
201 return CSR_READ_2(sc, reg);
202 }
203
204 static inline u_int8_t
205 sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
206 {
207 return CSR_READ_1(sc, reg);
208 }
209
/* Write a 32-bit chip window register (thin wrapper over CSR_WRITE_4). */
static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}
215
/* Write a 16-bit chip window register (thin wrapper over CSR_WRITE_2). */
static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}
221
/* Write an 8-bit chip window register (thin wrapper over CSR_WRITE_1). */
static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}
227
/*
 * MII read-register method: start an SMI read cycle on the Yukon GMAC
 * and busy-wait (up to SK_TIMEOUT * 1us) for the result to become valid.
 * Returns the register value, or 0 if the PHY never signalled READ_VALID.
 */
int
msk_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	/* Kick off the SMI read cycle for this PHY address/register. */
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* Poll until the controller reports the read data is valid. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		aprint_error("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		/* 0 is indistinguishable from a real register value of 0. */
		return (0);
	}

	DPRINTFN(9, ("msk_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	/* Fetch the data latched by the completed SMI cycle. */
	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("msk_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return (val);
}
261
/*
 * MII write-register method: load the data register, start an SMI write
 * cycle, and busy-wait (up to SK_TIMEOUT * 1us) for BUSY to clear.
 * A timeout is reported but not returned to the caller (void interface).
 */
void
msk_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("msk_miibus_writereg phy=%d reg=%#x val=%#x\n",
	    phy, reg, val));

	/* Data must be loaded before the control register starts the cycle. */
	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	/* Wait for the controller to finish the SMI write. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		aprint_error("%s: phy write timed out\n",
		    sc_if->sk_dev.dv_xname);
}
284
/*
 * MII status-change callback: mirror the negotiated (or forced) media
 * settings into the GMAC General Purpose Control Register.  When media
 * is forced (not IFM_AUTO), speed/duplex/flow-control auto-negotiation
 * results are overridden with the *_DIS bits plus explicit settings.
 */
void
msk_miibus_statchg(struct device *dev)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
	int gpcr;

	/* Keep only the TX/RX enable bits; everything else is rebuilt. */
	gpcr = SK_YU_READ_2(sc_if, YUKON_GPCR);
	gpcr &= (YU_GPCR_TXEN | YU_GPCR_RXEN);

	if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) {
		/* Set speed. */
		gpcr |= YU_GPCR_SPEED_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			gpcr |= (YU_GPCR_GIG | YU_GPCR_SPEED);
			break;
		case IFM_100_TX:
			gpcr |= YU_GPCR_SPEED;
			break;
		}
		/* Anything else (e.g. 10baseT) leaves both speed bits clear. */

		/* Set duplex. */
		gpcr |= YU_GPCR_DPLX_DIS;
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
			gpcr |= YU_GPCR_DUPLEX;

		/* Disable flow control. */
		gpcr |= YU_GPCR_FCTL_DIS;
		gpcr |= (YU_GPCR_FCTL_TX_DIS | YU_GPCR_FCTL_RX_DIS);
	}

	SK_YU_WRITE_2(sc_if, YUKON_GPCR, gpcr);

	DPRINTFN(9, ("msk_miibus_statchg: gpcr=%x\n",
	    SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
}
326
327 #define HASH_BITS 6
328
/*
 * Program one XMAC receive-filter slot with a 6-byte station address,
 * written as three 16-bit halves.
 *
 * NOTE(review): these are XM_* (XMAC/GEnesis) registers, but the rest of
 * this driver talks to the Yukon GMAC -- this helper looks vestigial;
 * verify it has a caller.  Also assumes `addrv` is at least 2-byte
 * aligned for the u_int16_t loads -- TODO confirm on strict-alignment
 * platforms.
 */
void
msk_setfilt(struct sk_if_softc *sc_if, void *addrv, int slot)
{
	char *addr = addrv;
	int base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
}
339
/*
 * Rebuild the GMAC multicast hash filter from the interface's multicast
 * list.  PROMISC disables both unicast and multicast filtering entirely;
 * ALLMULTI (or an address-range entry in the list) opens the hash filter
 * to everything.  Otherwise each address is hashed (low HASH_BITS bits of
 * the big-endian CRC32) into the 64-bit hashes[] bitmap.
 */
void
msk_setmulti(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
	u_int32_t hashes[2] = { 0, 0 };
	int h;
	struct ethercom *ec = &sc_if->sk_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int16_t reg;

	/* First, zot all the existing filters. */
	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);


	/* Now program new ones. */
	reg = SK_YU_READ_2(sc_if, YUKON_RCR);
	reg |= YU_RCR_UFLEN;
allmulti:
	/* Re-entered from below when an address range forces ALLMULTI. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			reg &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			/* Accept all multicast: set every hash bit. */
			hashes[0] = 0xFFFFFFFF;
			hashes[1] = 0xFFFFFFFF;
		}
	} else {
		/* First find the tail of the list. */
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/*
			 * A range of addresses (lo != hi) can't be expressed
			 * in the hash filter; fall back to ALLMULTI.
			 */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}
			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    ((1 << HASH_BITS) - 1);
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		reg |= YU_RCR_MUFLEN;
	}

	/* Load the 64-bit hash bitmap into the four 16-bit MCAH registers. */
	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_RCR, reg);
}
396
397 void
398 msk_setpromisc(struct sk_if_softc *sc_if)
399 {
400 struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
401
402 if (ifp->if_flags & IFF_PROMISC)
403 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
404 YU_RCR_UFLEN | YU_RCR_MUFLEN);
405 else
406 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
407 YU_RCR_UFLEN | YU_RCR_MUFLEN);
408 }
409
410 int
411 msk_init_rx_ring(struct sk_if_softc *sc_if)
412 {
413 struct msk_chain_data *cd = &sc_if->sk_cdata;
414 struct msk_ring_data *rd = sc_if->sk_rdata;
415 int i, nexti;
416
417 bzero((char *)rd->sk_rx_ring,
418 sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
419
420 for (i = 0; i < MSK_RX_RING_CNT; i++) {
421 cd->sk_rx_chain[i].sk_le = &rd->sk_rx_ring[i];
422 if (i == (MSK_RX_RING_CNT - 1))
423 nexti = 0;
424 else
425 nexti = i + 1;
426 cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
427 }
428
429 for (i = 0; i < MSK_RX_RING_CNT; i++) {
430 if (msk_newbuf(sc_if, i, NULL,
431 sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
432 aprint_error("%s: failed alloc of %dth mbuf\n",
433 sc_if->sk_dev.dv_xname, i);
434 return (ENOBUFS);
435 }
436 }
437
438 sc_if->sk_cdata.sk_rx_prod = MSK_RX_RING_CNT - 1;
439 sc_if->sk_cdata.sk_rx_cons = 0;
440
441 return (0);
442 }
443
444 int
445 msk_init_tx_ring(struct sk_if_softc *sc_if)
446 {
447 struct sk_softc *sc = sc_if->sk_softc;
448 struct msk_chain_data *cd = &sc_if->sk_cdata;
449 struct msk_ring_data *rd = sc_if->sk_rdata;
450 bus_dmamap_t dmamap;
451 struct sk_txmap_entry *entry;
452 int i, nexti;
453
454 bzero((char *)sc_if->sk_rdata->sk_tx_ring,
455 sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
456
457 SIMPLEQ_INIT(&sc_if->sk_txmap_head);
458 for (i = 0; i < MSK_TX_RING_CNT; i++) {
459 cd->sk_tx_chain[i].sk_le = &rd->sk_tx_ring[i];
460 if (i == (MSK_TX_RING_CNT - 1))
461 nexti = 0;
462 else
463 nexti = i + 1;
464 cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];
465
466 if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
467 SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
468 return (ENOBUFS);
469
470 entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
471 if (!entry) {
472 bus_dmamap_destroy(sc->sc_dmatag, dmamap);
473 return (ENOBUFS);
474 }
475 entry->dmamap = dmamap;
476 SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
477 }
478
479 sc_if->sk_cdata.sk_tx_prod = 0;
480 sc_if->sk_cdata.sk_tx_cons = 0;
481 sc_if->sk_cdata.sk_tx_cnt = 0;
482
483 MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT,
484 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
485
486 return (0);
487 }
488
/*
 * Load receive ring slot `i` with a buffer.  If `m` is NULL a new mbuf
 * header is allocated and a jumbo buffer from the per-interface pool is
 * attached to it; otherwise the caller's mbuf is recycled in place.
 * The descriptor's DMA address is computed as the buffer's offset within
 * the jumbo arena plus the arena's loaded DMA address (`dmamap` is
 * expected to be the jumbo-arena map -- see msk_init_rx_ring).
 * Returns 0 on success, ENOBUFS if mbuf or jumbo allocation fails.
 */
int
msk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
	   bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct sk_chain *c;
	struct msk_rx_desc *r;

	if (m == NULL) {
		void *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = msk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc_if->sk_ethercom.ec_if.if_xname));
			return (ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		MEXTADD(m_new, buf, SK_JLEN, 0, msk_jfree, sc_if);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	/* Align the payload start for the Ethernet header. */
	m_adj(m_new, ETHER_ALIGN);

	c = &sc_if->sk_cdata.sk_rx_chain[i];
	r = c->sk_le;
	c->sk_mbuf = m_new;
	/* DMA address = arena base + offset of m_data within the arena. */
	r->sk_addr = htole32(dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data
	    - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf)));
	r->sk_len = htole16(SK_JLEN);
	r->sk_ctl = 0;
	/* Hand the descriptor to the hardware last. */
	r->sk_opcode = SK_Y2_RXOPC_PACKET | SK_Y2_RXOPC_OWN;

	MSK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return (0);
}
542
543 /*
544 * Memory management for jumbo frames.
545 */
546
/*
 * Allocate, map and DMA-load one contiguous arena (MSK_JMEM bytes) for
 * jumbo receive buffers, then carve it into MSK_JSLOTS slots of SK_JLEN
 * bytes tracked by the free/in-use jpool lists.  The `state` variable
 * records how far setup got so the error path can unwind exactly the
 * resources acquired (cases fall through deliberately).
 * Returns 0 on success or ENOBUFS.
 */
int
msk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct sk_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmatag, MSK_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error(": can't alloc rx buffers");
		return (ENOBUFS);
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, MSK_JMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error(": can't map dma buffers (%d bytes)", MSK_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmatag, MSK_JMEM, 1, MSK_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
		aprint_error(": can't create dma map");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
	    kva, MSK_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error(": can't load dma map");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc_if->sk_cdata.sk_jumbo_buf = (void *)kva;
	DPRINTFN(1,("msk_jumbo_buf = %p\n",
	    (void *)sc_if->sk_cdata.sk_jumbo_buf));

	LIST_INIT(&sc_if->sk_jfree_listhead);
	LIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < MSK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/*
			 * NOTE(review): sk_jpool_entry structs already
			 * linked on sk_jfree_listhead are not freed on
			 * this path -- possible leak; verify.
			 */
			sc_if->sk_cdata.sk_jumbo_buf = NULL;
			aprint_error(": no memory for jumbo buffer queue!");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind in reverse order of acquisition. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->sc_dmatag, kva, MSK_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return (error);
}
637
638 /*
639 * Allocate a jumbo buffer.
640 */
641 void *
642 msk_jalloc(struct sk_if_softc *sc_if)
643 {
644 struct sk_jpool_entry *entry;
645
646 entry = LIST_FIRST(&sc_if->sk_jfree_listhead);
647
648 if (entry == NULL)
649 return (NULL);
650
651 LIST_REMOVE(entry, jpool_entries);
652 LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
653 return (sc_if->sk_cdata.sk_jslots[entry->slot]);
654 }
655
/*
 * Release a jumbo buffer.  Installed via MEXTADD as the mbuf external-
 * storage free callback, so the signature is fixed by that interface.
 * Recomputes the arena slot from the buffer address, moves a tracking
 * entry from the in-use list back to the free list (under splvm, since
 * this can run from interrupt context), and returns the mbuf itself to
 * the mbuf pool cache.
 */
void
msk_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct sk_jpool_entry *entry;
	struct sk_if_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct sk_if_softc *)arg;

	if (sc == NULL)
		panic("msk_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf
	    - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= MSK_JSLOTS))
		panic("msk_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	/*
	 * Entries are interchangeable: take any in-use entry and retag it
	 * with the slot being freed.
	 */
	entry = LIST_FIRST(&sc->sk_jinuse_listhead);
	if (entry == NULL)
		panic("msk_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);

	/* Return the mbuf header to the pool cache. */
	if (__predict_true(m != NULL))
		pool_cache_put(&mbpool_cache, m);
	splx(s);
}
691
692 /*
693 * Set media options.
694 */
695 int
696 msk_ifmedia_upd(struct ifnet *ifp)
697 {
698 struct sk_if_softc *sc_if = ifp->if_softc;
699
700 mii_mediachg(&sc_if->sk_mii);
701 return (0);
702 }
703
704 /*
705 * Report current media status.
706 */
707 void
708 msk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
709 {
710 struct sk_if_softc *sc_if = ifp->if_softc;
711
712 mii_pollstat(&sc_if->sk_mii);
713 ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
714 ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
715 }
716
717 int
718 msk_ioctl(struct ifnet *ifp, u_long command, void *data)
719 {
720 struct sk_if_softc *sc_if = ifp->if_softc;
721 struct ifreq *ifr = (struct ifreq *) data;
722 struct mii_data *mii;
723 int s, error = 0;
724
725 s = splnet();
726
727 switch(command) {
728 case SIOCSIFMTU:
729 if (ifr->ifr_mtu < ETHERMIN)
730 return EINVAL;
731 else if (sc_if->sk_softc->sk_type != SK_YUKON_FE) {
732 if (ifr->ifr_mtu > SK_JUMBO_MTU)
733 error = EINVAL;
734 } else if (ifr->ifr_mtu > ETHERMTU)
735 error = EINVAL;
736 ifp->if_mtu = ifr->ifr_mtu;
737 break;
738 case SIOCGIFMEDIA:
739 case SIOCSIFMEDIA:
740 DPRINTFN(2,("msk_ioctl: SIOC[GS]IFMEDIA\n"));
741 mii = &sc_if->sk_mii;
742 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
743 DPRINTFN(2,("msk_ioctl: SIOC[GS]IFMEDIA done\n"));
744 break;
745 default:
746 DPRINTFN(2, ("msk_ioctl ETHER\n"));
747 error = ether_ioctl(ifp, command, data);
748
749 if (error == ENETRESET) {
750 /*
751 * Multicast list has changed; set the hardware
752 * filter accordingly.
753 */
754 if (ifp->if_flags & IFF_RUNNING)
755 msk_setmulti(sc_if);
756 error = 0;
757 }
758 break;
759 }
760
761 splx(s);
762 return (error);
763 }
764
/*
 * Program the interrupt-moderation timer from sc->sk_int_mod (us) and
 * (re)start it, then clear the pending-update flag.
 */
void
msk_update_int_mod(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks;

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_EC_U:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	case SK_YUKON_FE:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE;
		break;
	case SK_YUKON_XL:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_XL;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	/*
	 * NOTE(review): imtimer_ticks appears unused below; presumably
	 * the SK_IM_USECS() macro expands to reference it -- verify in
	 * if_skreg.h before removing.
	 */
	aprint_verbose("%s: interrupt moderation is %d us\n",
	    sc->sk_dev.dv_xname, sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
	sc->sk_int_mod_pending = 0;
}
801
802 static int
803 msk_lookup(const struct pci_attach_args *pa)
804 {
805 const struct msk_product *pmsk;
806
807 for ( pmsk = &msk_products[0]; pmsk->msk_vendor != 0; pmsk++) {
808 if (PCI_VENDOR(pa->pa_id) == pmsk->msk_vendor &&
809 PCI_PRODUCT(pa->pa_id) == pmsk->msk_product)
810 return 1;
811 }
812 return 0;
813 }
814
/*
 * Autoconf match function for the controller: check the PCI vendor and
 * device IDs against our product table.
 */
int
mskc_probe(struct device *parent, struct cfdata *match,
    void *aux)
{
	return msk_lookup((struct pci_attach_args *)aux);
}
827
/*
 * Force the chip into reset, bring it back out, and reprogram global
 * state: COMA/clock gating, link resets, ASF/timers off, RAM interface
 * timing, the status-BMU ring, and interrupt moderation.  The register
 * write ordering here follows the hardware's documented bring-up
 * sequence; do not reorder.
 */
void msk_reset(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks, reg1;
	int reg;

	DPRINTFN(2, ("msk_reset\n"));

	/* Pulse software + master reset, then release both. */
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_RESET);

	DELAY(1000);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	sk_win_write_1(sc, SK_TESTCTL1, 2);

	/* COMA (power-save) mode only on Yukon-XL newer than rev A1. */
	reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1));
	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		reg1 |= (SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	else
		reg1 &= ~(SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1), reg1);

	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		sk_win_write_1(sc, SK_Y2_CLKGATE,
		    SK_Y2_CLKGATE_LINK1_GATE_DIS |
		    SK_Y2_CLKGATE_LINK2_GATE_DIS |
		    SK_Y2_CLKGATE_LINK1_CORE_DIS |
		    SK_Y2_CLKGATE_LINK2_CORE_DIS |
		    SK_Y2_CLKGATE_LINK1_PCI_DIS | SK_Y2_CLKGATE_LINK2_PCI_DIS);
	else
		sk_win_write_1(sc, SK_Y2_CLKGATE, 0);

	/* Reset both links (second link registers live SK_WIN_LEN above). */
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_SET);
	DELAY(1000);
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_CLEAR);

	sk_win_write_1(sc, SK_TESTCTL1, 1);

	DPRINTFN(2, ("msk_reset: sk_csr=%x\n", CSR_READ_1(sc, SK_CSR)));
	DPRINTFN(2, ("msk_reset: sk_link_ctrl=%x\n",
	    CSR_READ_2(sc, SK_LINK_CTRL)));

	/* Disable ASF */
	CSR_WRITE_1(sc, SK_Y2_ASF_CSR, SK_Y2_ASF_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_ASF_OFF);

	/* Clear I2C IRQ noise */
	CSR_WRITE_4(sc, SK_I2CHWIRQ, 1);

	/* Disable hardware timer */
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_STOP);
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_IRQ_CLEAR);

	/* Disable descriptor polling */
	CSR_WRITE_4(sc, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);

	/* Disable time stamps */
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_STOP);
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_IRQ_CLEAR);

	/* Enable RAM interface */
	sk_win_write_1(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
	for (reg = SK_TO0;reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg, 36);
	/* Same for the second port's RAM interface block. */
	sk_win_write_1(sc, SK_RAMCTL + (SK_WIN_LEN / 2), SK_RAMCTL_UNRESET);
	for (reg = SK_TO0;reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg + (SK_WIN_LEN / 2), 36);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_EC_U:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	case SK_YUKON_FE:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE;
		break;
	case SK_YUKON_XL:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_XL;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}

	/* Reset status ring. */
	bzero((char *)sc->sk_status_ring,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
	sc->sk_status_idx = 0;

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_RESET);
	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_UNRESET);

	/* Point the status BMU at the DMA-loaded status ring. */
	sk_win_write_2(sc, SK_STAT_BMU_LIDX, MSK_STATUS_RING_CNT - 1);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRLO,
	    sc->sk_status_map->dm_segs[0].ds_addr);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRHI,
	    (u_int64_t)sc->sk_status_map->dm_segs[0].ds_addr >> 32);
	/* Watermarks differ per chip workaround flags. */
	if ((sc->sk_workaround & SK_STAT_BMU_FIFOIWM) != 0) {
		sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH,
		    SK_STAT_BMU_TXTHIDX_MSK);
		sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 0x21);
		sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM, 0x07);
	} else {
		sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH, 0x000a);
		sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 0x10);
		sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM,
		    ((sc->sk_workaround & SK_WA_4109) != 0) ? 0x10 : 0x04);
		sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT,
		    0x0190); /* 3.2us on Yukon-EC */
	}

#if 0
	sk_win_write_4(sc, SK_Y2_LEV_ITIMERINIT, SK_IM_USECS(100));
#endif
	sk_win_write_4(sc, SK_Y2_TX_ITIMERINIT, SK_IM_USECS(1000));

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_ON);

	sk_win_write_1(sc, SK_Y2_LEV_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_TX_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_ISR_ITIMERCTL, SK_IMCTL_START);

	msk_update_int_mod(sc);
}
964
965 int
966 msk_probe(struct device *parent, struct cfdata *match,
967 void *aux)
968 {
969 struct skc_attach_args *sa = aux;
970
971 if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
972 return (0);
973
974 switch (sa->skc_type) {
975 case SK_YUKON_XL:
976 case SK_YUKON_EC_U:
977 case SK_YUKON_EC:
978 case SK_YUKON_FE:
979 return (1);
980 }
981
982 return (0);
983 }
984
985 /*
986 * Each XMAC chip is attached as a separate logical IP interface.
987 * Single port cards will have only one logical interface of course.
988 */
989 void
990 msk_attach(struct device *parent, struct device *self, void *aux)
991 {
992 struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
993 struct sk_softc *sc = (struct sk_softc *)parent;
994 struct skc_attach_args *sa = aux;
995 struct ifnet *ifp;
996 void *kva;
997 bus_dma_segment_t seg;
998 int i, rseg;
999 u_int32_t chunk, val;
1000
1001 sc_if->sk_port = sa->skc_port;
1002 sc_if->sk_softc = sc;
1003 sc->sk_if[sa->skc_port] = sc_if;
1004
1005 DPRINTFN(2, ("begin msk_attach: port=%d\n", sc_if->sk_port));
1006
1007 /*
1008 * Get station address for this interface. Note that
1009 * dual port cards actually come with three station
1010 * addresses: one for each port, plus an extra. The
1011 * extra one is used by the SysKonnect driver software
1012 * as a 'virtual' station address for when both ports
1013 * are operating in failover mode. Currently we don't
1014 * use this extra address.
1015 */
1016 for (i = 0; i < ETHER_ADDR_LEN; i++)
1017 sc_if->sk_enaddr[i] =
1018 sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);
1019
1020 aprint_normal(": Ethernet address %s\n",
1021 ether_sprintf(sc_if->sk_enaddr));
1022
1023 /*
1024 * Set up RAM buffer addresses. The NIC will have a certain
1025 * amount of SRAM on it, somewhere between 512K and 2MB. We
1026 * need to divide this up a) between the transmitter and
1027 * receiver and b) between the two XMACs, if this is a
1028 * dual port NIC. Our algorithm is to divide up the memory
1029 * evenly so that everyone gets a fair share.
1030 *
1031 * Just to be contrary, Yukon2 appears to have separate memory
1032 * for each MAC.
1033 */
1034 chunk = sc->sk_ramsize - (sc->sk_ramsize + 2) / 3;
1035 val = sc->sk_rboff / sizeof(u_int64_t);
1036 sc_if->sk_rx_ramstart = val;
1037 val += (chunk / sizeof(u_int64_t));
1038 sc_if->sk_rx_ramend = val - 1;
1039 chunk = sc->sk_ramsize - chunk;
1040 sc_if->sk_tx_ramstart = val;
1041 val += (chunk / sizeof(u_int64_t));
1042 sc_if->sk_tx_ramend = val - 1;
1043
1044 DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
1045 " tx_ramstart=%#x tx_ramend=%#x\n",
1046 sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
1047 sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
1048
1049 /* Allocate the descriptor queues. */
1050 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data),
1051 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1052 aprint_error(": can't alloc rx buffers\n");
1053 goto fail;
1054 }
1055 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
1056 sizeof(struct msk_ring_data), &kva, BUS_DMA_NOWAIT)) {
1057 aprint_error(": can't map dma buffers (%zu bytes)\n",
1058 sizeof(struct msk_ring_data));
1059 goto fail_1;
1060 }
1061 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct msk_ring_data), 1,
1062 sizeof(struct msk_ring_data), 0, BUS_DMA_NOWAIT,
1063 &sc_if->sk_ring_map)) {
1064 aprint_error(": can't create dma map\n");
1065 goto fail_2;
1066 }
1067 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
1068 sizeof(struct msk_ring_data), NULL, BUS_DMA_NOWAIT)) {
1069 aprint_error(": can't load dma map\n");
1070 goto fail_3;
1071 }
1072 sc_if->sk_rdata = (struct msk_ring_data *)kva;
1073 bzero(sc_if->sk_rdata, sizeof(struct msk_ring_data));
1074
1075 ifp = &sc_if->sk_ethercom.ec_if;
1076 /* Try to allocate memory for jumbo buffers. */
1077 if (msk_alloc_jumbo_mem(sc_if)) {
1078 aprint_error(": jumbo buffer allocation failed\n");
1079 goto fail_3;
1080 }
1081 sc_if->sk_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU
1082 | ETHERCAP_JUMBO_MTU;
1083
1084 ifp->if_softc = sc_if;
1085 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1086 ifp->if_ioctl = msk_ioctl;
1087 ifp->if_start = msk_start;
1088 ifp->if_stop = msk_stop;
1089 ifp->if_init = msk_init;
1090 ifp->if_watchdog = msk_watchdog;
1091 ifp->if_baudrate = 1000000000;
1092 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1093 IFQ_SET_READY(&ifp->if_snd);
1094 strcpy(ifp->if_xname, sc_if->sk_dev.dv_xname);
1095
1096 /*
1097 * Do miibus setup.
1098 */
1099 msk_init_yukon(sc_if);
1100
1101 DPRINTFN(2, ("msk_attach: 1\n"));
1102
1103 sc_if->sk_mii.mii_ifp = ifp;
1104 sc_if->sk_mii.mii_readreg = msk_miibus_readreg;
1105 sc_if->sk_mii.mii_writereg = msk_miibus_writereg;
1106 sc_if->sk_mii.mii_statchg = msk_miibus_statchg;
1107
1108 ifmedia_init(&sc_if->sk_mii.mii_media, 0,
1109 msk_ifmedia_upd, msk_ifmedia_sts);
1110 mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
1111 MII_OFFSET_ANY, MIIF_DOPAUSE|MIIF_FORCEANEG);
1112 if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
1113 aprint_error("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
1114 ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
1115 0, NULL);
1116 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1117 } else
1118 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);
1119
1120 callout_init(&sc_if->sk_tick_ch, 0);
1121 callout_setfunc(&sc_if->sk_tick_ch, msk_tick, sc_if);
1122 callout_schedule(&sc_if->sk_tick_ch, hz);
1123
1124 /*
1125 * Call MI attach routines.
1126 */
1127 if_attach(ifp);
1128 ether_ifattach(ifp, sc_if->sk_enaddr);
1129
1130 shutdownhook_establish(mskc_shutdown, sc);
1131
1132 #if NRND > 0
1133 rnd_attach_source(&sc->rnd_source, sc->sk_dev.dv_xname,
1134 RND_TYPE_NET, 0);
1135 #endif
1136
1137 DPRINTFN(2, ("msk_attach: end\n"));
1138 return;
1139
1140 fail_3:
1141 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
1142 fail_2:
1143 bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct msk_ring_data));
1144 fail_1:
1145 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1146 fail:
1147 sc->sk_if[sa->skc_port] = NULL;
1148 }
1149
1150 int
1151 mskcprint(void *aux, const char *pnp)
1152 {
1153 struct skc_attach_args *sa = aux;
1154
1155 if (pnp)
1156 aprint_normal("sk port %c at %s",
1157 (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
1158 else
1159 aprint_normal(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
1160 return (UNCONF);
1161 }
1162
1163 /*
1164 * Attach the interface. Allocate softc structures, do ifmedia
1165 * setup and ethernet/BPF attach.
1166 */
1167 void
1168 mskc_attach(struct device *parent, struct device *self, void *aux)
1169 {
1170 struct sk_softc *sc = (struct sk_softc *)self;
1171 struct pci_attach_args *pa = aux;
1172 struct skc_attach_args skca;
1173 pci_chipset_tag_t pc = pa->pa_pc;
1174 pcireg_t command, memtype;
1175 pci_intr_handle_t ih;
1176 const char *intrstr = NULL;
1177 bus_size_t size;
1178 int rc, sk_nodenum;
1179 u_int8_t hw, skrs;
1180 const char *revstr = NULL;
1181 const struct sysctlnode *node;
1182 void *kva;
1183 bus_dma_segment_t seg;
1184 int rseg;
1185
1186 DPRINTFN(2, ("begin mskc_attach\n"));
1187
1188 /*
1189 * Handle power management nonsense.
1190 */
1191 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;
1192
1193 if (command == 0x01) {
1194 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
1195 if (command & SK_PSTATE_MASK) {
1196 u_int32_t iobase, membase, irq;
1197
1198 /* Save important PCI config data. */
1199 iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
1200 membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
1201 irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);
1202
1203 /* Reset the power state. */
1204 aprint_normal("%s chip is in D%d power mode "
1205 "-- setting to D0\n", sc->sk_dev.dv_xname,
1206 command & SK_PSTATE_MASK);
1207 command &= 0xFFFFFFFC;
1208 pci_conf_write(pc, pa->pa_tag,
1209 SK_PCI_PWRMGMTCTRL, command);
1210
1211 /* Restore PCI config data. */
1212 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
1213 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
1214 pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
1215 }
1216 }
1217
1218 /*
1219 * Map control/status registers.
1220 */
1221
1222 memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
1223 switch (memtype) {
1224 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1225 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1226 if (pci_mapreg_map(pa, SK_PCI_LOMEM,
1227 memtype, 0, &sc->sk_btag, &sc->sk_bhandle,
1228 NULL, &size) == 0)
1229 break;
1230 default:
1231 aprint_error(": can't map mem space\n");
1232 return;
1233 }
1234
1235 sc->sc_dmatag = pa->pa_dmat;
1236
1237 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1238 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);
1239
1240 /* bail out here if chip is not recognized */
1241 if (!(SK_IS_YUKON2(sc))) {
1242 aprint_error(": unknown chip type: %d\n", sc->sk_type);
1243 goto fail_1;
1244 }
1245 DPRINTFN(2, ("mskc_attach: allocate interrupt\n"));
1246
1247 /* Allocate interrupt */
1248 if (pci_intr_map(pa, &ih)) {
1249 aprint_error(": couldn't map interrupt\n");
1250 goto fail_1;
1251 }
1252
1253 intrstr = pci_intr_string(pc, ih);
1254 sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, msk_intr, sc);
1255 if (sc->sk_intrhand == NULL) {
1256 aprint_error(": couldn't establish interrupt");
1257 if (intrstr != NULL)
1258 aprint_error(" at %s", intrstr);
1259 aprint_error("\n");
1260 goto fail_1;
1261 }
1262
1263 if (bus_dmamem_alloc(sc->sc_dmatag,
1264 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
1265 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1266 aprint_error(": can't alloc status buffers\n");
1267 goto fail_2;
1268 }
1269
1270 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
1271 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
1272 &kva, BUS_DMA_NOWAIT)) {
1273 aprint_error(": can't map dma buffers (%zu bytes)\n",
1274 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
1275 goto fail_3;
1276 }
1277 if (bus_dmamap_create(sc->sc_dmatag,
1278 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1,
1279 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 0,
1280 BUS_DMA_NOWAIT, &sc->sk_status_map)) {
1281 aprint_error(": can't create dma map\n");
1282 goto fail_4;
1283 }
1284 if (bus_dmamap_load(sc->sc_dmatag, sc->sk_status_map, kva,
1285 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
1286 NULL, BUS_DMA_NOWAIT)) {
1287 aprint_error(": can't load dma map\n");
1288 goto fail_5;
1289 }
1290 sc->sk_status_ring = (struct msk_status_desc *)kva;
1291 bzero(sc->sk_status_ring,
1292 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
1293
1294 /* Reset the adapter. */
1295 msk_reset(sc);
1296
1297 skrs = sk_win_read_1(sc, SK_EPROM0);
1298 if (skrs == 0x00)
1299 sc->sk_ramsize = 0x20000;
1300 else
1301 sc->sk_ramsize = skrs * (1<<12);
1302 sc->sk_rboff = SK_RBOFF_0;
1303
1304 DPRINTFN(2, ("mskc_attach: ramsize=%d (%dk), rboff=%d\n",
1305 sc->sk_ramsize, sc->sk_ramsize / 1024,
1306 sc->sk_rboff));
1307
1308 switch (sc->sk_type) {
1309 case SK_YUKON_XL:
1310 sc->sk_name = "Yukon-2 XL";
1311 break;
1312 case SK_YUKON_EC_U:
1313 sc->sk_name = "Yukon-2 EC Ultra";
1314 break;
1315 case SK_YUKON_EC:
1316 sc->sk_name = "Yukon-2 EC";
1317 break;
1318 case SK_YUKON_FE:
1319 sc->sk_name = "Yukon-2 FE";
1320 break;
1321 default:
1322 sc->sk_name = "Yukon (Unknown)";
1323 }
1324
1325 if (sc->sk_type == SK_YUKON_XL) {
1326 switch (sc->sk_rev) {
1327 case SK_YUKON_XL_REV_A0:
1328 sc->sk_workaround = 0;
1329 revstr = "A0";
1330 break;
1331 case SK_YUKON_XL_REV_A1:
1332 sc->sk_workaround = SK_WA_4109;
1333 revstr = "A1";
1334 break;
1335 case SK_YUKON_XL_REV_A2:
1336 sc->sk_workaround = SK_WA_4109;
1337 revstr = "A2";
1338 break;
1339 case SK_YUKON_XL_REV_A3:
1340 sc->sk_workaround = SK_WA_4109;
1341 revstr = "A3";
1342 break;
1343 default:
1344 sc->sk_workaround = 0;
1345 break;
1346 }
1347 }
1348
1349 if (sc->sk_type == SK_YUKON_EC) {
1350 switch (sc->sk_rev) {
1351 case SK_YUKON_EC_REV_A1:
1352 sc->sk_workaround = SK_WA_43_418 | SK_WA_4109;
1353 revstr = "A1";
1354 break;
1355 case SK_YUKON_EC_REV_A2:
1356 sc->sk_workaround = SK_WA_4109;
1357 revstr = "A2";
1358 break;
1359 case SK_YUKON_EC_REV_A3:
1360 sc->sk_workaround = SK_WA_4109;
1361 revstr = "A3";
1362 break;
1363 default:
1364 sc->sk_workaround = 0;
1365 break;
1366 }
1367 }
1368
1369 if (sc->sk_type == SK_YUKON_FE) {
1370 sc->sk_workaround = SK_WA_4109;
1371 switch (sc->sk_rev) {
1372 case SK_YUKON_FE_REV_A1:
1373 revstr = "A1";
1374 break;
1375 case SK_YUKON_FE_REV_A2:
1376 revstr = "A2";
1377 break;
1378 default:
1379 sc->sk_workaround = 0;
1380 break;
1381 }
1382 }
1383
1384 if (sc->sk_type == SK_YUKON_EC_U) {
1385 sc->sk_workaround = SK_WA_4109;
1386 switch (sc->sk_rev) {
1387 case SK_YUKON_EC_U_REV_A0:
1388 revstr = "A0";
1389 break;
1390 case SK_YUKON_EC_U_REV_A1:
1391 revstr = "A1";
1392 break;
1393 case SK_YUKON_EC_U_REV_B0:
1394 revstr = "B0";
1395 break;
1396 default:
1397 sc->sk_workaround = 0;
1398 break;
1399 }
1400 }
1401
1402 /* Announce the product name. */
1403 aprint_normal(", %s", sc->sk_name);
1404 if (revstr != NULL)
1405 aprint_normal(" rev. %s", revstr);
1406 aprint_normal(" (0x%x): %s\n", sc->sk_rev, intrstr);
1407
1408 sc->sk_macs = 1;
1409
1410 hw = sk_win_read_1(sc, SK_Y2_HWRES);
1411 if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) {
1412 if ((sk_win_read_1(sc, SK_Y2_CLKGATE) &
1413 SK_Y2_CLKGATE_LINK2_INACTIVE) == 0)
1414 sc->sk_macs++;
1415 }
1416
1417 skca.skc_port = SK_PORT_A;
1418 skca.skc_type = sc->sk_type;
1419 skca.skc_rev = sc->sk_rev;
1420 (void)config_found(&sc->sk_dev, &skca, mskcprint);
1421
1422 if (sc->sk_macs > 1) {
1423 skca.skc_port = SK_PORT_B;
1424 skca.skc_type = sc->sk_type;
1425 skca.skc_rev = sc->sk_rev;
1426 (void)config_found(&sc->sk_dev, &skca, mskcprint);
1427 }
1428
1429 /* Turn on the 'driver is loaded' LED. */
1430 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1431
1432 /* skc sysctl setup */
1433
1434 sc->sk_int_mod = SK_IM_DEFAULT;
1435 sc->sk_int_mod_pending = 0;
1436
1437 if ((rc = sysctl_createv(&sc->sk_clog, 0, NULL, &node,
1438 0, CTLTYPE_NODE, sc->sk_dev.dv_xname,
1439 SYSCTL_DESCR("mskc per-controller controls"),
1440 NULL, 0, NULL, 0, CTL_HW, msk_root_num, CTL_CREATE,
1441 CTL_EOL)) != 0) {
1442 aprint_normal("%s: couldn't create sysctl node\n",
1443 sc->sk_dev.dv_xname);
1444 goto fail_6;
1445 }
1446
1447 sk_nodenum = node->sysctl_num;
1448
1449 /* interrupt moderation time in usecs */
1450 if ((rc = sysctl_createv(&sc->sk_clog, 0, NULL, &node,
1451 CTLFLAG_READWRITE,
1452 CTLTYPE_INT, "int_mod",
1453 SYSCTL_DESCR("msk interrupt moderation timer"),
1454 msk_sysctl_handler, 0, sc,
1455 0, CTL_HW, msk_root_num, sk_nodenum, CTL_CREATE,
1456 CTL_EOL)) != 0) {
1457 aprint_normal("%s: couldn't create int_mod sysctl node\n",
1458 sc->sk_dev.dv_xname);
1459 goto fail_6;
1460 }
1461
1462 return;
1463
1464 fail_6:
1465 bus_dmamap_unload(sc->sc_dmatag, sc->sk_status_map);
1466 fail_5:
1467 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map);
1468 fail_4:
1469 bus_dmamem_unmap(sc->sc_dmatag, kva,
1470 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
1471 fail_3:
1472 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1473 fail_2:
1474 pci_intr_disestablish(pc, sc->sk_intrhand);
1475 fail_1:
1476 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, size);
1477 }
1478
/*
 * Encapsulate an mbuf chain into the TX descriptor ring starting at
 * index *txidx.  On success *txidx is advanced past the descriptors
 * consumed and 0 is returned; on failure ENOBUFS is returned and the
 * ring is left untouched.
 */
int
msk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct msk_tx_desc		*f = NULL;
	u_int32_t		frag, cur;
	int			i;
	struct sk_txmap_entry	*entry;
	bus_dmamap_t		txmap;

	DPRINTFN(2, ("msk_encap\n"));

	/* Grab a DMA map from the free list (put back in msk_txeof()). */
	entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("msk_encap: no txmap available\n"));
		return (ENOBUFS);
	}
	txmap = entry->dmamap;

	cur = frag = *txidx;

#ifdef MSK_DEBUG
	if (mskdebug >= 2)
		msk_dump_mbuf(m_head);
#endif

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT)) {
		DPRINTFN(2, ("msk_encap: dmamap failed\n"));
		return (ENOBUFS);
	}

	/* Keep two descriptors of slack so the ring never fills solid. */
	if (txmap->dm_nsegs > (MSK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2)) {
		DPRINTFN(2, ("msk_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmatag, txmap);
		return (ENOBUFS);
	}

	DPRINTFN(2, ("msk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Build one descriptor per DMA segment.  Only the first gets
	 * the PACKET opcode; subsequent fragments are BUFFER and are
	 * handed to the chip (OWN) immediately.  The first descriptor's
	 * OWN bit is deliberately left clear until the end so the chip
	 * never sees a half-built chain.
	 */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc_if->sk_rdata->sk_tx_ring[frag];
		f->sk_addr = htole32(txmap->dm_segs[i].ds_addr);
		f->sk_len = htole16(txmap->dm_segs[i].ds_len);
		f->sk_ctl = 0;
		if (i == 0)
			f->sk_opcode = SK_Y2_TXOPC_PACKET;
		else
			f->sk_opcode = SK_Y2_TXOPC_BUFFER | SK_Y2_TXOPC_OWN;
		cur = frag;
		SK_INC(frag, MSK_TX_RING_CNT);
	}

	/* Remember the mbuf and map on the LAST descriptor for reclaim. */
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);

	sc_if->sk_cdata.sk_tx_map[cur] = entry;
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= SK_Y2_TXCTL_LASTFRAG;

	/* Sync descriptors before handing to chip */
	MSK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Now hand the whole chain to the chip in one shot. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_opcode |= SK_Y2_TXOPC_OWN;

	/* Sync first descriptor to hand it off */
	MSK_CDTXSYNC(sc_if, *txidx, 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;

#ifdef MSK_DEBUG
	if (mskdebug >= 2) {
		struct msk_tx_desc *le;
		u_int32_t idx;
		for (idx = *txidx; idx != frag; SK_INC(idx, MSK_TX_RING_CNT)) {
			le = &sc_if->sk_rdata->sk_tx_ring[idx];
			msk_dump_txdesc(le, idx);
		}
	}
#endif

	*txidx = frag;

	DPRINTFN(2, ("msk_encap: completed successfully\n"));

	return (0);
}
1576
/*
 * Transmit start routine: drain the interface send queue into the TX
 * ring, then kick the chip's prefetch unit once for the whole batch.
 */
void
msk_start(struct ifnet *ifp)
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx = sc_if->sk_cdata.sk_tx_prod;
	int			pkts = 0;

	DPRINTFN(2, ("msk_start\n"));

	/* Loop while the producer slot is free. */
	while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		/* Peek first; only dequeue once the packet is committed. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, idx);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1627
/*
 * Watchdog timer: fires when a transmit has been pending for too long
 * (if_timer set in msk_start()).  First try to reclaim completed TX
 * descriptors in case we merely missed a completion interrupt; only if
 * work is still outstanding do we declare a timeout and reset.
 */
void
msk_watchdog(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	u_int32_t reg;
	int idx;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	/* Pick the status-BMU TX read-index register for our port. */
	if (sc_if->sk_port == SK_PORT_A)
		reg = SK_STAT_BMU_TXA1_RIDX;
	else
		reg = SK_STAT_BMU_TXA2_RIDX;

	idx = sk_win_read_2(sc_if->sk_softc, reg);
	if (sc_if->sk_cdata.sk_tx_cons != idx) {
		msk_txeof(sc_if, idx);
		if (sc_if->sk_cdata.sk_tx_cnt != 0) {
			aprint_error("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);

			ifp->if_oerrors++;

			/* XXX Resets both ports; we shouldn't do that. */
			msk_reset(sc_if->sk_softc);
			msk_init(ifp);
		}
	}
}
1658
/*
 * Shutdown hook (registered in msk_attach()): quiesce the controller
 * at system shutdown so it stops DMAing into memory.
 */
void
mskc_shutdown(void *v)
{
	struct sk_softc	*sc = v;

	DPRINTFN(2, ("msk_shutdown\n"));

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	msk_reset(sc);
}
1671
1672 __inline int
1673 msk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
1674 {
1675 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
1676 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
1677 YU_RXSTAT_JABBER)) != 0 ||
1678 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
1679 YU_RXSTAT_BYTES(stat) != len)
1680 return (0);
1681
1682 return (1);
1683 }
1684
/*
 * Handle one RX completion event from the status ring: consume the
 * descriptor at the current consumer index, validate the frame, and
 * either pass it up the stack or recycle the buffer on error.
 */
void
msk_rxeof(struct sk_if_softc *sc_if, u_int16_t len, u_int32_t rxstat)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->sk_ethercom.ec_if;
	struct mbuf		*m;
	struct sk_chain		*cur_rx;
	int			cur, total_len = len;
	bus_dmamap_t		dmamap;

	DPRINTFN(2, ("msk_rxeof\n"));

	/* Advance both consumer and producer indices by one slot. */
	cur = sc_if->sk_cdata.sk_rx_cons;
	SK_INC(sc_if->sk_cdata.sk_rx_cons, MSK_RX_RING_CNT);
	SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);

	/* Sync the descriptor */
	MSK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
	dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
	    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/* Detach the mbuf from the ring slot. */
	m = cur_rx->sk_mbuf;
	cur_rx->sk_mbuf = NULL;

	/* Bad frame: count the error and recycle the same mbuf. */
	if (total_len < SK_MIN_FRAMELEN ||
	    total_len > SK_JUMBO_FRAMELEN ||
	    msk_rxvalid(sc, rxstat, total_len) == 0) {
		ifp->if_ierrors++;
		msk_newbuf(sc_if, cur, m, dmamap);
		return;
	}

	/*
	 * Try to allocate a new jumbo buffer. If that fails, copy the
	 * packet to mbufs and put the jumbo buffer back in the ring
	 * so it can be re-used. If allocating mbufs fails, then we
	 * have to drop the packet.
	 */
	if (msk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) {
		struct mbuf		*m0;
		/* Copy out with ETHER_ALIGN slack so the payload aligns. */
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		msk_newbuf(sc_if, cur, m, dmamap);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			return;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;
	} else {
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
	}

	ifp->if_ipackets++;

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif

	/* pass it on. */
	(*ifp->if_input)(ifp, m);
}
1753
/*
 * Reclaim transmitted descriptors up to (but not including) the
 * chip-reported index 'idx': free mbufs, unload DMA maps and return
 * them to the free list, and clear OACTIVE once room is available.
 */
void
msk_txeof(struct sk_if_softc *sc_if, int idx)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct msk_tx_desc	*cur_tx;
	struct ifnet		*ifp = &sc_if->sk_ethercom.ec_if;
	u_int32_t		sk_ctl;
	struct sk_txmap_entry	*entry;
	int			cons, prog;

	DPRINTFN(2, ("msk_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	cons = sc_if->sk_cdata.sk_tx_cons;
	prog = 0;
	while (cons != idx) {
		/* Defensive stop if the count and indices disagree. */
		if (sc_if->sk_cdata.sk_tx_cnt <= 0)
			break;
		prog++;
		MSK_CDTXSYNC(sc_if, cons, 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc_if->sk_rdata->sk_tx_ring[cons];
		sk_ctl = cur_tx->sk_ctl;
#ifdef MSK_DEBUG
		if (mskdebug >= 2)
			msk_dump_txdesc(cur_tx, cons);
#endif
		/* One packet completes per LASTFRAG descriptor. */
		if (sk_ctl & SK_Y2_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		/*
		 * The mbuf and DMA map were stashed on the last
		 * descriptor of the chain by msk_encap().
		 */
		if (sc_if->sk_cdata.sk_tx_chain[cons].sk_mbuf != NULL) {
			entry = sc_if->sk_cdata.sk_tx_map[cons];

			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
					  link);
			sc_if->sk_cdata.sk_tx_map[cons] = NULL;
			m_freem(sc_if->sk_cdata.sk_tx_chain[cons].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[cons].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(cons, MSK_TX_RING_CNT);
	}
	/* Keep the watchdog armed only while work remains. */
	ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

	if (sc_if->sk_cdata.sk_tx_cnt < MSK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (prog > 0)
		sc_if->sk_cdata.sk_tx_cons = cons;
}
1811
1812 void
1813 msk_tick(void *xsc_if)
1814 {
1815 struct sk_if_softc *sc_if = xsc_if;
1816 struct mii_data *mii = &sc_if->sk_mii;
1817
1818 mii_tick(mii);
1819 callout_schedule(&sc_if->sk_tick_ch, hz);
1820 }
1821
/*
 * Service per-MAC (GMAC) interrupt conditions: acknowledge RX FIFO
 * overrun and TX FIFO underrun by poking the respective MAC FIFO
 * control/test registers.
 */
void
msk_intr_yukon(struct sk_if_softc *sc_if)
{
	u_int8_t status;

	/* Reading the GMAC interrupt source register. */
	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
	/* RX overrun */
	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
		    SK_RFCTL_RX_FIFO_OVER);
	}
	/* TX underrun */
	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
		    SK_TFCTL_TX_FIFO_UNDER);
	}

	DPRINTFN(2, ("msk_intr_yukon status=%#x\n", status));
}
1841
/*
 * Main interrupt handler.  The Yukon-2 reports all RX/TX completion
 * events for both ports through a single shared status ring; walk it
 * until we hit a descriptor the chip still owns, dispatching each
 * event to the proper port's rxeof/txeof handler.
 */
int
msk_intr(void *xsc)
{
	struct sk_softc		*sc = xsc;
	struct sk_if_softc	*sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc	*sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
	int			claimed = 0;
	u_int32_t		status;
	struct msk_status_desc	*cur_st;

	/* Not our interrupt: re-enable and bail. */
	status = CSR_READ_4(sc, SK_Y2_ISSR2);
	if (status == 0) {
		CSR_WRITE_4(sc, SK_Y2_ICR, 2);
		return (0);
	}

	status = CSR_READ_4(sc, SK_ISR);

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->sk_ethercom.ec_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->sk_ethercom.ec_if;

	/* Per-MAC conditions (FIFO over/underrun) for each running port. */
	if (sc_if0 && (status & SK_Y2_IMR_MAC1) &&
	    (ifp0->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if0);
	}

	if (sc_if1 && (status & SK_Y2_IMR_MAC2) &&
	    (ifp1->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if1);
	}

	MSK_CDSTSYNC(sc, sc->sk_status_idx,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cur_st = &sc->sk_status_ring[sc->sk_status_idx];

	/* Consume status descriptors until one still owned by us. */
	while (cur_st->sk_opcode & SK_Y2_STOPC_OWN) {
		cur_st->sk_opcode &= ~SK_Y2_STOPC_OWN;
		switch (cur_st->sk_opcode) {
		case SK_Y2_STOPC_RXSTAT:
			/* sk_link selects the port the frame arrived on. */
			msk_rxeof(sc->sk_if[cur_st->sk_link],
			    letoh16(cur_st->sk_len),
			    letoh32(cur_st->sk_status));
			/* Tell the chip about the refilled RX slot. */
			SK_IF_WRITE_2(sc->sk_if[cur_st->sk_link], 0,
			    SK_RXQ1_Y2_PREF_PUTIDX,
			    sc->sk_if[cur_st->sk_link]->sk_cdata.sk_rx_prod);
			break;
		case SK_Y2_STOPC_TXSTAT:
			/*
			 * One TXSTAT carries completion indices for
			 * both ports, packed into status/len fields.
			 */
			if (sc_if0)
				msk_txeof(sc_if0,
					  letoh32(cur_st->sk_status)
					  & SK_Y2_ST_TXA1_MSKL);
			if (sc_if1)
				msk_txeof(sc_if1,
					  ((letoh32(cur_st->sk_status)
					    & SK_Y2_ST_TXA2_MSKL)
					   >> SK_Y2_ST_TXA2_SHIFTL)
					  | ((letoh16(cur_st->sk_len) & SK_Y2_ST_TXA2_MSKH) << SK_Y2_ST_TXA2_SHIFTH));
			break;
		default:
			aprint_error("opcode=0x%x\n", cur_st->sk_opcode);
			break;
		}
		SK_INC(sc->sk_status_idx, MSK_STATUS_RING_CNT);

		MSK_CDSTSYNC(sc, sc->sk_status_idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		cur_st = &sc->sk_status_ring[sc->sk_status_idx];
	}

	/* Acknowledge the status BMU interrupt. */
	if (status & SK_Y2_IMR_BMU) {
		CSR_WRITE_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_IRQ_CLEAR);
		claimed = 1;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, SK_Y2_ICR, 2);

	/* Restart transmission if packets queued up meanwhile. */
	if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
		msk_start(ifp0);
	if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
		msk_start(ifp1);

#if NRND > 0
	if (RND_ENABLED(&sc->rnd_source))
		rnd_add_uint32(&sc->rnd_source, status);
#endif

	/* Apply a sysctl-requested interrupt moderation change. */
	if (sc->sk_int_mod_pending)
		msk_update_int_mod(sc);

	return claimed;
}
1936
/*
 * Bring the GMAC/GPHY for this port out of reset and program the MAC:
 * station address, RX/TX control, FIFO thresholds and flush masks.
 * The register-write ordering follows the hardware init sequence and
 * should not be rearranged.
 */
void
msk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t		v;
	u_int16_t		reg;
	struct sk_softc		*sc;
	int			i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("msk_init_yukon: start: sk_csr=%#x\n",
		     CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	DPRINTFN(6, ("msk_init_yukon: 1\n"));

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	DELAY(1000);

	DPRINTFN(6, ("msk_init_yukon: 2\n"));

	/* Release reset; loopback off, pause on. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	DPRINTFN(3, ("msk_init_yukon: gmac_ctrl=%#x\n",
		     SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("msk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("msk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("msk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("msk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("msk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("msk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit control register */
	SK_YU_WRITE_2(sc_if, YUKON_TCR, (0x04 << 10));

	/* transmit flow control register */
	SK_YU_WRITE_2(sc_if, YUKON_TFCR, 0xffff);

	/* transmit parameter register */
	DPRINTFN(6, ("msk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1c) | 0x04);

	/* serial mode register */
	DPRINTFN(6, ("msk_init_yukon: 9\n"));
	reg = YU_SMR_DATA_BLIND(0x1c) |
	      YU_SMR_MFL_VLAN |
	      YU_SMR_IPG_DATA(0x1e);

	/* Yukon-FE does not support jumbo frames. */
	if (sc->sk_type != SK_YUKON_FE)
		reg |= YU_SMR_MFL_JUMBO;

	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	DPRINTFN(6, ("msk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      sc_if->sk_enaddr[i * 2] |
			      sc_if->sk_enaddr[i * 2 + 1] << 8);
	}

	/* Source Address 2 comes from the per-port window registers. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	msk_setpromisc(sc_if);

	/* Set multicast filter */
	DPRINTFN(6, ("msk_init_yukon: 11\n"));
	msk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("msk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON |
	    SK_RFCTL_FIFO_FLUSH_ON);

	/* Increase flush threshould to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

#if 1
	/* Enable the transmitter and receiver. */
	SK_YU_WRITE_2(sc_if, YUKON_GPCR, YU_GPCR_TXEN | YU_GPCR_RXEN);
#endif
	DPRINTFN(6, ("msk_init_yukon: end\n"));
}
2065
2066 /*
2067 * Note that to properly initialize any part of the GEnesis chip,
2068 * you first have to take it out of reset mode.
2069 */
int
msk_init(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;
	uint32_t imr, imtimer_ticks;


	DPRINTFN(2, ("msk_init\n"));

	/* Block network interrupts while reprogramming the chip. */
	s = splnet();

	/* Cancel pending I/O and free all RX/TX buffers. */
	msk_stop(ifp,0);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	msk_init_yukon(sc_if);
	mii_mediachg(mii);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON);
#if 0
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
#endif

	/*
	 * Configure RAMbuffers: take the RX buffer out of reset, point
	 * start/read/write pointers at the beginning of this port's RAM
	 * region, set the end, then switch the buffer on.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	/* Same sequence for the TX RAM buffer, with store-and-forward on. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_ON);

	/*
	 * Configure BMUs.  NOTE(review): the magic CSR values below are
	 * carried over from the OpenBSD driver; their bit meanings are
	 * not visible here -- confirm against the Yukon-2 register spec.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_WM, 0x0600);	/* XXX ??? */

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_WM, 0x0600);	/* XXX ??? */

	/* Make sure the sync transmit queue is disabled. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET);

	/* Init descriptors; on failure undo everything done so far. */
	if (msk_init_rx_ring(sc_if) == ENOBUFS) {
		aprint_error("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(ifp,0);
		splx(s);
		return ENOBUFS;
	}

	if (msk_init_tx_ring(sc_if) == ENOBUFS) {
		aprint_error("%s: initialization failed: no "
		    "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(ifp,0);
		splx(s);
		return ENOBUFS;
	}

	/*
	 * Set interrupt moderation if changed via sysctl.
	 * NOTE(review): imtimer_ticks is assigned below but not referenced
	 * again in this function; presumably SK_IM_USECS() expands to use
	 * it -- confirm against the macro definition in the header.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_EC_U:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	case SK_YUKON_FE:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE;
		break;
	case SK_YUKON_XL:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_XL;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	/* Only rewrite the timer (and log) when the value actually changed. */
	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
	if (imr != SK_IM_USECS(sc->sk_int_mod)) {
		sk_win_write_4(sc, SK_IMTIMERINIT,
		    SK_IM_USECS(sc->sk_int_mod));
		aprint_verbose("%s: interrupt moderation is %d us\n",
		    sc->sk_dev.dv_xname, sc->sk_int_mod);
	}

	/*
	 * Initialize prefetch engine.  The CSR writes (0x1, 0x2, ..., 0x8)
	 * look like reset-set / reset-clear / enable -- confirm against
	 * the Yukon-2 documentation.  The trailing read presumably flushes
	 * the posted writes before moving on.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_LIDX, MSK_RX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRLO,
	    MSK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_RX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_LIDX, MSK_TX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRLO,
	    MSK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_TX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR);

	/* Hand the initial RX producer index to the chip. */
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
	    sc_if->sk_cdata.sk_rx_prod);

	/* Configure interrupt handling: enable this port's interrupt set. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_Y2_INTRS1;
	else
		sc->sk_intrmask |= SK_Y2_INTRS2;
	sc->sk_intrmask |= SK_Y2_IMR_BMU;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Restart the per-port tick timer stopped by msk_stop(). */
	callout_schedule(&sc_if->sk_tick_ch, hz);

	splx(s);
	return 0;
}
2209
/*
 * Stop the adapter: halt the hardware units for this port, mask its
 * interrupts and release all RX/TX mbufs and TX DMA maps.
 * NOTE(review): the 'disable' argument is not referenced in this
 * function body -- apparently accepted only to match the ifnet
 * if_stop signature; confirm against the caller.
 */
void
msk_stop(struct ifnet *ifp, int disable)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	struct sk_txmap_entry *dma;
	int i;

	DPRINTFN(2, ("msk_stop\n"));

	/* Cancel the periodic tick callout before touching the hardware. */
	callout_stop(&sc_if->sk_tick_ch);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	/* Stop transfer of Tx descriptors */

	/* Stop transfer of Rx descriptors */

	/*
	 * Turn off various components of this interface.
	 * NOTE(review): the SK_XM_* accesses here and below target the
	 * GENESIS-era XMAC and appear to be carried over from if_sk.c;
	 * their effect on Yukon-2 hardware is questionable -- verify.
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
	SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/*
	 * Put the prefetch units back into reset; 0x00000001 mirrors the
	 * first CSR write of the init sequence in msk_init().
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);

	/* Disable interrupts for this port only; the other port keeps its. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_Y2_INTRS1;
	else
		sc->sk_intrmask &= ~SK_Y2_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
#if 1
			/* Return the slot's DMA map to the free list. */
			SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
			    sc_if->sk_cdata.sk_tx_map[i], link);
			sc_if->sk_cdata.sk_tx_map[i] = 0;
#endif
		}
	}

#if 1
	/* Destroy and free every DMA map left on the free list. */
	while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
		SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}
#endif
}
2283
/*
 * Autoconfiguration attachment glue: mskc attaches the controller
 * (struct sk_softc); msk attaches each network port as a child
 * (struct sk_if_softc).
 */
CFATTACH_DECL(mskc, sizeof(struct sk_softc), mskc_probe, mskc_attach,
    NULL, NULL);

CFATTACH_DECL(msk, sizeof(struct sk_if_softc), msk_probe, msk_attach,
    NULL, NULL);
2289
2290 #ifdef MSK_DEBUG
/* Debug helper: print every nonzero field of one TX list element,
 * labeled with the element's ring index. */
void
msk_dump_txdesc(struct msk_tx_desc *le, int idx)
{
/* Print "txdesc[idx].<expr>=<value>" but only when the value is nonzero,
 * to keep the dump compact. */
#define DESC_PRINT(X)					\
	if (X)					\
		printf("txdesc[%d]." #X "=%#x\n",	\
		    idx, X);

	DESC_PRINT(letoh32(le->sk_addr));
	DESC_PRINT(letoh16(le->sk_len));
	DESC_PRINT(le->sk_ctl);
	DESC_PRINT(le->sk_opcode);
#undef DESC_PRINT
}
2305
/*
 * Debug helper: hex/ASCII dump of a buffer, 16 bytes per row.
 * Each row shows the offset, the hex bytes (with a gap after the
 * eighth column), and a printable-ASCII rendering.
 */
void
msk_dump_bytes(const char *data, int len)
{
	int off, col, chunk;

	for (off = 0; off < len; off += 16) {
		printf("%08x ", off);

		/* Clamp the final row to the bytes that remain. */
		chunk = (len - off > 16) ? 16 : len - off;

		/* Hex columns. */
		for (col = 0; col < chunk; col++) {
			printf("%02x ", data[off + col] & 0xff);
			if ((col & 0xf) == 7 && col > 0)
				printf(" ");
		}

		/* Pad a short row so the ASCII column lines up. */
		for (; col < 16; col++)
			printf(" ");
		printf(" ");

		/* ASCII column: non-printable bytes become spaces. */
		for (col = 0; col < chunk; col++) {
			int ch = data[off + col] & 0xff;
			printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
		}

		printf("\n");

		if (chunk < 16)
			break;
	}
}
2337
/* Debug helper: walk an mbuf chain and hex-dump each segment's data. */
void
msk_dump_mbuf(struct mbuf *m)
{
	int count = m->m_pkthdr.len;

	printf("m=%p, m->m_pkthdr.len=%d\n", m, m->m_pkthdr.len);

	/* Stop once the packet-header length has been consumed or the
	 * chain ends, whichever comes first. */
	while (count > 0 && m) {
		printf("m=%p, m->m_data=%p, m->m_len=%d\n",
		    m, m->m_data, m->m_len);
		msk_dump_bytes(mtod(m, char *), m->m_len);

		count -= m->m_len;
		m = m->m_next;
	}
}
2354 #endif
2355
2356 static int
2357 msk_sysctl_handler(SYSCTLFN_ARGS)
2358 {
2359 int error, t;
2360 struct sysctlnode node;
2361 struct sk_softc *sc;
2362
2363 node = *rnode;
2364 sc = node.sysctl_data;
2365 t = sc->sk_int_mod;
2366 node.sysctl_data = &t;
2367 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2368 if (error || newp == NULL)
2369 return error;
2370
2371 if (t < SK_IM_MIN || t > SK_IM_MAX)
2372 return EINVAL;
2373
2374 /* update the softc with sysctl-changed value, and mark
2375 for hardware update */
2376 sc->sk_int_mod = t;
2377 sc->sk_int_mod_pending = 1;
2378 return 0;
2379 }
2380
2381 /*
2382 * Set up sysctl(3) MIB, hw.sk.* - Individual controllers will be
2383 * set up in skc_attach()
2384 */
2385 SYSCTL_SETUP(sysctl_msk, "sysctl msk subtree setup")
2386 {
2387 int rc;
2388 const struct sysctlnode *node;
2389
2390 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
2391 0, CTLTYPE_NODE, "hw", NULL,
2392 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
2393 goto err;
2394 }
2395
2396 if ((rc = sysctl_createv(clog, 0, NULL, &node,
2397 0, CTLTYPE_NODE, "msk",
2398 SYSCTL_DESCR("msk interface controls"),
2399 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
2400 goto err;
2401 }
2402
2403 msk_root_num = node->sysctl_num;
2404 return;
2405
2406 err:
2407 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
2408 }
2409