if_msk.c revision 1.18.2.1 1 /* $NetBSD: if_msk.c,v 1.18.2.1 2008/06/04 02:05:14 yamt Exp $ */
2 /* $OpenBSD: if_msk.c,v 1.42 2007/01/17 02:43:02 krw Exp $ */
3
4 /*
5 * Copyright (c) 1997, 1998, 1999, 2000
6 * Bill Paul <wpaul (at) ctr.columbia.edu>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Bill Paul.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
36 */
37
38 /*
39 * Copyright (c) 2003 Nathan L. Binkert <binkertn (at) umich.edu>
40 *
41 * Permission to use, copy, modify, and distribute this software for any
42 * purpose with or without fee is hereby granted, provided that the above
43 * copyright notice and this permission notice appear in all copies.
44 *
45 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
46 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
47 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
48 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
49 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
50 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
51 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
52 */
53
54 #include <sys/cdefs.h>
55 __KERNEL_RCSID(0, "$NetBSD: if_msk.c,v 1.18.2.1 2008/06/04 02:05:14 yamt Exp $");
56
57 #include "bpfilter.h"
58 #include "rnd.h"
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/sockio.h>
63 #include <sys/mbuf.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/socket.h>
67 #include <sys/device.h>
68 #include <sys/queue.h>
69 #include <sys/callout.h>
70 #include <sys/sysctl.h>
71 #include <sys/endian.h>
72 #ifdef __NetBSD__
73 #define letoh16 htole16
74 #define letoh32 htole32
75 #endif
76
77 #include <net/if.h>
78 #include <net/if_dl.h>
79 #include <net/if_types.h>
80
81 #include <net/if_media.h>
82
83 #if NBPFILTER > 0
84 #include <net/bpf.h>
85 #endif
86 #if NRND > 0
87 #include <sys/rnd.h>
88 #endif
89
90 #include <dev/mii/mii.h>
91 #include <dev/mii/miivar.h>
92 #include <dev/mii/brgphyreg.h>
93
94 #include <dev/pci/pcireg.h>
95 #include <dev/pci/pcivar.h>
96 #include <dev/pci/pcidevs.h>
97
98 #include <dev/pci/if_skreg.h>
99 #include <dev/pci/if_mskvar.h>
100
101 int mskc_probe(struct device *, struct cfdata *, void *);
102 void mskc_attach(struct device *, struct device *self, void *aux);
103 static bool mskc_suspend(device_t PMF_FN_PROTO);
104 static bool mskc_resume(device_t PMF_FN_PROTO);
105 int msk_probe(struct device *, struct cfdata *, void *);
106 void msk_attach(struct device *, struct device *self, void *aux);
107 int mskcprint(void *, const char *);
108 int msk_intr(void *);
109 void msk_intr_yukon(struct sk_if_softc *);
110 __inline int msk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
111 void msk_rxeof(struct sk_if_softc *, u_int16_t, u_int32_t);
112 void msk_txeof(struct sk_if_softc *, int);
113 int msk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
114 void msk_start(struct ifnet *);
115 int msk_ioctl(struct ifnet *, u_long, void *);
116 int msk_init(struct ifnet *);
117 void msk_init_yukon(struct sk_if_softc *);
118 void msk_stop(struct ifnet *, int);
119 void msk_watchdog(struct ifnet *);
120 void msk_reset(struct sk_softc *);
121 int msk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t);
122 int msk_alloc_jumbo_mem(struct sk_if_softc *);
123 void *msk_jalloc(struct sk_if_softc *);
124 void msk_jfree(struct mbuf *, void *, size_t, void *);
125 int msk_init_rx_ring(struct sk_if_softc *);
126 int msk_init_tx_ring(struct sk_if_softc *);
127
128 void msk_update_int_mod(struct sk_softc *);
129
130 int msk_miibus_readreg(struct device *, int, int);
131 void msk_miibus_writereg(struct device *, int, int, int);
132 void msk_miibus_statchg(struct device *);
133
134 void msk_setfilt(struct sk_if_softc *, void *, int);
135 void msk_setmulti(struct sk_if_softc *);
136 void msk_setpromisc(struct sk_if_softc *);
137 void msk_tick(void *);
138
139 /* #define MSK_DEBUG 1 */
140 #ifdef MSK_DEBUG
141 #define DPRINTF(x) if (mskdebug) printf x
142 #define DPRINTFN(n,x) if (mskdebug >= (n)) printf x
143 int mskdebug = MSK_DEBUG;
144
145 void msk_dump_txdesc(struct msk_tx_desc *, int);
146 void msk_dump_mbuf(struct mbuf *);
147 void msk_dump_bytes(const char *, int);
148 #else
149 #define DPRINTF(x)
150 #define DPRINTFN(n,x)
151 #endif
152
153 static int msk_sysctl_handler(SYSCTLFN_PROTO);
154 static int msk_root_num;
155
156 /* supported device vendors */
157 static const struct msk_product {
158 pci_vendor_id_t msk_vendor;
159 pci_product_id_t msk_product;
160 } msk_products[] = {
161 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE550SX },
162 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560SX },
163 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560T },
164 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_1 },
165 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C032 },
166 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C033 },
167 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C034 },
168 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C036 },
169 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C042 },
170 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C055 },
171 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8035 },
172 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8036 },
173 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8038 },
174 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8039 },
175 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8050 },
176 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8052 },
177 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8053 },
178 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8055 },
179 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8056 },
180 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8021CU },
181 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8021X },
182 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8022CU },
183 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8022X },
184 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8061CU },
185 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8061X },
186 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8062CU },
187 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKONII_8062X },
188 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9SXX },
189 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9E21 }
190 };
191
192 static inline u_int32_t
193 sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
194 {
195 return CSR_READ_4(sc, reg);
196 }
197
198 static inline u_int16_t
199 sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
200 {
201 return CSR_READ_2(sc, reg);
202 }
203
204 static inline u_int8_t
205 sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
206 {
207 return CSR_READ_1(sc, reg);
208 }
209
210 static inline void
211 sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
212 {
213 CSR_WRITE_4(sc, reg, x);
214 }
215
216 static inline void
217 sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
218 {
219 CSR_WRITE_2(sc, reg, x);
220 }
221
222 static inline void
223 sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
224 {
225 CSR_WRITE_1(sc, reg, x);
226 }
227
/*
 * Read a PHY register through the Yukon SMI (MDIO) interface.
 *
 * Starts the read via YUKON_SMICR, busy-waits (up to SK_TIMEOUT 1us
 * steps) for the READ_VALID bit, then fetches the data from
 * YUKON_SMIDR.  Returns the register value, or 0 on timeout — the
 * MII framework provides no separate error channel here.
 */
int
msk_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	/* Kick off the read: PHY address, register number, read opcode. */
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* Poll until the controller signals the data is latched. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		aprint_error_dev(&sc_if->sk_dev, "phy failed to come ready\n");
		return (0);	/* timeout: report "no data" */
	}

	DPRINTFN(9, ("msk_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("msk_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return (val);
}
260
/*
 * Write a PHY register through the Yukon SMI (MDIO) interface.
 *
 * Loads the data into YUKON_SMIDR, issues the write via YUKON_SMICR,
 * then busy-waits (up to SK_TIMEOUT 1us steps) for the BUSY bit to
 * clear.  A timeout is only logged; there is no error return.
 */
void
msk_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("msk_miibus_writereg phy=%d reg=%#x val=%#x\n",
	    phy, reg, val));

	/* Data register must be loaded before the command is issued. */
	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	/* Wait for the serial transaction to complete. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		aprint_error_dev(&sc_if->sk_dev, "phy write timed out\n");
}
283
/*
 * MII status-change callback: mirror the current media selection into
 * the MAC's General Purpose Control Register (GPCR).  When the media
 * is forced (not IFM_AUTO), speed and duplex are programmed by hand
 * and hardware flow control is disabled; under autonegotiation the
 * MAC tracks the PHY by itself and only TX/RX enable are preserved.
 */
void
msk_miibus_statchg(struct device *dev)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
	int gpcr;

	/* Keep only the TX/RX enable bits; everything else is rebuilt. */
	gpcr = SK_YU_READ_2(sc_if, YUKON_GPCR);
	gpcr &= (YU_GPCR_TXEN | YU_GPCR_RXEN);

	if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) {
		/* Set speed (SPEED_DIS = take speed from these bits). */
		gpcr |= YU_GPCR_SPEED_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			gpcr |= (YU_GPCR_GIG | YU_GPCR_SPEED);
			break;
		case IFM_100_TX:
			gpcr |= YU_GPCR_SPEED;
			break;
		}

		/* Set duplex. */
		gpcr |= YU_GPCR_DPLX_DIS;
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
			gpcr |= YU_GPCR_DUPLEX;

		/* Disable flow control. */
		gpcr |= YU_GPCR_FCTL_DIS;
		gpcr |= (YU_GPCR_FCTL_TX_DIS | YU_GPCR_FCTL_RX_DIS);
	}

	SK_YU_WRITE_2(sc_if, YUKON_GPCR, gpcr);

	DPRINTFN(9, ("msk_miibus_statchg: gpcr=%x\n",
	    SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
}
325
326 #define HASH_BITS 6
327
/*
 * Program an exact-match receive filter slot with a 6-byte MAC
 * address, written as three consecutive 16-bit words.
 *
 * NOTE(review): this writes the XMAC (GEnesis) filter registers via
 * SK_XM_WRITE_2 and looks like a leftover from if_sk.c — confirm it
 * is actually reachable on Yukon-2 hardware.  The u_int16_t casts
 * also assume 2-byte loads from the address buffer are permissible
 * (alignment) on the target platform — verify for strict-alignment
 * architectures.
 */
void
msk_setfilt(struct sk_if_softc *sc_if, void *addrv, int slot)
{
	char *addr = addrv;
	int base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
}
338
/*
 * Rebuild the 64-bit multicast hash filter from the interface's
 * multicast list and program it into the four YUKON_MCAH registers.
 *
 * In PROMISC mode both unicast and multicast filtering are disabled;
 * in ALLMULTI mode (or when a multicast address *range* is found,
 * which cannot be represented in the hash) all 64 hash bits are set.
 * Otherwise each address contributes one bit, selected by the low
 * HASH_BITS bits of its big-endian CRC32.
 */
void
msk_setmulti(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp= &sc_if->sk_ethercom.ec_if;
	u_int32_t hashes[2] = { 0, 0 };
	int h;
	struct ethercom *ec = &sc_if->sk_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int16_t reg;

	/* First, zot all the existing filters. */
	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);


	/* Now program new ones. */
	reg = SK_YU_READ_2(sc_if, YUKON_RCR);
	reg |= YU_RCR_UFLEN;
allmulti:
	/* Re-entered via "goto allmulti" when a range entry is found. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			reg &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			hashes[0] = 0xFFFFFFFF;
			hashes[1] = 0xFFFFFFFF;
		}
	} else {
		/*
		 * Walk the multicast list; an entry whose low and high
		 * bounds differ is an address range, which the hash
		 * filter cannot express — fall back to ALLMULTI.
		 */
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}
			/* Bit index = low HASH_BITS bits of the CRC. */
			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    ((1 << HASH_BITS) - 1);
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		reg |= YU_RCR_MUFLEN;
	}

	/* Commit the 64-bit hash as four 16-bit halves, then the RCR. */
	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_RCR, reg);
}
395
396 void
397 msk_setpromisc(struct sk_if_softc *sc_if)
398 {
399 struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
400
401 if (ifp->if_flags & IFF_PROMISC)
402 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
403 YU_RCR_UFLEN | YU_RCR_MUFLEN);
404 else
405 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
406 YU_RCR_UFLEN | YU_RCR_MUFLEN);
407 }
408
409 int
410 msk_init_rx_ring(struct sk_if_softc *sc_if)
411 {
412 struct msk_chain_data *cd = &sc_if->sk_cdata;
413 struct msk_ring_data *rd = sc_if->sk_rdata;
414 int i, nexti;
415
416 bzero((char *)rd->sk_rx_ring,
417 sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
418
419 for (i = 0; i < MSK_RX_RING_CNT; i++) {
420 cd->sk_rx_chain[i].sk_le = &rd->sk_rx_ring[i];
421 if (i == (MSK_RX_RING_CNT - 1))
422 nexti = 0;
423 else
424 nexti = i + 1;
425 cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
426 }
427
428 for (i = 0; i < MSK_RX_RING_CNT; i++) {
429 if (msk_newbuf(sc_if, i, NULL,
430 sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
431 aprint_error_dev(&sc_if->sk_dev, "failed alloc of %dth mbuf\n", i);
432 return (ENOBUFS);
433 }
434 }
435
436 sc_if->sk_cdata.sk_rx_prod = MSK_RX_RING_CNT - 1;
437 sc_if->sk_cdata.sk_rx_cons = 0;
438
439 return (0);
440 }
441
442 int
443 msk_init_tx_ring(struct sk_if_softc *sc_if)
444 {
445 struct sk_softc *sc = sc_if->sk_softc;
446 struct msk_chain_data *cd = &sc_if->sk_cdata;
447 struct msk_ring_data *rd = sc_if->sk_rdata;
448 bus_dmamap_t dmamap;
449 struct sk_txmap_entry *entry;
450 int i, nexti;
451
452 bzero((char *)sc_if->sk_rdata->sk_tx_ring,
453 sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
454
455 SIMPLEQ_INIT(&sc_if->sk_txmap_head);
456 for (i = 0; i < MSK_TX_RING_CNT; i++) {
457 cd->sk_tx_chain[i].sk_le = &rd->sk_tx_ring[i];
458 if (i == (MSK_TX_RING_CNT - 1))
459 nexti = 0;
460 else
461 nexti = i + 1;
462 cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];
463
464 if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
465 SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
466 return (ENOBUFS);
467
468 entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
469 if (!entry) {
470 bus_dmamap_destroy(sc->sc_dmatag, dmamap);
471 return (ENOBUFS);
472 }
473 entry->dmamap = dmamap;
474 SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
475 }
476
477 sc_if->sk_cdata.sk_tx_prod = 0;
478 sc_if->sk_cdata.sk_tx_cons = 0;
479 sc_if->sk_cdata.sk_tx_cnt = 0;
480
481 MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT,
482 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
483
484 return (0);
485 }
486
/*
 * Stock receive slot `i' with an mbuf and prime its descriptor.
 *
 * If `m' is NULL, a fresh mbuf header is allocated and given external
 * storage from the driver's private jumbo pool (freed back through
 * msk_jfree).  Otherwise the caller's mbuf is recycled: its data
 * pointer and lengths are reset to cover the whole external buffer.
 *
 * `dmamap' must be the map covering the jumbo buffer area: the
 * descriptor's DMA address is the map base plus the buffer's offset
 * within sk_jumbo_buf.  Returns 0 on success or ENOBUFS.
 */
int
msk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct sk_chain *c;
	struct msk_rx_desc *r;

	if (m == NULL) {
		void *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = msk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc_if->sk_ethercom.ec_if.if_xname));
			return (ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		MEXTADD(m_new, buf, SK_JLEN, 0, msk_jfree, sc_if);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	/* Align the payload for the Ethernet header. */
	m_adj(m_new, ETHER_ALIGN);

	c = &sc_if->sk_cdata.sk_rx_chain[i];
	r = c->sk_le;
	c->sk_mbuf = m_new;
	/* DMA address = jumbo map base + offset of this buffer. */
	r->sk_addr = htole32(dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data
	    - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf)));
	r->sk_len = htole16(SK_JLEN);
	r->sk_ctl = 0;
	/* Hand the descriptor to the hardware last. */
	r->sk_opcode = SK_Y2_RXOPC_PACKET | SK_Y2_RXOPC_OWN;

	MSK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return (0);
}
540
541 /*
542 * Memory management for jumbo frames.
543 */
544
545 int
546 msk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
547 {
548 struct sk_softc *sc = sc_if->sk_softc;
549 char *ptr, *kva;
550 bus_dma_segment_t seg;
551 int i, rseg, state, error;
552 struct sk_jpool_entry *entry;
553
554 state = error = 0;
555
556 /* Grab a big chunk o' storage. */
557 if (bus_dmamem_alloc(sc->sc_dmatag, MSK_JMEM, PAGE_SIZE, 0,
558 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
559 aprint_error(": can't alloc rx buffers");
560 return (ENOBUFS);
561 }
562
563 state = 1;
564 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, MSK_JMEM, (void **)&kva,
565 BUS_DMA_NOWAIT)) {
566 aprint_error(": can't map dma buffers (%d bytes)", MSK_JMEM);
567 error = ENOBUFS;
568 goto out;
569 }
570
571 state = 2;
572 if (bus_dmamap_create(sc->sc_dmatag, MSK_JMEM, 1, MSK_JMEM, 0,
573 BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
574 aprint_error(": can't create dma map");
575 error = ENOBUFS;
576 goto out;
577 }
578
579 state = 3;
580 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
581 kva, MSK_JMEM, NULL, BUS_DMA_NOWAIT)) {
582 aprint_error(": can't load dma map");
583 error = ENOBUFS;
584 goto out;
585 }
586
587 state = 4;
588 sc_if->sk_cdata.sk_jumbo_buf = (void *)kva;
589 DPRINTFN(1,("msk_jumbo_buf = %p\n", (void *)sc_if->sk_cdata.sk_jumbo_buf));
590
591 LIST_INIT(&sc_if->sk_jfree_listhead);
592 LIST_INIT(&sc_if->sk_jinuse_listhead);
593
594 /*
595 * Now divide it up into 9K pieces and save the addresses
596 * in an array.
597 */
598 ptr = sc_if->sk_cdata.sk_jumbo_buf;
599 for (i = 0; i < MSK_JSLOTS; i++) {
600 sc_if->sk_cdata.sk_jslots[i] = ptr;
601 ptr += SK_JLEN;
602 entry = malloc(sizeof(struct sk_jpool_entry),
603 M_DEVBUF, M_NOWAIT);
604 if (entry == NULL) {
605 sc_if->sk_cdata.sk_jumbo_buf = NULL;
606 aprint_error(": no memory for jumbo buffer queue!");
607 error = ENOBUFS;
608 goto out;
609 }
610 entry->slot = i;
611 LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
612 entry, jpool_entries);
613 }
614 out:
615 if (error != 0) {
616 switch (state) {
617 case 4:
618 bus_dmamap_unload(sc->sc_dmatag,
619 sc_if->sk_cdata.sk_rx_jumbo_map);
620 case 3:
621 bus_dmamap_destroy(sc->sc_dmatag,
622 sc_if->sk_cdata.sk_rx_jumbo_map);
623 case 2:
624 bus_dmamem_unmap(sc->sc_dmatag, kva, MSK_JMEM);
625 case 1:
626 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
627 break;
628 default:
629 break;
630 }
631 }
632
633 return (error);
634 }
635
636 /*
637 * Allocate a jumbo buffer.
638 */
639 void *
640 msk_jalloc(struct sk_if_softc *sc_if)
641 {
642 struct sk_jpool_entry *entry;
643
644 entry = LIST_FIRST(&sc_if->sk_jfree_listhead);
645
646 if (entry == NULL)
647 return (NULL);
648
649 LIST_REMOVE(entry, jpool_entries);
650 LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
651 return (sc_if->sk_cdata.sk_jslots[entry->slot]);
652 }
653
654 /*
655 * Release a jumbo buffer.
656 */
/*
 * External-storage free callback for jumbo mbufs (installed via
 * MEXTADD in msk_newbuf).  Returns the buffer's slot to the free
 * list and hands the mbuf header back to the mbuf pool cache.
 */
void
msk_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct sk_jpool_entry *entry;
	struct sk_if_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct sk_if_softc *)arg;

	if (sc == NULL)
		panic("msk_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf
	    - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= MSK_JSLOTS))
		panic("msk_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	/*
	 * Entries are interchangeable: any record from the in-use list
	 * is retargeted at slot `i' before moving to the free list, so
	 * it need not be the one that originally tracked this buffer.
	 */
	entry = LIST_FIRST(&sc->sk_jinuse_listhead);
	if (entry == NULL)
		panic("msk_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);

	/* Release the mbuf header itself back to the pool cache. */
	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}
689
690 int
691 msk_ioctl(struct ifnet *ifp, u_long cmd, void *data)
692 {
693 struct sk_if_softc *sc_if = ifp->if_softc;
694 int s, error = 0;
695
696 s = splnet();
697
698 DPRINTFN(2, ("msk_ioctl ETHER\n"));
699 error = ether_ioctl(ifp, cmd, data);
700
701 if (error == ENETRESET) {
702 error = 0;
703 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
704 ;
705 else if (ifp->if_flags & IFF_RUNNING) {
706 /*
707 * Multicast list has changed; set the hardware
708 * filter accordingly.
709 */
710 msk_setmulti(sc_if);
711 }
712 }
713
714 splx(s);
715 return (error);
716 }
717
/*
 * Program the interrupt-moderation timer from sc->sk_int_mod
 * (microseconds) and (re)start it, clearing any pending
 * "moderation value changed" request.
 */
void
msk_update_int_mod(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks;

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 *
	 * NOTE(review): imtimer_ticks looks unused below, but the
	 * SK_IM_USECS() macro presumably expands to reference it —
	 * confirm in if_skreg.h before removing it as dead.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_EC_U:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	case SK_YUKON_FE:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE;
		break;
	case SK_YUKON_XL:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_XL;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	aprint_verbose_dev(&sc->sk_dev, "interrupt moderation is %d us\n",
	    sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
	/* Moderate only the end-of-frame interrupts on both rings. */
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
	sc->sk_int_mod_pending = 0;
}
754
755 static int
756 msk_lookup(const struct pci_attach_args *pa)
757 {
758 const struct msk_product *pmsk;
759
760 for ( pmsk = &msk_products[0]; pmsk->msk_vendor != 0; pmsk++) {
761 if (PCI_VENDOR(pa->pa_id) == pmsk->msk_vendor &&
762 PCI_PRODUCT(pa->pa_id) == pmsk->msk_product)
763 return 1;
764 }
765 return 0;
766 }
767
768 /*
769 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
770 * IDs against our list and return a device name if we find a match.
771 */
/*
 * Autoconf match for the controller: accept any adapter whose PCI
 * IDs appear in the msk_products table.
 */
int
mskc_probe(struct device *parent, struct cfdata *match,
    void *aux)
{
	const struct pci_attach_args *pa = aux;

	return msk_lookup(pa);
}
780
781 /*
782 * Force the GEnesis into reset, then bring it out of reset.
783 */
/*
 * Force the controller into reset, bring it back out, and
 * reinitialize the global (non-per-port) state: clock gating, link
 * control, ASF, timers, RAM interface, the status BMU/ring, and the
 * interrupt-moderation timers.  The write/delay ordering follows the
 * hardware's reset sequencing — do not reorder.
 */
void msk_reset(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks, reg1;
	int reg;

	DPRINTFN(2, ("msk_reset\n"));

	/* Pulse software reset, then master reset, then release both. */
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_RESET);

	DELAY(1000);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	sk_win_write_1(sc, SK_TESTCTL1, 2);

	/*
	 * PHY power: later Yukon-XL revisions want the COMA bits set,
	 * everything else wants them clear.
	 */
	reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1));
	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		reg1 |= (SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	else
		reg1 &= ~(SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1), reg1);

	/* Clock gating, same revision split as above. */
	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		sk_win_write_1(sc, SK_Y2_CLKGATE,
		    SK_Y2_CLKGATE_LINK1_GATE_DIS |
		    SK_Y2_CLKGATE_LINK2_GATE_DIS |
		    SK_Y2_CLKGATE_LINK1_CORE_DIS |
		    SK_Y2_CLKGATE_LINK2_CORE_DIS |
		    SK_Y2_CLKGATE_LINK1_PCI_DIS | SK_Y2_CLKGATE_LINK2_PCI_DIS);
	else
		sk_win_write_1(sc, SK_Y2_CLKGATE, 0);

	/*
	 * Reset both link controllers; the second port's registers sit
	 * SK_WIN_LEN above the first's.
	 */
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_SET);
	DELAY(1000);
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_CLEAR);

	sk_win_write_1(sc, SK_TESTCTL1, 1);

	DPRINTFN(2, ("msk_reset: sk_csr=%x\n", CSR_READ_1(sc, SK_CSR)));
	DPRINTFN(2, ("msk_reset: sk_link_ctrl=%x\n",
	    CSR_READ_2(sc, SK_LINK_CTRL)));

	/* Disable ASF */
	CSR_WRITE_1(sc, SK_Y2_ASF_CSR, SK_Y2_ASF_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_ASF_OFF);

	/* Clear I2C IRQ noise */
	CSR_WRITE_4(sc, SK_I2CHWIRQ, 1);

	/* Disable hardware timer */
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_STOP);
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_IRQ_CLEAR);

	/* Disable descriptor polling */
	CSR_WRITE_4(sc, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);

	/* Disable time stamps */
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_STOP);
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_IRQ_CLEAR);

	/* Enable RAM interface (both banks; second lives SK_WIN_LEN/2 up) */
	sk_win_write_1(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
	for (reg = SK_TO0;reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg, 36);
	sk_win_write_1(sc, SK_RAMCTL + (SK_WIN_LEN / 2), SK_RAMCTL_UNRESET);
	for (reg = SK_TO0;reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg + (SK_WIN_LEN / 2), 36);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 *
	 * NOTE(review): imtimer_ticks looks unused below, but the
	 * SK_IM_USECS() macro presumably expands to reference it —
	 * confirm in if_skreg.h before removing it as dead.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_EC_U:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	case SK_YUKON_FE:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE;
		break;
	case SK_YUKON_XL:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_XL;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}

	/* Reset status ring. */
	bzero((char *)sc->sk_status_ring,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
	bus_dmamap_sync(sc->sc_dmatag, sc->sk_status_map, 0,
	    sc->sk_status_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	sc->sk_status_idx = 0;
	sc->sk_status_own_idx = 0;

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_RESET);
	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_UNRESET);

	/* Point the status BMU at the ring (64-bit DMA address). */
	sk_win_write_2(sc, SK_STAT_BMU_LIDX, MSK_STATUS_RING_CNT - 1);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRLO,
	    sc->sk_status_map->dm_segs[0].ds_addr);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRHI,
	    (u_int64_t)sc->sk_status_map->dm_segs[0].ds_addr >> 32);
	/* Watermarks differ for chips needing the FIFOIWM workaround. */
	if ((sc->sk_workaround & SK_STAT_BMU_FIFOIWM) != 0) {
		sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH,
		    SK_STAT_BMU_TXTHIDX_MSK);
		sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 0x21);
		sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM, 0x07);
	} else {
		sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH, 0x000a);
		sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 0x10);
		sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM,
		    ((sc->sk_workaround & SK_WA_4109) != 0) ? 0x10 : 0x04);
		sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT,
		    0x0190); /* 3.2us on Yukon-EC */
	}

#if 0
	sk_win_write_4(sc, SK_Y2_LEV_ITIMERINIT, SK_IM_USECS(100));
#endif
	sk_win_write_4(sc, SK_Y2_TX_ITIMERINIT, SK_IM_USECS(1000));

	/* Turn the status unit on before starting the timers. */
	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_ON);

	sk_win_write_1(sc, SK_Y2_LEV_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_TX_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_ISR_ITIMERCTL, SK_IMCTL_START);

	msk_update_int_mod(sc);
}
920
921 int
922 msk_probe(struct device *parent, struct cfdata *match,
923 void *aux)
924 {
925 struct skc_attach_args *sa = aux;
926
927 if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
928 return (0);
929
930 switch (sa->skc_type) {
931 case SK_YUKON_XL:
932 case SK_YUKON_EC_U:
933 case SK_YUKON_EC:
934 case SK_YUKON_FE:
935 return (1);
936 }
937
938 return (0);
939 }
940
941 static bool
942 msk_resume(device_t dv PMF_FN_ARGS)
943 {
944 struct sk_if_softc *sc_if = device_private(dv);
945
946 msk_init_yukon(sc_if);
947 return true;
948 }
949
950 /*
951 * Each XMAC chip is attached as a separate logical IP interface.
952 * Single port cards will have only one logical interface of course.
953 */
954 void
955 msk_attach(struct device *parent, struct device *self, void *aux)
956 {
957 struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
958 struct sk_softc *sc = (struct sk_softc *)parent;
959 struct skc_attach_args *sa = aux;
960 struct ifnet *ifp;
961 void *kva;
962 bus_dma_segment_t seg;
963 int i, rseg;
964 u_int32_t chunk, val;
965
966 sc_if->sk_port = sa->skc_port;
967 sc_if->sk_softc = sc;
968 sc->sk_if[sa->skc_port] = sc_if;
969
970 DPRINTFN(2, ("begin msk_attach: port=%d\n", sc_if->sk_port));
971
972 /*
973 * Get station address for this interface. Note that
974 * dual port cards actually come with three station
975 * addresses: one for each port, plus an extra. The
976 * extra one is used by the SysKonnect driver software
977 * as a 'virtual' station address for when both ports
978 * are operating in failover mode. Currently we don't
979 * use this extra address.
980 */
981 for (i = 0; i < ETHER_ADDR_LEN; i++)
982 sc_if->sk_enaddr[i] =
983 sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);
984
985 aprint_normal(": Ethernet address %s\n",
986 ether_sprintf(sc_if->sk_enaddr));
987
988 /*
989 * Set up RAM buffer addresses. The NIC will have a certain
990 * amount of SRAM on it, somewhere between 512K and 2MB. We
991 * need to divide this up a) between the transmitter and
992 * receiver and b) between the two XMACs, if this is a
993 * dual port NIC. Our algorithm is to divide up the memory
994 * evenly so that everyone gets a fair share.
995 *
996 * Just to be contrary, Yukon2 appears to have separate memory
997 * for each MAC.
998 */
999 chunk = sc->sk_ramsize - (sc->sk_ramsize + 2) / 3;
1000 val = sc->sk_rboff / sizeof(u_int64_t);
1001 sc_if->sk_rx_ramstart = val;
1002 val += (chunk / sizeof(u_int64_t));
1003 sc_if->sk_rx_ramend = val - 1;
1004 chunk = sc->sk_ramsize - chunk;
1005 sc_if->sk_tx_ramstart = val;
1006 val += (chunk / sizeof(u_int64_t));
1007 sc_if->sk_tx_ramend = val - 1;
1008
1009 DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
1010 " tx_ramstart=%#x tx_ramend=%#x\n",
1011 sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
1012 sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
1013
1014 /* Allocate the descriptor queues. */
1015 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data),
1016 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1017 aprint_error(": can't alloc rx buffers\n");
1018 goto fail;
1019 }
1020 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
1021 sizeof(struct msk_ring_data), &kva, BUS_DMA_NOWAIT)) {
1022 aprint_error(": can't map dma buffers (%zu bytes)\n",
1023 sizeof(struct msk_ring_data));
1024 goto fail_1;
1025 }
1026 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct msk_ring_data), 1,
1027 sizeof(struct msk_ring_data), 0, BUS_DMA_NOWAIT,
1028 &sc_if->sk_ring_map)) {
1029 aprint_error(": can't create dma map\n");
1030 goto fail_2;
1031 }
1032 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
1033 sizeof(struct msk_ring_data), NULL, BUS_DMA_NOWAIT)) {
1034 aprint_error(": can't load dma map\n");
1035 goto fail_3;
1036 }
1037 sc_if->sk_rdata = (struct msk_ring_data *)kva;
1038 bzero(sc_if->sk_rdata, sizeof(struct msk_ring_data));
1039
1040 ifp = &sc_if->sk_ethercom.ec_if;
1041 /* Try to allocate memory for jumbo buffers. */
1042 if (msk_alloc_jumbo_mem(sc_if)) {
1043 aprint_error(": jumbo buffer allocation failed\n");
1044 goto fail_3;
1045 }
1046 sc_if->sk_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
1047 if (sc->sk_type != SK_YUKON_FE)
1048 sc_if->sk_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1049
1050 ifp->if_softc = sc_if;
1051 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1052 ifp->if_ioctl = msk_ioctl;
1053 ifp->if_start = msk_start;
1054 ifp->if_stop = msk_stop;
1055 ifp->if_init = msk_init;
1056 ifp->if_watchdog = msk_watchdog;
1057 ifp->if_baudrate = 1000000000;
1058 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1059 IFQ_SET_READY(&ifp->if_snd);
1060 strlcpy(ifp->if_xname, device_xname(&sc_if->sk_dev), IFNAMSIZ);
1061
1062 /*
1063 * Do miibus setup.
1064 */
1065 msk_init_yukon(sc_if);
1066
1067 DPRINTFN(2, ("msk_attach: 1\n"));
1068
1069 sc_if->sk_mii.mii_ifp = ifp;
1070 sc_if->sk_mii.mii_readreg = msk_miibus_readreg;
1071 sc_if->sk_mii.mii_writereg = msk_miibus_writereg;
1072 sc_if->sk_mii.mii_statchg = msk_miibus_statchg;
1073
1074 sc_if->sk_ethercom.ec_mii = &sc_if->sk_mii;
1075 ifmedia_init(&sc_if->sk_mii.mii_media, 0,
1076 ether_mediachange, ether_mediastatus);
1077 mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
1078 MII_OFFSET_ANY, MIIF_DOPAUSE|MIIF_FORCEANEG);
1079 if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
1080 aprint_error_dev(&sc_if->sk_dev, "no PHY found!\n");
1081 ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
1082 0, NULL);
1083 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1084 } else
1085 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);
1086
1087 callout_init(&sc_if->sk_tick_ch, 0);
1088 callout_setfunc(&sc_if->sk_tick_ch, msk_tick, sc_if);
1089 callout_schedule(&sc_if->sk_tick_ch, hz);
1090
1091 /*
1092 * Call MI attach routines.
1093 */
1094 if_attach(ifp);
1095 ether_ifattach(ifp, sc_if->sk_enaddr);
1096
1097 if (!pmf_device_register(self, NULL, msk_resume))
1098 aprint_error_dev(self, "couldn't establish power handler\n");
1099 else
1100 pmf_class_network_register(self, ifp);
1101
1102 #if NRND > 0
1103 rnd_attach_source(&sc->rnd_source, device_xname(&sc->sk_dev),
1104 RND_TYPE_NET, 0);
1105 #endif
1106
1107 DPRINTFN(2, ("msk_attach: end\n"));
1108 return;
1109
1110 fail_3:
1111 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
1112 fail_2:
1113 bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct msk_ring_data));
1114 fail_1:
1115 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1116 fail:
1117 sc->sk_if[sa->skc_port] = NULL;
1118 }
1119
1120 int
1121 mskcprint(void *aux, const char *pnp)
1122 {
1123 struct skc_attach_args *sa = aux;
1124
1125 if (pnp)
1126 aprint_normal("sk port %c at %s",
1127 (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
1128 else
1129 aprint_normal(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
1130 return (UNCONF);
1131 }
1132
1133 /*
1134 * Attach the interface. Allocate softc structures, do ifmedia
1135 * setup and ethernet/BPF attach.
1136 */
1137 void
1138 mskc_attach(struct device *parent, struct device *self, void *aux)
1139 {
1140 struct sk_softc *sc = (struct sk_softc *)self;
1141 struct pci_attach_args *pa = aux;
1142 struct skc_attach_args skca;
1143 pci_chipset_tag_t pc = pa->pa_pc;
1144 pcireg_t command, memtype;
1145 pci_intr_handle_t ih;
1146 const char *intrstr = NULL;
1147 bus_size_t size;
1148 int rc, sk_nodenum;
1149 u_int8_t hw, skrs;
1150 const char *revstr = NULL;
1151 const struct sysctlnode *node;
1152 void *kva;
1153 bus_dma_segment_t seg;
1154 int rseg;
1155
1156 DPRINTFN(2, ("begin mskc_attach\n"));
1157
1158 /*
1159 * Handle power management nonsense.
1160 */
1161 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;
1162
1163 if (command == 0x01) {
1164 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
1165 if (command & SK_PSTATE_MASK) {
1166 u_int32_t iobase, membase, irq;
1167
1168 /* Save important PCI config data. */
1169 iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
1170 membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
1171 irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);
1172
1173 /* Reset the power state. */
1174 aprint_normal_dev(&sc->sk_dev, "chip is in D%d power mode "
1175 "-- setting to D0\n",
1176 command & SK_PSTATE_MASK);
1177 command &= 0xFFFFFFFC;
1178 pci_conf_write(pc, pa->pa_tag,
1179 SK_PCI_PWRMGMTCTRL, command);
1180
1181 /* Restore PCI config data. */
1182 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
1183 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
1184 pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
1185 }
1186 }
1187
1188 /*
1189 * Map control/status registers.
1190 */
1191
1192 memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
1193 switch (memtype) {
1194 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1195 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1196 if (pci_mapreg_map(pa, SK_PCI_LOMEM,
1197 memtype, 0, &sc->sk_btag, &sc->sk_bhandle,
1198 NULL, &size) == 0)
1199 break;
1200 default:
1201 aprint_error(": can't map mem space\n");
1202 return;
1203 }
1204
1205 sc->sc_dmatag = pa->pa_dmat;
1206
1207 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1208 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);
1209
1210 /* bail out here if chip is not recognized */
1211 if (!(SK_IS_YUKON2(sc))) {
1212 aprint_error(": unknown chip type: %d\n", sc->sk_type);
1213 goto fail_1;
1214 }
1215 DPRINTFN(2, ("mskc_attach: allocate interrupt\n"));
1216
1217 /* Allocate interrupt */
1218 if (pci_intr_map(pa, &ih)) {
1219 aprint_error(": couldn't map interrupt\n");
1220 goto fail_1;
1221 }
1222
1223 intrstr = pci_intr_string(pc, ih);
1224 sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, msk_intr, sc);
1225 if (sc->sk_intrhand == NULL) {
1226 aprint_error(": couldn't establish interrupt");
1227 if (intrstr != NULL)
1228 aprint_error(" at %s", intrstr);
1229 aprint_error("\n");
1230 goto fail_1;
1231 }
1232
1233 if (bus_dmamem_alloc(sc->sc_dmatag,
1234 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
1235 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1236 aprint_error(": can't alloc status buffers\n");
1237 goto fail_2;
1238 }
1239
1240 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
1241 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
1242 &kva, BUS_DMA_NOWAIT)) {
1243 aprint_error(": can't map dma buffers (%zu bytes)\n",
1244 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
1245 goto fail_3;
1246 }
1247 if (bus_dmamap_create(sc->sc_dmatag,
1248 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1,
1249 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 0,
1250 BUS_DMA_NOWAIT, &sc->sk_status_map)) {
1251 aprint_error(": can't create dma map\n");
1252 goto fail_4;
1253 }
1254 if (bus_dmamap_load(sc->sc_dmatag, sc->sk_status_map, kva,
1255 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
1256 NULL, BUS_DMA_NOWAIT)) {
1257 aprint_error(": can't load dma map\n");
1258 goto fail_5;
1259 }
1260 sc->sk_status_ring = (struct msk_status_desc *)kva;
1261
1262 /* Reset the adapter. */
1263 msk_reset(sc);
1264
1265 skrs = sk_win_read_1(sc, SK_EPROM0);
1266 if (skrs == 0x00)
1267 sc->sk_ramsize = 0x20000;
1268 else
1269 sc->sk_ramsize = skrs * (1<<12);
1270 sc->sk_rboff = SK_RBOFF_0;
1271
1272 DPRINTFN(2, ("mskc_attach: ramsize=%d (%dk), rboff=%d\n",
1273 sc->sk_ramsize, sc->sk_ramsize / 1024,
1274 sc->sk_rboff));
1275
1276 switch (sc->sk_type) {
1277 case SK_YUKON_XL:
1278 sc->sk_name = "Yukon-2 XL";
1279 break;
1280 case SK_YUKON_EC_U:
1281 sc->sk_name = "Yukon-2 EC Ultra";
1282 break;
1283 case SK_YUKON_EC:
1284 sc->sk_name = "Yukon-2 EC";
1285 break;
1286 case SK_YUKON_FE:
1287 sc->sk_name = "Yukon-2 FE";
1288 break;
1289 default:
1290 sc->sk_name = "Yukon (Unknown)";
1291 }
1292
1293 if (sc->sk_type == SK_YUKON_XL) {
1294 switch (sc->sk_rev) {
1295 case SK_YUKON_XL_REV_A0:
1296 sc->sk_workaround = 0;
1297 revstr = "A0";
1298 break;
1299 case SK_YUKON_XL_REV_A1:
1300 sc->sk_workaround = SK_WA_4109;
1301 revstr = "A1";
1302 break;
1303 case SK_YUKON_XL_REV_A2:
1304 sc->sk_workaround = SK_WA_4109;
1305 revstr = "A2";
1306 break;
1307 case SK_YUKON_XL_REV_A3:
1308 sc->sk_workaround = SK_WA_4109;
1309 revstr = "A3";
1310 break;
1311 default:
1312 sc->sk_workaround = 0;
1313 break;
1314 }
1315 }
1316
1317 if (sc->sk_type == SK_YUKON_EC) {
1318 switch (sc->sk_rev) {
1319 case SK_YUKON_EC_REV_A1:
1320 sc->sk_workaround = SK_WA_43_418 | SK_WA_4109;
1321 revstr = "A1";
1322 break;
1323 case SK_YUKON_EC_REV_A2:
1324 sc->sk_workaround = SK_WA_4109;
1325 revstr = "A2";
1326 break;
1327 case SK_YUKON_EC_REV_A3:
1328 sc->sk_workaround = SK_WA_4109;
1329 revstr = "A3";
1330 break;
1331 default:
1332 sc->sk_workaround = 0;
1333 break;
1334 }
1335 }
1336
1337 if (sc->sk_type == SK_YUKON_FE) {
1338 sc->sk_workaround = SK_WA_4109;
1339 switch (sc->sk_rev) {
1340 case SK_YUKON_FE_REV_A1:
1341 revstr = "A1";
1342 break;
1343 case SK_YUKON_FE_REV_A2:
1344 revstr = "A2";
1345 break;
1346 default:
1347 sc->sk_workaround = 0;
1348 break;
1349 }
1350 }
1351
1352 if (sc->sk_type == SK_YUKON_EC_U) {
1353 sc->sk_workaround = SK_WA_4109;
1354 switch (sc->sk_rev) {
1355 case SK_YUKON_EC_U_REV_A0:
1356 revstr = "A0";
1357 break;
1358 case SK_YUKON_EC_U_REV_A1:
1359 revstr = "A1";
1360 break;
1361 case SK_YUKON_EC_U_REV_B0:
1362 revstr = "B0";
1363 break;
1364 default:
1365 sc->sk_workaround = 0;
1366 break;
1367 }
1368 }
1369
1370 /* Announce the product name. */
1371 aprint_normal(", %s", sc->sk_name);
1372 if (revstr != NULL)
1373 aprint_normal(" rev. %s", revstr);
1374 aprint_normal(" (0x%x): %s\n", sc->sk_rev, intrstr);
1375
1376 sc->sk_macs = 1;
1377
1378 hw = sk_win_read_1(sc, SK_Y2_HWRES);
1379 if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) {
1380 if ((sk_win_read_1(sc, SK_Y2_CLKGATE) &
1381 SK_Y2_CLKGATE_LINK2_INACTIVE) == 0)
1382 sc->sk_macs++;
1383 }
1384
1385 skca.skc_port = SK_PORT_A;
1386 skca.skc_type = sc->sk_type;
1387 skca.skc_rev = sc->sk_rev;
1388 (void)config_found(&sc->sk_dev, &skca, mskcprint);
1389
1390 if (sc->sk_macs > 1) {
1391 skca.skc_port = SK_PORT_B;
1392 skca.skc_type = sc->sk_type;
1393 skca.skc_rev = sc->sk_rev;
1394 (void)config_found(&sc->sk_dev, &skca, mskcprint);
1395 }
1396
1397 /* Turn on the 'driver is loaded' LED. */
1398 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1399
1400 /* skc sysctl setup */
1401
1402 sc->sk_int_mod = SK_IM_DEFAULT;
1403 sc->sk_int_mod_pending = 0;
1404
1405 if ((rc = sysctl_createv(&sc->sk_clog, 0, NULL, &node,
1406 0, CTLTYPE_NODE, device_xname(&sc->sk_dev),
1407 SYSCTL_DESCR("mskc per-controller controls"),
1408 NULL, 0, NULL, 0, CTL_HW, msk_root_num, CTL_CREATE,
1409 CTL_EOL)) != 0) {
1410 aprint_normal_dev(&sc->sk_dev, "couldn't create sysctl node\n");
1411 goto fail_6;
1412 }
1413
1414 sk_nodenum = node->sysctl_num;
1415
1416 /* interrupt moderation time in usecs */
1417 if ((rc = sysctl_createv(&sc->sk_clog, 0, NULL, &node,
1418 CTLFLAG_READWRITE,
1419 CTLTYPE_INT, "int_mod",
1420 SYSCTL_DESCR("msk interrupt moderation timer"),
1421 msk_sysctl_handler, 0, sc,
1422 0, CTL_HW, msk_root_num, sk_nodenum, CTL_CREATE,
1423 CTL_EOL)) != 0) {
1424 aprint_normal_dev(&sc->sk_dev, "couldn't create int_mod sysctl node\n");
1425 goto fail_6;
1426 }
1427
1428 if (!pmf_device_register(self, mskc_suspend, mskc_resume))
1429 aprint_error_dev(self, "couldn't establish power handler\n");
1430
1431 return;
1432
1433 fail_6:
1434 bus_dmamap_unload(sc->sc_dmatag, sc->sk_status_map);
1435 fail_5:
1436 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map);
1437 fail_4:
1438 bus_dmamem_unmap(sc->sc_dmatag, kva,
1439 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
1440 fail_3:
1441 bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1442 fail_2:
1443 pci_intr_disestablish(pc, sc->sk_intrhand);
1444 fail_1:
1445 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, size);
1446 }
1447
/*
 * Encapsulate an mbuf chain into the transmit descriptor ring.
 *
 * On success, *txidx is advanced past the descriptors consumed and 0
 * is returned.  Returns ENOBUFS (with the ring untouched) if no DMA
 * map is available, the chain cannot be loaded, or there are too few
 * free descriptors.  The OWN bit of the first descriptor is set last
 * so the chip does not see a partially-built chain.
 */
int
msk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_tx_desc *f = NULL;
	u_int32_t frag, cur;
	int i;
	struct sk_txmap_entry *entry;
	bus_dmamap_t txmap;

	DPRINTFN(2, ("msk_encap\n"));

	/* Grab a pre-created DMA map from the free list. */
	entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("msk_encap: no txmap available\n"));
		return (ENOBUFS);
	}
	txmap = entry->dmamap;

	cur = frag = *txidx;

#ifdef MSK_DEBUG
	if (mskdebug >= 2)
		msk_dump_mbuf(m_head);
#endif

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT)) {
		DPRINTFN(2, ("msk_encap: dmamap failed\n"));
		return (ENOBUFS);
	}

	/* Leave two descriptors of slack so the ring never fills completely. */
	if (txmap->dm_nsegs > (MSK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2)) {
		DPRINTFN(2, ("msk_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmatag, txmap);
		return (ENOBUFS);
	}

	DPRINTFN(2, ("msk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Fill one descriptor per DMA segment.  The first descriptor
	 * carries the PACKET opcode without OWN (set later); the rest
	 * are BUFFER descriptors handed to the chip immediately.
	 */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc_if->sk_rdata->sk_tx_ring[frag];
		f->sk_addr = htole32(txmap->dm_segs[i].ds_addr);
		f->sk_len = htole16(txmap->dm_segs[i].ds_len);
		f->sk_ctl = 0;
		if (i == 0)
			f->sk_opcode = SK_Y2_TXOPC_PACKET;
		else
			f->sk_opcode = SK_Y2_TXOPC_BUFFER | SK_Y2_TXOPC_OWN;
		cur = frag;
		SK_INC(frag, MSK_TX_RING_CNT);
	}

	/*
	 * Record the mbuf and DMA map against the LAST descriptor so
	 * msk_txeof() can free them when that descriptor completes.
	 */
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);

	sc_if->sk_cdata.sk_tx_map[cur] = entry;
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= SK_Y2_TXCTL_LASTFRAG;

	/* Sync descriptors before handing to chip */
	MSK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Now hand the whole chain to the chip by setting OWN on the head. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_opcode |= SK_Y2_TXOPC_OWN;

	/* Sync first descriptor to hand it off */
	MSK_CDTXSYNC(sc_if, *txidx, 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;

#ifdef MSK_DEBUG
	if (mskdebug >= 2) {
		struct msk_tx_desc *le;
		u_int32_t idx;
		for (idx = *txidx; idx != frag; SK_INC(idx, MSK_TX_RING_CNT)) {
			le = &sc_if->sk_rdata->sk_tx_ring[idx];
			msk_dump_txdesc(le, idx);
		}
	}
#endif

	*txidx = frag;

	DPRINTFN(2, ("msk_encap: completed successfully\n"));

	return (0);
}
1545
/*
 * Transmit start routine: drain the interface send queue into the TX
 * descriptor ring and kick the chip's prefetch unit.  Packets are
 * polled first and only dequeued once msk_encap() has committed them,
 * so a full ring leaves the packet on the queue for later.
 */
void
msk_start(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct mbuf *m_head = NULL;
	u_int32_t idx = sc_if->sk_cdata.sk_tx_prod;
	int pkts = 0;

	DPRINTFN(2, ("msk_start\n"));

	/* Stop as soon as the producer slot is still holding an mbuf. */
	while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, idx);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1596
/*
 * Watchdog timer handler, invoked when ifp->if_timer expires without
 * the TX ring draining.  A lost TX-completion interrupt is the common
 * cause, so try to reclaim finished descriptors first; only if frames
 * remain outstanding after that is the chip actually wedged and reset.
 */
void
msk_watchdog(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	u_int32_t reg;
	int idx;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	if (sc_if->sk_port == SK_PORT_A)
		reg = SK_STAT_BMU_TXA1_RIDX;
	else
		reg = SK_STAT_BMU_TXA2_RIDX;

	/* Read the chip's TX consumer index and reclaim up to it. */
	idx = sk_win_read_2(sc_if->sk_softc, reg);
	if (sc_if->sk_cdata.sk_tx_cons != idx) {
		msk_txeof(sc_if, idx);
		if (sc_if->sk_cdata.sk_tx_cnt != 0) {
			aprint_error_dev(&sc_if->sk_dev, "watchdog timeout\n");

			ifp->if_oerrors++;

			/* XXX Resets both ports; we shouldn't do that. */
			msk_reset(sc_if->sk_softc);
			msk_init(ifp);
		}
	}
}
1627
1628 static bool
1629 mskc_suspend(device_t dv PMF_FN_ARGS)
1630 {
1631 struct sk_softc *sc = device_private(dv);
1632
1633 DPRINTFN(2, ("mskc_suspend\n"));
1634
1635 /* Turn off the 'driver is loaded' LED. */
1636 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1637
1638 return true;
1639 }
1640
1641 static bool
1642 mskc_resume(device_t dv PMF_FN_ARGS)
1643 {
1644 struct sk_softc *sc = device_private(dv);
1645
1646 DPRINTFN(2, ("mskc_resume\n"));
1647
1648 msk_reset(sc);
1649 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1650
1651 return true;
1652 }
1653
1654 __inline int
1655 msk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
1656 {
1657 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
1658 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
1659 YU_RXSTAT_JABBER)) != 0 ||
1660 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
1661 YU_RXSTAT_BYTES(stat) != len)
1662 return (0);
1663
1664 return (1);
1665 }
1666
/*
 * Handle one received frame reported via the status ring.
 *
 * 'len' and 'rxstat' come from the chip's status descriptor.  The
 * jumbo receive buffer holding the frame is either replaced with a
 * fresh one, or (if allocation fails) the frame is copied out with
 * m_devget() and the original buffer is recycled back into the ring.
 */
void
msk_rxeof(struct sk_if_softc *sc_if, u_int16_t len, u_int32_t rxstat)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
	struct mbuf *m;
	struct sk_chain *cur_rx;
	int cur, total_len = len;
	bus_dmamap_t dmamap;

	DPRINTFN(2, ("msk_rxeof\n"));

	/* Advance both ring indices; 'cur' is the slot just completed. */
	cur = sc_if->sk_cdata.sk_rx_cons;
	SK_INC(sc_if->sk_cdata.sk_rx_cons, MSK_RX_RING_CNT);
	SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);

	/* Sync the descriptor */
	MSK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
	dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;

	/* Make the DMA'd packet data visible to the CPU. */
	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
	    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	m = cur_rx->sk_mbuf;
	cur_rx->sk_mbuf = NULL;

	/* Drop bad frames, putting the buffer straight back in the ring. */
	if (total_len < SK_MIN_FRAMELEN ||
	    total_len > ETHER_MAX_LEN_JUMBO ||
	    msk_rxvalid(sc, rxstat, total_len) == 0) {
		ifp->if_ierrors++;
		msk_newbuf(sc_if, cur, m, dmamap);
		return;
	}

	/*
	 * Try to allocate a new jumbo buffer. If that fails, copy the
	 * packet to mbufs and put the jumbo buffer back in the ring
	 * so it can be re-used. If allocating mbufs fails, then we
	 * have to drop the packet.
	 */
	if (msk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) {
		struct mbuf *m0;
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		msk_newbuf(sc_if, cur, m, dmamap);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			return;
		}
		/* Trim the alignment slop copied in above. */
		m_adj(m0, ETHER_ALIGN);
		m = m0;
	} else {
		/* New buffer installed; hand the original mbuf up the stack. */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
	}

	ifp->if_ipackets++;

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif

	/* pass it on. */
	(*ifp->if_input)(ifp, m);
}
1735
/*
 * Reclaim transmit descriptors up to (but not including) 'idx', the
 * chip's reported TX consumer index.  Frees completed mbufs, returns
 * their DMA maps to the free list, and clears IFF_OACTIVE once enough
 * ring space is available for msk_start() to resume.
 */
void
msk_txeof(struct sk_if_softc *sc_if, int idx)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_tx_desc *cur_tx;
	struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
	u_int32_t sk_ctl;
	struct sk_txmap_entry *entry;
	int cons, prog;

	DPRINTFN(2, ("msk_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	cons = sc_if->sk_cdata.sk_tx_cons;
	prog = 0;
	while (cons != idx) {
		/* Nothing outstanding: ring and index disagree, stop. */
		if (sc_if->sk_cdata.sk_tx_cnt <= 0)
			break;
		prog++;
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[cons];

		/* Sync to read the descriptor, then return it to the chip. */
		MSK_CDTXSYNC(sc_if, cons, 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		sk_ctl = cur_tx->sk_ctl;
		MSK_CDTXSYNC(sc_if, cons, 1, BUS_DMASYNC_PREREAD);
#ifdef MSK_DEBUG
		if (mskdebug >= 2)
			msk_dump_txdesc(cur_tx, cons);
#endif
		/* Count one packet per LASTFRAG descriptor. */
		if (sk_ctl & SK_Y2_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		/*
		 * The mbuf and DMA map were attached to the chain's last
		 * descriptor by msk_encap(); release them here.
		 */
		if (sc_if->sk_cdata.sk_tx_chain[cons].sk_mbuf != NULL) {
			entry = sc_if->sk_cdata.sk_tx_map[cons];

			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
					  link);
			sc_if->sk_cdata.sk_tx_map[cons] = NULL;
			m_freem(sc_if->sk_cdata.sk_tx_chain[cons].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[cons].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(cons, MSK_TX_RING_CNT);
	}
	/* Re-arm the watchdog while frames remain in flight. */
	ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

	if (sc_if->sk_cdata.sk_tx_cnt < MSK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (prog > 0)
		sc_if->sk_cdata.sk_tx_cons = cons;
}
1794
1795 void
1796 msk_tick(void *xsc_if)
1797 {
1798 struct sk_if_softc *sc_if = xsc_if;
1799 struct mii_data *mii = &sc_if->sk_mii;
1800
1801 mii_tick(mii);
1802 callout_schedule(&sc_if->sk_tick_ch, hz);
1803 }
1804
1805 void
1806 msk_intr_yukon(struct sk_if_softc *sc_if)
1807 {
1808 u_int8_t status;
1809
1810 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
1811 /* RX overrun */
1812 if ((status & SK_GMAC_INT_RX_OVER) != 0) {
1813 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
1814 SK_RFCTL_RX_FIFO_OVER);
1815 }
1816 /* TX underrun */
1817 if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
1818 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
1819 SK_TFCTL_TX_FIFO_UNDER);
1820 }
1821
1822 DPRINTFN(2, ("msk_intr_yukon status=%#x\n", status));
1823 }
1824
/*
 * Interrupt handler for the whole controller (both ports).
 *
 * Returns nonzero when the interrupt was ours.  The chip reports RX
 * and TX completions through a single shared status descriptor ring;
 * each entry is dispatched to the owning port's msk_rxeof()/msk_txeof()
 * until an entry still owned by the driver is reached.
 */
int
msk_intr(void *xsc)
{
	struct sk_softc *sc = xsc;
	struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet *ifp0 = NULL, *ifp1 = NULL;
	int claimed = 0;
	u_int32_t status;
	uint32_t st_status;
	uint16_t st_len;
	uint8_t st_opcode, st_link;
	struct msk_status_desc *cur_st;

	/* Fast check: if ISSR2 is clear the interrupt is not ours. */
	status = CSR_READ_4(sc, SK_Y2_ISSR2);
	if (status == 0) {
		CSR_WRITE_4(sc, SK_Y2_ICR, 2);
		return (0);
	}

	status = CSR_READ_4(sc, SK_ISR);

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->sk_ethercom.ec_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->sk_ethercom.ec_if;

	/* Handle per-port GMAC conditions (FIFO overrun/underrun). */
	if (sc_if0 && (status & SK_Y2_IMR_MAC1) &&
	    (ifp0->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if0);
	}

	if (sc_if1 && (status & SK_Y2_IMR_MAC2) &&
	    (ifp1->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if1);
	}

	/*
	 * Walk the status ring, consuming every entry the chip has
	 * handed back (OWN bit set).
	 */
	for (;;) {
		cur_st = &sc->sk_status_ring[sc->sk_status_idx];
		MSK_CDSTSYNC(sc, sc->sk_status_idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		st_opcode = cur_st->sk_opcode;
		/* OWN clear: the chip has not written this entry yet. */
		if ((st_opcode & SK_Y2_STOPC_OWN) == 0) {
			MSK_CDSTSYNC(sc, sc->sk_status_idx,
			    BUS_DMASYNC_PREREAD);
			break;
		}
		st_status = le32toh(cur_st->sk_status);
		st_len = le16toh(cur_st->sk_len);
		st_link = cur_st->sk_link;
		st_opcode &= ~SK_Y2_STOPC_OWN;

		switch (st_opcode) {
		case SK_Y2_STOPC_RXSTAT:
			/* RX frame on port st_link; refill its ring. */
			msk_rxeof(sc->sk_if[st_link], st_len, st_status);
			SK_IF_WRITE_2(sc->sk_if[st_link], 0,
			    SK_RXQ1_Y2_PREF_PUTIDX,
			    sc->sk_if[st_link]->sk_cdata.sk_rx_prod);
			break;
		case SK_Y2_STOPC_TXSTAT:
			/*
			 * One TXSTAT entry carries consumer indices for
			 * both ports packed into sk_status/sk_len.
			 */
			if (sc_if0)
				msk_txeof(sc_if0, st_status
				    & SK_Y2_ST_TXA1_MSKL);
			if (sc_if1)
				msk_txeof(sc_if1,
				    ((st_status & SK_Y2_ST_TXA2_MSKL)
					>> SK_Y2_ST_TXA2_SHIFTL)
				    | ((st_len & SK_Y2_ST_TXA2_MSKH) << SK_Y2_ST_TXA2_SHIFTH));
			break;
		default:
			aprint_error("opcode=0x%x\n", st_opcode);
			break;
		}
		SK_INC(sc->sk_status_idx, MSK_STATUS_RING_CNT);
	}

/* Number of status entries consumed but not yet returned to the chip. */
#define MSK_STATUS_RING_OWN_CNT(sc)	\
	(((sc)->sk_status_idx + MSK_STATUS_RING_CNT - \
	    (sc)->sk_status_own_idx) % MSK_STATUS_RING_CNT)

	/*
	 * Lazily give entries back to the chip (clear OWN) once more
	 * than half the ring has been consumed.
	 */
	while (MSK_STATUS_RING_OWN_CNT(sc) > MSK_STATUS_RING_CNT / 2) {
		cur_st = &sc->sk_status_ring[sc->sk_status_own_idx];
		cur_st->sk_opcode &= ~SK_Y2_STOPC_OWN;
		MSK_CDSTSYNC(sc, sc->sk_status_own_idx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		SK_INC(sc->sk_status_own_idx, MSK_STATUS_RING_CNT);
	}

	if (status & SK_Y2_IMR_BMU) {
		CSR_WRITE_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_IRQ_CLEAR);
		claimed = 1;
	}

	/* Re-enable interrupt delivery. */
	CSR_WRITE_4(sc, SK_Y2_ICR, 2);

	/* Kick transmission on either port if packets are queued. */
	if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
		msk_start(ifp0);
	if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
		msk_start(ifp1);

#if NRND > 0
	if (RND_ENABLED(&sc->rnd_source))
		rnd_add_uint32(&sc->rnd_source, status);
#endif

	/* Apply a sysctl-requested interrupt-moderation change. */
	if (sc->sk_int_mod_pending)
		msk_update_int_mod(sc);

	return claimed;
}
1936
/*
 * Initialize the Yukon-2 GMAC and GPHY for one port: reset both,
 * program the MAC control/parameter registers, load the station
 * address, set the RX/TX MAC FIFOs up, and enable the receiver and
 * transmitter.  Called from msk_attach() and msk_init().
 */
void
msk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t v;
	u_int16_t reg;
	struct sk_softc *sc;
	int i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("msk_init_yukon: start: sk_csr=%#x\n",
		     CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	DPRINTFN(6, ("msk_init_yukon: 1\n"));

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	DELAY(1000);

	DPRINTFN(6, ("msk_init_yukon: 2\n"));

	/* Release the resets; leave pause (flow control) enabled. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	DPRINTFN(3, ("msk_init_yukon: gmac_ctrl=%#x\n",
		     SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("msk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("msk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("msk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("msk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("msk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("msk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit control register */
	SK_YU_WRITE_2(sc_if, YUKON_TCR, (0x04 << 10));

	/* transmit flow control register */
	SK_YU_WRITE_2(sc_if, YUKON_TFCR, 0xffff);

	/* transmit parameter register */
	DPRINTFN(6, ("msk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1c) | 0x04);

	/* serial mode register */
	DPRINTFN(6, ("msk_init_yukon: 9\n"));
	reg = YU_SMR_DATA_BLIND(0x1c) |
	      YU_SMR_MFL_VLAN |
	      YU_SMR_IPG_DATA(0x1e);

	/* Yukon-FE cannot do jumbo frames; all other Yukon-2 can. */
	if (sc->sk_type != SK_YUKON_FE)
		reg |= YU_SMR_MFL_JUMBO;

	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	DPRINTFN(6, ("msk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      sc_if->sk_enaddr[i * 2] |
			      sc_if->sk_enaddr[i * 2 + 1] << 8);
	}

	/* Source Address 2: copy the port's MAC from the chip's window regs. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	msk_setpromisc(sc_if);

	/* Set multicast filter */
	DPRINTFN(6, ("msk_init_yukon: 11\n"));
	msk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("msk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON |
	    SK_RFCTL_FIFO_FLUSH_ON);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

#if 1
	/* Enable the receiver and transmitter. */
	SK_YU_WRITE_2(sc_if, YUKON_GPCR, YU_GPCR_TXEN | YU_GPCR_RXEN);
#endif
	DPRINTFN(6, ("msk_init_yukon: end\n"));
}
2065
2066 /*
2067 * Note that to properly initialize any part of the GEnesis chip,
2068 * you first have to take it out of reset mode.
2069 */
/*
 * msk_init:
 *
 *	ifnet if_init callback: bring the interface up.  Stops any
 *	pending activity, reprograms the MAC/PHY, RAM buffers, BMUs and
 *	prefetch engines, allocates the RX/TX descriptor rings, and
 *	enables this port's interrupts.
 *
 *	Returns 0 on success, or an errno (ENOBUFS on ring allocation
 *	failure, or whatever ether_mediachange() returns).
 */
int
msk_init(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	int rc = 0, s;
	uint32_t imr, imtimer_ticks;


	DPRINTFN(2, ("msk_init\n"));

	/* Block network interrupts while reconfiguring the chip. */
	s = splnet();

	/* Cancel pending I/O and free all RX/TX buffers. */
	msk_stop(ifp,0);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	msk_init_yukon(sc_if);
	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON);
#if 0
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
#endif

	/*
	 * Configure RAMbuffers: take each buffer out of reset, point
	 * its start/read/write/end pointers at this port's slice of
	 * packet RAM, then switch it on.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_ON);

	/*
	 * Configure BMUs.  The CSR values are magic numbers inherited
	 * from the OpenBSD driver; their bit meanings are not spelled
	 * out here — consult the Yukon-2 documentation before changing.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_WM, 0x0600);	/* XXX watermark value unexplained — verify against chip docs */

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_WM, 0x0600);	/* XXX watermark value unexplained — verify against chip docs */

	/* Make sure the sync transmit queue is disabled. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET);

	/*
	 * Init descriptors.  NOTE(review): these error paths duplicate
	 * the stop/splx/return sequence instead of using "goto out";
	 * behavior is equivalent.
	 */
	if (msk_init_rx_ring(sc_if) == ENOBUFS) {
		aprint_error_dev(&sc_if->sk_dev, "initialization failed: no "
		    "memory for rx buffers\n");
		msk_stop(ifp,0);
		splx(s);
		return ENOBUFS;
	}

	if (msk_init_tx_ring(sc_if) == ENOBUFS) {
		aprint_error_dev(&sc_if->sk_dev, "initialization failed: no "
		    "memory for tx buffers\n");
		msk_stop(ifp,0);
		splx(s);
		return ENOBUFS;
	}

	/*
	 * Set interrupt moderation if changed via sysctl.
	 * imtimer_ticks is presumably referenced by the SK_IM_USECS()
	 * macro below — confirm against its definition before removing.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_EC_U:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	case SK_YUKON_FE:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE;
		break;
	case SK_YUKON_XL:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_XL;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
	if (imr != SK_IM_USECS(sc->sk_int_mod)) {
		sk_win_write_4(sc, SK_IMTIMERINIT,
		    SK_IM_USECS(sc->sk_int_mod));
		aprint_verbose_dev(&sc->sk_dev, "interrupt moderation is %d us\n",
		    sc->sk_int_mod);
	}

	/*
	 * Initialize prefetch engine: reset it, program the ring DMA
	 * address and last index, then enable.  The trailing read is
	 * presumably a flush of the posted writes — confirm.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_LIDX, MSK_RX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRLO,
	    MSK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_RX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_LIDX, MSK_TX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRLO,
	    MSK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_TX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR);

	/* Hand the initial RX producer index to the chip. */
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
	    sc_if->sk_cdata.sk_rx_prod);

	/* Configure interrupt handling: enable this port's sources. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_Y2_INTRS1;
	else
		sc->sk_intrmask |= SK_Y2_INTRS2;
	sc->sk_intrmask |= SK_Y2_IMR_BMU;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the periodic link/housekeeping tick. */
	callout_schedule(&sc_if->sk_tick_ch, hz);

out:
	splx(s);
	return rc;
}
2210
/*
 * msk_stop:
 *
 *	ifnet if_stop callback: shut the interface down.  Cancels the
 *	tick callout, resets/disables the MAC FIFOs, BMUs, RAM buffers,
 *	LEDs and prefetch engines, masks this port's interrupts, and
 *	frees every mbuf still held by the RX/TX rings.
 *
 *	NOTE(review): the "disable" argument is accepted but never
 *	used in this function.
 */
void
msk_stop(struct ifnet *ifp, int disable)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	struct sk_txmap_entry *dma;
	int i;

	DPRINTFN(2, ("msk_stop\n"));

	callout_stop(&sc_if->sk_tick_ch);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	/* Stop transfer of Tx descriptors */

	/* Stop transfer of Rx descriptors */

	/*
	 * Turn off various components of this interface.
	 * NOTE(review): SK_XM_SETBIT_2/XM_GPIO below is an XMAC
	 * (GEnesis) register access — looks like a leftover from the
	 * if_sk (Yukon-1/GEnesis) driver this was derived from; verify
	 * it is meaningful on Yukon-2 hardware.
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
	SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Put both prefetch engines back into reset. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_Y2_INTRS1;
	else
		sc->sk_intrmask &= ~SK_Y2_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/*
	 * NOTE(review): XMAC (GEnesis) interrupt ack/mask — same
	 * leftover concern as the XM_GPIO write above; verify.
	 */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
#if 1
			/* Return the DMA map to the free list. */
			SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
			    sc_if->sk_cdata.sk_tx_map[i], link);
			sc_if->sk_cdata.sk_tx_map[i] = 0;
#endif
		}
	}

#if 1
	/* Destroy all DMA maps on the free list. */
	while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
		SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}
#endif
}
2284
/*
 * Autoconfiguration glue: "mskc" is the controller device
 * (struct sk_softc); "msk" is presumably the per-port network
 * interface child (struct sk_if_softc) — matches how sc/sc_if are
 * used throughout this file.  No detach/activate handlers are given.
 */
CFATTACH_DECL(mskc, sizeof(struct sk_softc), mskc_probe, mskc_attach,
    NULL, NULL);

CFATTACH_DECL(msk, sizeof(struct sk_if_softc), msk_probe, msk_attach,
    NULL, NULL);
2290
2291 #ifdef MSK_DEBUG
2292 void
2293 msk_dump_txdesc(struct msk_tx_desc *le, int idx)
2294 {
2295 #define DESC_PRINT(X) \
2296 if (X) \
2297 printf("txdesc[%d]." #X "=%#x\n", \
2298 idx, X);
2299
2300 DESC_PRINT(letoh32(le->sk_addr));
2301 DESC_PRINT(letoh16(le->sk_len));
2302 DESC_PRINT(le->sk_ctl);
2303 DESC_PRINT(le->sk_opcode);
2304 #undef DESC_PRINT
2305 }
2306
/*
 * Hex/ASCII dump of a buffer to the console, 16 bytes per row:
 * offset, hex bytes (extra gap after the 8th), then printable ASCII
 * (non-printables shown as spaces).  Debug aid only.
 */
void
msk_dump_bytes(const char *data, int len)
{
	int row, col, nbytes;

	for (row = 0; row < len; row += 16) {
		nbytes = len - row;
		if (nbytes > 16)
			nbytes = 16;

		printf("%08x ", row);

		/* Hex column. */
		for (col = 0; col < nbytes; col++) {
			printf("%02x ", data[row + col] & 0xff);
			if (col == 7)
				printf(" ");
		}

		/* Pad short final rows so the ASCII column lines up. */
		for (col = nbytes; col < 16; col++)
			printf(" ");
		printf(" ");

		/* ASCII column. */
		for (col = 0; col < nbytes; col++) {
			int ch = data[row + col] & 0xff;
			printf("%c", (ch >= ' ' && ch <= '~') ? ch : ' ');
		}

		printf("\n");

		if (nbytes < 16)
			break;
	}
}
2338
2339 void
2340 msk_dump_mbuf(struct mbuf *m)
2341 {
2342 int count = m->m_pkthdr.len;
2343
2344 printf("m=%p, m->m_pkthdr.len=%d\n", m, m->m_pkthdr.len);
2345
2346 while (count > 0 && m) {
2347 printf("m=%p, m->m_data=%p, m->m_len=%d\n",
2348 m, m->m_data, m->m_len);
2349 msk_dump_bytes(mtod(m, char *), m->m_len);
2350
2351 count -= m->m_len;
2352 m = m->m_next;
2353 }
2354 }
2355 #endif
2356
2357 static int
2358 msk_sysctl_handler(SYSCTLFN_ARGS)
2359 {
2360 int error, t;
2361 struct sysctlnode node;
2362 struct sk_softc *sc;
2363
2364 node = *rnode;
2365 sc = node.sysctl_data;
2366 t = sc->sk_int_mod;
2367 node.sysctl_data = &t;
2368 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2369 if (error || newp == NULL)
2370 return error;
2371
2372 if (t < SK_IM_MIN || t > SK_IM_MAX)
2373 return EINVAL;
2374
2375 /* update the softc with sysctl-changed value, and mark
2376 for hardware update */
2377 sc->sk_int_mod = t;
2378 sc->sk_int_mod_pending = 1;
2379 return 0;
2380 }
2381
2382 /*
 * Set up sysctl(3) MIB, hw.msk.* - Individual controllers will be
2384 * set up in skc_attach()
2385 */
2386 SYSCTL_SETUP(sysctl_msk, "sysctl msk subtree setup")
2387 {
2388 int rc;
2389 const struct sysctlnode *node;
2390
2391 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
2392 0, CTLTYPE_NODE, "hw", NULL,
2393 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
2394 goto err;
2395 }
2396
2397 if ((rc = sysctl_createv(clog, 0, NULL, &node,
2398 0, CTLTYPE_NODE, "msk",
2399 SYSCTL_DESCR("msk interface controls"),
2400 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
2401 goto err;
2402 }
2403
2404 msk_root_num = node->sysctl_num;
2405 return;
2406
2407 err:
2408 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
2409 }
2410