1 /* $NetBSD: if_axe.c,v 1.100 2019/07/15 06:40:21 mrg Exp $ */
2 /* $OpenBSD: if_axe.c,v 1.137 2016/04/13 11:03:37 mpi Exp $ */
3
4 /*
5 * Copyright (c) 2005, 2006, 2007 Jonathan Gray <jsg (at) openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * Copyright (c) 1997, 1998, 1999, 2000-2003
22 * Bill Paul <wpaul (at) windriver.com>. All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. All advertising materials mentioning features or use of this software
33 * must display the following acknowledgement:
34 * This product includes software developed by Bill Paul.
35 * 4. Neither the name of the author nor the names of any co-contributors
36 * may be used to endorse or promote products derived from this software
37 * without specific prior written permission.
38 *
39 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
40 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
43 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
44 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
45 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
46 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
47 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
48 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
49 * THE POSSIBILITY OF SUCH DAMAGE.
50 */
51
52 /*
53 * ASIX Electronics AX88172/AX88178/AX88772 USB 2.0 ethernet driver.
54 * Used in the LinkSys USB200M and various other adapters.
55 *
56 * Written by Bill Paul <wpaul (at) windriver.com>
57 * Senior Engineer
58 * Wind River Systems
59 */
60
61 /*
62 * The AX88172 provides USB ethernet support at 10 and 100Mbps.
63 * It uses an external PHY (reference designs use a RealTek chip),
64 * and has a 64-bit multicast hash filter. There is some information
65 * missing from the manual which one needs to know in order to make
66 * the chip function:
67 *
68 * - You must set bit 7 in the RX control register, otherwise the
69 * chip won't receive any packets.
70 * - You must initialize all 3 IPG registers, or you won't be able
71 * to send any packets.
72 *
73 * Note that this device appears to only support loading the station
74 * address via autoload from the EEPROM (i.e. there's no way to manually
75 * set it).
76 *
77 * (Adam Weinberger wanted me to name this driver if_gir.c.)
78 */
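/*
 * In this driver both items are handled in axe_init_locked(): the RX
 * enable bit is set via AXE_RXCMD_ENABLE and the IPG registers are
 * programmed with the AXE_172_CMD_WRITE_IPG* / AXE_178_CMD_WRITE_IPG012
 * commands.
 */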
79
80 /*
81 * Ax88178 and Ax88772 support backported from the OpenBSD driver.
82 * 2007/02/12, J.R. Oldroyd, fbsd (at) opal.com
83 *
84 * Manual here:
85 * Manuals here:
86 * http://www.asix.com.tw/FrootAttach/datasheet/AX88772_datasheet_Rev10.pdf
87 */
88
89 #include <sys/cdefs.h>
90 __KERNEL_RCSID(0, "$NetBSD: if_axe.c,v 1.100 2019/07/15 06:40:21 mrg Exp $");
91
92 #ifdef _KERNEL_OPT
93 #include "opt_inet.h"
94 #include "opt_usb.h"
95 #include "opt_net_mpsafe.h"
96 #endif
97
98 #include <sys/param.h>
99 #include <sys/bus.h>
100 #include <sys/device.h>
101 #include <sys/kernel.h>
102 #include <sys/mbuf.h>
103 #include <sys/module.h>
104 #include <sys/mutex.h>
105 #include <sys/socket.h>
106 #include <sys/sockio.h>
107 #include <sys/systm.h>
108
109 #include <sys/rndsource.h>
110
111 #include <net/if.h>
112 #include <net/if_dl.h>
113 #include <net/if_ether.h>
114 #include <net/if_media.h>
115
116 #include <net/bpf.h>
117
118 #include <dev/mii/mii.h>
119 #include <dev/mii/miivar.h>
120
121 #include <dev/usb/usb.h>
122 #include <dev/usb/usbhist.h>
123 #include <dev/usb/usbdi.h>
124 #include <dev/usb/usbdi_util.h>
125 #include <dev/usb/usbdivar.h>
126 #include <dev/usb/usbdevs.h>
127
128 #include <dev/usb/if_axereg.h>
129
130 struct axe_type {
131 struct usb_devno axe_dev;
132 uint16_t axe_flags;
133 };
134
135 struct axe_softc;
136
137 struct axe_chain {
138 struct axe_softc *axe_sc;
139 struct usbd_xfer *axe_xfer;
140 uint8_t *axe_buf;
141 int axe_accum;
142 int axe_idx;
143 };
144
145 struct axe_cdata {
146 struct axe_chain axe_tx_chain[AXE_TX_LIST_CNT];
147 struct axe_chain axe_rx_chain[AXE_RX_LIST_CNT];
148 int axe_tx_prod;
149 int axe_tx_cons;
150 int axe_tx_cnt;
151 int axe_rx_prod;
152 };
153
154 struct axe_softc {
155 device_t axe_dev;
156 struct ethercom axe_ec;
157 struct mii_data axe_mii;
158 krndsource_t rnd_source;
159 struct usbd_device * axe_udev;
160 struct usbd_interface * axe_iface;
161
162 uint16_t axe_vendor;
163 uint16_t axe_product;
164 uint16_t axe_timer;
165 uint32_t axe_flags; /* copied from axe_type */
166 #define AX178 __BIT(0) /* AX88178 */
167 #define AX772 __BIT(1) /* AX88772 */
168 #define AX772A __BIT(2) /* AX88772A */
169 #define AX772B __BIT(3) /* AX88772B */
170 #define AXSTD_FRAME __BIT(12)
171 #define AXCSUM_FRAME __BIT(13)
172
173 int axe_ed[AXE_ENDPT_MAX];
174 struct usbd_pipe * axe_ep[AXE_ENDPT_MAX];
175 int axe_if_flags;
176 int axe_phyno;
177 struct axe_cdata axe_cdata;
178 struct callout axe_stat_ch;
179
180 uint8_t axe_enaddr[ETHER_ADDR_LEN];
181
182 int axe_refcnt;
183 bool axe_dying;
184 bool axe_stopping;
185 bool axe_attached;
186
187 struct usb_task axe_tick_task;
188
189 kmutex_t axe_lock;
190 kmutex_t axe_mii_lock;
191 kmutex_t axe_rxlock;
192 kmutex_t axe_txlock;
193 kcondvar_t axe_detachcv;
194
195 int axe_link;
196
197 uint8_t axe_ipgs[3];
198 uint8_t axe_phyaddrs[2];
199 uint16_t sc_pwrcfg;
200 uint16_t sc_lenmask;
201
202 struct timeval axe_rx_notice;
203 struct timeval axe_tx_notice;
204 int axe_bufsz;
205
206 #define sc_if axe_ec.ec_if
207 };
208
209 #define AXE_IS_178_FAMILY(sc) \
210 ((sc)->axe_flags & (AX772 | AX772A | AX772B | AX178))
211
212 #define AXE_IS_772(sc) \
213 ((sc)->axe_flags & (AX772 | AX772A | AX772B))
214
215 #define AX_RXCSUM \
216 (IFCAP_CSUM_IPv4_Rx | \
217 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | \
218 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)
219
220 #define AX_TXCSUM \
221 (IFCAP_CSUM_IPv4_Tx | \
222 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx | \
223 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx)
224
225 /*
226 * AXE_178_MAX_FRAME_BURST
227 * max frame burst size for Ax88178 and Ax88772
228 * 0 2048 bytes
229 * 1 4096 bytes
230 * 2 8192 bytes
231 * 3 16384 bytes
232 * use the largest value your system can handle without USB stalling.
233 *
234 * NB: 88772 parts appear to generate lots of input errors with
235 * a 2K rx buffer, and 8K is only slightly faster than 4K on an
236 * EHCI port on a T42, so change at your own risk.
237 */
238 #define AXE_178_MAX_FRAME_BURST 1
239
240
241 #ifdef USB_DEBUG
242 #ifndef AXE_DEBUG
243 #define axedebug 0
244 #else
245 static int axedebug = 20;
246
247 SYSCTL_SETUP(sysctl_hw_axe_setup, "sysctl hw.axe setup")
248 {
249 int err;
250 const struct sysctlnode *rnode;
251 const struct sysctlnode *cnode;
252
253 err = sysctl_createv(clog, 0, NULL, &rnode,
254 CTLFLAG_PERMANENT, CTLTYPE_NODE, "axe",
255 SYSCTL_DESCR("axe global controls"),
256 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
257
258 if (err)
259 goto fail;
260
261 /* control debugging printfs */
262 err = sysctl_createv(clog, 0, &rnode, &cnode,
263 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
264 "debug", SYSCTL_DESCR("Enable debugging output"),
265 NULL, 0, &axedebug, sizeof(axedebug), CTL_CREATE, CTL_EOL);
266 if (err)
267 goto fail;
268
269 return;
270 fail:
271 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
272 }
273
274 #endif /* AXE_DEBUG */
275 #endif /* USB_DEBUG */
276
277 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOGN(axedebug,1,FMT,A,B,C,D)
278 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(axedebug,N,FMT,A,B,C,D)
279 #define AXEHIST_FUNC() USBHIST_FUNC()
280 #define AXEHIST_CALLED(name) USBHIST_CALLED(axedebug)
281
282 /*
283 * Various supported device vendors/products.
284 */
285 static const struct axe_type axe_devs[] = {
286 { { USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_UFE2000}, 0 },
287 { { USB_VENDOR_ACERCM, USB_PRODUCT_ACERCM_EP1427X2}, 0 },
288 { { USB_VENDOR_APPLE, USB_PRODUCT_APPLE_ETHERNET }, AX772 },
289 { { USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88172}, 0 },
290 { { USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88772}, AX772 },
291 { { USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88772A}, AX772 },
292 { { USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88772B}, AX772B },
293 { { USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88772B_1}, AX772B },
294 { { USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88178}, AX178 },
295 { { USB_VENDOR_ATEN, USB_PRODUCT_ATEN_UC210T}, 0 },
296 { { USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D5055 }, AX178 },
297 { { USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USB2AR}, 0},
298 { { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_USB200MV2}, AX772A },
299 { { USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB2_TX }, 0},
300 { { USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DUBE100}, 0 },
301 { { USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DUBE100B1 }, AX772 },
302 { { USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DUBE100B1 }, AX772 },
303 { { USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DUBE100C1 }, AX772B },
304 { { USB_VENDOR_GOODWAY, USB_PRODUCT_GOODWAY_GWUSB2E}, 0 },
305 { { USB_VENDOR_IODATA, USB_PRODUCT_IODATA_ETGUS2 }, AX178 },
306 { { USB_VENDOR_JVC, USB_PRODUCT_JVC_MP_PRX1}, 0 },
307 { { USB_VENDOR_LENOVO, USB_PRODUCT_LENOVO_ETHERNET }, AX772B },
308 { { USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_HG20F9}, AX772B },
309 { { USB_VENDOR_LINKSYS2, USB_PRODUCT_LINKSYS2_USB200M}, 0 },
310 { { USB_VENDOR_LINKSYS4, USB_PRODUCT_LINKSYS4_USB1000 }, AX178 },
311 { { USB_VENDOR_LOGITEC, USB_PRODUCT_LOGITEC_LAN_GTJU2}, AX178 },
312 { { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUAU2GT}, AX178 },
313 { { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUAU2KTX}, 0 },
314 { { USB_VENDOR_MSI, USB_PRODUCT_MSI_AX88772A}, AX772 },
315 { { USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_FA120}, 0 },
316 { { USB_VENDOR_OQO, USB_PRODUCT_OQO_ETHER01PLUS }, AX772 },
317 { { USB_VENDOR_PLANEX3, USB_PRODUCT_PLANEX3_GU1000T }, AX178 },
318 { { USB_VENDOR_SITECOM, USB_PRODUCT_SITECOM_LN029}, 0 },
319 { { USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_LN028 }, AX178 },
320 { { USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_LN031 }, AX178 },
321 { { USB_VENDOR_SYSTEMTALKS, USB_PRODUCT_SYSTEMTALKS_SGCX2UL}, 0 },
322 };
323 #define axe_lookup(v, p) ((const struct axe_type *)usb_lookup(axe_devs, v, p))
324
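/*
 * Maximum frame burst settings for the AX88772B; axe_init_locked()
 * programs the RX control configuration from the threshold/byte count
 * pair for the chosen receive buffer size.
 */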
325 static const struct ax88772b_mfb ax88772b_mfb_table[] = {
326 { 0x8000, 0x8001, 2048 },
327 { 0x8100, 0x8147, 4096 },
328 { 0x8200, 0x81EB, 6144 },
329 { 0x8300, 0x83D7, 8192 },
330 { 0x8400, 0x851E, 16384 },
331 { 0x8500, 0x8666, 20480 },
332 { 0x8600, 0x87AE, 24576 },
333 { 0x8700, 0x8A3D, 32768 }
334 };
335
336 int axe_match(device_t, cfdata_t, void *);
337 void axe_attach(device_t, device_t, void *);
338 int axe_detach(device_t, int);
339 int axe_activate(device_t, devact_t);
340
341 CFATTACH_DECL_NEW(axe, sizeof(struct axe_softc),
342 axe_match, axe_attach, axe_detach, axe_activate);
343
344 static int axe_tx_list_init(struct axe_softc *);
345 static int axe_rx_list_init(struct axe_softc *);
346 static int axe_encap(struct axe_softc *, struct mbuf *, int);
347 static void axe_rxeof(struct usbd_xfer *, void *, usbd_status);
348 static void axe_txeof(struct usbd_xfer *, void *, usbd_status);
349 static void axe_tick(void *);
350 static void axe_tick_task(void *);
351 static void axe_start(struct ifnet *);
352 static void axe_start_locked(struct ifnet *);
353 static int axe_ioctl(struct ifnet *, u_long, void *);
354 static int axe_init(struct ifnet *);
355 static int axe_init_locked(struct ifnet *);
356 static void axe_stop(struct ifnet *, int);
357 static void axe_stop_locked(struct ifnet *, int);
358 static void axe_watchdog(struct ifnet *);
359 static int axe_miibus_readreg(device_t, int, int, uint16_t *);
360 static int axe_miibus_readreg_locked(device_t, int, int, uint16_t *);
361 static int axe_miibus_writereg(device_t, int, int, uint16_t);
362 static int axe_miibus_writereg_locked(device_t, int, int, uint16_t);
363 static void axe_miibus_statchg(struct ifnet *);
364 static int axe_cmd(struct axe_softc *, int, int, int, void *);
365 static void axe_reset(struct axe_softc *);
366
367 static void axe_setmulti(struct axe_softc *);
368 static void axe_setmulti_locked(struct axe_softc *);
369 static void axe_lock_mii(struct axe_softc *);
370 static void axe_unlock_mii(struct axe_softc *);
371
372 static void axe_ax88178_init(struct axe_softc *);
373 static void axe_ax88772_init(struct axe_softc *);
374 static void axe_ax88772a_init(struct axe_softc *);
375 static void axe_ax88772b_init(struct axe_softc *);
376
377 /* Get exclusive access to the MII registers */
378 static void
379 axe_lock_mii(struct axe_softc *sc)
380 {
381
382 mutex_enter(&sc->axe_lock);
383 sc->axe_refcnt++;
384 mutex_exit(&sc->axe_lock);
385
386 mutex_enter(&sc->axe_mii_lock);
387 }
388
389 static void
390 axe_lock_mii_sc_locked(struct axe_softc *sc)
391 {
392 KASSERT(mutex_owned(&sc->axe_lock));
393
394 sc->axe_refcnt++;
395 mutex_enter(&sc->axe_mii_lock);
396 }
397
398 static void
399 axe_unlock_mii(struct axe_softc *sc)
400 {
401
402 mutex_exit(&sc->axe_mii_lock);
403 mutex_enter(&sc->axe_lock);
404 if (--sc->axe_refcnt < 0)
405 cv_broadcast(&sc->axe_detachcv);
406 mutex_exit(&sc->axe_lock);
407 }
408
409 static void
410 axe_unlock_mii_sc_locked(struct axe_softc *sc)
411 {
412 KASSERT(mutex_owned(&sc->axe_lock));
413
414 mutex_exit(&sc->axe_mii_lock);
415 if (--sc->axe_refcnt < 0)
416 cv_broadcast(&sc->axe_detachcv);
417 }
418
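/*
 * Issue a vendor-specific control request to the adapter. The
 * direction, request code and length are encoded in 'cmd' (see the
 * AXE_CMD_* macros); 'index' and 'val' are placed in wIndex and
 * wValue. Must be called with the MII lock held. Returns 0 on
 * success, -1 if the device is detaching or the request failed.
 */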
419 static int
420 axe_cmd(struct axe_softc *sc, int cmd, int index, int val, void *buf)
421 {
422 AXEHIST_FUNC(); AXEHIST_CALLED();
423 usb_device_request_t req;
424 usbd_status err;
425
426 KASSERT(mutex_owned(&sc->axe_mii_lock));
427
428 if (sc->axe_dying)
429 return -1;
430
431 DPRINTFN(20, "cmd %#jx index %#jx val %#jx", cmd, index, val, 0);
432
433 if (AXE_CMD_DIR(cmd))
434 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
435 else
436 req.bmRequestType = UT_READ_VENDOR_DEVICE;
437 req.bRequest = AXE_CMD_CMD(cmd);
438 USETW(req.wValue, val);
439 USETW(req.wIndex, index);
440 USETW(req.wLength, AXE_CMD_LEN(cmd));
441
442 err = usbd_do_request(sc->axe_udev, &req, buf);
443
444 if (err) {
445 DPRINTF("cmd %jd err %jd", cmd, err, 0, 0);
446 return -1;
447 }
448 return 0;
449 }
450
451 static int
452 axe_miibus_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
453 {
454 AXEHIST_FUNC(); AXEHIST_CALLED();
455 struct axe_softc *sc = device_private(dev);
456 usbd_status err;
457 uint16_t data;
458
459 mutex_enter(&sc->axe_lock);
460 if (sc->axe_dying || sc->axe_phyno != phy) {
461 mutex_exit(&sc->axe_lock);
462 return -1;
463 }
464 mutex_exit(&sc->axe_lock);
465
466 DPRINTFN(30, "phy 0x%jx reg 0x%jx\n", phy, reg, 0, 0);
467
468 axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
469
470 err = axe_cmd(sc, AXE_CMD_MII_READ_REG, reg, phy, &data);
471 axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
472
473 if (err) {
474 aprint_error_dev(sc->axe_dev, "read PHY failed\n");
475 return err;
476 }
477
478 *val = le16toh(data);
479 if (AXE_IS_772(sc) && reg == MII_BMSR) {
480 /*
481 * The BMSR of the AX88772 indicates that it supports extended
482 * capability, but the extended status register is
483 * reserved for the embedded ethernet PHY, so clear the
484 * extended capability bit of the BMSR.
485 */
486 *val &= ~BMSR_EXTCAP;
487 }
488
489 DPRINTFN(30, "phy 0x%jx reg 0x%jx val %#jx", phy, reg, *val, 0);
490
491 return 0;
492 }
493
494 static int
495 axe_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
496 {
497 struct axe_softc *sc = device_private(dev);
498 int rv;
499
500 mutex_enter(&sc->axe_lock);
501 if (sc->axe_dying || sc->axe_phyno != phy) {
502 mutex_exit(&sc->axe_lock);
503 return -1;
504 }
505 mutex_exit(&sc->axe_lock);
506
507 axe_lock_mii(sc);
508 rv = axe_miibus_readreg_locked(dev, phy, reg, val);
509 axe_unlock_mii(sc);
510
511 return rv;
512 }
513
514 static int
515 axe_miibus_writereg_locked(device_t dev, int phy, int reg, uint16_t aval)
516 {
517 struct axe_softc *sc = device_private(dev);
518 usbd_status err;
519 uint16_t val;
520
521 val = htole16(aval);
522
523 axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
524 err = axe_cmd(sc, AXE_CMD_MII_WRITE_REG, reg, phy, &val);
525 axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
526
527 if (err) {
528 aprint_error_dev(sc->axe_dev, "write PHY failed\n");
529 return err;
530 }
531
532 return 0;
533 }
534
535 static int
536 axe_miibus_writereg(device_t dev, int phy, int reg, uint16_t aval)
537 {
538 struct axe_softc *sc = device_private(dev);
539 int rv;
540
541 mutex_enter(&sc->axe_lock);
542 if (sc->axe_dying || sc->axe_phyno != phy) {
543 mutex_exit(&sc->axe_lock);
544 return -1;
545 }
546 mutex_exit(&sc->axe_lock);
547
548 axe_lock_mii(sc);
549 rv = axe_miibus_writereg_locked(dev, phy, reg, aval);
550 axe_unlock_mii(sc);
551
552 return rv;
553 }
554
555 static void
556 axe_miibus_statchg(struct ifnet *ifp)
557 {
558 AXEHIST_FUNC(); AXEHIST_CALLED();
559
560 struct axe_softc * const sc = ifp->if_softc;
561 struct mii_data *mii = &sc->axe_mii;
562 int val, err;
563
564 if (sc->axe_dying)
565 return;
566
567 val = 0;
568 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
569 val |= AXE_MEDIA_FULL_DUPLEX;
570 if (AXE_IS_178_FAMILY(sc)) {
571 if ((IFM_OPTIONS(mii->mii_media_active) &
572 IFM_ETH_TXPAUSE) != 0)
573 val |= AXE_178_MEDIA_TXFLOW_CONTROL_EN;
574 if ((IFM_OPTIONS(mii->mii_media_active) &
575 IFM_ETH_RXPAUSE) != 0)
576 val |= AXE_178_MEDIA_RXFLOW_CONTROL_EN;
577 }
578 }
579 if (AXE_IS_178_FAMILY(sc)) {
580 val |= AXE_178_MEDIA_RX_EN | AXE_178_MEDIA_MAGIC;
581 if (sc->axe_flags & AX178)
582 val |= AXE_178_MEDIA_ENCK;
583 switch (IFM_SUBTYPE(mii->mii_media_active)) {
584 case IFM_1000_T:
585 val |= AXE_178_MEDIA_GMII | AXE_178_MEDIA_ENCK;
586 break;
587 case IFM_100_TX:
588 val |= AXE_178_MEDIA_100TX;
589 break;
590 case IFM_10_T:
591 /* doesn't need to be handled */
592 break;
593 }
594 }
595
596 DPRINTF("val=0x%jx", val, 0, 0, 0);
597 axe_lock_mii(sc);
598 err = axe_cmd(sc, AXE_CMD_WRITE_MEDIA, 0, val, NULL);
599 axe_unlock_mii(sc);
600 if (err) {
601 aprint_error_dev(sc->axe_dev, "media change failed\n");
602 return;
603 }
604 }
605
606 static void
607 axe_setmulti_locked(struct axe_softc *sc)
608 {
609 AXEHIST_FUNC(); AXEHIST_CALLED();
610 struct ethercom *ec = &sc->axe_ec;
611 struct ifnet *ifp = &sc->sc_if;
612 struct ether_multi *enm;
613 struct ether_multistep step;
614 uint32_t h = 0;
615 uint16_t rxmode;
616 uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
617
618 KASSERT(mutex_owned(&sc->axe_mii_lock));
619
620 if (sc->axe_dying)
621 return;
622
623 if (axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode)) {
624 aprint_error_dev(sc->axe_dev, "can't read rxmode\n");
625 return;
626 }
627 rxmode = le16toh(rxmode);
628
629 rxmode &=
630 ~(AXE_RXCMD_ALLMULTI | AXE_RXCMD_PROMISC |
631 AXE_RXCMD_BROADCAST | AXE_RXCMD_MULTICAST);
632
633 rxmode |=
634 (ifp->if_flags & IFF_BROADCAST) ? AXE_RXCMD_BROADCAST : 0;
635
636 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
637 if (ifp->if_flags & IFF_PROMISC)
638 rxmode |= AXE_RXCMD_PROMISC;
639 goto allmulti;
640 }
641
642 /* Now program new ones */
643 ETHER_LOCK(ec);
644 ETHER_FIRST_MULTI(step, ec, enm);
645 while (enm != NULL) {
646 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
647 ETHER_ADDR_LEN) != 0) {
648 ETHER_UNLOCK(ec);
649 goto allmulti;
650 }
651
652 h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
653 hashtbl[h >> 3] |= 1U << (h & 7);
654 ETHER_NEXT_MULTI(step, enm);
655 }
656 ETHER_UNLOCK(ec);
657 ifp->if_flags &= ~IFF_ALLMULTI;
658 rxmode |= AXE_RXCMD_MULTICAST;
659
660 axe_cmd(sc, AXE_CMD_WRITE_MCAST, 0, 0, hashtbl);
661 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
662 return;
663
664 allmulti:
665 ifp->if_flags |= IFF_ALLMULTI;
666 rxmode |= AXE_RXCMD_ALLMULTI;
667 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
668 }
669
670 static void
671 axe_setmulti(struct axe_softc *sc)
672 {
673 axe_lock_mii(sc);
674 axe_setmulti_locked(sc);
675 axe_unlock_mii(sc);
676 }
677
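/*
 * Chip-specific initialization. Run the per-variant init routine and
 * read the station address from the chip; the AX88772B fetches its
 * address from the EEPROM in axe_ax88772b_init(), so it returns early.
 */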
678 static void
679 axe_ax_init(struct axe_softc *sc)
680 {
681 int cmd = AXE_178_CMD_READ_NODEID;
682
683 if (sc->axe_flags & AX178) {
684 axe_ax88178_init(sc);
685 } else if (sc->axe_flags & AX772) {
686 axe_ax88772_init(sc);
687 } else if (sc->axe_flags & AX772A) {
688 axe_ax88772a_init(sc);
689 } else if (sc->axe_flags & AX772B) {
690 axe_ax88772b_init(sc);
691 return;
692 } else {
693 cmd = AXE_172_CMD_READ_NODEID;
694 }
695
696 if (axe_cmd(sc, cmd, 0, 0, sc->axe_enaddr)) {
697 aprint_error_dev(sc->axe_dev,
698 "failed to read ethernet address\n");
699 }
700 }
701
702
703 static void
704 axe_reset(struct axe_softc *sc)
705 {
706
707 if (sc->axe_dying)
708 return;
709
710 /*
711 * softnet_lock can be taken when NET_MPSAFE is not defined when calling
712 * if_addr_init -> if_init. This doesn't mix well with the
713 * usbd_delay_ms calls in the init routines, as things like nd6_slowtimo
714 * can fire during the wait and attempt to take softnet_lock and then
715 * block the softclk thread, meaning the wait never ends.
716 */
717 #ifndef NET_MPSAFE
718 /* XXX What to reset? */
719
720 /* Wait a little while for the chip to get its brains in order. */
721 DELAY(1000);
722 #else
723 axe_lock_mii(sc);
724
725 axe_ax_init(sc);
726
727 axe_unlock_mii(sc);
728 #endif
729 }
730
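/*
 * Convert one of the PHY description bytes obtained with
 * AXE_CMD_READ_PHYID into a PHY address. Returns -1 if the entry does
 * not describe a usable 10/100 or gigabit PHY.
 */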
731 static int
732 axe_get_phyno(struct axe_softc *sc, int sel)
733 {
734 int phyno;
735
736 switch (AXE_PHY_TYPE(sc->axe_phyaddrs[sel])) {
737 case PHY_TYPE_100_HOME:
738 /* FALLTHROUGH */
739 case PHY_TYPE_GIG:
740 phyno = AXE_PHY_NO(sc->axe_phyaddrs[sel]);
741 break;
742 case PHY_TYPE_SPECIAL:
743 /* FALLTHROUGH */
744 case PHY_TYPE_RSVD:
745 /* FALLTHROUGH */
746 case PHY_TYPE_NON_SUP:
747 /* FALLTHROUGH */
748 default:
749 phyno = -1;
750 break;
751 }
752
753 return phyno;
754 }
755
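/*
 * Write the GPIO register and wait; the second argument is a delay in
 * ticks, converted to milliseconds for usbd_delay_ms().
 */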
756 #define AXE_GPIO_WRITE(x, y) do { \
757 axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, (x), NULL); \
758 usbd_delay_ms(sc->axe_udev, hztoms(y)); \
759 } while (0)
760
761 static void
762 axe_ax88178_init(struct axe_softc *sc)
763 {
764 AXEHIST_FUNC(); AXEHIST_CALLED();
765 int gpio0, ledmode, phymode;
766 uint16_t eeprom, val;
767
768 axe_cmd(sc, AXE_CMD_SROM_WR_ENABLE, 0, 0, NULL);
769 /* XXX magic */
770 if (axe_cmd(sc, AXE_CMD_SROM_READ, 0, 0x0017, &eeprom) != 0)
771 eeprom = 0xffff;
772 axe_cmd(sc, AXE_CMD_SROM_WR_DISABLE, 0, 0, NULL);
773
774 eeprom = le16toh(eeprom);
775
776 DPRINTF("EEPROM is 0x%jx", eeprom, 0, 0, 0);
777
778 /* if the EEPROM is invalid we have to use GPIO0 */
779 if (eeprom == 0xffff) {
780 phymode = AXE_PHY_MODE_MARVELL;
781 gpio0 = 1;
782 ledmode = 0;
783 } else {
784 phymode = eeprom & 0x7f;
785 gpio0 = (eeprom & 0x80) ? 0 : 1;
786 ledmode = eeprom >> 8;
787 }
788
789 DPRINTF("use gpio0: %jd, phymode %jd", gpio0, phymode, 0, 0);
790
791 /* Program GPIOs depending on PHY hardware. */
792 switch (phymode) {
793 case AXE_PHY_MODE_MARVELL:
794 if (gpio0 == 1) {
795 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0_EN,
796 hz / 32);
797 AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
798 hz / 32);
799 AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2_EN, hz / 4);
800 AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
801 hz / 32);
802 } else {
803 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
804 AXE_GPIO1_EN, hz / 3);
805 if (ledmode == 1) {
806 AXE_GPIO_WRITE(AXE_GPIO1_EN, hz / 3);
807 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN,
808 hz / 3);
809 } else {
810 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
811 AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
812 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
813 AXE_GPIO2_EN, hz / 4);
814 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
815 AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
816 }
817 }
818 break;
819 case AXE_PHY_MODE_CICADA:
820 case AXE_PHY_MODE_CICADA_V2:
821 case AXE_PHY_MODE_CICADA_V2_ASIX:
822 if (gpio0 == 1)
823 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0 |
824 AXE_GPIO0_EN, hz / 32);
825 else
826 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
827 AXE_GPIO1_EN, hz / 32);
828 break;
829 case AXE_PHY_MODE_AGERE:
830 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
831 AXE_GPIO1_EN, hz / 32);
832 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 |
833 AXE_GPIO2_EN, hz / 32);
834 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2_EN, hz / 4);
835 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 |
836 AXE_GPIO2_EN, hz / 32);
837 break;
838 case AXE_PHY_MODE_REALTEK_8211CL:
839 case AXE_PHY_MODE_REALTEK_8211BN:
840 case AXE_PHY_MODE_REALTEK_8251CL:
841 val = gpio0 == 1 ? AXE_GPIO0 | AXE_GPIO0_EN :
842 AXE_GPIO1 | AXE_GPIO1_EN;
843 AXE_GPIO_WRITE(val, hz / 32);
844 AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
845 AXE_GPIO_WRITE(val | AXE_GPIO2_EN, hz / 4);
846 AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
847 if (phymode == AXE_PHY_MODE_REALTEK_8211CL) {
848 axe_miibus_writereg_locked(sc->axe_dev,
849 sc->axe_phyno, 0x1F, 0x0005);
850 axe_miibus_writereg_locked(sc->axe_dev,
851 sc->axe_phyno, 0x0C, 0x0000);
852 axe_miibus_readreg_locked(sc->axe_dev,
853 sc->axe_phyno, 0x0001, &val);
854 axe_miibus_writereg_locked(sc->axe_dev,
855 sc->axe_phyno, 0x01, val | 0x0080);
856 axe_miibus_writereg_locked(sc->axe_dev,
857 sc->axe_phyno, 0x1F, 0x0000);
858 }
859 break;
860 default:
861 /* Unknown PHY model or no need to program GPIOs. */
862 break;
863 }
864
865 /* soft reset */
866 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
867 usbd_delay_ms(sc->axe_udev, 150);
868 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
869 AXE_SW_RESET_PRL | AXE_178_RESET_MAGIC, NULL);
870 usbd_delay_ms(sc->axe_udev, 150);
871 /* Enable MII/GMII/RGMII interface to work with external PHY. */
872 axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0, NULL);
873 usbd_delay_ms(sc->axe_udev, 10);
874 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
875 }
876
877 static void
878 axe_ax88772_init(struct axe_softc *sc)
879 {
880 AXEHIST_FUNC(); AXEHIST_CALLED();
881
882 axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, 0x00b0, NULL);
883 usbd_delay_ms(sc->axe_udev, 40);
884
885 if (sc->axe_phyno == AXE_772_PHY_NO_EPHY) {
886 /* ask for the embedded PHY */
887 axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0,
888 AXE_SW_PHY_SELECT_EMBEDDED, NULL);
889 usbd_delay_ms(sc->axe_udev, 10);
890
891 /* power down and reset state, pin reset state */
892 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
893 usbd_delay_ms(sc->axe_udev, 60);
894
895 /* power down/reset state, pin operating state */
896 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
897 AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL);
898 usbd_delay_ms(sc->axe_udev, 150);
899
900 /* power up, reset */
901 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_PRL, NULL);
902
903 /* power up, operating */
904 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
905 AXE_SW_RESET_IPRL | AXE_SW_RESET_PRL, NULL);
906 } else {
907 /* ask for external PHY */
908 axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_EXT,
909 NULL);
910 usbd_delay_ms(sc->axe_udev, 10);
911
912 /* power down internal PHY */
913 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
914 AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL);
915 }
916
917 usbd_delay_ms(sc->axe_udev, 150);
918 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
919 }
920
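/*
 * Bring the selected PHY (embedded or external) out of power-down and
 * reset; the usbd_delay_ms() calls cover the worst-case T1/T2/T3
 * timings noted below.
 */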
921 static void
922 axe_ax88772_phywake(struct axe_softc *sc)
923 {
924 AXEHIST_FUNC(); AXEHIST_CALLED();
925
926 if (sc->axe_phyno == AXE_772_PHY_NO_EPHY) {
927 /* Manually select internal(embedded) PHY - MAC mode. */
928 axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0,
929 AXE_SW_PHY_SELECT_EMBEDDED, NULL);
930 usbd_delay_ms(sc->axe_udev, hztoms(hz / 32));
931 } else {
932 /*
933 * Manually select external PHY - MAC mode.
934 * Reverse MII/RMII is for AX88772A PHY mode.
935 */
936 axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB |
937 AXE_SW_PHY_SELECT_EXT | AXE_SW_PHY_SELECT_SS_MII, NULL);
938 usbd_delay_ms(sc->axe_udev, hztoms(hz / 32));
939 }
940
941 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPPD |
942 AXE_SW_RESET_IPRL, NULL);
943
944 /* T1 = min 500ns everywhere */
945 usbd_delay_ms(sc->axe_udev, 150);
946
947 /* Take PHY out of power down. */
948 if (sc->axe_phyno == AXE_772_PHY_NO_EPHY) {
949 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL);
950 } else {
951 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_PRTE, NULL);
952 }
953
954 /* 772 T2 is 60ms. 772A T2 is 160ms, 772B T2 is 600ms */
955 usbd_delay_ms(sc->axe_udev, 600);
956
957 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
958
959 /* T3 = 500ns everywhere */
960 usbd_delay_ms(sc->axe_udev, hztoms(hz / 32));
961 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL);
962 usbd_delay_ms(sc->axe_udev, hztoms(hz / 32));
963 }
964
965 static void
966 axe_ax88772a_init(struct axe_softc *sc)
967 {
968 AXEHIST_FUNC(); AXEHIST_CALLED();
969
970 /* Reload EEPROM. */
971 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32);
972 axe_ax88772_phywake(sc);
973 /* Stop MAC. */
974 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
975 }
976
977 static void
978 axe_ax88772b_init(struct axe_softc *sc)
979 {
980 AXEHIST_FUNC(); AXEHIST_CALLED();
981 uint16_t eeprom;
982 int i;
983
984 /* Reload EEPROM. */
985 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32);
986
987 /*
988 * Save the PHY power saving configuration (high byte) and
989 * clear the EEPROM checksum value (low byte).
990 */
991 if (axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_PHY_PWRCFG,
992 &eeprom)) {
993 aprint_error_dev(sc->axe_dev, "failed to read eeprom\n");
994 return;
995 }
996
997 sc->sc_pwrcfg = le16toh(eeprom) & 0xFF00;
998
999 /*
1000 * The auto-loaded default station address from internal ROM is
1001 * 00:00:00:00:00:00, so an explicit EEPROM access is required
1002 * to get the real station address.
1003 */
1004 uint8_t *eaddr = sc->axe_enaddr;
1005 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
1006 if (axe_cmd(sc, AXE_CMD_SROM_READ, 0,
1007 AXE_EEPROM_772B_NODE_ID + i, &eeprom)) {
1008 aprint_error_dev(sc->axe_dev,
1009 "failed to read eeprom\n");
1010 eeprom = 0;
1011 }
1012 eeprom = le16toh(eeprom);
1013 *eaddr++ = (uint8_t)(eeprom & 0xFF);
1014 *eaddr++ = (uint8_t)((eeprom >> 8) & 0xFF);
1015 }
1016 /* Wakeup PHY. */
1017 axe_ax88772_phywake(sc);
1018 /* Stop MAC. */
1019 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
1020 }
1021
1022 #undef AXE_GPIO_WRITE
1023
1024 /*
1025 * Probe for a AX88172 chip.
1026 */
1027 int
1028 axe_match(device_t parent, cfdata_t match, void *aux)
1029 {
1030 struct usb_attach_arg *uaa = aux;
1031
1032 return axe_lookup(uaa->uaa_vendor, uaa->uaa_product) != NULL ?
1033 UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
1034 }
1035
1036 /*
1037 * Attach the interface. Allocate softc structures, do ifmedia
1038 * setup and ethernet/BPF attach.
1039 */
1040 void
1041 axe_attach(device_t parent, device_t self, void *aux)
1042 {
1043 AXEHIST_FUNC(); AXEHIST_CALLED();
1044 struct axe_softc *sc = device_private(self);
1045 struct usb_attach_arg *uaa = aux;
1046 struct usbd_device *dev = uaa->uaa_device;
1047 usbd_status err;
1048 usb_interface_descriptor_t *id;
1049 usb_endpoint_descriptor_t *ed;
1050 struct mii_data *mii;
1051 char *devinfop;
1052 const char *devname = device_xname(self);
1053 struct ifnet *ifp;
1054 int i;
1055
1056 aprint_naive("\n");
1057 aprint_normal("\n");
1058
1059 sc->axe_dev = self;
1060 sc->axe_udev = dev;
1061
1062 devinfop = usbd_devinfo_alloc(dev, 0);
1063 aprint_normal_dev(self, "%s\n", devinfop);
1064 usbd_devinfo_free(devinfop);
1065
1066 err = usbd_set_config_no(dev, AXE_CONFIG_NO, 1);
1067 if (err) {
1068 aprint_error_dev(self, "failed to set configuration"
1069 ", err=%s\n", usbd_errstr(err));
1070 return;
1071 }
1072
1073 sc->axe_flags = axe_lookup(uaa->uaa_vendor, uaa->uaa_product)->axe_flags;
1074
1075 usb_init_task(&sc->axe_tick_task, axe_tick_task, sc, USB_TASKQ_MPSAFE);
1076
1077 err = usbd_device2interface_handle(dev, AXE_IFACE_IDX, &sc->axe_iface);
1078 if (err) {
1079 aprint_error_dev(self, "getting interface handle failed\n");
1080 return;
1081 }
1082
1083 sc->axe_product = uaa->uaa_product;
1084 sc->axe_vendor = uaa->uaa_vendor;
1085
1086 id = usbd_get_interface_descriptor(sc->axe_iface);
1087
1088 /* decide on what our bufsize will be */
1089 if (AXE_IS_178_FAMILY(sc))
1090 sc->axe_bufsz = (sc->axe_udev->ud_speed == USB_SPEED_HIGH) ?
1091 AXE_178_MAX_BUFSZ : AXE_178_MIN_BUFSZ;
1092 else
1093 sc->axe_bufsz = AXE_172_BUFSZ;
1094
1095 sc->axe_ed[AXE_ENDPT_RX] = -1;
1096 sc->axe_ed[AXE_ENDPT_TX] = -1;
1097 sc->axe_ed[AXE_ENDPT_INTR] = -1;
1098
1099 /* Find endpoints. */
1100 for (i = 0; i < id->bNumEndpoints; i++) {
1101 ed = usbd_interface2endpoint_descriptor(sc->axe_iface, i);
1102 if (ed == NULL) {
1103 aprint_error_dev(self, "couldn't get ep %d\n", i);
1104 return;
1105 }
1106 const uint8_t xt = UE_GET_XFERTYPE(ed->bmAttributes);
1107 const uint8_t dir = UE_GET_DIR(ed->bEndpointAddress);
1108
1109 if (dir == UE_DIR_IN && xt == UE_BULK &&
1110 sc->axe_ed[AXE_ENDPT_RX] == -1) {
1111 sc->axe_ed[AXE_ENDPT_RX] = ed->bEndpointAddress;
1112 } else if (dir == UE_DIR_OUT && xt == UE_BULK &&
1113 sc->axe_ed[AXE_ENDPT_TX] == -1) {
1114 sc->axe_ed[AXE_ENDPT_TX] = ed->bEndpointAddress;
1115 } else if (dir == UE_DIR_IN && xt == UE_INTERRUPT) {
1116 sc->axe_ed[AXE_ENDPT_INTR] = ed->bEndpointAddress;
1117 }
1118 }
1119
1120 /* Set these up now for axe_cmd(). */
1121 mutex_init(&sc->axe_mii_lock, MUTEX_DEFAULT, IPL_NONE);
1122 mutex_init(&sc->axe_txlock, MUTEX_DEFAULT, IPL_SOFTUSB);
1123 mutex_init(&sc->axe_rxlock, MUTEX_DEFAULT, IPL_SOFTUSB);
1124 mutex_init(&sc->axe_lock, MUTEX_DEFAULT, IPL_NONE);
1125 cv_init(&sc->axe_detachcv, "axedet");
1126
1127 /* We need the PHY ID for the init dance in some cases */
1128 axe_lock_mii(sc);
1129 if (axe_cmd(sc, AXE_CMD_READ_PHYID, 0, 0, &sc->axe_phyaddrs)) {
1130 aprint_error_dev(self, "failed to read phyaddrs\n");
1131
1132 cv_destroy(&sc->axe_detachcv);
1133 mutex_destroy(&sc->axe_lock);
1134 mutex_destroy(&sc->axe_rxlock);
1135 mutex_destroy(&sc->axe_txlock);
1136 mutex_destroy(&sc->axe_mii_lock);
1137
1138 return;
1139 }
1140
1141 DPRINTF(" phyaddrs[0]: %jx phyaddrs[1]: %jx",
1142 sc->axe_phyaddrs[0], sc->axe_phyaddrs[1], 0, 0);
1143 sc->axe_phyno = axe_get_phyno(sc, AXE_PHY_SEL_PRI);
1144 if (sc->axe_phyno == -1)
1145 sc->axe_phyno = axe_get_phyno(sc, AXE_PHY_SEL_SEC);
1146 if (sc->axe_phyno == -1) {
1147 DPRINTF(" no valid PHY address found, assuming PHY address 0",
1148 0, 0, 0, 0);
1149 sc->axe_phyno = 0;
1150 }
1151
1152 /* Initialize controller and get station address. */
1153
1154 axe_ax_init(sc);
1155
1156 /*
1157 * Fetch IPG values.
1158 */
1159 if (sc->axe_flags & (AX772A | AX772B)) {
1160 /* Set IPG values. */
1161 sc->axe_ipgs[0] = AXE_IPG0_DEFAULT;
1162 sc->axe_ipgs[1] = AXE_IPG1_DEFAULT;
1163 sc->axe_ipgs[2] = AXE_IPG2_DEFAULT;
1164 } else {
1165 if (axe_cmd(sc, AXE_CMD_READ_IPG012, 0, 0, sc->axe_ipgs)) {
1166 aprint_error_dev(self, "failed to read ipg\n");
1167
1168 cv_destroy(&sc->axe_detachcv);
1169 mutex_destroy(&sc->axe_lock);
1170 mutex_destroy(&sc->axe_rxlock);
1171 mutex_destroy(&sc->axe_txlock);
1172 mutex_destroy(&sc->axe_mii_lock);
1173
1174 return;
1175 }
1176 }
1177
1178 axe_unlock_mii(sc);
1179
1180 /*
1181 * An ASIX chip was detected. Inform the world.
1182 */
1183 aprint_normal_dev(self, "Ethernet address %s\n",
1184 ether_sprintf(sc->axe_enaddr));
1185
1186 /* Initialize interface info.*/
1187 ifp = &sc->sc_if;
1188 ifp->if_softc = sc;
1189 strlcpy(ifp->if_xname, devname, IFNAMSIZ);
1190 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1191 ifp->if_extflags = IFEF_MPSAFE;
1192 ifp->if_ioctl = axe_ioctl;
1193 ifp->if_start = axe_start;
1194 ifp->if_init = axe_init;
1195 ifp->if_stop = axe_stop;
1196 ifp->if_watchdog = axe_watchdog;
1197
1198 IFQ_SET_READY(&ifp->if_snd);
1199
1200 if (AXE_IS_178_FAMILY(sc))
1201 sc->axe_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
1202 if (sc->axe_flags & AX772B) {
1203 ifp->if_capabilities =
1204 IFCAP_CSUM_IPv4_Rx |
1205 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
1206 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1207 /*
1208 * Checksum offloading on the AX88772B also works with
1209 * VLAN-tagged frames, but there is no way to take advantage
1210 * of the feature because vlan(4) assumes
1211 * IFCAP_VLAN_HWTAGGING is a prerequisite for checksum
1212 * offloading with VLAN. The VLAN hardware tagging support
1213 * of the AX88772B is very limited, so it's not possible
1214 * to announce IFCAP_VLAN_HWTAGGING.
1215 */
1216 }
1217 u_int adv_pause;
1218 if (sc->axe_flags & (AX772A | AX772B | AX178))
1219 adv_pause = MIIF_DOPAUSE;
1220 else
1221 adv_pause = 0;
1222 adv_pause = 0;
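	/* XXX the unconditional assignment above disables pause advertisement. */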
1223
1224 /* Initialize MII/media info. */
1225 mii = &sc->axe_mii;
1226 mii->mii_ifp = ifp;
1227 mii->mii_readreg = axe_miibus_readreg;
1228 mii->mii_writereg = axe_miibus_writereg;
1229 mii->mii_statchg = axe_miibus_statchg;
1230 mii->mii_flags = MIIF_AUTOTSLEEP;
1231
1232 sc->axe_ec.ec_mii = mii;
1233 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
1234
1235 mii_attach(sc->axe_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
1236 adv_pause);
1237
1238 if (LIST_EMPTY(&mii->mii_phys)) {
1239 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
1240 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
1241 } else
1242 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
1243
1244 /* Attach the interface. */
1245 if_attach(ifp);
1246 ether_ifattach(ifp, sc->axe_enaddr);
1247 rnd_attach_source(&sc->rnd_source, device_xname(sc->axe_dev),
1248 RND_TYPE_NET, RND_FLAG_DEFAULT);
1249
1250 callout_init(&sc->axe_stat_ch, CALLOUT_MPSAFE);
1251 callout_setfunc(&sc->axe_stat_ch, axe_tick, sc);
1252
1253 sc->axe_attached = true;
1254
1255 usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->axe_udev, sc->axe_dev);
1256
1257 if (!pmf_device_register(self, NULL, NULL))
1258 aprint_error_dev(self, "couldn't establish power handler\n");
1259 }
1260
1261 int
1262 axe_detach(device_t self, int flags)
1263 {
1264 AXEHIST_FUNC(); AXEHIST_CALLED();
1265 struct axe_softc *sc = device_private(self);
1266 struct ifnet *ifp = &sc->sc_if;
1267
1268 mutex_enter(&sc->axe_lock);
1269 sc->axe_dying = true;
1270 mutex_exit(&sc->axe_lock);
1271
1272 /* Detach called before attach finished, so just bail out. */
1273 if (!sc->axe_attached)
1274 return 0;
1275
1276 pmf_device_deregister(self);
1277
1278 callout_halt(&sc->axe_stat_ch, NULL);
1279 usb_rem_task_wait(sc->axe_udev, &sc->axe_tick_task, USB_TASKQ_DRIVER,
1280 NULL);
1281
1282 if (ifp->if_flags & IFF_RUNNING) {
1283 IFNET_LOCK(ifp);
1284 axe_stop(ifp, 1);
1285 IFNET_UNLOCK(ifp);
1286 }
1287
1288 mutex_enter(&sc->axe_lock);
1289 sc->axe_refcnt--;
1290 while (sc->axe_refcnt > 0) {
1291 /* Wait for processes to go away */
1292 cv_wait(&sc->axe_detachcv, &sc->axe_lock);
1293 }
1294
1295 #ifdef DIAGNOSTIC
1296 if (sc->axe_ep[AXE_ENDPT_TX] != NULL ||
1297 sc->axe_ep[AXE_ENDPT_RX] != NULL ||
1298 sc->axe_ep[AXE_ENDPT_INTR] != NULL)
1299 aprint_debug_dev(self, "detach has active endpoints\n");
1300 #endif
1301
1302 mutex_exit(&sc->axe_lock);
1303
1304 callout_destroy(&sc->axe_stat_ch);
1305 rnd_detach_source(&sc->rnd_source);
1306 mii_detach(&sc->axe_mii, MII_PHY_ANY, MII_OFFSET_ANY);
1307 ifmedia_delete_instance(&sc->axe_mii.mii_media, IFM_INST_ANY);
1308 ether_ifdetach(ifp);
1309 if_detach(ifp);
1310
1311 sc->axe_attached = false;
1312
1313 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->axe_udev, sc->axe_dev);
1314
1315 cv_destroy(&sc->axe_detachcv);
1316 mutex_destroy(&sc->axe_lock);
1317 mutex_destroy(&sc->axe_rxlock);
1318 mutex_destroy(&sc->axe_txlock);
1319 mutex_destroy(&sc->axe_mii_lock);
1320
1321 return 0;
1322 }
1323
1324 int
1325 axe_activate(device_t self, devact_t act)
1326 {
1327 AXEHIST_FUNC(); AXEHIST_CALLED();
1328 struct axe_softc *sc = device_private(self);
1329
1330 switch (act) {
1331 case DVACT_DEACTIVATE:
1332 if_deactivate(&sc->axe_ec.ec_if);
1333
1334 mutex_enter(&sc->axe_lock);
1335 sc->axe_dying = true;
1336 mutex_exit(&sc->axe_lock);
1337
1338 mutex_enter(&sc->axe_rxlock);
1339 mutex_enter(&sc->axe_txlock);
1340 sc->axe_stopping = true;
1341 mutex_exit(&sc->axe_txlock);
1342 mutex_exit(&sc->axe_rxlock);
1343
1344 return 0;
1345 default:
1346 return EOPNOTSUPP;
1347 }
1348 }
1349
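/*
 * Allocate the USB transfers and buffers for the receive ring:
 * AXE_RX_LIST_CNT chains of axe_bufsz bytes each.
 */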
1350 static int
1351 axe_rx_list_init(struct axe_softc *sc)
1352 {
1353 AXEHIST_FUNC(); AXEHIST_CALLED();
1354
1355 struct axe_cdata *cd;
1356 struct axe_chain *c;
1357 int i;
1358
1359 cd = &sc->axe_cdata;
1360 for (i = 0; i < AXE_RX_LIST_CNT; i++) {
1361 c = &cd->axe_rx_chain[i];
1362 c->axe_sc = sc;
1363 c->axe_idx = i;
1364 if (c->axe_xfer == NULL) {
1365 int err = usbd_create_xfer(sc->axe_ep[AXE_ENDPT_RX],
1366 sc->axe_bufsz, 0, 0, &c->axe_xfer);
1367 if (err)
1368 return err;
1369 c->axe_buf = usbd_get_buffer(c->axe_xfer);
1370 }
1371 }
1372
1373 return 0;
1374 }
1375
1376 static int
1377 axe_tx_list_init(struct axe_softc *sc)
1378 {
1379 AXEHIST_FUNC(); AXEHIST_CALLED();
1380 struct axe_cdata *cd;
1381 struct axe_chain *c;
1382 int i;
1383
1384 cd = &sc->axe_cdata;
1385 for (i = 0; i < AXE_TX_LIST_CNT; i++) {
1386 c = &cd->axe_tx_chain[i];
1387 c->axe_sc = sc;
1388 c->axe_idx = i;
1389 if (c->axe_xfer == NULL) {
1390 int err = usbd_create_xfer(sc->axe_ep[AXE_ENDPT_TX],
1391 sc->axe_bufsz, USBD_FORCE_SHORT_XFER, 0,
1392 &c->axe_xfer);
1393 if (err)
1394 return err;
1395 c->axe_buf = usbd_get_buffer(c->axe_xfer);
1396 }
1397 }
1398
1399 cd->axe_tx_cnt = 0;
1400
1401 return 0;
1402 }
1403
1404 /*
1405 * A frame has been uploaded: pass the resulting mbuf chain up to
1406 * the higher level protocols.
1407 */
1408 static void
1409 axe_rxeof(struct usbd_xfer *xfer, void * priv, usbd_status status)
1410 {
1411 AXEHIST_FUNC(); AXEHIST_CALLED();
1412 struct axe_chain *c = (struct axe_chain *)priv;
1413 struct axe_softc * const sc = c->axe_sc;
1414 struct ifnet *ifp = &sc->sc_if;
1415 uint8_t *buf = c->axe_buf;
1416 uint32_t total_len;
1417 struct mbuf *m;
1418
1419 mutex_enter(&sc->axe_rxlock);
1420
1421 if (sc->axe_dying || sc->axe_stopping ||
1422 status == USBD_INVAL || status == USBD_NOT_STARTED ||
1423 status == USBD_CANCELLED || !(ifp->if_flags & IFF_RUNNING)) {
1424 mutex_exit(&sc->axe_rxlock);
1425 return;
1426 }
1427
1428 if (status != USBD_NORMAL_COMPLETION) {
1429 if (usbd_ratecheck(&sc->axe_rx_notice)) {
1430 aprint_error_dev(sc->axe_dev, "usb errors on rx: %s\n",
1431 usbd_errstr(status));
1432 }
1433 if (status == USBD_STALLED)
1434 usbd_clear_endpoint_stall_async(sc->axe_ep[AXE_ENDPT_RX]);
1435 goto done;
1436 }
1437
1438 usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);
1439
1440 do {
1441 u_int pktlen = 0;
1442 u_int rxlen = 0;
1443 int flags = 0;
1444 if ((sc->axe_flags & AXSTD_FRAME) != 0) {
1445 struct axe_sframe_hdr hdr;
1446
1447 if (total_len < sizeof(hdr)) {
1448 ifp->if_ierrors++;
1449 goto done;
1450 }
1451
1452 #if !defined(__NO_STRICT_ALIGNMENT) && __GNUC_PREREQ__(6, 1)
1453 /*
1454 * XXX hdr is 2-byte aligned in buf, not 4-byte.
1455 * For some architectures, __builtin_memcpy() of
1456 * GCC 6 attempts to copy sizeof(hdr) = 4 bytes
1457 * at once, which results in an alignment error.
1458 */
1459 hdr.len = *(uint16_t *)buf;
1460 hdr.ilen = *(uint16_t *)(buf + sizeof(uint16_t));
1461 #else
1462 memcpy(&hdr, buf, sizeof(hdr));
1463 #endif
1464
1465 DPRINTFN(20, "total_len %#jx len %jx ilen %#jx",
1466 total_len,
1467 (le16toh(hdr.len) & AXE_RH1M_RXLEN_MASK),
1468 (le16toh(hdr.ilen) & AXE_RH1M_RXLEN_MASK), 0);
1469
1470 total_len -= sizeof(hdr);
1471 buf += sizeof(hdr);
1472
1473 if (((le16toh(hdr.len) & AXE_RH1M_RXLEN_MASK) ^
1474 (le16toh(hdr.ilen) & AXE_RH1M_RXLEN_MASK)) !=
1475 AXE_RH1M_RXLEN_MASK) {
1476 ifp->if_ierrors++;
1477 goto done;
1478 }
1479
1480 rxlen = le16toh(hdr.len) & AXE_RH1M_RXLEN_MASK;
1481 if (total_len < rxlen) {
1482 pktlen = total_len;
1483 total_len = 0;
1484 } else {
1485 pktlen = rxlen;
1486 rxlen = roundup2(rxlen, 2);
1487 total_len -= rxlen;
1488 }
1489
1490 } else if ((sc->axe_flags & AXCSUM_FRAME) != 0) {
1491 struct axe_csum_hdr csum_hdr;
1492
1493 if (total_len < sizeof(csum_hdr)) {
1494 ifp->if_ierrors++;
1495 goto done;
1496 }
1497
1498 memcpy(&csum_hdr, buf, sizeof(csum_hdr));
1499
1500 csum_hdr.len = le16toh(csum_hdr.len);
1501 csum_hdr.ilen = le16toh(csum_hdr.ilen);
1502 csum_hdr.cstatus = le16toh(csum_hdr.cstatus);
1503
1504 DPRINTFN(20, "total_len %#jx len %#jx ilen %#jx"
1505 " cstatus %#jx", total_len,
1506 csum_hdr.len, csum_hdr.ilen, csum_hdr.cstatus);
1507
1508 if ((AXE_CSUM_RXBYTES(csum_hdr.len) ^
1509 AXE_CSUM_RXBYTES(csum_hdr.ilen)) !=
1510 sc->sc_lenmask) {
1511 /* we lost sync */
1512 ifp->if_ierrors++;
1513 DPRINTFN(20, "len %#jx ilen %#jx lenmask %#jx "
1514 "err",
1515 AXE_CSUM_RXBYTES(csum_hdr.len),
1516 AXE_CSUM_RXBYTES(csum_hdr.ilen),
1517 sc->sc_lenmask, 0);
1518 goto done;
1519 }
1520 /*
1521 * Get the total transferred frame length including
1522 * the checksum header. The length should be a
1523 * multiple of 4.
1524 */
1525 pktlen = AXE_CSUM_RXBYTES(csum_hdr.len);
1526 u_int len = sizeof(csum_hdr) + pktlen;
1527 len = (len + 3) & ~3;
1528 if (total_len < len) {
1529 DPRINTFN(20, "total_len %#jx < len %#jx",
1530 total_len, len, 0, 0);
1531 /* invalid length */
1532 ifp->if_ierrors++;
1533 goto done;
1534 }
1535 buf += sizeof(csum_hdr);
1536
1537 const uint16_t cstatus = csum_hdr.cstatus;
1538
1539 if (cstatus & AXE_CSUM_HDR_L3_TYPE_IPV4) {
1540 if (cstatus & AXE_CSUM_HDR_L4_CSUM_ERR)
1541 flags |= M_CSUM_TCP_UDP_BAD;
1542 if (cstatus & AXE_CSUM_HDR_L3_CSUM_ERR)
1543 flags |= M_CSUM_IPv4_BAD;
1544
1545 const uint16_t l4type =
1546 cstatus & AXE_CSUM_HDR_L4_TYPE_MASK;
1547
1548 if (l4type == AXE_CSUM_HDR_L4_TYPE_TCP)
1549 flags |= M_CSUM_TCPv4;
1550 if (l4type == AXE_CSUM_HDR_L4_TYPE_UDP)
1551 flags |= M_CSUM_UDPv4;
1552 }
1553 if (total_len < len) {
1554 pktlen = total_len;
1555 total_len = 0;
1556 } else {
1557 total_len -= len;
1558 rxlen = len - sizeof(csum_hdr);
1559 }
1560 DPRINTFN(20, "total_len %#jx len %#jx pktlen %#jx"
1561 " rxlen %#jx", total_len, len, pktlen, rxlen);
1562 } else { /* AX172 */
1563 pktlen = rxlen = total_len;
1564 total_len = 0;
1565 }
1566
1567 MGETHDR(m, M_DONTWAIT, MT_DATA);
1568 if (m == NULL) {
1569 ifp->if_ierrors++;
1570 goto done;
1571 }
1572
1573 if (pktlen > MHLEN - ETHER_ALIGN) {
1574 MCLGET(m, M_DONTWAIT);
1575 if ((m->m_flags & M_EXT) == 0) {
1576 m_freem(m);
1577 ifp->if_ierrors++;
1578 goto done;
1579 }
1580 }
1581 m->m_data += ETHER_ALIGN;
1582
1583 m_set_rcvif(m, ifp);
1584 m->m_pkthdr.len = m->m_len = pktlen;
1585 m->m_pkthdr.csum_flags = flags;
1586
1587 memcpy(mtod(m, uint8_t *), buf, pktlen);
1588 buf += rxlen;
1589
1590 DPRINTFN(10, "deliver %jd (%#jx)", m->m_len, m->m_len, 0, 0);
1591
1592 mutex_exit(&sc->axe_rxlock);
1593
1594 if_percpuq_enqueue((ifp)->if_percpuq, (m));
1595
1596 mutex_enter(&sc->axe_rxlock);
1597 if (sc->axe_dying || sc->axe_stopping) {
1598 mutex_exit(&sc->axe_rxlock);
1599 return;
1600 }
1601
1602 } while (total_len > 0);
1603
1604 done:
1605
1606 if (sc->axe_dying || sc->axe_stopping) {
1607 mutex_exit(&sc->axe_rxlock);
1608 return;
1609 }
1610
1611 mutex_exit(&sc->axe_rxlock);
1612
1613 /* Setup new transfer. */
1614 usbd_setup_xfer(xfer, c, c->axe_buf, sc->axe_bufsz,
1615 USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, axe_rxeof);
1616 usbd_transfer(xfer);
1617
1618 DPRINTFN(10, "start rx", 0, 0, 0, 0);
1619 }
1620
1621 /*
1622 * A frame was downloaded to the chip. It's safe for us to clean up
1623 * the list buffers.
1624 */
1625
1626 static void
1627 axe_txeof(struct usbd_xfer *xfer, void * priv, usbd_status status)
1628 {
1629 AXEHIST_FUNC(); AXEHIST_CALLED();
1630 struct axe_chain *c = priv;
1631 struct axe_softc *sc = c->axe_sc;
1632 struct axe_cdata *cd = &sc->axe_cdata;
1633 struct ifnet *ifp = &sc->sc_if;
1634
1635 mutex_enter(&sc->axe_txlock);
1636 if (sc->axe_stopping || sc->axe_dying) {
1637 mutex_exit(&sc->axe_txlock);
1638 return;
1639 }
1640
1641 KASSERT(cd->axe_tx_cnt == 1);
1642 cd->axe_tx_cnt--;
1643
1644 sc->axe_timer = 0;
1645
1646 switch (status) {
1647 case USBD_NOT_STARTED:
1648 case USBD_CANCELLED:
1649 break;
1650
1651 case USBD_NORMAL_COMPLETION:
1652 ifp->if_opackets++;
1653 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1654 axe_start_locked(ifp);
1655 break;
1656
1657 default:
1658
1659 ifp->if_oerrors++;
1660 if (usbd_ratecheck(&sc->axe_tx_notice))
1661 aprint_error_dev(sc->axe_dev, "usb error on tx: %s\n",
1662 usbd_errstr(status));
1663 if (status == USBD_STALLED)
1664 usbd_clear_endpoint_stall_async(sc->axe_ep[AXE_ENDPT_TX]);
1665 break;
1666 }
1667
1668 mutex_exit(&sc->axe_txlock);
1669 }
1670
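/*
 * Periodic callout. The real work is deferred to axe_tick_task() on
 * the USB task queue so that it runs in process context.
 */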
1671 static void
1672 axe_tick(void *xsc)
1673 {
1674 AXEHIST_FUNC(); AXEHIST_CALLED();
1675 struct axe_softc *sc = xsc;
1676
1677 if (sc == NULL)
1678 return;
1679
1680 mutex_enter(&sc->axe_lock);
1681 if (!sc->axe_stopping && !sc->axe_dying) {
1682 /* Perform periodic stuff in process context */
1683 usb_add_task(sc->axe_udev, &sc->axe_tick_task, USB_TASKQ_DRIVER);
1684 }
1685 mutex_exit(&sc->axe_lock);
1686
1687 }
1688
1689 static void
1690 axe_tick_task(void *xsc)
1691 {
1692 AXEHIST_FUNC(); AXEHIST_CALLED();
1693 struct axe_softc *sc = xsc;
1694 struct ifnet *ifp;
1695 struct mii_data *mii;
1696
1697 if (sc == NULL)
1698 return;
1699
1700 mutex_enter(&sc->axe_lock);
1701 if (sc->axe_stopping || sc->axe_dying) {
1702 mutex_exit(&sc->axe_lock);
1703 return;
1704 }
1705
1706 ifp = &sc->sc_if;
1707 mii = &sc->axe_mii;
1708
1709 if (mii == NULL) {
1710 mutex_exit(&sc->axe_lock);
1711 return;
1712 }
1713
1714 sc->axe_refcnt++;
1715 mutex_exit(&sc->axe_lock);
1716
1717 if (sc->axe_timer != 0 && --sc->axe_timer == 0)
1718 axe_watchdog(ifp);
1719
1720 mii_tick(mii);
1721
1722 if (sc->axe_link == 0 &&
1723 (mii->mii_media_status & IFM_ACTIVE) != 0 &&
1724 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1725 DPRINTF("got link", 0, 0, 0, 0);
1726 sc->axe_link++;
1727 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1728 axe_start(ifp);
1729 }
1730
1731
1732 mutex_enter(&sc->axe_lock);
1733 if (--sc->axe_refcnt < 0)
1734 cv_broadcast(&sc->axe_detachcv);
1735 if (!sc->axe_stopping && !sc->axe_dying)
1736 callout_schedule(&sc->axe_stat_ch, hz);
1737 mutex_exit(&sc->axe_lock);
1738 }
1739
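/*
 * Copy an outgoing mbuf into the chain's transfer buffer, prepending,
 * for the 178/772 family, the length/inverse-length header (plus an
 * empty header when the frame ends exactly on a USB packet boundary),
 * and submit the bulk-out transfer.
 */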
1740 static int
1741 axe_encap(struct axe_softc *sc, struct mbuf *m, int idx)
1742 {
1743 struct ifnet *ifp = &sc->sc_if;
1744 struct axe_chain *c;
1745 usbd_status err;
1746 int length, boundary;
1747
1748 KASSERT(mutex_owned(&sc->axe_txlock));
1749
1750 c = &sc->axe_cdata.axe_tx_chain[idx];
1751
1752 /*
1753 * Copy the mbuf data into a contiguous buffer, leaving two
1754 * bytes at the beginning to hold the frame length.
1755 */
1756 if (AXE_IS_178_FAMILY(sc)) {
1757 struct axe_sframe_hdr hdr;
1758
1759 boundary = (sc->axe_udev->ud_speed == USB_SPEED_HIGH) ? 512 : 64;
1760
1761 hdr.len = htole16(m->m_pkthdr.len);
1762 hdr.ilen = ~hdr.len;
1763
1764 memcpy(c->axe_buf, &hdr, sizeof(hdr));
1765 length = sizeof(hdr);
1766
1767 m_copydata(m, 0, m->m_pkthdr.len, c->axe_buf + length);
1768 length += m->m_pkthdr.len;
1769
1770 if ((length % boundary) == 0) {
1771 hdr.len = 0x0000;
1772 hdr.ilen = 0xffff;
1773 memcpy(c->axe_buf + length, &hdr, sizeof(hdr));
1774 length += sizeof(hdr);
1775 }
1776 } else {
1777 m_copydata(m, 0, m->m_pkthdr.len, c->axe_buf);
1778 length = m->m_pkthdr.len;
1779 }
1780
1781 usbd_setup_xfer(c->axe_xfer, c, c->axe_buf, length,
1782 USBD_FORCE_SHORT_XFER, 10000, axe_txeof);
1783
1784 /* Transmit */
1785 err = usbd_transfer(c->axe_xfer);
1786 if (err != USBD_IN_PROGRESS) {
1787 /* XXXSMP IFNET_LOCK */
1788 axe_stop(ifp, 0);
1789 return EIO;
1790 }
1791
1792 sc->axe_cdata.axe_tx_cnt++;
1793
1794 return 0;
1795 }
1796
1797
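/*
 * Program the AX88772B transmit and receive checksum offload engines
 * from the interface's enabled capabilities; a no-op on other chips.
 */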
1798 static void
1799 axe_csum_cfg(struct axe_softc *sc)
1800 {
1801 struct ifnet *ifp = &sc->sc_if;
1802 uint16_t csum1, csum2;
1803
1804 if ((sc->axe_flags & AX772B) != 0) {
1805 csum1 = 0;
1806 csum2 = 0;
1807 if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) != 0)
1808 csum1 |= AXE_TXCSUM_IP;
1809 if ((ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) != 0)
1810 csum1 |= AXE_TXCSUM_TCP;
1811 if ((ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) != 0)
1812 csum1 |= AXE_TXCSUM_UDP;
1813 if ((ifp->if_capenable & IFCAP_CSUM_TCPv6_Tx) != 0)
1814 csum1 |= AXE_TXCSUM_TCPV6;
1815 if ((ifp->if_capenable & IFCAP_CSUM_UDPv6_Tx) != 0)
1816 csum1 |= AXE_TXCSUM_UDPV6;
1817 axe_cmd(sc, AXE_772B_CMD_WRITE_TXCSUM, csum2, csum1, NULL);
1818 csum1 = 0;
1819 csum2 = 0;
1820
1821 if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) != 0)
1822 csum1 |= AXE_RXCSUM_IP;
1823 if ((ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) != 0)
1824 csum1 |= AXE_RXCSUM_TCP;
1825 if ((ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) != 0)
1826 csum1 |= AXE_RXCSUM_UDP;
1827 if ((ifp->if_capenable & IFCAP_CSUM_TCPv6_Rx) != 0)
1828 csum1 |= AXE_RXCSUM_TCPV6;
1829 if ((ifp->if_capenable & IFCAP_CSUM_UDPv6_Rx) != 0)
1830 csum1 |= AXE_RXCSUM_UDPV6;
1831 axe_cmd(sc, AXE_772B_CMD_WRITE_RXCSUM, csum2, csum1, NULL);
1832 }
1833 }
1834
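/*
 * The driver keeps at most one transmit transfer in flight;
 * axe_txeof() kicks the queue again when it completes.
 */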
1835 static void
1836 axe_start_locked(struct ifnet *ifp)
1837 {
1838 struct axe_softc *sc = ifp->if_softc;
1839 struct mbuf *m;
1840 struct axe_cdata *cd = &sc->axe_cdata;
1841
1842 KASSERT(mutex_owned(&sc->axe_txlock));
1843
1844 if (cd->axe_tx_cnt != 0)
1845 return;
1846
1847 if (sc->axe_link == 0 || (ifp->if_flags & IFF_RUNNING) == 0)
1848 return;
1849
1850 IFQ_POLL(&ifp->if_snd, m);
1851 if (m == NULL) {
1852 return;
1853 }
1854
1855 if (axe_encap(sc, m, 0)) {
1856 return;
1857 }
1858 IFQ_DEQUEUE(&ifp->if_snd, m);
1859
1860 /*
1861 * If there's a BPF listener, bounce a copy of this frame
1862 * to him.
1863 */
1864 bpf_mtap(ifp, m, BPF_D_OUT);
1865 m_freem(m);
1866
1867 /*
1868 * Set a timeout in case the chip goes out to lunch.
1869 */
1870 sc->axe_timer = 5;
1871
1872 return;
1873 }
1874
1875 static void
1876 axe_start(struct ifnet *ifp)
1877 {
1878 struct axe_softc * const sc = ifp->if_softc;
1879
1880 mutex_enter(&sc->axe_txlock);
1881 if (!sc->axe_stopping)
1882 axe_start_locked(ifp);
1883 mutex_exit(&sc->axe_txlock);
1884 }
1885
1886 static int
1887 axe_init_locked(struct ifnet *ifp)
1888 {
1889 AXEHIST_FUNC(); AXEHIST_CALLED();
1890 struct axe_softc *sc = ifp->if_softc;
1891 struct axe_chain *c;
1892 usbd_status err;
1893 int rxmode;
1894 int i;
1895
1896 KASSERT(mutex_owned(&sc->axe_lock));
1897
1898 if (sc->axe_dying)
1899 return EIO;
1900
1901 /* Cancel pending I/O */
1902 axe_stop_locked(ifp, 0);
1903
1904 /* Reset the ethernet interface. */
1905 axe_reset(sc);
1906
1907 axe_lock_mii_sc_locked(sc);
1908
1909 #if 0
1910 ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 |
1911 AX_GPIO_GPO2EN, 5, in_pm);
1912 #endif
1913 /* Set MAC address and transmitter IPG values. */
1914 if (AXE_IS_178_FAMILY(sc)) {
1915 axe_cmd(sc, AXE_178_CMD_WRITE_NODEID, 0, 0, sc->axe_enaddr);
1916 axe_cmd(sc, AXE_178_CMD_WRITE_IPG012, sc->axe_ipgs[2],
1917 (sc->axe_ipgs[1] << 8) | (sc->axe_ipgs[0]), NULL);
1918 } else {
1919 axe_cmd(sc, AXE_172_CMD_WRITE_NODEID, 0, 0, sc->axe_enaddr);
1920 axe_cmd(sc, AXE_172_CMD_WRITE_IPG0, 0, sc->axe_ipgs[0], NULL);
1921 axe_cmd(sc, AXE_172_CMD_WRITE_IPG1, 0, sc->axe_ipgs[1], NULL);
1922 axe_cmd(sc, AXE_172_CMD_WRITE_IPG2, 0, sc->axe_ipgs[2], NULL);
1923 }
1924 if (AXE_IS_178_FAMILY(sc)) {
1925 sc->axe_flags &= ~(AXSTD_FRAME | AXCSUM_FRAME);
1926 if ((sc->axe_flags & AX772B) != 0 &&
1927 (ifp->if_capenable & AX_RXCSUM) != 0) {
1928 sc->sc_lenmask = AXE_CSUM_HDR_LEN_MASK;
1929 sc->axe_flags |= AXCSUM_FRAME;
1930 } else {
1931 sc->sc_lenmask = AXE_HDR_LEN_MASK;
1932 sc->axe_flags |= AXSTD_FRAME;
1933 }
1934 }
1935
1936 /* Configure TX/RX checksum offloading. */
1937 axe_csum_cfg(sc);
1938
1939 if (sc->axe_flags & AX772B) {
1940 /* The AX88772B uses a different maximum frame burst configuration. */
1941 axe_cmd(sc, AXE_772B_CMD_RXCTL_WRITE_CFG,
1942 ax88772b_mfb_table[AX88772B_MFB_16K].threshold,
1943 ax88772b_mfb_table[AX88772B_MFB_16K].byte_cnt, NULL);
1944 }
1945 /* Enable receiver, set RX mode */
1946 rxmode = (AXE_RXCMD_MULTICAST | AXE_RXCMD_ENABLE);
1947 if (AXE_IS_178_FAMILY(sc)) {
1948 if (sc->axe_flags & AX772B) {
1949 			/*
1950 			 * Select RX header format type 1.  Aligning the
1951 			 * IP header on a 4-byte boundary is not needed
1952 			 * when checksum offloading is unused, because we
1953 			 * always copy the received frame in the RX
1954 			 * handler.  When RX checksum offloading is
1955 			 * active, the alignment is required so that the
1956 			 * frame length, including the RX header size, is
1957 			 * reported correctly.
1958 			 */
1959 rxmode |= AXE_772B_RXCMD_HDR_TYPE_1;
1960 if (sc->axe_flags & AXCSUM_FRAME)
1961 rxmode |= AXE_772B_RXCMD_IPHDR_ALIGN;
1962 } else {
1963 			/*
1964 			 * The default RX buffer size is too small for
1965 			 * maximum performance.
1966 			 */
1967 #if 0
1968 if (sc->axe_udev->ud_speed == USB_SPEED_HIGH) {
1969 /* Largest possible USB buffer size for AX88178 */
1970 }
1971 #endif
1972 rxmode |= AXE_178_RXCMD_MFB_16384;
1973 }
1974 } else {
1975 rxmode |= AXE_172_RXCMD_UNICAST;
1976 }
1977
1979 /* If we want promiscuous mode, set the allframes bit. */
1980 if (ifp->if_flags & IFF_PROMISC)
1981 rxmode |= AXE_RXCMD_PROMISC;
1982
1983 if (ifp->if_flags & IFF_BROADCAST)
1984 rxmode |= AXE_RXCMD_BROADCAST;
1985
1986 	DPRINTF("rxmode %#jx", rxmode, 0, 0, 0);
1987
1988 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
1989
1990 /* Load the multicast filter. */
1991 axe_setmulti_locked(sc);
1992
1993 axe_unlock_mii_sc_locked(sc);
1994
1995 /* Open RX and TX pipes. */
1996 err = usbd_open_pipe(sc->axe_iface, sc->axe_ed[AXE_ENDPT_RX],
1997 USBD_EXCLUSIVE_USE | USBD_MPSAFE, &sc->axe_ep[AXE_ENDPT_RX]);
1998 if (err) {
1999 aprint_error_dev(sc->axe_dev, "open rx pipe failed: %s\n",
2000 usbd_errstr(err));
2001 return EIO;
2002 }
2003
2004 err = usbd_open_pipe(sc->axe_iface, sc->axe_ed[AXE_ENDPT_TX],
2005 USBD_EXCLUSIVE_USE | USBD_MPSAFE, &sc->axe_ep[AXE_ENDPT_TX]);
2006 if (err) {
2007 aprint_error_dev(sc->axe_dev, "open tx pipe failed: %s\n",
2008 usbd_errstr(err));
2009 return EIO;
2010 }
2011
2012 /* Init RX ring. */
2013 if (axe_rx_list_init(sc) != 0) {
2014 aprint_error_dev(sc->axe_dev, "rx list init failed\n");
2015 return ENOBUFS;
2016 }
2017
2018 /* Init TX ring. */
2019 if (axe_tx_list_init(sc) != 0) {
2020 aprint_error_dev(sc->axe_dev, "tx list init failed\n");
2021 return ENOBUFS;
2022 }
2023
2024 mutex_enter(&sc->axe_rxlock);
2025 mutex_enter(&sc->axe_txlock);
2026 sc->axe_stopping = false;
2027
2028 /* Start up the receive pipe. */
2029 for (i = 0; i < AXE_RX_LIST_CNT; i++) {
2030 c = &sc->axe_cdata.axe_rx_chain[i];
2031 usbd_setup_xfer(c->axe_xfer, c, c->axe_buf, sc->axe_bufsz,
2032 USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, axe_rxeof);
2033 usbd_transfer(c->axe_xfer);
2034 }
2035
2036 mutex_exit(&sc->axe_txlock);
2037 mutex_exit(&sc->axe_rxlock);
2038
2039 /* Indicate we are up and running. */
2040 KASSERT(IFNET_LOCKED(ifp));
2041 ifp->if_flags |= IFF_RUNNING;
2042
2043 callout_schedule(&sc->axe_stat_ch, hz);
2044 return 0;
2045 }
2046
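/*
 * if_init handler: serialise on axe_lock and call axe_init_locked().
 */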
2047 static int
2048 axe_init(struct ifnet *ifp)
2049 {
2050 struct axe_softc * const sc = ifp->if_softc;
2051
2052 mutex_enter(&sc->axe_lock);
2053 int ret = axe_init_locked(ifp);
2054 mutex_exit(&sc->axe_lock);
2055
2056 return ret;
2057 }
2058
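/*
 * if_ioctl handler.  SIOCSIFFLAGS transitions bring the interface up,
 * down or reprogram it; multicast list changes are pushed to the chip
 * through axe_setmulti().
 */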
2059 static int
2060 axe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2061 {
2062 struct axe_softc *sc = ifp->if_softc;
2063 int error = 0;
2064
2065 switch (cmd) {
2066 case SIOCSIFFLAGS:
2067 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2068 break;
2069
2070 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
2071 case IFF_RUNNING:
2072 axe_stop(ifp, 1);
2073 break;
2074 case IFF_UP:
2075 axe_init(ifp);
2076 break;
2077 case IFF_UP | IFF_RUNNING:
2078 if ((ifp->if_flags ^ sc->axe_if_flags) == IFF_PROMISC)
2079 axe_setmulti(sc);
2080 else
2081 axe_init(ifp);
2082 break;
2083 }
2084 mutex_enter(&sc->axe_rxlock);
2085 mutex_enter(&sc->axe_txlock);
2086 sc->axe_if_flags = ifp->if_flags;
2087 mutex_exit(&sc->axe_txlock);
2088 mutex_exit(&sc->axe_rxlock);
2089 break;
2090
2091 default:
2092 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2093 break;
2094
2095 error = 0;
2096
2097 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI)
2098 axe_setmulti(sc);
2099
2100 }
2101
2102 return error;
2103 }
2104
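/*
 * TX watchdog: the timer armed in axe_start_locked() has expired.
 * Record the error, reap the stuck transfer and restart transmission
 * if the send queue is not empty.
 */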
2105 static void
2106 axe_watchdog(struct ifnet *ifp)
2107 {
2108 struct axe_softc * const sc = ifp->if_softc;
2109 struct axe_chain *c;
2110 usbd_status stat;
2111
2112 ifp->if_oerrors++;
2113 aprint_error_dev(sc->axe_dev, "watchdog timeout\n");
2114
2115 c = &sc->axe_cdata.axe_tx_chain[0];
2116 usbd_get_xfer_status(c->axe_xfer, NULL, NULL, NULL, &stat);
2117 axe_txeof(c->axe_xfer, c, stat);
2118
2119 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2120 axe_start(ifp);
2121 }
2122
2123 /*
2124  * Stop the adapter: abort any pending transfers, free the RX and TX
2125  * transfer buffers, and close the USB pipes.
2126  */
2127 static void
2128 axe_stop_locked(struct ifnet *ifp, int disable)
2129 {
2130 struct axe_softc * const sc = ifp->if_softc;
2131 usbd_status err;
2132 int i;
2133
2134 KASSERT(mutex_owned(&sc->axe_lock));
2135
2136 mutex_enter(&sc->axe_rxlock);
2137 mutex_enter(&sc->axe_txlock);
2138 sc->axe_stopping = true;
2139 mutex_exit(&sc->axe_txlock);
2140 mutex_exit(&sc->axe_rxlock);
2141
2142 /*
2143 * XXXSMP Would like to
2144 * KASSERT(IFNET_LOCKED(ifp))
2145 * here but the locking order is:
2146 * ifnet -> sc lock -> rxlock -> txlock
2147 * and sc lock is already held.
2148 */
2149 ifp->if_flags &= ~IFF_RUNNING;
2150 sc->axe_timer = 0;
2151
2152 callout_stop(&sc->axe_stat_ch);
2153 sc->axe_link = 0;
2154
2155 /* Stop transfers. */
2156 if (sc->axe_ep[AXE_ENDPT_RX] != NULL) {
2157 err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_RX]);
2158 if (err) {
2159 aprint_error_dev(sc->axe_dev,
2160 "abort rx pipe failed: %s\n", usbd_errstr(err));
2161 }
2162 }
2163
2164 if (sc->axe_ep[AXE_ENDPT_TX] != NULL) {
2165 err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_TX]);
2166 if (err) {
2167 aprint_error_dev(sc->axe_dev,
2168 "abort tx pipe failed: %s\n", usbd_errstr(err));
2169 }
2170 }
2171
2172 if (sc->axe_ep[AXE_ENDPT_INTR] != NULL) {
2173 err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_INTR]);
2174 if (err) {
2175 aprint_error_dev(sc->axe_dev,
2176 "abort intr pipe failed: %s\n", usbd_errstr(err));
2177 }
2178 }
2179
2180 axe_reset(sc);
2181
2182 /* Free RX resources. */
2183 for (i = 0; i < AXE_RX_LIST_CNT; i++) {
2184 if (sc->axe_cdata.axe_rx_chain[i].axe_xfer != NULL) {
2185 usbd_destroy_xfer(sc->axe_cdata.axe_rx_chain[i].axe_xfer);
2186 sc->axe_cdata.axe_rx_chain[i].axe_xfer = NULL;
2187 }
2188 }
2189
2190 /* Free TX resources. */
2191 for (i = 0; i < AXE_TX_LIST_CNT; i++) {
2192 if (sc->axe_cdata.axe_tx_chain[i].axe_xfer != NULL) {
2193 usbd_destroy_xfer(sc->axe_cdata.axe_tx_chain[i].axe_xfer);
2194 sc->axe_cdata.axe_tx_chain[i].axe_xfer = NULL;
2195 }
2196 }
2197
2198 /* Close pipes. */
2199 if (sc->axe_ep[AXE_ENDPT_RX] != NULL) {
2200 err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_RX]);
2201 if (err) {
2202 aprint_error_dev(sc->axe_dev,
2203 "close rx pipe failed: %s\n", usbd_errstr(err));
2204 }
2205 sc->axe_ep[AXE_ENDPT_RX] = NULL;
2206 }
2207
2208 if (sc->axe_ep[AXE_ENDPT_TX] != NULL) {
2209 err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_TX]);
2210 if (err) {
2211 aprint_error_dev(sc->axe_dev,
2212 "close tx pipe failed: %s\n", usbd_errstr(err));
2213 }
2214 sc->axe_ep[AXE_ENDPT_TX] = NULL;
2215 }
2216
2217 if (sc->axe_ep[AXE_ENDPT_INTR] != NULL) {
2218 err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_INTR]);
2219 if (err) {
2220 aprint_error_dev(sc->axe_dev,
2221 "close intr pipe failed: %s\n", usbd_errstr(err));
2222 }
2223 sc->axe_ep[AXE_ENDPT_INTR] = NULL;
2224 }
2225 }
2226
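/*
 * if_stop handler: serialise on axe_lock and call axe_stop_locked().
 */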
2227 static void
2228 axe_stop(struct ifnet *ifp, int disable)
2229 {
2230 struct axe_softc * const sc = ifp->if_softc;
2231
2232 mutex_enter(&sc->axe_lock);
2233 axe_stop_locked(ifp, disable);
2234 mutex_exit(&sc->axe_lock);
2235 }
2236
2237 MODULE(MODULE_CLASS_DRIVER, if_axe, NULL);
2238
2239 #ifdef _MODULE
2240 #include "ioconf.c"
2241 #endif
2242
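/*
 * Module control hooks: attach and detach the autoconfiguration glue
 * generated in ioconf.c when the driver is built as a module.
 */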
2243 static int
2244 if_axe_modcmd(modcmd_t cmd, void *aux)
2245 {
2246 int error = 0;
2247
2248 switch (cmd) {
2249 case MODULE_CMD_INIT:
2250 #ifdef _MODULE
2251 error = config_init_component(cfdriver_ioconf_axe,
2252 cfattach_ioconf_axe, cfdata_ioconf_axe);
2253 #endif
2254 return error;
2255 case MODULE_CMD_FINI:
2256 #ifdef _MODULE
2257 error = config_fini_component(cfdriver_ioconf_axe,
2258 cfattach_ioconf_axe, cfdata_ioconf_axe);
2259 #endif
2260 return error;
2261 default:
2262 return ENOTTY;
2263 }
2264 }
2265