1 /*	$NetBSD: if_ure.c,v 1.13 2019/06/28 01:57:43 mrg Exp $	*/
2
3 /* $OpenBSD: if_ure.c,v 1.10 2018/11/02 21:32:30 jcs Exp $ */
4 /*-
5 * Copyright (c) 2015-2016 Kevin Lo <kevlo (at) FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /* RealTek RTL8152/RTL8153 10/100/Gigabit USB Ethernet device */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: if_ure.c,v 1.13 2019/06/28 01:57:43 mrg Exp $");
34
35 #ifdef _KERNEL_OPT
36 #include "opt_usb.h"
37 #include "opt_inet.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/bus.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/mutex.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/device.h>
49
50 #include <sys/rndsource.h>
51
52 #include <net/if.h>
53 #include <net/if_dl.h>
54 #include <net/if_ether.h>
55 #include <net/if_media.h>
56
57 #include <net/bpf.h>
58
59 #include <netinet/in.h>
60
61 #include <netinet/in_offload.h> /* XXX for in_undefer_cksum() */
62 #ifdef INET6
63 #include <netinet6/in6_offload.h> /* XXX for in6_undefer_cksum() */
64 #endif
65
66 #include <dev/mii/mii.h>
67 #include <dev/mii/miivar.h>
68
69 #include <dev/usb/usb.h>
70 #include <dev/usb/usbdi.h>
71 #include <dev/usb/usbdi_util.h>
72 #include <dev/usb/usbdivar.h>
73 #include <dev/usb/usbdevs.h>
74
75 #include <dev/ic/rtl81x9reg.h> /* XXX for RTK_GMEDIASTAT */
76 #include <dev/usb/if_urereg.h>
77 #include <dev/usb/if_urevar.h>
78
79 #define URE_PRINTF(sc, fmt, args...) \
80 	device_printf((sc)->ure_dev, "%s: " fmt, __func__, ##args)
81
82 #define URE_DEBUG
83 #ifdef URE_DEBUG
84 #define DPRINTF(x) do { if (uredebug) printf x; } while (0)
85 #define DPRINTFN(n, x) do { if (uredebug >= (n)) printf x; } while (0)
86 int uredebug = 1;
87 #else
88 #define DPRINTF(x)
89 #define DPRINTFN(n, x)
90 #endif
91
92 static const struct usb_devno ure_devs[] = {
93 { USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8152 },
94 { USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8153 }
95 };
96
97 static int ure_match(device_t, cfdata_t, void *);
98 static void ure_attach(device_t, device_t, void *);
99 static int ure_detach(device_t, int);
100 static int ure_activate(device_t, enum devact);
101
102 static int ure_ctl(struct ure_softc *, uint8_t, uint16_t, uint16_t,
103 void *, int);
104 static int ure_read_mem(struct ure_softc *, uint16_t, uint16_t, void *,
105 int);
106 static int ure_write_mem(struct ure_softc *, uint16_t, uint16_t, void *,
107 int);
108 static uint8_t ure_read_1(struct ure_softc *, uint16_t, uint16_t);
109 static uint16_t ure_read_2(struct ure_softc *, uint16_t, uint16_t);
110 static uint32_t ure_read_4(struct ure_softc *, uint16_t, uint16_t);
111 static int ure_write_1(struct ure_softc *, uint16_t, uint16_t, uint32_t);
112 static int ure_write_2(struct ure_softc *, uint16_t, uint16_t, uint32_t);
113 static int ure_write_4(struct ure_softc *, uint16_t, uint16_t, uint32_t);
114 static uint16_t ure_ocp_reg_read(struct ure_softc *, uint16_t);
115 static void ure_ocp_reg_write(struct ure_softc *, uint16_t, uint16_t);
116
117 static int ure_init(struct ifnet *);
118 static void ure_stop(struct ifnet *, int);
119 static void ure_start(struct ifnet *);
120 static void ure_reset(struct ure_softc *);
121 static void ure_miibus_statchg(struct ifnet *);
122 static int ure_miibus_readreg(device_t, int, int, uint16_t *);
123 static int ure_miibus_writereg(device_t, int, int, uint16_t);
124 static void ure_lock_mii(struct ure_softc *);
125 static void ure_unlock_mii(struct ure_softc *);
126
127 static int ure_encap(struct ure_softc *, struct mbuf *, int);
128 static uint32_t ure_txcsum(struct mbuf *);
129 static void ure_rxeof(struct usbd_xfer *, void *, usbd_status);
130 static int ure_rxcsum(struct ifnet *, struct ure_rxpkt *);
131 static void ure_txeof(struct usbd_xfer *, void *, usbd_status);
132 static int ure_rx_list_init(struct ure_softc *);
133 static int ure_tx_list_init(struct ure_softc *);
134
135 static void ure_tick_task(void *);
136 static void ure_tick(void *);
137
138 static int ure_ifmedia_upd(struct ifnet *);
139 static void ure_ifmedia_sts(struct ifnet *, struct ifmediareq *);
140 static int ure_ioctl(struct ifnet *, u_long, void *);
141 static void ure_rtl8152_init(struct ure_softc *);
142 static void ure_rtl8153_init(struct ure_softc *);
143 static void ure_disable_teredo(struct ure_softc *);
144 static void ure_init_fifo(struct ure_softc *);
145
146 CFATTACH_DECL_NEW(ure, sizeof(struct ure_softc), ure_match, ure_attach,
147 ure_detach, ure_activate);
148
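/*
 * All register and memory accesses go through a single USB
 * vendor-specific control request; URE_CTL_READ/URE_CTL_WRITE select
 * the transfer direction.
 */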
149 static int
150 ure_ctl(struct ure_softc *sc, uint8_t rw, uint16_t val, uint16_t index,
151 void *buf, int len)
152 {
153 usb_device_request_t req;
154 usbd_status err;
155
156 if (sc->ure_dying)
157 return 0;
158
159 if (rw == URE_CTL_WRITE)
160 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
161 else
162 req.bmRequestType = UT_READ_VENDOR_DEVICE;
163 req.bRequest = UR_SET_ADDRESS;
164 USETW(req.wValue, val);
165 USETW(req.wIndex, index);
166 USETW(req.wLength, len);
167
168 	DPRINTFN(5, ("ure_ctl: rw %d, val 0x%04hx, index 0x%04hx, len %d\n",
169 rw, val, index, len));
170 err = usbd_do_request(sc->ure_udev, &req, buf);
171 if (err) {
172 DPRINTF(("ure_ctl: error %d\n", err));
173 return -1;
174 }
175
176 return 0;
177 }
178
179 static int
180 ure_read_mem(struct ure_softc *sc, uint16_t addr, uint16_t index,
181 void *buf, int len)
182 {
183
184 return ure_ctl(sc, URE_CTL_READ, addr, index, buf, len);
185 }
186
187 static int
188 ure_write_mem(struct ure_softc *sc, uint16_t addr, uint16_t index,
189 void *buf, int len)
190 {
191
192 return ure_ctl(sc, URE_CTL_WRITE, addr, index, buf, len);
193 }
194
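/*
 * Registers are transferred as aligned 32-bit quantities.  The 8- and
 * 16-bit readers fetch the containing dword and shift/mask the result;
 * the writers set URE_BYTE_EN_* byte-enable bits in the index so that
 * only the bytes of interest are updated.  For example, a byte write
 * to a register with (reg & 3) == 2 shifts the value left by 16 bits
 * and sets URE_BYTE_EN_BYTE << 2 in the index.
 */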
195 static uint8_t
196 ure_read_1(struct ure_softc *sc, uint16_t reg, uint16_t index)
197 {
198 uint32_t val;
199 uint8_t temp[4];
200 uint8_t shift;
201
202 shift = (reg & 3) << 3;
203 reg &= ~3;
204
205 ure_read_mem(sc, reg, index, &temp, 4);
206 val = UGETDW(temp);
207 val >>= shift;
208
209 return val & 0xff;
210 }
211
212 static uint16_t
213 ure_read_2(struct ure_softc *sc, uint16_t reg, uint16_t index)
214 {
215 uint32_t val;
216 uint8_t temp[4];
217 uint8_t shift;
218
219 shift = (reg & 2) << 3;
220 reg &= ~3;
221
222 ure_read_mem(sc, reg, index, &temp, 4);
223 val = UGETDW(temp);
224 val >>= shift;
225
226 return val & 0xffff;
227 }
228
229 static uint32_t
230 ure_read_4(struct ure_softc *sc, uint16_t reg, uint16_t index)
231 {
232 uint8_t temp[4];
233
234 ure_read_mem(sc, reg, index, &temp, 4);
235 return UGETDW(temp);
236 }
237
238 static int
239 ure_write_1(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
240 {
241 uint16_t byen;
242 uint8_t temp[4];
243 uint8_t shift;
244
245 byen = URE_BYTE_EN_BYTE;
246 shift = reg & 3;
247 val &= 0xff;
248
249 if (reg & 3) {
250 byen <<= shift;
251 val <<= (shift << 3);
252 reg &= ~3;
253 }
254
255 USETDW(temp, val);
256 return ure_write_mem(sc, reg, index | byen, &temp, 4);
257 }
258
259 static int
260 ure_write_2(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
261 {
262 uint16_t byen;
263 uint8_t temp[4];
264 uint8_t shift;
265
266 byen = URE_BYTE_EN_WORD;
267 shift = reg & 2;
268 val &= 0xffff;
269
270 if (reg & 2) {
271 byen <<= shift;
272 val <<= (shift << 3);
273 reg &= ~3;
274 }
275
276 USETDW(temp, val);
277 return ure_write_mem(sc, reg, index | byen, &temp, 4);
278 }
279
280 static int
281 ure_write_4(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
282 {
283 uint8_t temp[4];
284
285 USETDW(temp, val);
286 return ure_write_mem(sc, reg, index | URE_BYTE_EN_DWORD, &temp, 4);
287 }
288
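/*
 * PHY (OCP) registers are reached indirectly: the high bits of the
 * OCP address select a window via URE_PLA_OCP_GPHY_BASE and the low
 * 12 bits are then accessed through PLA space at 0xb000.  Reading OCP
 * register 0xa400, for instance, writes 0xa000 to the window register
 * and then reads PLA register 0xb400.
 */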
289 static uint16_t
290 ure_ocp_reg_read(struct ure_softc *sc, uint16_t addr)
291 {
292 uint16_t reg;
293
294 ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000);
295 reg = (addr & 0x0fff) | 0xb000;
296
297 return ure_read_2(sc, reg, URE_MCU_TYPE_PLA);
298 }
299
300 static void
301 ure_ocp_reg_write(struct ure_softc *sc, uint16_t addr, uint16_t data)
302 {
303 uint16_t reg;
304
305 ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000);
306 reg = (addr & 0x0fff) | 0xb000;
307
308 ure_write_2(sc, reg, URE_MCU_TYPE_PLA, data);
309 }
310
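/*
 * MII bus glue.  PHY registers live in OCP space starting at
 * URE_OCP_BASE_MII; reads of RTK_GMEDIASTAT are intercepted and
 * served from URE_PLA_PHYSTATUS for the rgephy driver.
 */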
311 static int
312 ure_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
313 {
314 struct ure_softc *sc = device_private(dev);
315
316 mutex_enter(&sc->ure_lock);
317 if (sc->ure_dying || sc->ure_phyno != phy) {
318 mutex_exit(&sc->ure_lock);
319 return -1;
320 }
321 mutex_exit(&sc->ure_lock);
322
323 /* Let the rgephy driver read the URE_PLA_PHYSTATUS register. */
324 if (reg == RTK_GMEDIASTAT) {
325 *val = ure_read_1(sc, URE_PLA_PHYSTATUS, URE_MCU_TYPE_PLA);
326 return 0;
327 }
328
329 ure_lock_mii(sc);
330 *val = ure_ocp_reg_read(sc, URE_OCP_BASE_MII + reg * 2);
331 ure_unlock_mii(sc);
332
333 return 0;
334 }
335
336 static int
337 ure_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
338 {
339 struct ure_softc *sc = device_private(dev);
340
341 mutex_enter(&sc->ure_lock);
342 if (sc->ure_dying || sc->ure_phyno != phy) {
343 mutex_exit(&sc->ure_lock);
344 return -1;
345 }
346 mutex_exit(&sc->ure_lock);
347
348 ure_lock_mii(sc);
349 ure_ocp_reg_write(sc, URE_OCP_BASE_MII + reg * 2, val);
350 ure_unlock_mii(sc);
351
352 return 0;
353 }
354
355 static void
356 ure_miibus_statchg(struct ifnet *ifp)
357 {
358 struct ure_softc *sc;
359 struct mii_data *mii;
360
361 if (ifp == NULL || (ifp->if_flags & IFF_RUNNING) == 0)
362 return;
363
364 sc = ifp->if_softc;
365 mii = GET_MII(sc);
366
367 if (mii == NULL)
368 return;
369
370 sc->ure_flags &= ~URE_FLAG_LINK;
371 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
372 (IFM_ACTIVE | IFM_AVALID)) {
373 switch (IFM_SUBTYPE(mii->mii_media_active)) {
374 case IFM_10_T:
375 case IFM_100_TX:
376 sc->ure_flags |= URE_FLAG_LINK;
377 break;
378 case IFM_1000_T:
379 if ((sc->ure_flags & URE_FLAG_8152) != 0)
380 break;
381 sc->ure_flags |= URE_FLAG_LINK;
382 break;
383 default:
384 break;
385 }
386 }
387 }
388
389 static int
390 ure_ifmedia_upd(struct ifnet *ifp)
391 {
392 struct ure_softc *sc = ifp->if_softc;
393 struct mii_data *mii = GET_MII(sc);
394 int err;
395
396 sc->ure_flags &= ~URE_FLAG_LINK;
397 if (mii->mii_instance) {
398 struct mii_softc *miisc;
399 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
400 mii_phy_reset(miisc);
401 }
402
403 err = mii_mediachg(mii);
404 if (err == ENXIO)
405 return 0; /* XXX */
406 else
407 return err;
408 }
409
410 static void
411 ure_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
412 {
413 struct ure_softc *sc = ifp->if_softc;
414 struct mii_data *mii = GET_MII(sc);
415
416 mii_pollstat(mii);
417 ifmr->ifm_active = mii->mii_media_active;
418 ifmr->ifm_status = mii->mii_media_status;
419 }
420
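/*
 * Program the receive filter.  Unicast to our station address and
 * broadcast are always accepted; IFF_PROMISC sets accept-all, and
 * multicast is handled either as accept-all-multicast or through the
 * 64-bit CRC hash filter in URE_PLA_MAR0/MAR4.
 */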
421 static void
422 ure_iff_locked(struct ure_softc *sc)
423 {
424 struct ethercom *ec = &sc->ure_ec;
425 struct ifnet *ifp = GET_IFP(sc);
426 struct ether_multi *enm;
427 struct ether_multistep step;
428 uint32_t hashes[2] = { 0, 0 };
429 uint32_t hash;
430 uint32_t rxmode;
431
432 KASSERT(mutex_owned(&sc->ure_lock));
433
434 if (sc->ure_dying)
435 return;
436
437 rxmode = ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA);
438 rxmode &= ~URE_RCR_ACPT_ALL;
439
440 /*
441 * Always accept frames destined to our station address.
442 * Always accept broadcast frames.
443 */
444 rxmode |= URE_RCR_APM | URE_RCR_AB;
445
446 if (ifp->if_flags & IFF_PROMISC) {
447 rxmode |= URE_RCR_AAP;
448 allmulti:
449 ETHER_LOCK(ec);
450 ec->ec_flags |= ETHER_F_ALLMULTI;
451 ETHER_UNLOCK(ec);
452 rxmode |= URE_RCR_AM;
453 hashes[0] = hashes[1] = 0xffffffff;
454 } else {
455 rxmode |= URE_RCR_AM;
456
457 ETHER_LOCK(ec);
458 ec->ec_flags &= ~ETHER_F_ALLMULTI;
459
460 ETHER_FIRST_MULTI(step, ec, enm);
461 while (enm != NULL) {
462 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
463 ETHER_ADDR_LEN)) {
464 ETHER_UNLOCK(ec);
465 goto allmulti;
466 }
467
468 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN)
469 >> 26;
470 if (hash < 32)
471 hashes[0] |= (1 << hash);
472 else
473 hashes[1] |= (1 << (hash - 32));
474
475 ETHER_NEXT_MULTI(step, enm);
476 }
477 ETHER_UNLOCK(ec);
478
479 hash = bswap32(hashes[0]);
480 hashes[0] = bswap32(hashes[1]);
481 hashes[1] = hash;
482 }
483
484 ure_write_4(sc, URE_PLA_MAR0, URE_MCU_TYPE_PLA, hashes[0]);
485 ure_write_4(sc, URE_PLA_MAR4, URE_MCU_TYPE_PLA, hashes[1]);
486 ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA, rxmode);
487 }
488
489 static void
490 ure_iff(struct ure_softc *sc)
491 {
492
493 mutex_enter(&sc->ure_lock);
494 ure_iff_locked(sc);
495 mutex_exit(&sc->ure_lock);
496 }
497
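/* Issue a MAC reset and poll until URE_CR_RST self-clears. */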
498 static void
499 ure_reset(struct ure_softc *sc)
500 {
501 int i;
502
503 KASSERT(mutex_owned(&sc->ure_lock));
504
505 ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, URE_CR_RST);
506
507 for (i = 0; i < URE_TIMEOUT; i++) {
508 if (!(ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) &
509 URE_CR_RST))
510 break;
511 usbd_delay_ms(sc->ure_udev, 10);
512 }
513 if (i == URE_TIMEOUT)
514 URE_PRINTF(sc, "reset never completed\n");
515 }
516
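/*
 * ifnet init path (called with the softc lock held): program the
 * station address, reset the packet filter, enable TX/RX, load the
 * multicast filter, open the bulk pipes, set up the transfer lists
 * and prime the receive pipe.
 */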
517 static int
518 ure_init_locked(struct ifnet *ifp)
519 {
520 struct ure_softc * const sc = ifp->if_softc;
521 struct ure_chain *c;
522 usbd_status err;
523 int i;
524 uint8_t eaddr[8];
525
526 KASSERT(mutex_owned(&sc->ure_lock));
527
528 if (sc->ure_dying)
529 return EIO;
530
531 /* Cancel pending I/O. */
532 if (ifp->if_flags & IFF_RUNNING)
533 ure_stop(ifp, 1);
534
535 /* Set MAC address. */
536 memset(eaddr, 0, sizeof(eaddr));
537 memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
538 ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_CONFIG);
539 ure_write_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA | URE_BYTE_EN_SIX_BYTES,
540 eaddr, 8);
541 ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_NORAML);
542
543 /* Reset the packet filter. */
544 ure_write_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA,
545 ure_read_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA) &
546 ~URE_FMC_FCR_MCU_EN);
547 ure_write_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA,
548 ure_read_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA) |
549 URE_FMC_FCR_MCU_EN);
550
551 /* Enable transmit and receive. */
552 ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA,
553 ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) | URE_CR_RE |
554 URE_CR_TE);
555
556 ure_write_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA,
557 ure_read_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA) &
558 ~URE_RXDY_GATED_EN);
559
560 /* Load the multicast filter. */
561 ure_iff_locked(sc);
562
563 /* Open RX and TX pipes. */
564 err = usbd_open_pipe(sc->ure_iface, sc->ure_ed[URE_ENDPT_RX],
565 USBD_EXCLUSIVE_USE | USBD_MPSAFE, &sc->ure_ep[URE_ENDPT_RX]);
566 if (err) {
567 URE_PRINTF(sc, "open rx pipe failed: %s\n", usbd_errstr(err));
568 return EIO;
569 }
570
571 err = usbd_open_pipe(sc->ure_iface, sc->ure_ed[URE_ENDPT_TX],
572 USBD_EXCLUSIVE_USE | USBD_MPSAFE, &sc->ure_ep[URE_ENDPT_TX]);
573 if (err) {
574 URE_PRINTF(sc, "open tx pipe failed: %s\n", usbd_errstr(err));
575 return EIO;
576 }
577
578 if (ure_rx_list_init(sc)) {
579 URE_PRINTF(sc, "rx list init failed\n");
580 return ENOBUFS;
581 }
582
583 if (ure_tx_list_init(sc)) {
584 URE_PRINTF(sc, "tx list init failed\n");
585 return ENOBUFS;
586 }
587
588 mutex_enter(&sc->ure_rxlock);
589 mutex_enter(&sc->ure_txlock);
590 sc->ure_stopping = false;
591
592 /* Start up the receive pipe. */
593 for (i = 0; i < URE_RX_LIST_CNT; i++) {
594 c = &sc->ure_cdata.rx_chain[i];
595 usbd_setup_xfer(c->uc_xfer, c, c->uc_buf, sc->ure_bufsz,
596 USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ure_rxeof);
597 usbd_transfer(c->uc_xfer);
598 }
599
600 mutex_exit(&sc->ure_txlock);
601 mutex_exit(&sc->ure_rxlock);
602
603 /* Indicate we are up and running. */
604 KASSERT(IFNET_LOCKED(ifp));
605 ifp->if_flags |= IFF_RUNNING;
606
607 callout_reset(&sc->ure_stat_ch, hz, ure_tick, sc);
608
609 return 0;
610 }
611
612 static int
613 ure_init(struct ifnet *ifp)
614 {
615 struct ure_softc * const sc = ifp->if_softc;
616
617 mutex_enter(&sc->ure_lock);
618 int ret = ure_init_locked(ifp);
619 mutex_exit(&sc->ure_lock);
620
621 return ret;
622 }
623
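/*
 * Transmit path: pull packets off if_snd and hand them to
 * ure_encap() until all URE_TX_LIST_CNT slots are in flight.
 */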
624 static void
625 ure_start_locked(struct ifnet *ifp)
626 {
627 struct ure_softc *sc = ifp->if_softc;
628 struct mbuf *m;
629 struct ure_cdata *cd = &sc->ure_cdata;
630 int idx;
631
632 KASSERT(cd->tx_cnt <= URE_TX_LIST_CNT);
633
634 if (sc->ure_dying || sc->ure_stopping ||
635 (sc->ure_flags & URE_FLAG_LINK) == 0 ||
636 (ifp->if_flags & IFF_RUNNING) == 0 ||
637 cd->tx_cnt == URE_TX_LIST_CNT) {
638 return;
639 }
640
641 idx = cd->tx_prod;
642 while (cd->tx_cnt < URE_TX_LIST_CNT) {
643 IFQ_POLL(&ifp->if_snd, m);
644 if (m == NULL)
645 break;
646
647 if (ure_encap(sc, m, idx)) {
648 ifp->if_oerrors++;
649 break;
650 }
651 IFQ_DEQUEUE(&ifp->if_snd, m);
652
653 bpf_mtap(ifp, m, BPF_D_OUT);
654 m_freem(m);
655
656 idx = (idx + 1) % URE_TX_LIST_CNT;
657 cd->tx_cnt++;
658 }
659 cd->tx_prod = idx;
660 }
661
662 static void
663 ure_start(struct ifnet *ifp)
664 {
665 struct ure_softc * const sc = ifp->if_softc;
666
667 mutex_enter(&sc->ure_txlock);
668 ure_start_locked(ifp);
669 mutex_exit(&sc->ure_txlock);
670 }
671
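/*
 * Periodic timer: the callout runs ure_tick(), which defers the real
 * work to ure_tick_task() on the USB task queue so mii_tick() runs in
 * thread context; the task then reschedules the callout.
 */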
672 static void
673 ure_tick(void *xsc)
674 {
675 struct ure_softc *sc = xsc;
676
677 if (sc == NULL)
678 return;
679
680 mutex_enter(&sc->ure_lock);
681 if (!sc->ure_stopping && !sc->ure_dying) {
682 /* Perform periodic stuff in process context */
683 usb_add_task(sc->ure_udev, &sc->ure_tick_task, USB_TASKQ_DRIVER);
684 }
685 mutex_exit(&sc->ure_lock);
686 }
687
688 static void
689 ure_stop_locked(struct ifnet *ifp, int disable __unused)
690 {
691 struct ure_softc *sc = ifp->if_softc;
692 struct ure_chain *c;
693 usbd_status err;
694 int i;
695
696 KASSERT(mutex_owned(&sc->ure_lock));
697 mutex_enter(&sc->ure_rxlock);
698 mutex_enter(&sc->ure_txlock);
699 sc->ure_stopping = true;
700 mutex_exit(&sc->ure_txlock);
701 mutex_exit(&sc->ure_rxlock);
702
703 ure_reset(sc);
704
705 /*
706 * XXXSMP Would like to
707 * KASSERT(IFNET_LOCKED(ifp))
708 * here but the locking order is:
709 * ifnet -> sc lock -> rxlock -> txlock
710 * and sc lock is already held.
711 */
712 ifp->if_flags &= ~IFF_RUNNING;
713
714 callout_stop(&sc->ure_stat_ch);
715
716 sc->ure_flags &= ~URE_FLAG_LINK; /* XXX */
717
718 if (sc->ure_ep[URE_ENDPT_RX] != NULL) {
719 err = usbd_abort_pipe(sc->ure_ep[URE_ENDPT_RX]);
720 if (err)
721 URE_PRINTF(sc, "abort rx pipe failed: %s\n",
722 usbd_errstr(err));
723 }
724
725 if (sc->ure_ep[URE_ENDPT_TX] != NULL) {
726 err = usbd_abort_pipe(sc->ure_ep[URE_ENDPT_TX]);
727 if (err)
728 URE_PRINTF(sc, "abort tx pipe failed: %s\n",
729 usbd_errstr(err));
730 }
731
732 for (i = 0; i < URE_RX_LIST_CNT; i++) {
733 c = &sc->ure_cdata.rx_chain[i];
734 if (c->uc_xfer != NULL) {
735 usbd_destroy_xfer(c->uc_xfer);
736 c->uc_xfer = NULL;
737 }
738 }
739
740 for (i = 0; i < URE_TX_LIST_CNT; i++) {
741 c = &sc->ure_cdata.tx_chain[i];
742 if (c->uc_xfer != NULL) {
743 usbd_destroy_xfer(c->uc_xfer);
744 c->uc_xfer = NULL;
745 }
746 }
747
748 if (sc->ure_ep[URE_ENDPT_RX] != NULL) {
749 err = usbd_close_pipe(sc->ure_ep[URE_ENDPT_RX]);
750 if (err)
751 URE_PRINTF(sc, "close rx pipe failed: %s\n",
752 usbd_errstr(err));
753 sc->ure_ep[URE_ENDPT_RX] = NULL;
754 }
755
756 if (sc->ure_ep[URE_ENDPT_TX] != NULL) {
757 err = usbd_close_pipe(sc->ure_ep[URE_ENDPT_TX]);
758 if (err)
759 URE_PRINTF(sc, "close tx pipe failed: %s\n",
760 usbd_errstr(err));
761 sc->ure_ep[URE_ENDPT_TX] = NULL;
762 }
763 }
764
765 static void
766 ure_stop(struct ifnet *ifp, int disable __unused)
767 {
768 struct ure_softc * const sc = ifp->if_softc;
769
770 mutex_enter(&sc->ure_lock);
771 ure_stop_locked(ifp, disable);
772 mutex_exit(&sc->ure_lock);
773 }
774
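/*
 * RTL8152-specific bring-up: disable ALDPS and power cut, tweak MCU
 * clock and PFM/PWM switching, enable RX aggregation, and program the
 * TX aggregation and RX buffer thresholds after ure_init_fifo().
 */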
775 static void
776 ure_rtl8152_init(struct ure_softc *sc)
777 {
778 uint32_t pwrctrl;
779
780 /* Disable ALDPS. */
781 ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA |
782 URE_DIS_SDSAVE);
783 usbd_delay_ms(sc->ure_udev, 20);
784
785 if (sc->ure_chip & URE_CHIP_VER_4C00) {
786 ure_write_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA,
787 ure_read_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA) &
788 ~URE_LED_MODE_MASK);
789 }
790
791 ure_write_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB,
792 ure_read_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB) &
793 ~URE_POWER_CUT);
794 ure_write_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB,
795 ure_read_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB) &
796 ~URE_RESUME_INDICATE);
797
798 ure_write_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA,
799 ure_read_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA) |
800 URE_TX_10M_IDLE_EN | URE_PFM_PWM_SWITCH);
801 pwrctrl = ure_read_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA);
802 pwrctrl &= ~URE_MCU_CLK_RATIO_MASK;
803 pwrctrl |= URE_MCU_CLK_RATIO | URE_D3_CLK_GATED_EN;
804 ure_write_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA, pwrctrl);
805 ure_write_2(sc, URE_PLA_GPHY_INTR_IMR, URE_MCU_TYPE_PLA,
806 URE_GPHY_STS_MSK | URE_SPEED_DOWN_MSK | URE_SPDWN_RXDV_MSK |
807 URE_SPDWN_LINKCHG_MSK);
808
809 /* Enable Rx aggregation. */
810 ure_write_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB,
811 ure_read_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB) &
812 ~URE_RX_AGG_DISABLE);
813
814 /* Disable ALDPS. */
815 ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA |
816 URE_DIS_SDSAVE);
817 usbd_delay_ms(sc->ure_udev, 20);
818
819 ure_init_fifo(sc);
820
821 ure_write_1(sc, URE_USB_TX_AGG, URE_MCU_TYPE_USB,
822 URE_TX_AGG_MAX_THRESHOLD);
823 ure_write_4(sc, URE_USB_RX_BUF_TH, URE_MCU_TYPE_USB, URE_RX_THR_HIGH);
824 ure_write_4(sc, URE_USB_TX_DMA, URE_MCU_TYPE_USB,
825 URE_TEST_MODE_DISABLE | URE_TX_SIZE_ADJUST1);
826 }
827
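/*
 * RTL8153-specific bring-up: wait for chip autoload and for the PHY
 * to report a stable state, apply per-revision (5C10/5C20/5C30)
 * tweaks, clear power-cut state, re-run ure_init_fifo() and enable
 * RX aggregation.
 */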
828 static void
829 ure_rtl8153_init(struct ure_softc *sc)
830 {
831 uint16_t val;
832 uint8_t u1u2[8];
833 int i;
834
835 /* Disable ALDPS. */
836 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
837 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) & ~URE_EN_ALDPS);
838 usbd_delay_ms(sc->ure_udev, 20);
839
840 memset(u1u2, 0x00, sizeof(u1u2));
841 ure_write_mem(sc, URE_USB_TOLERANCE,
842 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
843
844 for (i = 0; i < URE_TIMEOUT; i++) {
845 if (ure_read_2(sc, URE_PLA_BOOT_CTRL, URE_MCU_TYPE_PLA) &
846 URE_AUTOLOAD_DONE)
847 break;
848 usbd_delay_ms(sc->ure_udev, 10);
849 }
850 if (i == URE_TIMEOUT)
851 URE_PRINTF(sc, "timeout waiting for chip autoload\n");
852
853 for (i = 0; i < URE_TIMEOUT; i++) {
854 val = ure_ocp_reg_read(sc, URE_OCP_PHY_STATUS) &
855 URE_PHY_STAT_MASK;
856 if (val == URE_PHY_STAT_LAN_ON || val == URE_PHY_STAT_PWRDN)
857 break;
858 usbd_delay_ms(sc->ure_udev, 10);
859 }
860 if (i == URE_TIMEOUT)
861 URE_PRINTF(sc, "timeout waiting for phy to stabilize\n");
862
863 ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB,
864 ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB) &
865 ~URE_U2P3_ENABLE);
866
867 if (sc->ure_chip & URE_CHIP_VER_5C10) {
868 val = ure_read_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB);
869 val &= ~URE_PWD_DN_SCALE_MASK;
870 val |= URE_PWD_DN_SCALE(96);
871 ure_write_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB, val);
872
873 ure_write_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB,
874 ure_read_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB) |
875 URE_USB2PHY_L1 | URE_USB2PHY_SUSPEND);
876 } else if (sc->ure_chip & URE_CHIP_VER_5C20) {
877 ure_write_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA,
878 ure_read_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA) &
879 ~URE_ECM_ALDPS);
880 }
881 if (sc->ure_chip & (URE_CHIP_VER_5C20 | URE_CHIP_VER_5C30)) {
882 val = ure_read_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB);
883 if (ure_read_2(sc, URE_USB_BURST_SIZE, URE_MCU_TYPE_USB) ==
884 0)
885 val &= ~URE_DYNAMIC_BURST;
886 else
887 val |= URE_DYNAMIC_BURST;
888 ure_write_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB, val);
889 }
890
891 ure_write_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB,
892 ure_read_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB) |
893 URE_EP4_FULL_FC);
894
895 ure_write_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB,
896 ure_read_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB) &
897 ~URE_TIMER11_EN);
898
899 ure_write_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA,
900 ure_read_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA) &
901 ~URE_LED_MODE_MASK);
902
903 if ((sc->ure_chip & URE_CHIP_VER_5C10) &&
904 sc->ure_udev->ud_speed != USB_SPEED_SUPER)
905 val = URE_LPM_TIMER_500MS;
906 else
907 val = URE_LPM_TIMER_500US;
908 ure_write_1(sc, URE_USB_LPM_CTRL, URE_MCU_TYPE_USB,
909 val | URE_FIFO_EMPTY_1FB | URE_ROK_EXIT_LPM);
910
911 val = ure_read_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB);
912 val &= ~URE_SEN_VAL_MASK;
913 val |= URE_SEN_VAL_NORMAL | URE_SEL_RXIDLE;
914 ure_write_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB, val);
915
916 ure_write_2(sc, URE_USB_CONNECT_TIMER, URE_MCU_TYPE_USB, 0x0001);
917
918 ure_write_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB,
919 ure_read_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB) &
920 ~(URE_PWR_EN | URE_PHASE2_EN));
921 ure_write_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB,
922 ure_read_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB) &
923 ~URE_PCUT_STATUS);
924
925 memset(u1u2, 0xff, sizeof(u1u2));
926 ure_write_mem(sc, URE_USB_TOLERANCE,
927 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
928
929 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA,
930 URE_ALDPS_SPDWN_RATIO);
931 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL2, URE_MCU_TYPE_PLA,
932 URE_EEE_SPDWN_RATIO);
933 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL3, URE_MCU_TYPE_PLA,
934 URE_PKT_AVAIL_SPDWN_EN | URE_SUSPEND_SPDWN_EN |
935 URE_U1U2_SPDWN_EN | URE_L1_SPDWN_EN);
936 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL4, URE_MCU_TYPE_PLA,
937 URE_PWRSAVE_SPDWN_EN | URE_RXDV_SPDWN_EN | URE_TX10MIDLE_EN |
938 URE_TP100_SPDWN_EN | URE_TP500_SPDWN_EN | URE_TP1000_SPDWN_EN |
939 URE_EEE_SPDWN_EN);
940
941 val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB);
942 if (!(sc->ure_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10)))
943 val |= URE_U2P3_ENABLE;
944 else
945 val &= ~URE_U2P3_ENABLE;
946 ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val);
947
948 memset(u1u2, 0x00, sizeof(u1u2));
949 ure_write_mem(sc, URE_USB_TOLERANCE,
950 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
951
952 /* Disable ALDPS. */
953 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
954 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) & ~URE_EN_ALDPS);
955 usbd_delay_ms(sc->ure_udev, 20);
956
957 ure_init_fifo(sc);
958
959 /* Enable Rx aggregation. */
960 ure_write_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB,
961 ure_read_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB) &
962 ~URE_RX_AGG_DISABLE);
963
964 val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB);
965 if (!(sc->ure_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10)))
966 val |= URE_U2P3_ENABLE;
967 else
968 val &= ~URE_U2P3_ENABLE;
969 ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val);
970
971 memset(u1u2, 0xff, sizeof(u1u2));
972 ure_write_mem(sc, URE_USB_TOLERANCE,
973 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
974 }
975
976 static void
977 ure_disable_teredo(struct ure_softc *sc)
978 {
979
980 ure_write_4(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA,
981 ure_read_4(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA) &
982 ~(URE_TEREDO_SEL | URE_TEREDO_RS_EVENT_MASK | URE_OOB_TEREDO_EN));
983 ure_write_2(sc, URE_PLA_WDT6_CTRL, URE_MCU_TYPE_PLA,
984 URE_WDT6_SET_MODE);
985 ure_write_2(sc, URE_PLA_REALWOW_TIMER, URE_MCU_TYPE_PLA, 0);
986 ure_write_4(sc, URE_PLA_TEREDO_TIMER, URE_MCU_TYPE_PLA, 0);
987 }
988
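/*
 * Common FIFO/packet-engine setup: disable Teredo offload, reset the
 * MAC, take it out of OOB mode, wait for the link list to become
 * ready, and program the RX/TX FIFO thresholds (full-speed devices
 * use different RX thresholds than high/super-speed ones).
 */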
989 static void
990 ure_init_fifo(struct ure_softc *sc)
991 {
992 uint32_t rx_fifo1, rx_fifo2;
993 int i;
994
995 ure_write_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA,
996 ure_read_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA) |
997 URE_RXDY_GATED_EN);
998
999 ure_disable_teredo(sc);
1000
1001 ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA,
1002 ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA) &
1003 ~URE_RCR_ACPT_ALL);
1004
1005 if (!(sc->ure_flags & URE_FLAG_8152)) {
1006 if (sc->ure_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10 |
1007 URE_CHIP_VER_5C20))
1008 ure_ocp_reg_write(sc, URE_OCP_ADC_CFG,
1009 URE_CKADSEL_L | URE_ADC_EN | URE_EN_EMI_L);
1010 if (sc->ure_chip & URE_CHIP_VER_5C00)
1011 ure_ocp_reg_write(sc, URE_OCP_EEE_CFG,
1012 ure_ocp_reg_read(sc, URE_OCP_EEE_CFG) &
1013 ~URE_CTAP_SHORT_EN);
1014 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
1015 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) |
1016 URE_EEE_CLKDIV_EN);
1017 ure_ocp_reg_write(sc, URE_OCP_DOWN_SPEED,
1018 ure_ocp_reg_read(sc, URE_OCP_DOWN_SPEED) |
1019 URE_EN_10M_BGOFF);
1020 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
1021 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) |
1022 URE_EN_10M_PLLOFF);
1023 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_IMPEDANCE);
1024 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x0b13);
1025 ure_write_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA,
1026 ure_read_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA) |
1027 URE_PFM_PWM_SWITCH);
1028
1029 /* Enable LPF corner auto tune. */
1030 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_LPF_CFG);
1031 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0xf70f);
1032
1033 /* Adjust 10M amplitude. */
1034 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_10M_AMP1);
1035 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x00af);
1036 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_10M_AMP2);
1037 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x0208);
1038 }
1039
1040 ure_reset(sc);
1041
1042 ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, 0);
1043
1044 ure_write_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA,
1045 ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
1046 ~URE_NOW_IS_OOB);
1047
1048 ure_write_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA,
1049 ure_read_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA) &
1050 ~URE_MCU_BORW_EN);
1051 for (i = 0; i < URE_TIMEOUT; i++) {
1052 if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
1053 URE_LINK_LIST_READY)
1054 break;
1055 usbd_delay_ms(sc->ure_udev, 10);
1056 }
1057 if (i == URE_TIMEOUT)
1058 URE_PRINTF(sc, "timeout waiting for OOB control\n");
1059 ure_write_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA,
1060 ure_read_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA) |
1061 URE_RE_INIT_LL);
1062 for (i = 0; i < URE_TIMEOUT; i++) {
1063 if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
1064 URE_LINK_LIST_READY)
1065 break;
1066 usbd_delay_ms(sc->ure_udev, 10);
1067 }
1068 if (i == URE_TIMEOUT)
1069 URE_PRINTF(sc, "timeout waiting for OOB control\n");
1070
1071 ure_write_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA,
1072 ure_read_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA) &
1073 ~URE_CPCR_RX_VLAN);
1074 ure_write_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA,
1075 ure_read_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA) |
1076 URE_TCR0_AUTO_FIFO);
1077
1078 /* Configure Rx FIFO threshold and coalescing. */
1079 ure_write_4(sc, URE_PLA_RXFIFO_CTRL0, URE_MCU_TYPE_PLA,
1080 URE_RXFIFO_THR1_NORMAL);
1081 if (sc->ure_udev->ud_speed == USB_SPEED_FULL) {
1082 rx_fifo1 = URE_RXFIFO_THR2_FULL;
1083 rx_fifo2 = URE_RXFIFO_THR3_FULL;
1084 } else {
1085 rx_fifo1 = URE_RXFIFO_THR2_HIGH;
1086 rx_fifo2 = URE_RXFIFO_THR3_HIGH;
1087 }
1088 ure_write_4(sc, URE_PLA_RXFIFO_CTRL1, URE_MCU_TYPE_PLA, rx_fifo1);
1089 ure_write_4(sc, URE_PLA_RXFIFO_CTRL2, URE_MCU_TYPE_PLA, rx_fifo2);
1090
1091 /* Configure Tx FIFO threshold. */
1092 ure_write_4(sc, URE_PLA_TXFIFO_CTRL, URE_MCU_TYPE_PLA,
1093 URE_TXFIFO_THR_NORMAL);
1094 }
1095
1096 int
1097 ure_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1098 {
1099 struct ure_softc *sc = ifp->if_softc;
1100 int error = 0, oflags = ifp->if_flags;
1101
1102 switch (cmd) {
1103 case SIOCSIFFLAGS:
1104 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1105 break;
1106 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1107 case IFF_RUNNING:
1108 ure_stop(ifp, 1);
1109 break;
1110 case IFF_UP:
1111 ure_init(ifp);
1112 break;
1113 case IFF_UP | IFF_RUNNING:
1114 if ((ifp->if_flags ^ oflags) == IFF_PROMISC)
1115 ure_iff(sc);
1116 else
1117 ure_init(ifp);
1118 }
1119 break;
1120 default:
1121 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
1122 break;
1123 error = 0;
1124 if ((ifp->if_flags & IFF_RUNNING) == 0)
1125 break;
1126 switch (cmd) {
1127 case SIOCADDMULTI:
1128 case SIOCDELMULTI:
1129 ure_iff(sc);
1130 break;
1131 default:
1132 break;
1133 }
1134 }
1135
1136 return error;
1137 }
1138
1139 static int
1140 ure_match(device_t parent, cfdata_t match, void *aux)
1141 {
1142 struct usb_attach_arg *uaa = aux;
1143
1144 return usb_lookup(ure_devs, uaa->uaa_vendor, uaa->uaa_product) != NULL ?
1145 UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
1146 }
1147
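/*
 * Attach: select the USB configuration, locate the bulk IN/OUT
 * endpoints, identify the chip revision from URE_PLA_TCR1, run the
 * 8152/8153-specific init, read the station address and attach the
 * ethernet and MII layers.
 */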
1148 static void
1149 ure_attach(device_t parent, device_t self, void *aux)
1150 {
1151 struct ure_softc *sc = device_private(self);
1152 struct usb_attach_arg *uaa = aux;
1153 struct usbd_device *dev = uaa->uaa_device;
1154 usb_interface_descriptor_t *id;
1155 usb_endpoint_descriptor_t *ed;
1156 struct ifnet *ifp;
1157 struct mii_data *mii;
1158 int error, i;
1159 uint16_t ver;
1160 	uint8_t eaddr[8];		 /* 2-byte padded */
1161 char *devinfop;
1162
1163 aprint_naive("\n");
1164 aprint_normal("\n");
1165
1166 sc->ure_dev = self;
1167 sc->ure_udev = dev;
1168
1169 devinfop = usbd_devinfo_alloc(sc->ure_udev, 0);
1170 aprint_normal_dev(self, "%s\n", devinfop);
1171 usbd_devinfo_free(devinfop);
1172
1173 callout_init(&sc->ure_stat_ch, CALLOUT_MPSAFE);
1174 usb_init_task(&sc->ure_tick_task, ure_tick_task, sc, USB_TASKQ_MPSAFE);
1175 mutex_init(&sc->ure_mii_lock, MUTEX_DEFAULT, IPL_NONE);
1176 mutex_init(&sc->ure_txlock, MUTEX_DEFAULT, IPL_SOFTUSB);
1177 mutex_init(&sc->ure_rxlock, MUTEX_DEFAULT, IPL_SOFTUSB);
1178 mutex_init(&sc->ure_lock, MUTEX_DEFAULT, IPL_NONE);
1179 cv_init(&sc->ure_detachcv, "uredet");
1180
1181 /*
1182 * ure_phyno is set to 0 below when configuration has succeeded.
1183 	 * If it is still -1 in detach, then ifmedia/mii/etc. was not
1184 	 * set up and should not be torn down.
1185 */
1186 sc->ure_phyno = -1;
1187
1188 #define URE_CONFIG_NO 1 /* XXX */
1189 error = usbd_set_config_no(dev, URE_CONFIG_NO, 1);
1190 if (error) {
1191 aprint_error_dev(self, "failed to set configuration: %s\n",
1192 usbd_errstr(error));
1193 return; /* XXX */
1194 }
1195
1196 if (uaa->uaa_product == USB_PRODUCT_REALTEK_RTL8152)
1197 sc->ure_flags |= URE_FLAG_8152;
1198
1199 #define URE_IFACE_IDX 0 /* XXX */
1200 error = usbd_device2interface_handle(dev, URE_IFACE_IDX, &sc->ure_iface);
1201 if (error) {
1202 aprint_error_dev(self, "failed to get interface handle: %s\n",
1203 usbd_errstr(error));
1204 return; /* XXX */
1205 }
1206
1207 sc->ure_bufsz = 16 * 1024;
1208
1209 id = usbd_get_interface_descriptor(sc->ure_iface);
1210 for (i = 0; i < id->bNumEndpoints; i++) {
1211 ed = usbd_interface2endpoint_descriptor(sc->ure_iface, i);
1212 if (ed == NULL) {
1213 aprint_error_dev(self, "couldn't get ep %d\n", i);
1214 return; /* XXX */
1215 }
1216 if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
1217 UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
1218 sc->ure_ed[URE_ENDPT_RX] = ed->bEndpointAddress;
1219 } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
1220 UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
1221 sc->ure_ed[URE_ENDPT_TX] = ed->bEndpointAddress;
1222 }
1223 }
1224
1225 sc->ure_phyno = 0;
1226
1227 ver = ure_read_2(sc, URE_PLA_TCR1, URE_MCU_TYPE_PLA) & URE_VERSION_MASK;
1228 switch (ver) {
1229 case 0x4c00:
1230 sc->ure_chip |= URE_CHIP_VER_4C00;
1231 break;
1232 case 0x4c10:
1233 sc->ure_chip |= URE_CHIP_VER_4C10;
1234 break;
1235 case 0x5c00:
1236 sc->ure_chip |= URE_CHIP_VER_5C00;
1237 break;
1238 case 0x5c10:
1239 sc->ure_chip |= URE_CHIP_VER_5C10;
1240 break;
1241 case 0x5c20:
1242 sc->ure_chip |= URE_CHIP_VER_5C20;
1243 break;
1244 case 0x5c30:
1245 sc->ure_chip |= URE_CHIP_VER_5C30;
1246 break;
1247 default:
1248 /* fake addr? or just fail? */
1249 break;
1250 }
1251 aprint_normal_dev(self, "RTL%d %sver %04x\n",
1252 (sc->ure_flags & URE_FLAG_8152) ? 8152 : 8153,
1253 (sc->ure_chip != 0) ? "" : "unknown ",
1254 ver);
1255
1256 mutex_enter(&sc->ure_lock);
1257 if (sc->ure_flags & URE_FLAG_8152)
1258 ure_rtl8152_init(sc);
1259 else
1260 ure_rtl8153_init(sc);
1261
1262 if (sc->ure_chip & URE_CHIP_VER_4C00)
1263 ure_read_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA, eaddr,
1264 sizeof(eaddr));
1265 else
1266 ure_read_mem(sc, URE_PLA_BACKUP, URE_MCU_TYPE_PLA, eaddr,
1267 sizeof(eaddr));
1268 mutex_exit(&sc->ure_lock);
1269
1270 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
1271
1272 ifp = GET_IFP(sc);
1273 ifp->if_softc = sc;
1274 strlcpy(ifp->if_xname, device_xname(sc->ure_dev), IFNAMSIZ);
1275 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1276 ifp->if_extflags = IFEF_MPSAFE;
1277 ifp->if_init = ure_init;
1278 ifp->if_ioctl = ure_ioctl;
1279 ifp->if_start = ure_start;
1280 ifp->if_stop = ure_stop;
1281
1282 /*
1283 	 * We don't support TSOv4 and v6 for now; they would have to be
1284 	 * handled in software in some cases.
1285 */
1286 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx |
1287 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
1288 #ifdef INET6
1289 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx;
1290 #endif
1291 if (sc->ure_chip & ~URE_CHIP_VER_4C00) {
1292 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
1293 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
1294 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1295 }
1296 sc->ure_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
1297 #ifdef notyet
1298 sc->ure_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1299 #endif
1300
1301 IFQ_SET_READY(&ifp->if_snd);
1302
1303 mii = GET_MII(sc);
1304 mii->mii_ifp = ifp;
1305 mii->mii_readreg = ure_miibus_readreg;
1306 mii->mii_writereg = ure_miibus_writereg;
1307 mii->mii_statchg = ure_miibus_statchg;
1308 mii->mii_flags = MIIF_AUTOTSLEEP;
1309
1310 sc->ure_ec.ec_mii = mii;
1311 ifmedia_init(&mii->mii_media, 0, ure_ifmedia_upd, ure_ifmedia_sts);
1312 mii_attach(self, mii, 0xffffffff, sc->ure_phyno, MII_OFFSET_ANY, 0);
1313
1314 if (LIST_FIRST(&mii->mii_phys) == NULL) {
1315 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
1316 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
1317 } else
1318 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
1319
1320 if_attach(ifp);
1321 ether_ifattach(ifp, eaddr);
1322
1323 rnd_attach_source(&sc->ure_rnd_source, device_xname(sc->ure_dev),
1324 RND_TYPE_NET, RND_FLAG_DEFAULT);
1325
1326 usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->ure_udev, sc->ure_dev);
1327
1328 if (!pmf_device_register(self, NULL, NULL))
1329 aprint_error_dev(self, "couldn't establish power handler\n");
1330 }
1331
1332 static int
1333 ure_detach(device_t self, int flags)
1334 {
1335 struct ure_softc *sc = device_private(self);
1336 struct ifnet *ifp = GET_IFP(sc);
1337
1338 pmf_device_deregister(self);
1339
1340 mutex_enter(&sc->ure_lock);
1341 sc->ure_dying = true;
1342 mutex_exit(&sc->ure_lock);
1343
1344 callout_halt(&sc->ure_stat_ch, NULL);
1345
1346 usb_rem_task_wait(sc->ure_udev, &sc->ure_tick_task, USB_TASKQ_DRIVER,
1347 NULL);
1348
1349 if (sc->ure_ep[URE_ENDPT_TX] != NULL)
1350 usbd_abort_pipe(sc->ure_ep[URE_ENDPT_TX]);
1351 if (sc->ure_ep[URE_ENDPT_RX] != NULL)
1352 usbd_abort_pipe(sc->ure_ep[URE_ENDPT_RX]);
1353
1354 mutex_enter(&sc->ure_lock);
1355 sc->ure_refcnt--;
1356 while (sc->ure_refcnt > 0) {
1357 /* Wait for processes to go away */
1358 cv_wait(&sc->ure_detachcv, &sc->ure_lock);
1359 }
1360 mutex_exit(&sc->ure_lock);
1361
1362 	/* If the attach was only partial, the items below were never configured. */
1363 if (sc->ure_phyno != -1) {
1364 if (ifp->if_flags & IFF_RUNNING) {
1365 IFNET_LOCK(ifp);
1366 ure_stop(ifp, 1);
1367 IFNET_UNLOCK(ifp);
1368 }
1369
1370 rnd_detach_source(&sc->ure_rnd_source);
1371 mii_detach(&sc->ure_mii, MII_PHY_ANY, MII_OFFSET_ANY);
1372 ifmedia_delete_instance(&sc->ure_mii.mii_media, IFM_INST_ANY);
1373 if (ifp->if_softc != NULL) {
1374 ether_ifdetach(ifp);
1375 if_detach(ifp);
1376 }
1377 }
1378
1379 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->ure_udev, sc->ure_dev);
1380
1381 callout_destroy(&sc->ure_stat_ch);
1382 cv_destroy(&sc->ure_detachcv);
1383 mutex_destroy(&sc->ure_lock);
1384 mutex_destroy(&sc->ure_rxlock);
1385 mutex_destroy(&sc->ure_txlock);
1386 mutex_destroy(&sc->ure_mii_lock);
1387
1388 return 0;
1389 }
1390
1391 static int
1392 ure_activate(device_t self, enum devact act)
1393 {
1394 struct ure_softc *sc = device_private(self);
1395 struct ifnet *ifp = GET_IFP(sc);
1396
1397 switch (act) {
1398 case DVACT_DEACTIVATE:
1399 if_deactivate(ifp);
1400
1401 mutex_enter(&sc->ure_lock);
1402 sc->ure_dying = true;
1403 mutex_exit(&sc->ure_lock);
1404
1405 mutex_enter(&sc->ure_rxlock);
1406 mutex_enter(&sc->ure_txlock);
1407 sc->ure_stopping = true;
1408 mutex_exit(&sc->ure_txlock);
1409 mutex_exit(&sc->ure_rxlock);
1410
1411 return 0;
1412 default:
1413 return EOPNOTSUPP;
1414 }
1415 return 0;
1416 }
1417
1418 static void
1419 ure_tick_task(void *xsc)
1420 {
1421 struct ure_softc *sc = xsc;
1422 struct ifnet *ifp;
1423 struct mii_data *mii;
1424
1425 if (sc == NULL)
1426 return;
1427
1428 mutex_enter(&sc->ure_lock);
1429 if (sc->ure_stopping || sc->ure_dying) {
1430 mutex_exit(&sc->ure_lock);
1431 return;
1432 }
1433
1434 ifp = GET_IFP(sc);
1435 mii = GET_MII(sc);
1436 if (mii == NULL) {
1437 mutex_exit(&sc->ure_lock);
1438 return;
1439 }
1440
1441 sc->ure_refcnt++;
1442 mutex_exit(&sc->ure_lock);
1443
1444 mii_tick(mii);
1445
1446 if ((sc->ure_flags & URE_FLAG_LINK) == 0)
1447 ure_miibus_statchg(ifp);
1448
1449 mutex_enter(&sc->ure_lock);
1450 if (--sc->ure_refcnt < 0)
1451 cv_broadcast(&sc->ure_detachcv);
1452 if (!sc->ure_stopping && !sc->ure_dying)
1453 callout_schedule(&sc->ure_stat_ch, hz);
1454 mutex_exit(&sc->ure_lock);
1455 }
1456
1457 static void
1458 ure_lock_mii(struct ure_softc *sc)
1459 {
1460
1461 mutex_enter(&sc->ure_lock);
1462 sc->ure_refcnt++;
1463 mutex_exit(&sc->ure_lock);
1464
1465 mutex_enter(&sc->ure_mii_lock);
1466 }
1467
1468 static void
1469 ure_unlock_mii(struct ure_softc *sc)
1470 {
1471
1472 mutex_exit(&sc->ure_mii_lock);
1473 mutex_enter(&sc->ure_lock);
1474 if (--sc->ure_refcnt < 0)
1475 cv_broadcast(&sc->ure_detachcv);
1476 mutex_exit(&sc->ure_lock);
1477 }
1478
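/*
 * Bulk-in completion handler.  One transfer may carry several frames,
 * each preceded by a struct ure_rxpkt descriptor and padded to an
 * 8-byte boundary; walk them and pass each frame up the stack.
 */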
1479 static void
1480 ure_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
1481 {
1482 struct ure_chain *c = (struct ure_chain *)priv;
1483 struct ure_softc *sc = c->uc_sc;
1484 struct ifnet *ifp = GET_IFP(sc);
1485 uint8_t *buf = c->uc_buf;
1486 uint32_t total_len;
1487 uint16_t pktlen = 0;
1488 struct mbuf *m;
1489 struct ure_rxpkt rxhdr;
1490
1491 mutex_enter(&sc->ure_rxlock);
1492
1493 if (sc->ure_dying || sc->ure_stopping ||
1494 status == USBD_INVAL || status == USBD_NOT_STARTED ||
1495 status == USBD_CANCELLED || !(ifp->if_flags & IFF_RUNNING)) {
1496 mutex_exit(&sc->ure_rxlock);
1497 return;
1498 }
1499
1500 if (status != USBD_NORMAL_COMPLETION) {
1501 if (usbd_ratecheck(&sc->ure_rx_notice))
1502 URE_PRINTF(sc, "usb errors on rx: %s\n",
1503 usbd_errstr(status));
1504 if (status == USBD_STALLED)
1505 usbd_clear_endpoint_stall_async(
1506 sc->ure_ep[URE_ENDPT_RX]);
1507 goto done;
1508 }
1509
1510 usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);
1511 DPRINTFN(3, ("received %d bytes\n", total_len));
1512
1513 KASSERTMSG(total_len <= sc->ure_bufsz, "%u vs %u",
1514 total_len, sc->ure_bufsz);
1515
1516 do {
1517 if (total_len < sizeof(rxhdr)) {
1518 DPRINTF(("too few bytes left for a packet header\n"));
1519 ifp->if_ierrors++;
1520 goto done;
1521 }
1522
1523 buf += roundup(pktlen, 8);
1524
1525 memcpy(&rxhdr, buf, sizeof(rxhdr));
1526 total_len -= sizeof(rxhdr);
1527
1528 pktlen = le32toh(rxhdr.ure_pktlen) & URE_RXPKT_LEN_MASK;
1529 DPRINTFN(4, ("next packet is %d bytes\n", pktlen));
1530 if (pktlen > total_len) {
1531 DPRINTF(("not enough bytes left for next packet\n"));
1532 ifp->if_ierrors++;
1533 goto done;
1534 }
1535
1536 total_len -= roundup(pktlen, 8);
1537 buf += sizeof(rxhdr);
1538
1539 m = m_devget(buf, pktlen - ETHER_CRC_LEN, 0, ifp);
1540 if (m == NULL) {
1541 DPRINTF(("unable to allocate mbuf for next packet\n"));
1542 ifp->if_ierrors++;
1543 goto done;
1544 }
1545
1546 m->m_pkthdr.csum_flags = ure_rxcsum(ifp, &rxhdr);
1547
1548 mutex_exit(&sc->ure_rxlock);
1549 if_percpuq_enqueue(ifp->if_percpuq, m);
1550 mutex_enter(&sc->ure_rxlock);
1551
1552 if (sc->ure_dying || sc->ure_stopping) {
1553 mutex_exit(&sc->ure_rxlock);
1554 return;
1555 }
1556
1557 } while (total_len > 0);
1558
1559 done:
1560 if (sc->ure_dying || sc->ure_stopping) {
1561 mutex_exit(&sc->ure_rxlock);
1562 return;
1563 }
1564 mutex_exit(&sc->ure_rxlock);
1565
1566 /* Setup new transfer. */
1567 usbd_setup_xfer(xfer, c, c->uc_buf, sc->ure_bufsz,
1568 USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ure_rxeof);
1569 usbd_transfer(xfer);
1570 }
1571
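/*
 * Convert the RX descriptor checksum bits into M_CSUM_* mbuf flags,
 * marking checksums the hardware flagged as bad.
 */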
1572 static int
1573 ure_rxcsum(struct ifnet *ifp, struct ure_rxpkt *rp)
1574 {
1575 int enabled = ifp->if_csum_flags_rx, flags = 0;
1576 uint32_t csum, misc;
1577
1578 if (enabled == 0)
1579 return 0;
1580
1581 csum = le32toh(rp->ure_csum);
1582 misc = le32toh(rp->ure_misc);
1583
1584 if (csum & URE_RXPKT_IPV4_CS) {
1585 flags |= M_CSUM_IPv4;
1586 if (csum & URE_RXPKT_TCP_CS)
1587 flags |= M_CSUM_TCPv4;
1588 if (csum & URE_RXPKT_UDP_CS)
1589 flags |= M_CSUM_UDPv4;
1590 } else if (csum & URE_RXPKT_IPV6_CS) {
1591 flags = 0;
1592 if (csum & URE_RXPKT_TCP_CS)
1593 flags |= M_CSUM_TCPv6;
1594 if (csum & URE_RXPKT_UDP_CS)
1595 flags |= M_CSUM_UDPv6;
1596 }
1597
1598 flags &= enabled;
1599 if (__predict_false((flags & M_CSUM_IPv4) &&
1600 (misc & URE_RXPKT_IP_F)))
1601 flags |= M_CSUM_IPv4_BAD;
1602 if (__predict_false(
1603 ((flags & (M_CSUM_TCPv4 | M_CSUM_TCPv6)) && (misc & URE_RXPKT_TCP_F))
1604 || ((flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) && (misc & URE_RXPKT_UDP_F))
1605 ))
1606 flags |= M_CSUM_TCP_UDP_BAD;
1607
1608 return flags;
1609 }
1610
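/*
 * Bulk-out completion handler: release the TX slot, count the packet
 * or the error, and kick ure_start_locked() if the send queue is not
 * empty.
 */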
1611 static void
1612 ure_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
1613 {
1614 struct ure_chain *c = priv;
1615 struct ure_softc *sc = c->uc_sc;
1616 struct ure_cdata *cd = &sc->ure_cdata;
1617 struct ifnet *ifp = GET_IFP(sc);
1618
1619 mutex_enter(&sc->ure_txlock);
1620 if (sc->ure_stopping || sc->ure_dying) {
1621 mutex_exit(&sc->ure_txlock);
1622 return;
1623 }
1624
1625 DPRINTFN(2, ("tx completion\n"));
1626
1627 KASSERT(cd->tx_cnt > 0);
1628 cd->tx_cnt--;
1629
1630 switch (status) {
1631 case USBD_NOT_STARTED:
1632 case USBD_CANCELLED:
1633 break;
1634
1635 case USBD_NORMAL_COMPLETION:
1636 ifp->if_opackets++;
1637
1638 if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
1639 ure_start_locked(ifp);
1640 }
1641 break;
1642
1643 default:
1644 ifp->if_oerrors++;
1645 if (usbd_ratecheck(&sc->ure_tx_notice))
1646 URE_PRINTF(sc, "usb error on tx: %s\n",
1647 usbd_errstr(status));
1648 if (status == USBD_STALLED)
1649 usbd_clear_endpoint_stall_async(
1650 sc->ure_ep[URE_ENDPT_TX]);
1651 break;
1652 }
1653
1654 mutex_exit(&sc->ure_txlock);
1655 }
1656
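/*
 * Allocate the bulk-out xfers and buffers for the TX ring (existing
 * xfers are reused); ure_rx_list_init() does the same for the RX ring.
 */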
1657 static int
1658 ure_tx_list_init(struct ure_softc *sc)
1659 {
1660 struct ure_cdata *cd;
1661 struct ure_chain *c;
1662 int i, error;
1663
1664 cd = &sc->ure_cdata;
1665 for (i = 0; i < URE_TX_LIST_CNT; i++) {
1666 c = &cd->tx_chain[i];
1667 c->uc_sc = sc;
1668 if (c->uc_xfer == NULL) {
1669 error = usbd_create_xfer(sc->ure_ep[URE_ENDPT_TX],
1670 sc->ure_bufsz, USBD_FORCE_SHORT_XFER, 0,
1671 &c->uc_xfer);
1672 if (error)
1673 return error;
1674 c->uc_buf = usbd_get_buffer(c->uc_xfer);
1675 }
1676 }
1677
1678 cd->tx_prod = cd->tx_cnt = 0;
1679
1680 return 0;
1681 }
1682
1683 static int
1684 ure_rx_list_init(struct ure_softc *sc)
1685 {
1686 struct ure_cdata *cd;
1687 struct ure_chain *c;
1688 int i, error;
1689
1690 cd = &sc->ure_cdata;
1691 for (i = 0; i < URE_RX_LIST_CNT; i++) {
1692 c = &cd->rx_chain[i];
1693 c->uc_sc = sc;
1694 error = usbd_create_xfer(sc->ure_ep[URE_ENDPT_RX],
1695 sc->ure_bufsz, 0, 0, &c->uc_xfer);
1696 if (error)
1697 return error;
1698 c->uc_buf = usbd_get_buffer(c->uc_xfer);
1699 }
1700
1701 return 0;
1702 }
1703
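/*
 * Build one TX transfer: prepend a struct ure_txpkt header carrying
 * the frame length and checksum-offload bits, copy the mbuf behind
 * it, and submit the bulk-out xfer.
 */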
1704 static int
1705 ure_encap(struct ure_softc *sc, struct mbuf *m, int idx)
1706 {
1707 struct ifnet *ifp = GET_IFP(sc);
1708 struct ure_chain *c;
1709 usbd_status err;
1710 struct ure_txpkt txhdr;
1711 uint32_t frm_len = 0;
1712 uint8_t *buf;
1713
1714 KASSERT(mutex_owned(&sc->ure_txlock));
1715
1716 c = &sc->ure_cdata.tx_chain[idx];
1717 buf = c->uc_buf;
1718
1719 /* header */
1720 txhdr.ure_pktlen = htole32(m->m_pkthdr.len | URE_TXPKT_TX_FS |
1721 URE_TXPKT_TX_LS);
1722 txhdr.ure_csum = htole32(ure_txcsum(m));
1723 memcpy(buf, &txhdr, sizeof(txhdr));
1724 buf += sizeof(txhdr);
1725 frm_len = sizeof(txhdr);
1726
1727 /* packet */
1728 m_copydata(m, 0, m->m_pkthdr.len, buf);
1729 frm_len += m->m_pkthdr.len;
1730
1731 if (__predict_false(c->uc_xfer == NULL))
1732 return EIO; /* XXX plugged out or down */
1733
1734 DPRINTFN(2, ("tx %d bytes\n", frm_len));
1735 usbd_setup_xfer(c->uc_xfer, c, c->uc_buf, frm_len,
1736 USBD_FORCE_SHORT_XFER, 10000, ure_txeof);
1737
1738 err = usbd_transfer(c->uc_xfer);
1739 if (err != USBD_IN_PROGRESS) {
1740 /* XXXSMP IFNET_LOCK */
1741 ure_stop(ifp, 0);
1742 return EIO;
1743 }
1744
1745 return 0;
1746 }
1747
1748 /*
1749  * We need to calculate the L4 checksum in software if the offset
1750  * of the L4 header is larger than 0x7ff = 2047.
1751 */
1752 static uint32_t
1753 ure_txcsum(struct mbuf *m)
1754 {
1755 struct ether_header *eh;
1756 int flags = m->m_pkthdr.csum_flags;
1757 uint32_t data = m->m_pkthdr.csum_data;
1758 uint32_t reg = 0;
1759 int l3off, l4off;
1760 uint16_t type;
1761
1762 if (flags == 0)
1763 return 0;
1764
1765 if (__predict_true(m->m_len >= (int)sizeof(*eh))) {
1766 eh = mtod(m, struct ether_header *);
1767 type = eh->ether_type;
1768 } else
1769 m_copydata(m, offsetof(struct ether_header, ether_type),
1770 sizeof(type), &type);
1771 switch (type = htons(type)) {
1772 case ETHERTYPE_IP:
1773 case ETHERTYPE_IPV6:
1774 l3off = ETHER_HDR_LEN;
1775 break;
1776 case ETHERTYPE_VLAN:
1777 l3off = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1778 break;
1779 default:
1780 return 0;
1781 }
1782
1783 if (flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
1784 l4off = l3off + M_CSUM_DATA_IPv4_IPHL(data);
1785 if (__predict_false(l4off > URE_L4_OFFSET_MAX)) {
1786 in_undefer_cksum(m, l3off, flags);
1787 return 0;
1788 }
1789 reg |= URE_TXPKT_IPV4_CS;
1790 if (flags & M_CSUM_TCPv4)
1791 reg |= URE_TXPKT_TCP_CS;
1792 else
1793 reg |= URE_TXPKT_UDP_CS;
1794 reg |= l4off << URE_L4_OFFSET_SHIFT;
1795 }
1796 #ifdef INET6
1797 else if (flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
1798 l4off = l3off + M_CSUM_DATA_IPv6_IPHL(data);
1799 if (__predict_false(l4off > URE_L4_OFFSET_MAX)) {
1800 in6_undefer_cksum(m, l3off, flags);
1801 return 0;
1802 }
1803 reg |= URE_TXPKT_IPV6_CS;
1804 if (flags & M_CSUM_TCPv6)
1805 reg |= URE_TXPKT_TCP_CS;
1806 else
1807 reg |= URE_TXPKT_UDP_CS;
1808 reg |= l4off << URE_L4_OFFSET_SHIFT;
1809 }
1810 #endif
1811 else if (flags & M_CSUM_IPv4)
1812 reg |= URE_TXPKT_IPV4_CS;
1813
1814 return reg;
1815 }
1816