1 /*	$NetBSD: if_ure.c,v 1.14 2019/07/19 04:17:34 mrg Exp $	*/
2
3 /* $OpenBSD: if_ure.c,v 1.10 2018/11/02 21:32:30 jcs Exp $ */
4 /*-
5 * Copyright (c) 2015-2016 Kevin Lo <kevlo (at) FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /* RealTek RTL8152 10/100 and RTL8153 10/100/1000 USB Ethernet devices */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: if_ure.c,v 1.14 2019/07/19 04:17:34 mrg Exp $");
34
35 #ifdef _KERNEL_OPT
36 #include "opt_usb.h"
37 #include "opt_inet.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/bus.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/mutex.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/device.h>
49
50 #include <sys/rndsource.h>
51
52 #include <net/if.h>
53 #include <net/if_dl.h>
54 #include <net/if_ether.h>
55 #include <net/if_media.h>
56
57 #include <net/bpf.h>
58
59 #include <netinet/in.h>
60
61 #include <netinet/in_offload.h> /* XXX for in_undefer_cksum() */
62 #ifdef INET6
63 #include <netinet6/in6_offload.h> /* XXX for in6_undefer_cksum() */
64 #endif
65
66 #include <dev/mii/mii.h>
67 #include <dev/mii/miivar.h>
68
69 #include <dev/usb/usb.h>
70 #include <dev/usb/usbdi.h>
71 #include <dev/usb/usbdi_util.h>
72 #include <dev/usb/usbdivar.h>
73 #include <dev/usb/usbdevs.h>
74
75 #include <dev/ic/rtl81x9reg.h> /* XXX for RTK_GMEDIASTAT */
76 #include <dev/usb/if_urereg.h>
77 #include <dev/usb/if_urevar.h>
78
79 #define URE_PRINTF(sc, fmt, args...) \
80 device_printf((sc)->ure_dev, "%s: " fmt, __func__, ##args);
81
82 #define URE_DEBUG
83 #ifdef URE_DEBUG
84 #define DPRINTF(x) do { if (uredebug) printf x; } while (0)
85 #define DPRINTFN(n, x) do { if (uredebug >= (n)) printf x; } while (0)
86 int uredebug = 1;
87 #else
88 #define DPRINTF(x)
89 #define DPRINTFN(n, x)
90 #endif
91
92 static const struct usb_devno ure_devs[] = {
93 { USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8152 },
94 { USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8153 }
95 };
96
97 static int ure_match(device_t, cfdata_t, void *);
98 static void ure_attach(device_t, device_t, void *);
99 static int ure_detach(device_t, int);
100 static int ure_activate(device_t, enum devact);
101
102 static int ure_ctl(struct ure_softc *, uint8_t, uint16_t, uint16_t,
103 void *, int);
104 static int ure_read_mem(struct ure_softc *, uint16_t, uint16_t, void *,
105 int);
106 static int ure_write_mem(struct ure_softc *, uint16_t, uint16_t, void *,
107 int);
108 static uint8_t ure_read_1(struct ure_softc *, uint16_t, uint16_t);
109 static uint16_t ure_read_2(struct ure_softc *, uint16_t, uint16_t);
110 static uint32_t ure_read_4(struct ure_softc *, uint16_t, uint16_t);
111 static int ure_write_1(struct ure_softc *, uint16_t, uint16_t, uint32_t);
112 static int ure_write_2(struct ure_softc *, uint16_t, uint16_t, uint32_t);
113 static int ure_write_4(struct ure_softc *, uint16_t, uint16_t, uint32_t);
114 static uint16_t ure_ocp_reg_read(struct ure_softc *, uint16_t);
115 static void ure_ocp_reg_write(struct ure_softc *, uint16_t, uint16_t);
116
117 static int ure_init(struct ifnet *);
118 static void ure_stop(struct ifnet *, int);
119 static void ure_stop_locked(struct ifnet *, int);
120 static void ure_start(struct ifnet *);
121 static void ure_reset(struct ure_softc *);
122 static void ure_miibus_statchg(struct ifnet *);
123 static int ure_miibus_readreg(device_t, int, int, uint16_t *);
124 static int ure_miibus_writereg(device_t, int, int, uint16_t);
125 static void ure_lock_mii(struct ure_softc *);
126 static void ure_unlock_mii(struct ure_softc *);
127
128 static int ure_encap(struct ure_softc *, struct mbuf *, int);
129 static uint32_t ure_txcsum(struct mbuf *);
130 static void ure_rxeof(struct usbd_xfer *, void *, usbd_status);
131 static int ure_rxcsum(struct ifnet *, struct ure_rxpkt *);
132 static void ure_txeof(struct usbd_xfer *, void *, usbd_status);
133 static int ure_rx_list_init(struct ure_softc *);
134 static int ure_tx_list_init(struct ure_softc *);
135
136 static void ure_tick_task(void *);
137 static void ure_tick(void *);
138
139 static int ure_ifmedia_upd(struct ifnet *);
140 static void ure_ifmedia_sts(struct ifnet *, struct ifmediareq *);
141 static int ure_ioctl(struct ifnet *, u_long, void *);
142 static void ure_rtl8152_init(struct ure_softc *);
143 static void ure_rtl8153_init(struct ure_softc *);
144 static void ure_disable_teredo(struct ure_softc *);
145 static void ure_init_fifo(struct ure_softc *);
146
147 CFATTACH_DECL_NEW(ure, sizeof(struct ure_softc), ure_match, ure_attach,
148 ure_detach, ure_activate);
149
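/*
 * Issue a vendor-specific control request to read or write a block of
 * chip registers.  "rw" selects URE_CTL_READ or URE_CTL_WRITE, "val" is
 * the register address and "index" carries the MCU type and byte-enable
 * bits.
 */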
150 static int
151 ure_ctl(struct ure_softc *sc, uint8_t rw, uint16_t val, uint16_t index,
152 void *buf, int len)
153 {
154 usb_device_request_t req;
155 usbd_status err;
156
157 if (sc->ure_dying)
158 return 0;
159
160 if (rw == URE_CTL_WRITE)
161 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
162 else
163 req.bmRequestType = UT_READ_VENDOR_DEVICE;
164 req.bRequest = UR_SET_ADDRESS;
165 USETW(req.wValue, val);
166 USETW(req.wIndex, index);
167 USETW(req.wLength, len);
168
169 	DPRINTFN(5, ("ure_ctl: rw %d, val 0x%04hx, index 0x%04hx, len %d\n",
170 	    rw, val, index, len));
171 err = usbd_do_request(sc->ure_udev, &req, buf);
172 if (err) {
173 DPRINTF(("ure_ctl: error %d\n", err));
174 return -1;
175 }
176
177 return 0;
178 }
179
180 static int
181 ure_read_mem(struct ure_softc *sc, uint16_t addr, uint16_t index,
182 void *buf, int len)
183 {
184
185 return ure_ctl(sc, URE_CTL_READ, addr, index, buf, len);
186 }
187
188 static int
189 ure_write_mem(struct ure_softc *sc, uint16_t addr, uint16_t index,
190 void *buf, int len)
191 {
192
193 return ure_ctl(sc, URE_CTL_WRITE, addr, index, buf, len);
194 }
195
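/*
 * Register access helpers.  The hardware is always accessed as 32-bit
 * dwords; narrower reads and writes shift the value into place and set
 * the appropriate byte-enable bits in the index field.
 */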
196 static uint8_t
197 ure_read_1(struct ure_softc *sc, uint16_t reg, uint16_t index)
198 {
199 uint32_t val;
200 uint8_t temp[4];
201 uint8_t shift;
202
203 shift = (reg & 3) << 3;
204 reg &= ~3;
205
206 ure_read_mem(sc, reg, index, &temp, 4);
207 val = UGETDW(temp);
208 val >>= shift;
209
210 return val & 0xff;
211 }
212
213 static uint16_t
214 ure_read_2(struct ure_softc *sc, uint16_t reg, uint16_t index)
215 {
216 uint32_t val;
217 uint8_t temp[4];
218 uint8_t shift;
219
220 shift = (reg & 2) << 3;
221 reg &= ~3;
222
223 ure_read_mem(sc, reg, index, &temp, 4);
224 val = UGETDW(temp);
225 val >>= shift;
226
227 return val & 0xffff;
228 }
229
230 static uint32_t
231 ure_read_4(struct ure_softc *sc, uint16_t reg, uint16_t index)
232 {
233 uint8_t temp[4];
234
235 ure_read_mem(sc, reg, index, &temp, 4);
236 return UGETDW(temp);
237 }
238
239 static int
240 ure_write_1(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
241 {
242 uint16_t byen;
243 uint8_t temp[4];
244 uint8_t shift;
245
246 byen = URE_BYTE_EN_BYTE;
247 shift = reg & 3;
248 val &= 0xff;
249
250 if (reg & 3) {
251 byen <<= shift;
252 val <<= (shift << 3);
253 reg &= ~3;
254 }
255
256 USETDW(temp, val);
257 return ure_write_mem(sc, reg, index | byen, &temp, 4);
258 }
259
260 static int
261 ure_write_2(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
262 {
263 uint16_t byen;
264 uint8_t temp[4];
265 uint8_t shift;
266
267 byen = URE_BYTE_EN_WORD;
268 shift = reg & 2;
269 val &= 0xffff;
270
271 if (reg & 2) {
272 byen <<= shift;
273 val <<= (shift << 3);
274 reg &= ~3;
275 }
276
277 USETDW(temp, val);
278 return ure_write_mem(sc, reg, index | byen, &temp, 4);
279 }
280
281 static int
282 ure_write_4(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
283 {
284 uint8_t temp[4];
285
286 USETDW(temp, val);
287 return ure_write_mem(sc, reg, index | URE_BYTE_EN_DWORD, &temp, 4);
288 }
289
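/*
 * OCP (PHY) register access.  The GPHY registers are reached through a
 * 4 KB window at 0xb000; URE_PLA_OCP_GPHY_BASE selects which 4 KB page
 * of the OCP address space the window maps.
 */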
290 static uint16_t
291 ure_ocp_reg_read(struct ure_softc *sc, uint16_t addr)
292 {
293 uint16_t reg;
294
295 ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000);
296 reg = (addr & 0x0fff) | 0xb000;
297
298 return ure_read_2(sc, reg, URE_MCU_TYPE_PLA);
299 }
300
301 static void
302 ure_ocp_reg_write(struct ure_softc *sc, uint16_t addr, uint16_t data)
303 {
304 uint16_t reg;
305
306 ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000);
307 reg = (addr & 0x0fff) | 0xb000;
308
309 ure_write_2(sc, reg, URE_MCU_TYPE_PLA, data);
310 }
311
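/*
 * MII bus glue: map MII register accesses onto OCP register accesses.
 * Only the PHY at sc->ure_phyno is serviced.
 */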
312 static int
313 ure_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
314 {
315 struct ure_softc *sc = device_private(dev);
316
317 mutex_enter(&sc->ure_lock);
318 if (sc->ure_dying || sc->ure_phyno != phy) {
319 mutex_exit(&sc->ure_lock);
320 return -1;
321 }
322 mutex_exit(&sc->ure_lock);
323
324 /* Let the rgephy driver read the URE_PLA_PHYSTATUS register. */
325 if (reg == RTK_GMEDIASTAT) {
326 *val = ure_read_1(sc, URE_PLA_PHYSTATUS, URE_MCU_TYPE_PLA);
327 return 0;
328 }
329
330 ure_lock_mii(sc);
331 *val = ure_ocp_reg_read(sc, URE_OCP_BASE_MII + reg * 2);
332 ure_unlock_mii(sc);
333
334 return 0;
335 }
336
337 static int
338 ure_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
339 {
340 struct ure_softc *sc = device_private(dev);
341
342 mutex_enter(&sc->ure_lock);
343 if (sc->ure_dying || sc->ure_phyno != phy) {
344 mutex_exit(&sc->ure_lock);
345 return -1;
346 }
347 mutex_exit(&sc->ure_lock);
348
349 ure_lock_mii(sc);
350 ure_ocp_reg_write(sc, URE_OCP_BASE_MII + reg * 2, val);
351 ure_unlock_mii(sc);
352
353 return 0;
354 }
355
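/*
 * Track link state reported by the MII layer.  URE_FLAG_LINK gates the
 * transmit path in ure_start_locked().
 */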
356 static void
357 ure_miibus_statchg(struct ifnet *ifp)
358 {
359 struct ure_softc *sc;
360 struct mii_data *mii;
361
362 if (ifp == NULL || (ifp->if_flags & IFF_RUNNING) == 0)
363 return;
364
365 sc = ifp->if_softc;
366 mii = GET_MII(sc);
367
368 if (mii == NULL)
369 return;
370
371 sc->ure_flags &= ~URE_FLAG_LINK;
372 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
373 (IFM_ACTIVE | IFM_AVALID)) {
374 switch (IFM_SUBTYPE(mii->mii_media_active)) {
375 case IFM_10_T:
376 case IFM_100_TX:
377 sc->ure_flags |= URE_FLAG_LINK;
378 break;
379 case IFM_1000_T:
380 if ((sc->ure_flags & URE_FLAG_8152) != 0)
381 break;
382 sc->ure_flags |= URE_FLAG_LINK;
383 break;
384 default:
385 break;
386 }
387 }
388 }
389
390 static int
391 ure_ifmedia_upd(struct ifnet *ifp)
392 {
393 struct ure_softc *sc = ifp->if_softc;
394 struct mii_data *mii = GET_MII(sc);
395 int err;
396
397 sc->ure_flags &= ~URE_FLAG_LINK;
398 if (mii->mii_instance) {
399 struct mii_softc *miisc;
400 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
401 mii_phy_reset(miisc);
402 }
403
404 err = mii_mediachg(mii);
405 if (err == ENXIO)
406 return 0; /* XXX */
407 else
408 return err;
409 }
410
411 static void
412 ure_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
413 {
414 struct ure_softc *sc = ifp->if_softc;
415 struct mii_data *mii = GET_MII(sc);
416
417 mii_pollstat(mii);
418 ifmr->ifm_active = mii->mii_media_active;
419 ifmr->ifm_status = mii->mii_media_status;
420 }
421
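/*
 * Program the receive filter: unicast and broadcast are always
 * accepted, plus either promiscuous/all-multicast mode or a 64-bit
 * multicast hash filter built from the current multicast list.
 */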
422 static void
423 ure_iff_locked(struct ure_softc *sc)
424 {
425 struct ethercom *ec = &sc->ure_ec;
426 struct ifnet *ifp = GET_IFP(sc);
427 struct ether_multi *enm;
428 struct ether_multistep step;
429 uint32_t hashes[2] = { 0, 0 };
430 uint32_t hash;
431 uint32_t rxmode;
432
433 KASSERT(mutex_owned(&sc->ure_lock));
434
435 if (sc->ure_dying)
436 return;
437
438 rxmode = ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA);
439 rxmode &= ~URE_RCR_ACPT_ALL;
440
441 /*
442 * Always accept frames destined to our station address.
443 * Always accept broadcast frames.
444 */
445 rxmode |= URE_RCR_APM | URE_RCR_AB;
446
447 if (ifp->if_flags & IFF_PROMISC) {
448 rxmode |= URE_RCR_AAP;
449 allmulti:
450 ETHER_LOCK(ec);
451 ec->ec_flags |= ETHER_F_ALLMULTI;
452 ETHER_UNLOCK(ec);
453 rxmode |= URE_RCR_AM;
454 hashes[0] = hashes[1] = 0xffffffff;
455 } else {
456 rxmode |= URE_RCR_AM;
457
458 ETHER_LOCK(ec);
459 ec->ec_flags &= ~ETHER_F_ALLMULTI;
460
461 ETHER_FIRST_MULTI(step, ec, enm);
462 while (enm != NULL) {
463 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
464 ETHER_ADDR_LEN)) {
465 ETHER_UNLOCK(ec);
466 goto allmulti;
467 }
468
469 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN)
470 >> 26;
471 			if (hash < 32)
472 				hashes[0] |= (1U << hash);
473 			else
474 				hashes[1] |= (1U << (hash - 32));
475
476 ETHER_NEXT_MULTI(step, enm);
477 }
478 ETHER_UNLOCK(ec);
479
480 hash = bswap32(hashes[0]);
481 hashes[0] = bswap32(hashes[1]);
482 hashes[1] = hash;
483 }
484
485 ure_write_4(sc, URE_PLA_MAR0, URE_MCU_TYPE_PLA, hashes[0]);
486 ure_write_4(sc, URE_PLA_MAR4, URE_MCU_TYPE_PLA, hashes[1]);
487 ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA, rxmode);
488 }
489
490 static void
491 ure_iff(struct ure_softc *sc)
492 {
493
494 mutex_enter(&sc->ure_lock);
495 ure_iff_locked(sc);
496 mutex_exit(&sc->ure_lock);
497 }
498
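/* Reset the chip and wait for the self-clearing reset bit to drop. */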
499 static void
500 ure_reset(struct ure_softc *sc)
501 {
502 int i;
503
504 KASSERT(mutex_owned(&sc->ure_lock));
505
506 ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, URE_CR_RST);
507
508 for (i = 0; i < URE_TIMEOUT; i++) {
509 if (!(ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) &
510 URE_CR_RST))
511 break;
512 usbd_delay_ms(sc->ure_udev, 10);
513 }
514 if (i == URE_TIMEOUT)
515 URE_PRINTF(sc, "reset never completed\n");
516 }
517
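/*
 * Bring the interface up: program the station address, reset the packet
 * filter, enable TX/RX, open the bulk pipes, allocate the transfer
 * lists and prime the receive pipe.
 */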
518 static int
519 ure_init_locked(struct ifnet *ifp)
520 {
521 struct ure_softc * const sc = ifp->if_softc;
522 struct ure_chain *c;
523 usbd_status err;
524 int i;
525 uint8_t eaddr[8];
526
527 KASSERT(mutex_owned(&sc->ure_lock));
528
529 if (sc->ure_dying)
530 return EIO;
531
532 /* Cancel pending I/O. */
533 if (ifp->if_flags & IFF_RUNNING)
534 ure_stop_locked(ifp, 1);
535
536 /* Set MAC address. */
537 memset(eaddr, 0, sizeof(eaddr));
538 memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
539 ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_CONFIG);
540 ure_write_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA | URE_BYTE_EN_SIX_BYTES,
541 eaddr, 8);
542 ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_NORAML);
543
544 /* Reset the packet filter. */
545 ure_write_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA,
546 ure_read_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA) &
547 ~URE_FMC_FCR_MCU_EN);
548 ure_write_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA,
549 ure_read_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA) |
550 URE_FMC_FCR_MCU_EN);
551
552 /* Enable transmit and receive. */
553 ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA,
554 ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) | URE_CR_RE |
555 URE_CR_TE);
556
557 ure_write_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA,
558 ure_read_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA) &
559 ~URE_RXDY_GATED_EN);
560
561 /* Load the multicast filter. */
562 ure_iff_locked(sc);
563
564 /* Open RX and TX pipes. */
565 err = usbd_open_pipe(sc->ure_iface, sc->ure_ed[URE_ENDPT_RX],
566 USBD_EXCLUSIVE_USE | USBD_MPSAFE, &sc->ure_ep[URE_ENDPT_RX]);
567 if (err) {
568 URE_PRINTF(sc, "open rx pipe failed: %s\n", usbd_errstr(err));
569 return EIO;
570 }
571
572 err = usbd_open_pipe(sc->ure_iface, sc->ure_ed[URE_ENDPT_TX],
573 USBD_EXCLUSIVE_USE | USBD_MPSAFE, &sc->ure_ep[URE_ENDPT_TX]);
574 if (err) {
575 URE_PRINTF(sc, "open tx pipe failed: %s\n", usbd_errstr(err));
576 return EIO;
577 }
578
579 if (ure_rx_list_init(sc)) {
580 URE_PRINTF(sc, "rx list init failed\n");
581 return ENOBUFS;
582 }
583
584 if (ure_tx_list_init(sc)) {
585 URE_PRINTF(sc, "tx list init failed\n");
586 return ENOBUFS;
587 }
588
589 mutex_enter(&sc->ure_rxlock);
590 mutex_enter(&sc->ure_txlock);
591 sc->ure_stopping = false;
592
593 /* Start up the receive pipe. */
594 for (i = 0; i < URE_RX_LIST_CNT; i++) {
595 c = &sc->ure_cdata.rx_chain[i];
596 usbd_setup_xfer(c->uc_xfer, c, c->uc_buf, sc->ure_bufsz,
597 USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ure_rxeof);
598 usbd_transfer(c->uc_xfer);
599 }
600
601 mutex_exit(&sc->ure_txlock);
602 mutex_exit(&sc->ure_rxlock);
603
604 /* Indicate we are up and running. */
605 KASSERT(IFNET_LOCKED(ifp));
606 ifp->if_flags |= IFF_RUNNING;
607
608 callout_reset(&sc->ure_stat_ch, hz, ure_tick, sc);
609
610 return 0;
611 }
612
613 static int
614 ure_init(struct ifnet *ifp)
615 {
616 struct ure_softc * const sc = ifp->if_softc;
617
618 mutex_enter(&sc->ure_lock);
619 int ret = ure_init_locked(ifp);
620 mutex_exit(&sc->ure_lock);
621
622 return ret;
623 }
624
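/*
 * Dequeue packets from the interface send queue and hand them to
 * ure_encap() until all URE_TX_LIST_CNT transmit slots are in use or
 * the queue is empty.
 */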
625 static void
626 ure_start_locked(struct ifnet *ifp)
627 {
628 struct ure_softc *sc = ifp->if_softc;
629 struct mbuf *m;
630 struct ure_cdata *cd = &sc->ure_cdata;
631 int idx;
632
633 KASSERT(cd->tx_cnt <= URE_TX_LIST_CNT);
634
635 if (sc->ure_dying || sc->ure_stopping ||
636 (sc->ure_flags & URE_FLAG_LINK) == 0 ||
637 (ifp->if_flags & IFF_RUNNING) == 0 ||
638 cd->tx_cnt == URE_TX_LIST_CNT) {
639 return;
640 }
641
642 idx = cd->tx_prod;
643 while (cd->tx_cnt < URE_TX_LIST_CNT) {
644 IFQ_POLL(&ifp->if_snd, m);
645 if (m == NULL)
646 break;
647
648 if (ure_encap(sc, m, idx)) {
649 ifp->if_oerrors++;
650 break;
651 }
652 IFQ_DEQUEUE(&ifp->if_snd, m);
653
654 bpf_mtap(ifp, m, BPF_D_OUT);
655 m_freem(m);
656
657 idx = (idx + 1) % URE_TX_LIST_CNT;
658 cd->tx_cnt++;
659 }
660 cd->tx_prod = idx;
661 }
662
663 static void
664 ure_start(struct ifnet *ifp)
665 {
666 struct ure_softc * const sc = ifp->if_softc;
667
668 mutex_enter(&sc->ure_txlock);
669 ure_start_locked(ifp);
670 mutex_exit(&sc->ure_txlock);
671 }
672
673 static void
674 ure_tick(void *xsc)
675 {
676 struct ure_softc *sc = xsc;
677
678 if (sc == NULL)
679 return;
680
681 mutex_enter(&sc->ure_lock);
682 if (!sc->ure_stopping && !sc->ure_dying) {
683 /* Perform periodic stuff in process context */
684 usb_add_task(sc->ure_udev, &sc->ure_tick_task, USB_TASKQ_DRIVER);
685 }
686 mutex_exit(&sc->ure_lock);
687 }
688
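/*
 * Stop the interface: reset the chip, abort and close both bulk pipes,
 * and release all transfer descriptors.
 */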
689 static void
690 ure_stop_locked(struct ifnet *ifp, int disable __unused)
691 {
692 struct ure_softc *sc = ifp->if_softc;
693 struct ure_chain *c;
694 usbd_status err;
695 int i;
696
697 KASSERT(mutex_owned(&sc->ure_lock));
698 mutex_enter(&sc->ure_rxlock);
699 mutex_enter(&sc->ure_txlock);
700 sc->ure_stopping = true;
701 mutex_exit(&sc->ure_txlock);
702 mutex_exit(&sc->ure_rxlock);
703
704 ure_reset(sc);
705
706 /*
707 * XXXSMP Would like to
708 * KASSERT(IFNET_LOCKED(ifp))
709 * here but the locking order is:
710 * ifnet -> sc lock -> rxlock -> txlock
711 * and sc lock is already held.
712 */
713 ifp->if_flags &= ~IFF_RUNNING;
714
715 callout_stop(&sc->ure_stat_ch);
716
717 sc->ure_flags &= ~URE_FLAG_LINK; /* XXX */
718
719 if (sc->ure_ep[URE_ENDPT_RX] != NULL) {
720 err = usbd_abort_pipe(sc->ure_ep[URE_ENDPT_RX]);
721 if (err)
722 URE_PRINTF(sc, "abort rx pipe failed: %s\n",
723 usbd_errstr(err));
724 }
725
726 if (sc->ure_ep[URE_ENDPT_TX] != NULL) {
727 err = usbd_abort_pipe(sc->ure_ep[URE_ENDPT_TX]);
728 if (err)
729 URE_PRINTF(sc, "abort tx pipe failed: %s\n",
730 usbd_errstr(err));
731 }
732
733 for (i = 0; i < URE_RX_LIST_CNT; i++) {
734 c = &sc->ure_cdata.rx_chain[i];
735 if (c->uc_xfer != NULL) {
736 usbd_destroy_xfer(c->uc_xfer);
737 c->uc_xfer = NULL;
738 }
739 }
740
741 for (i = 0; i < URE_TX_LIST_CNT; i++) {
742 c = &sc->ure_cdata.tx_chain[i];
743 if (c->uc_xfer != NULL) {
744 usbd_destroy_xfer(c->uc_xfer);
745 c->uc_xfer = NULL;
746 }
747 }
748
749 if (sc->ure_ep[URE_ENDPT_RX] != NULL) {
750 err = usbd_close_pipe(sc->ure_ep[URE_ENDPT_RX]);
751 if (err)
752 URE_PRINTF(sc, "close rx pipe failed: %s\n",
753 usbd_errstr(err));
754 sc->ure_ep[URE_ENDPT_RX] = NULL;
755 }
756
757 if (sc->ure_ep[URE_ENDPT_TX] != NULL) {
758 err = usbd_close_pipe(sc->ure_ep[URE_ENDPT_TX]);
759 if (err)
760 URE_PRINTF(sc, "close tx pipe failed: %s\n",
761 usbd_errstr(err));
762 sc->ure_ep[URE_ENDPT_TX] = NULL;
763 }
764 }
765
766 static void
767 ure_stop(struct ifnet *ifp, int disable __unused)
768 {
769 struct ure_softc * const sc = ifp->if_softc;
770
771 mutex_enter(&sc->ure_lock);
772 ure_stop_locked(ifp, disable);
773 mutex_exit(&sc->ure_lock);
774 }
775
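/*
 * RTL8152-specific initialization: disable ALDPS and power cut, set the
 * MCU clock ratio and power-saving bits, enable RX aggregation and
 * initialize the FIFOs.
 */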
776 static void
777 ure_rtl8152_init(struct ure_softc *sc)
778 {
779 uint32_t pwrctrl;
780
781 /* Disable ALDPS. */
782 ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA |
783 URE_DIS_SDSAVE);
784 usbd_delay_ms(sc->ure_udev, 20);
785
786 if (sc->ure_chip & URE_CHIP_VER_4C00) {
787 ure_write_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA,
788 ure_read_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA) &
789 ~URE_LED_MODE_MASK);
790 }
791
792 ure_write_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB,
793 ure_read_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB) &
794 ~URE_POWER_CUT);
795 ure_write_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB,
796 ure_read_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB) &
797 ~URE_RESUME_INDICATE);
798
799 ure_write_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA,
800 ure_read_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA) |
801 URE_TX_10M_IDLE_EN | URE_PFM_PWM_SWITCH);
802 pwrctrl = ure_read_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA);
803 pwrctrl &= ~URE_MCU_CLK_RATIO_MASK;
804 pwrctrl |= URE_MCU_CLK_RATIO | URE_D3_CLK_GATED_EN;
805 ure_write_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA, pwrctrl);
806 ure_write_2(sc, URE_PLA_GPHY_INTR_IMR, URE_MCU_TYPE_PLA,
807 URE_GPHY_STS_MSK | URE_SPEED_DOWN_MSK | URE_SPDWN_RXDV_MSK |
808 URE_SPDWN_LINKCHG_MSK);
809
810 /* Enable Rx aggregation. */
811 ure_write_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB,
812 ure_read_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB) &
813 ~URE_RX_AGG_DISABLE);
814
815 /* Disable ALDPS. */
816 ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA |
817 URE_DIS_SDSAVE);
818 usbd_delay_ms(sc->ure_udev, 20);
819
820 ure_init_fifo(sc);
821
822 ure_write_1(sc, URE_USB_TX_AGG, URE_MCU_TYPE_USB,
823 URE_TX_AGG_MAX_THRESHOLD);
824 ure_write_4(sc, URE_USB_RX_BUF_TH, URE_MCU_TYPE_USB, URE_RX_THR_HIGH);
825 ure_write_4(sc, URE_USB_TX_DMA, URE_MCU_TYPE_USB,
826 URE_TEST_MODE_DISABLE | URE_TX_SIZE_ADJUST1);
827 }
828
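/*
 * RTL8153-specific initialization: disable ALDPS and U1/U2 transitions,
 * wait for firmware autoload and PHY power-up, apply chip revision
 * specific tweaks, and initialize the FIFOs.
 */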
829 static void
830 ure_rtl8153_init(struct ure_softc *sc)
831 {
832 uint16_t val;
833 uint8_t u1u2[8];
834 int i;
835
836 /* Disable ALDPS. */
837 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
838 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) & ~URE_EN_ALDPS);
839 usbd_delay_ms(sc->ure_udev, 20);
840
841 memset(u1u2, 0x00, sizeof(u1u2));
842 ure_write_mem(sc, URE_USB_TOLERANCE,
843 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
844
845 for (i = 0; i < URE_TIMEOUT; i++) {
846 if (ure_read_2(sc, URE_PLA_BOOT_CTRL, URE_MCU_TYPE_PLA) &
847 URE_AUTOLOAD_DONE)
848 break;
849 usbd_delay_ms(sc->ure_udev, 10);
850 }
851 if (i == URE_TIMEOUT)
852 URE_PRINTF(sc, "timeout waiting for chip autoload\n");
853
854 for (i = 0; i < URE_TIMEOUT; i++) {
855 val = ure_ocp_reg_read(sc, URE_OCP_PHY_STATUS) &
856 URE_PHY_STAT_MASK;
857 if (val == URE_PHY_STAT_LAN_ON || val == URE_PHY_STAT_PWRDN)
858 break;
859 usbd_delay_ms(sc->ure_udev, 10);
860 }
861 if (i == URE_TIMEOUT)
862 URE_PRINTF(sc, "timeout waiting for phy to stabilize\n");
863
864 ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB,
865 ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB) &
866 ~URE_U2P3_ENABLE);
867
868 if (sc->ure_chip & URE_CHIP_VER_5C10) {
869 val = ure_read_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB);
870 val &= ~URE_PWD_DN_SCALE_MASK;
871 val |= URE_PWD_DN_SCALE(96);
872 ure_write_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB, val);
873
874 ure_write_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB,
875 ure_read_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB) |
876 URE_USB2PHY_L1 | URE_USB2PHY_SUSPEND);
877 } else if (sc->ure_chip & URE_CHIP_VER_5C20) {
878 ure_write_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA,
879 ure_read_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA) &
880 ~URE_ECM_ALDPS);
881 }
882 if (sc->ure_chip & (URE_CHIP_VER_5C20 | URE_CHIP_VER_5C30)) {
883 val = ure_read_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB);
884 if (ure_read_2(sc, URE_USB_BURST_SIZE, URE_MCU_TYPE_USB) ==
885 0)
886 val &= ~URE_DYNAMIC_BURST;
887 else
888 val |= URE_DYNAMIC_BURST;
889 ure_write_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB, val);
890 }
891
892 ure_write_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB,
893 ure_read_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB) |
894 URE_EP4_FULL_FC);
895
896 ure_write_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB,
897 ure_read_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB) &
898 ~URE_TIMER11_EN);
899
900 ure_write_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA,
901 ure_read_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA) &
902 ~URE_LED_MODE_MASK);
903
904 if ((sc->ure_chip & URE_CHIP_VER_5C10) &&
905 sc->ure_udev->ud_speed != USB_SPEED_SUPER)
906 val = URE_LPM_TIMER_500MS;
907 else
908 val = URE_LPM_TIMER_500US;
909 ure_write_1(sc, URE_USB_LPM_CTRL, URE_MCU_TYPE_USB,
910 val | URE_FIFO_EMPTY_1FB | URE_ROK_EXIT_LPM);
911
912 val = ure_read_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB);
913 val &= ~URE_SEN_VAL_MASK;
914 val |= URE_SEN_VAL_NORMAL | URE_SEL_RXIDLE;
915 ure_write_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB, val);
916
917 ure_write_2(sc, URE_USB_CONNECT_TIMER, URE_MCU_TYPE_USB, 0x0001);
918
919 ure_write_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB,
920 ure_read_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB) &
921 ~(URE_PWR_EN | URE_PHASE2_EN));
922 ure_write_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB,
923 ure_read_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB) &
924 ~URE_PCUT_STATUS);
925
926 memset(u1u2, 0xff, sizeof(u1u2));
927 ure_write_mem(sc, URE_USB_TOLERANCE,
928 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
929
930 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA,
931 URE_ALDPS_SPDWN_RATIO);
932 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL2, URE_MCU_TYPE_PLA,
933 URE_EEE_SPDWN_RATIO);
934 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL3, URE_MCU_TYPE_PLA,
935 URE_PKT_AVAIL_SPDWN_EN | URE_SUSPEND_SPDWN_EN |
936 URE_U1U2_SPDWN_EN | URE_L1_SPDWN_EN);
937 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL4, URE_MCU_TYPE_PLA,
938 URE_PWRSAVE_SPDWN_EN | URE_RXDV_SPDWN_EN | URE_TX10MIDLE_EN |
939 URE_TP100_SPDWN_EN | URE_TP500_SPDWN_EN | URE_TP1000_SPDWN_EN |
940 URE_EEE_SPDWN_EN);
941
942 val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB);
943 if (!(sc->ure_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10)))
944 val |= URE_U2P3_ENABLE;
945 else
946 val &= ~URE_U2P3_ENABLE;
947 ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val);
948
949 memset(u1u2, 0x00, sizeof(u1u2));
950 ure_write_mem(sc, URE_USB_TOLERANCE,
951 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
952
953 /* Disable ALDPS. */
954 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
955 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) & ~URE_EN_ALDPS);
956 usbd_delay_ms(sc->ure_udev, 20);
957
958 ure_init_fifo(sc);
959
960 /* Enable Rx aggregation. */
961 ure_write_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB,
962 ure_read_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB) &
963 ~URE_RX_AGG_DISABLE);
964
965 val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB);
966 if (!(sc->ure_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10)))
967 val |= URE_U2P3_ENABLE;
968 else
969 val &= ~URE_U2P3_ENABLE;
970 ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val);
971
972 memset(u1u2, 0xff, sizeof(u1u2));
973 ure_write_mem(sc, URE_USB_TOLERANCE,
974 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
975 }
976
977 static void
978 ure_disable_teredo(struct ure_softc *sc)
979 {
980
981 ure_write_4(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA,
982 ure_read_4(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA) &
983 ~(URE_TEREDO_SEL | URE_TEREDO_RS_EVENT_MASK | URE_OOB_TEREDO_EN));
984 ure_write_2(sc, URE_PLA_WDT6_CTRL, URE_MCU_TYPE_PLA,
985 URE_WDT6_SET_MODE);
986 ure_write_2(sc, URE_PLA_REALWOW_TIMER, URE_MCU_TYPE_PLA, 0);
987 ure_write_4(sc, URE_PLA_TEREDO_TIMER, URE_MCU_TYPE_PLA, 0);
988 }
989
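/*
 * Common datapath setup: take the MAC out of OOB mode, re-initialize
 * the link list, disable VLAN tag stripping and program the RX/TX FIFO
 * thresholds.
 */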
990 static void
991 ure_init_fifo(struct ure_softc *sc)
992 {
993 uint32_t rx_fifo1, rx_fifo2;
994 int i;
995
996 ure_write_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA,
997 ure_read_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA) |
998 URE_RXDY_GATED_EN);
999
1000 ure_disable_teredo(sc);
1001
1002 ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA,
1003 ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA) &
1004 ~URE_RCR_ACPT_ALL);
1005
1006 if (!(sc->ure_flags & URE_FLAG_8152)) {
1007 if (sc->ure_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10 |
1008 URE_CHIP_VER_5C20))
1009 ure_ocp_reg_write(sc, URE_OCP_ADC_CFG,
1010 URE_CKADSEL_L | URE_ADC_EN | URE_EN_EMI_L);
1011 if (sc->ure_chip & URE_CHIP_VER_5C00)
1012 ure_ocp_reg_write(sc, URE_OCP_EEE_CFG,
1013 ure_ocp_reg_read(sc, URE_OCP_EEE_CFG) &
1014 ~URE_CTAP_SHORT_EN);
1015 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
1016 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) |
1017 URE_EEE_CLKDIV_EN);
1018 ure_ocp_reg_write(sc, URE_OCP_DOWN_SPEED,
1019 ure_ocp_reg_read(sc, URE_OCP_DOWN_SPEED) |
1020 URE_EN_10M_BGOFF);
1021 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
1022 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) |
1023 URE_EN_10M_PLLOFF);
1024 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_IMPEDANCE);
1025 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x0b13);
1026 ure_write_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA,
1027 ure_read_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA) |
1028 URE_PFM_PWM_SWITCH);
1029
1030 /* Enable LPF corner auto tune. */
1031 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_LPF_CFG);
1032 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0xf70f);
1033
1034 /* Adjust 10M amplitude. */
1035 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_10M_AMP1);
1036 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x00af);
1037 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_10M_AMP2);
1038 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x0208);
1039 }
1040
1041 ure_reset(sc);
1042
1043 ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, 0);
1044
1045 ure_write_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA,
1046 ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
1047 ~URE_NOW_IS_OOB);
1048
1049 ure_write_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA,
1050 ure_read_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA) &
1051 ~URE_MCU_BORW_EN);
1052 for (i = 0; i < URE_TIMEOUT; i++) {
1053 if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
1054 URE_LINK_LIST_READY)
1055 break;
1056 usbd_delay_ms(sc->ure_udev, 10);
1057 }
1058 if (i == URE_TIMEOUT)
1059 URE_PRINTF(sc, "timeout waiting for OOB control\n");
1060 ure_write_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA,
1061 ure_read_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA) |
1062 URE_RE_INIT_LL);
1063 for (i = 0; i < URE_TIMEOUT; i++) {
1064 if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
1065 URE_LINK_LIST_READY)
1066 break;
1067 usbd_delay_ms(sc->ure_udev, 10);
1068 }
1069 if (i == URE_TIMEOUT)
1070 URE_PRINTF(sc, "timeout waiting for OOB control\n");
1071
1072 ure_write_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA,
1073 ure_read_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA) &
1074 ~URE_CPCR_RX_VLAN);
1075 ure_write_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA,
1076 ure_read_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA) |
1077 URE_TCR0_AUTO_FIFO);
1078
1079 /* Configure Rx FIFO threshold and coalescing. */
1080 ure_write_4(sc, URE_PLA_RXFIFO_CTRL0, URE_MCU_TYPE_PLA,
1081 URE_RXFIFO_THR1_NORMAL);
1082 if (sc->ure_udev->ud_speed == USB_SPEED_FULL) {
1083 rx_fifo1 = URE_RXFIFO_THR2_FULL;
1084 rx_fifo2 = URE_RXFIFO_THR3_FULL;
1085 } else {
1086 rx_fifo1 = URE_RXFIFO_THR2_HIGH;
1087 rx_fifo2 = URE_RXFIFO_THR3_HIGH;
1088 }
1089 ure_write_4(sc, URE_PLA_RXFIFO_CTRL1, URE_MCU_TYPE_PLA, rx_fifo1);
1090 ure_write_4(sc, URE_PLA_RXFIFO_CTRL2, URE_MCU_TYPE_PLA, rx_fifo2);
1091
1092 /* Configure Tx FIFO threshold. */
1093 ure_write_4(sc, URE_PLA_TXFIFO_CTRL, URE_MCU_TYPE_PLA,
1094 URE_TXFIFO_THR_NORMAL);
1095 }
1096
1097 static int
1098 ure_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1099 {
1100 struct ure_softc *sc = ifp->if_softc;
1101 int error = 0, oflags = ifp->if_flags;
1102
1103 switch (cmd) {
1104 case SIOCSIFFLAGS:
1105 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1106 break;
1107 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1108 case IFF_RUNNING:
1109 ure_stop(ifp, 1);
1110 break;
1111 case IFF_UP:
1112 ure_init(ifp);
1113 break;
1114 case IFF_UP | IFF_RUNNING:
1115 if ((ifp->if_flags ^ oflags) == IFF_PROMISC)
1116 ure_iff(sc);
1117 else
1118 ure_init(ifp);
1119 }
1120 break;
1121 default:
1122 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
1123 break;
1124 error = 0;
1125 if ((ifp->if_flags & IFF_RUNNING) == 0)
1126 break;
1127 switch (cmd) {
1128 case SIOCADDMULTI:
1129 case SIOCDELMULTI:
1130 ure_iff(sc);
1131 break;
1132 default:
1133 break;
1134 }
1135 }
1136
1137 return error;
1138 }
1139
1140 static int
1141 ure_match(device_t parent, cfdata_t match, void *aux)
1142 {
1143 struct usb_attach_arg *uaa = aux;
1144
1145 return usb_lookup(ure_devs, uaa->uaa_vendor, uaa->uaa_product) != NULL ?
1146 UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
1147 }
1148
1149 static void
1150 ure_attach(device_t parent, device_t self, void *aux)
1151 {
1152 struct ure_softc *sc = device_private(self);
1153 struct usb_attach_arg *uaa = aux;
1154 struct usbd_device *dev = uaa->uaa_device;
1155 usb_interface_descriptor_t *id;
1156 usb_endpoint_descriptor_t *ed;
1157 struct ifnet *ifp;
1158 struct mii_data *mii;
1159 int error, i;
1160 uint16_t ver;
1161 uint8_t eaddr[8]; /* 2byte padded */
1162 char *devinfop;
1163
1164 aprint_naive("\n");
1165 aprint_normal("\n");
1166
1167 sc->ure_dev = self;
1168 sc->ure_udev = dev;
1169
1170 devinfop = usbd_devinfo_alloc(sc->ure_udev, 0);
1171 aprint_normal_dev(self, "%s\n", devinfop);
1172 usbd_devinfo_free(devinfop);
1173
1174 callout_init(&sc->ure_stat_ch, CALLOUT_MPSAFE);
1175 usb_init_task(&sc->ure_tick_task, ure_tick_task, sc, USB_TASKQ_MPSAFE);
1176 mutex_init(&sc->ure_mii_lock, MUTEX_DEFAULT, IPL_NONE);
1177 mutex_init(&sc->ure_txlock, MUTEX_DEFAULT, IPL_SOFTUSB);
1178 mutex_init(&sc->ure_rxlock, MUTEX_DEFAULT, IPL_SOFTUSB);
1179 mutex_init(&sc->ure_lock, MUTEX_DEFAULT, IPL_NONE);
1180 cv_init(&sc->ure_detachcv, "uredet");
1181
1182 	/*
1183 	 * ure_phyno is set to 0 below once configuration has succeeded.
1184 	 * If it is still -1 at detach time, then ifmedia/mii/etc. were
1185 	 * never set up and should not be torn down.
1186 	 */
1187 sc->ure_phyno = -1;
1188
1189 #define URE_CONFIG_NO 1 /* XXX */
1190 error = usbd_set_config_no(dev, URE_CONFIG_NO, 1);
1191 if (error) {
1192 aprint_error_dev(self, "failed to set configuration: %s\n",
1193 usbd_errstr(error));
1194 return; /* XXX */
1195 }
1196
1197 if (uaa->uaa_product == USB_PRODUCT_REALTEK_RTL8152)
1198 sc->ure_flags |= URE_FLAG_8152;
1199
1200 #define URE_IFACE_IDX 0 /* XXX */
1201 error = usbd_device2interface_handle(dev, URE_IFACE_IDX, &sc->ure_iface);
1202 if (error) {
1203 aprint_error_dev(self, "failed to get interface handle: %s\n",
1204 usbd_errstr(error));
1205 return; /* XXX */
1206 }
1207
1208 sc->ure_bufsz = 16 * 1024;
1209
1210 id = usbd_get_interface_descriptor(sc->ure_iface);
1211 for (i = 0; i < id->bNumEndpoints; i++) {
1212 ed = usbd_interface2endpoint_descriptor(sc->ure_iface, i);
1213 if (ed == NULL) {
1214 aprint_error_dev(self, "couldn't get ep %d\n", i);
1215 return; /* XXX */
1216 }
1217 if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
1218 UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
1219 sc->ure_ed[URE_ENDPT_RX] = ed->bEndpointAddress;
1220 } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
1221 UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
1222 sc->ure_ed[URE_ENDPT_TX] = ed->bEndpointAddress;
1223 }
1224 }
1225
1226 sc->ure_phyno = 0;
1227
1228 ver = ure_read_2(sc, URE_PLA_TCR1, URE_MCU_TYPE_PLA) & URE_VERSION_MASK;
1229 switch (ver) {
1230 case 0x4c00:
1231 sc->ure_chip |= URE_CHIP_VER_4C00;
1232 break;
1233 case 0x4c10:
1234 sc->ure_chip |= URE_CHIP_VER_4C10;
1235 break;
1236 case 0x5c00:
1237 sc->ure_chip |= URE_CHIP_VER_5C00;
1238 break;
1239 case 0x5c10:
1240 sc->ure_chip |= URE_CHIP_VER_5C10;
1241 break;
1242 case 0x5c20:
1243 sc->ure_chip |= URE_CHIP_VER_5C20;
1244 break;
1245 case 0x5c30:
1246 sc->ure_chip |= URE_CHIP_VER_5C30;
1247 break;
1248 default:
1249 /* fake addr? or just fail? */
1250 break;
1251 }
1252 aprint_normal_dev(self, "RTL%d %sver %04x\n",
1253 (sc->ure_flags & URE_FLAG_8152) ? 8152 : 8153,
1254 (sc->ure_chip != 0) ? "" : "unknown ",
1255 ver);
1256
1257 mutex_enter(&sc->ure_lock);
1258 if (sc->ure_flags & URE_FLAG_8152)
1259 ure_rtl8152_init(sc);
1260 else
1261 ure_rtl8153_init(sc);
1262
1263 if (sc->ure_chip & URE_CHIP_VER_4C00)
1264 ure_read_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA, eaddr,
1265 sizeof(eaddr));
1266 else
1267 ure_read_mem(sc, URE_PLA_BACKUP, URE_MCU_TYPE_PLA, eaddr,
1268 sizeof(eaddr));
1269 mutex_exit(&sc->ure_lock);
1270
1271 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
1272
1273 ifp = GET_IFP(sc);
1274 ifp->if_softc = sc;
1275 strlcpy(ifp->if_xname, device_xname(sc->ure_dev), IFNAMSIZ);
1276 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1277 ifp->if_extflags = IFEF_MPSAFE;
1278 ifp->if_init = ure_init;
1279 ifp->if_ioctl = ure_ioctl;
1280 ifp->if_start = ure_start;
1281 ifp->if_stop = ure_stop;
1282
1283 	/*
1284 	 * We don't support TSOv4 and TSOv6 for now; they would have to
1285 	 * be handled in software in some cases.
1286 	 */
1287 ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx |
1288 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
1289 #ifdef INET6
1290 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx;
1291 #endif
1292 if (sc->ure_chip & ~URE_CHIP_VER_4C00) {
1293 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
1294 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
1295 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1296 }
1297 sc->ure_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
1298 #ifdef notyet
1299 sc->ure_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1300 #endif
1301
1302 IFQ_SET_READY(&ifp->if_snd);
1303
1304 mii = GET_MII(sc);
1305 mii->mii_ifp = ifp;
1306 mii->mii_readreg = ure_miibus_readreg;
1307 mii->mii_writereg = ure_miibus_writereg;
1308 mii->mii_statchg = ure_miibus_statchg;
1309 mii->mii_flags = MIIF_AUTOTSLEEP;
1310
1311 sc->ure_ec.ec_mii = mii;
1312 ifmedia_init(&mii->mii_media, 0, ure_ifmedia_upd, ure_ifmedia_sts);
1313 mii_attach(self, mii, 0xffffffff, sc->ure_phyno, MII_OFFSET_ANY, 0);
1314
1315 if (LIST_FIRST(&mii->mii_phys) == NULL) {
1316 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
1317 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
1318 } else
1319 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
1320
1321 if_attach(ifp);
1322 ether_ifattach(ifp, eaddr);
1323
1324 rnd_attach_source(&sc->ure_rnd_source, device_xname(sc->ure_dev),
1325 RND_TYPE_NET, RND_FLAG_DEFAULT);
1326
1327 usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->ure_udev, sc->ure_dev);
1328
1329 if (!pmf_device_register(self, NULL, NULL))
1330 aprint_error_dev(self, "couldn't establish power handler\n");
1331 }
1332
1333 static int
1334 ure_detach(device_t self, int flags)
1335 {
1336 struct ure_softc *sc = device_private(self);
1337 struct ifnet *ifp = GET_IFP(sc);
1338
1339 pmf_device_deregister(self);
1340
1341 mutex_enter(&sc->ure_lock);
1342 sc->ure_dying = true;
1343 mutex_exit(&sc->ure_lock);
1344
1345 callout_halt(&sc->ure_stat_ch, NULL);
1346
1347 usb_rem_task_wait(sc->ure_udev, &sc->ure_tick_task, USB_TASKQ_DRIVER,
1348 NULL);
1349
1350 if (sc->ure_ep[URE_ENDPT_TX] != NULL)
1351 usbd_abort_pipe(sc->ure_ep[URE_ENDPT_TX]);
1352 if (sc->ure_ep[URE_ENDPT_RX] != NULL)
1353 usbd_abort_pipe(sc->ure_ep[URE_ENDPT_RX]);
1354
1355 mutex_enter(&sc->ure_lock);
1356 sc->ure_refcnt--;
1357 while (sc->ure_refcnt > 0) {
1358 /* Wait for processes to go away */
1359 cv_wait(&sc->ure_detachcv, &sc->ure_lock);
1360 }
1361 mutex_exit(&sc->ure_lock);
1362
1363 	/* On a partial attach, the items below weren't configured; skip teardown. */
1364 if (sc->ure_phyno != -1) {
1365 if (ifp->if_flags & IFF_RUNNING) {
1366 IFNET_LOCK(ifp);
1367 ure_stop(ifp, 1);
1368 IFNET_UNLOCK(ifp);
1369 }
1370
1371 rnd_detach_source(&sc->ure_rnd_source);
1372 mii_detach(&sc->ure_mii, MII_PHY_ANY, MII_OFFSET_ANY);
1373 ifmedia_delete_instance(&sc->ure_mii.mii_media, IFM_INST_ANY);
1374 if (ifp->if_softc != NULL) {
1375 ether_ifdetach(ifp);
1376 if_detach(ifp);
1377 }
1378 }
1379
1380 usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->ure_udev, sc->ure_dev);
1381
1382 callout_destroy(&sc->ure_stat_ch);
1383 cv_destroy(&sc->ure_detachcv);
1384 mutex_destroy(&sc->ure_lock);
1385 mutex_destroy(&sc->ure_rxlock);
1386 mutex_destroy(&sc->ure_txlock);
1387 mutex_destroy(&sc->ure_mii_lock);
1388
1389 return 0;
1390 }
1391
1392 static int
1393 ure_activate(device_t self, enum devact act)
1394 {
1395 struct ure_softc *sc = device_private(self);
1396 struct ifnet *ifp = GET_IFP(sc);
1397
1398 switch (act) {
1399 case DVACT_DEACTIVATE:
1400 if_deactivate(ifp);
1401
1402 mutex_enter(&sc->ure_lock);
1403 sc->ure_dying = true;
1404 mutex_exit(&sc->ure_lock);
1405
1406 mutex_enter(&sc->ure_rxlock);
1407 mutex_enter(&sc->ure_txlock);
1408 sc->ure_stopping = true;
1409 mutex_exit(&sc->ure_txlock);
1410 mutex_exit(&sc->ure_rxlock);
1411
1412 return 0;
1413 default:
1414 return EOPNOTSUPP;
1415 }
1416 return 0;
1417 }
1418
1419 static void
1420 ure_tick_task(void *xsc)
1421 {
1422 struct ure_softc *sc = xsc;
1423 struct ifnet *ifp;
1424 struct mii_data *mii;
1425
1426 if (sc == NULL)
1427 return;
1428
1429 mutex_enter(&sc->ure_lock);
1430 if (sc->ure_stopping || sc->ure_dying) {
1431 mutex_exit(&sc->ure_lock);
1432 return;
1433 }
1434
1435 ifp = GET_IFP(sc);
1436 mii = GET_MII(sc);
1437 if (mii == NULL) {
1438 mutex_exit(&sc->ure_lock);
1439 return;
1440 }
1441
1442 sc->ure_refcnt++;
1443 mutex_exit(&sc->ure_lock);
1444
1445 mii_tick(mii);
1446
1447 if ((sc->ure_flags & URE_FLAG_LINK) == 0)
1448 ure_miibus_statchg(ifp);
1449
1450 mutex_enter(&sc->ure_lock);
1451 if (--sc->ure_refcnt < 0)
1452 cv_broadcast(&sc->ure_detachcv);
1453 if (!sc->ure_stopping && !sc->ure_dying)
1454 callout_schedule(&sc->ure_stat_ch, hz);
1455 mutex_exit(&sc->ure_lock);
1456 }
1457
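/*
 * Serialize access to the MII/OCP registers and hold a reference so the
 * device cannot be detached while a PHY access is in progress.
 */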
1458 static void
1459 ure_lock_mii(struct ure_softc *sc)
1460 {
1461
1462 mutex_enter(&sc->ure_lock);
1463 sc->ure_refcnt++;
1464 mutex_exit(&sc->ure_lock);
1465
1466 mutex_enter(&sc->ure_mii_lock);
1467 }
1468
1469 static void
1470 ure_unlock_mii(struct ure_softc *sc)
1471 {
1472
1473 mutex_exit(&sc->ure_mii_lock);
1474 mutex_enter(&sc->ure_lock);
1475 if (--sc->ure_refcnt < 0)
1476 cv_broadcast(&sc->ure_detachcv);
1477 mutex_exit(&sc->ure_lock);
1478 }
1479
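/*
 * Bulk-in completion handler.  A single USB transfer may carry several
 * packets (RX aggregation); walk the buffer, strip each ure_rxpkt
 * descriptor and pass the payloads up the stack.
 */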
1480 static void
1481 ure_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
1482 {
1483 struct ure_chain *c = (struct ure_chain *)priv;
1484 struct ure_softc *sc = c->uc_sc;
1485 struct ifnet *ifp = GET_IFP(sc);
1486 uint8_t *buf = c->uc_buf;
1487 uint32_t total_len;
1488 uint16_t pktlen = 0;
1489 struct mbuf *m;
1490 struct ure_rxpkt rxhdr;
1491
1492 mutex_enter(&sc->ure_rxlock);
1493
1494 if (sc->ure_dying || sc->ure_stopping ||
1495 status == USBD_INVAL || status == USBD_NOT_STARTED ||
1496 status == USBD_CANCELLED || !(ifp->if_flags & IFF_RUNNING)) {
1497 mutex_exit(&sc->ure_rxlock);
1498 return;
1499 }
1500
1501 if (status != USBD_NORMAL_COMPLETION) {
1502 if (usbd_ratecheck(&sc->ure_rx_notice))
1503 URE_PRINTF(sc, "usb errors on rx: %s\n",
1504 usbd_errstr(status));
1505 if (status == USBD_STALLED)
1506 usbd_clear_endpoint_stall_async(
1507 sc->ure_ep[URE_ENDPT_RX]);
1508 goto done;
1509 }
1510
1511 usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);
1512 DPRINTFN(3, ("received %d bytes\n", total_len));
1513
1514 KASSERTMSG(total_len <= sc->ure_bufsz, "%u vs %u",
1515 total_len, sc->ure_bufsz);
1516
1517 do {
1518 if (total_len < sizeof(rxhdr)) {
1519 DPRINTF(("too few bytes left for a packet header\n"));
1520 ifp->if_ierrors++;
1521 goto done;
1522 }
1523
1524 buf += roundup(pktlen, 8);
1525
1526 memcpy(&rxhdr, buf, sizeof(rxhdr));
1527 total_len -= sizeof(rxhdr);
1528
1529 pktlen = le32toh(rxhdr.ure_pktlen) & URE_RXPKT_LEN_MASK;
1530 DPRINTFN(4, ("next packet is %d bytes\n", pktlen));
1531 if (pktlen > total_len) {
1532 DPRINTF(("not enough bytes left for next packet\n"));
1533 ifp->if_ierrors++;
1534 goto done;
1535 }
1536
1537 total_len -= roundup(pktlen, 8);
1538 buf += sizeof(rxhdr);
1539
1540 m = m_devget(buf, pktlen - ETHER_CRC_LEN, 0, ifp);
1541 if (m == NULL) {
1542 DPRINTF(("unable to allocate mbuf for next packet\n"));
1543 ifp->if_ierrors++;
1544 goto done;
1545 }
1546
1547 m->m_pkthdr.csum_flags = ure_rxcsum(ifp, &rxhdr);
1548
1549 mutex_exit(&sc->ure_rxlock);
1550 if_percpuq_enqueue(ifp->if_percpuq, m);
1551 mutex_enter(&sc->ure_rxlock);
1552
1553 if (sc->ure_dying || sc->ure_stopping) {
1554 mutex_exit(&sc->ure_rxlock);
1555 return;
1556 }
1557
1558 } while (total_len > 0);
1559
1560 done:
1561 if (sc->ure_dying || sc->ure_stopping) {
1562 mutex_exit(&sc->ure_rxlock);
1563 return;
1564 }
1565 mutex_exit(&sc->ure_rxlock);
1566
1567 /* Setup new transfer. */
1568 usbd_setup_xfer(xfer, c, c->uc_buf, sc->ure_bufsz,
1569 USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ure_rxeof);
1570 usbd_transfer(xfer);
1571 }
1572
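/*
 * Translate the checksum bits in the RX descriptor into mbuf M_CSUM_*
 * flags, honouring the checksum options enabled on the interface.
 */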
1573 static int
1574 ure_rxcsum(struct ifnet *ifp, struct ure_rxpkt *rp)
1575 {
1576 int enabled = ifp->if_csum_flags_rx, flags = 0;
1577 uint32_t csum, misc;
1578
1579 if (enabled == 0)
1580 return 0;
1581
1582 csum = le32toh(rp->ure_csum);
1583 misc = le32toh(rp->ure_misc);
1584
1585 if (csum & URE_RXPKT_IPV4_CS) {
1586 flags |= M_CSUM_IPv4;
1587 if (csum & URE_RXPKT_TCP_CS)
1588 flags |= M_CSUM_TCPv4;
1589 if (csum & URE_RXPKT_UDP_CS)
1590 flags |= M_CSUM_UDPv4;
1591 } else if (csum & URE_RXPKT_IPV6_CS) {
1592 flags = 0;
1593 if (csum & URE_RXPKT_TCP_CS)
1594 flags |= M_CSUM_TCPv6;
1595 if (csum & URE_RXPKT_UDP_CS)
1596 flags |= M_CSUM_UDPv6;
1597 }
1598
1599 flags &= enabled;
1600 if (__predict_false((flags & M_CSUM_IPv4) &&
1601 (misc & URE_RXPKT_IP_F)))
1602 flags |= M_CSUM_IPv4_BAD;
1603 if (__predict_false(
1604 ((flags & (M_CSUM_TCPv4 | M_CSUM_TCPv6)) && (misc & URE_RXPKT_TCP_F))
1605 || ((flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) && (misc & URE_RXPKT_UDP_F))
1606 ))
1607 flags |= M_CSUM_TCP_UDP_BAD;
1608
1609 return flags;
1610 }
1611
1612 static void
1613 ure_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
1614 {
1615 struct ure_chain *c = priv;
1616 struct ure_softc *sc = c->uc_sc;
1617 struct ure_cdata *cd = &sc->ure_cdata;
1618 struct ifnet *ifp = GET_IFP(sc);
1619
1620 mutex_enter(&sc->ure_txlock);
1621 if (sc->ure_stopping || sc->ure_dying) {
1622 mutex_exit(&sc->ure_txlock);
1623 return;
1624 }
1625
1626 DPRINTFN(2, ("tx completion\n"));
1627
1628 KASSERT(cd->tx_cnt > 0);
1629 cd->tx_cnt--;
1630
1631 switch (status) {
1632 case USBD_NOT_STARTED:
1633 case USBD_CANCELLED:
1634 break;
1635
1636 case USBD_NORMAL_COMPLETION:
1637 ifp->if_opackets++;
1638
1639 if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
1640 ure_start_locked(ifp);
1641 }
1642 break;
1643
1644 default:
1645 ifp->if_oerrors++;
1646 if (usbd_ratecheck(&sc->ure_tx_notice))
1647 URE_PRINTF(sc, "usb error on tx: %s\n",
1648 usbd_errstr(status));
1649 if (status == USBD_STALLED)
1650 usbd_clear_endpoint_stall_async(
1651 sc->ure_ep[URE_ENDPT_TX]);
1652 break;
1653 }
1654
1655 mutex_exit(&sc->ure_txlock);
1656 }
1657
1658 static int
1659 ure_tx_list_init(struct ure_softc *sc)
1660 {
1661 struct ure_cdata *cd;
1662 struct ure_chain *c;
1663 int i, error;
1664
1665 cd = &sc->ure_cdata;
1666 for (i = 0; i < URE_TX_LIST_CNT; i++) {
1667 c = &cd->tx_chain[i];
1668 c->uc_sc = sc;
1669 if (c->uc_xfer == NULL) {
1670 error = usbd_create_xfer(sc->ure_ep[URE_ENDPT_TX],
1671 sc->ure_bufsz, USBD_FORCE_SHORT_XFER, 0,
1672 &c->uc_xfer);
1673 if (error)
1674 return error;
1675 c->uc_buf = usbd_get_buffer(c->uc_xfer);
1676 }
1677 }
1678
1679 cd->tx_prod = cd->tx_cnt = 0;
1680
1681 return 0;
1682 }
1683
1684 static int
1685 ure_rx_list_init(struct ure_softc *sc)
1686 {
1687 struct ure_cdata *cd;
1688 struct ure_chain *c;
1689 int i, error;
1690
1691 cd = &sc->ure_cdata;
1692 for (i = 0; i < URE_RX_LIST_CNT; i++) {
1693 c = &cd->rx_chain[i];
1694 c->uc_sc = sc;
1695 error = usbd_create_xfer(sc->ure_ep[URE_ENDPT_RX],
1696 sc->ure_bufsz, 0, 0, &c->uc_xfer);
1697 if (error)
1698 return error;
1699 c->uc_buf = usbd_get_buffer(c->uc_xfer);
1700 }
1701
1702 return 0;
1703 }
1704
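/*
 * Build a TX descriptor and copy the packet into the transfer buffer,
 * then submit the bulk-out transfer for the given slot.
 */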
1705 static int
1706 ure_encap(struct ure_softc *sc, struct mbuf *m, int idx)
1707 {
1708 struct ifnet *ifp = GET_IFP(sc);
1709 struct ure_chain *c;
1710 usbd_status err;
1711 struct ure_txpkt txhdr;
1712 uint32_t frm_len = 0;
1713 uint8_t *buf;
1714
1715 KASSERT(mutex_owned(&sc->ure_txlock));
1716
1717 c = &sc->ure_cdata.tx_chain[idx];
1718 buf = c->uc_buf;
1719
1720 /* header */
1721 txhdr.ure_pktlen = htole32(m->m_pkthdr.len | URE_TXPKT_TX_FS |
1722 URE_TXPKT_TX_LS);
1723 txhdr.ure_csum = htole32(ure_txcsum(m));
1724 memcpy(buf, &txhdr, sizeof(txhdr));
1725 buf += sizeof(txhdr);
1726 frm_len = sizeof(txhdr);
1727
1728 /* packet */
1729 m_copydata(m, 0, m->m_pkthdr.len, buf);
1730 frm_len += m->m_pkthdr.len;
1731
1732 if (__predict_false(c->uc_xfer == NULL))
1733 return EIO; /* XXX plugged out or down */
1734
1735 DPRINTFN(2, ("tx %d bytes\n", frm_len));
1736 usbd_setup_xfer(c->uc_xfer, c, c->uc_buf, frm_len,
1737 USBD_FORCE_SHORT_XFER, 10000, ure_txeof);
1738
1739 err = usbd_transfer(c->uc_xfer);
1740 if (err != USBD_IN_PROGRESS) {
1741 /* XXXSMP IFNET_LOCK */
1742 ure_stop(ifp, 0);
1743 return EIO;
1744 }
1745
1746 return 0;
1747 }
1748
1749 /*
1750  * We need to calculate the L4 checksum in software if the offset of
1751  * the L4 header is larger than 0x7ff = 2047.
1752  */
1753 static uint32_t
1754 ure_txcsum(struct mbuf *m)
1755 {
1756 struct ether_header *eh;
1757 int flags = m->m_pkthdr.csum_flags;
1758 uint32_t data = m->m_pkthdr.csum_data;
1759 uint32_t reg = 0;
1760 int l3off, l4off;
1761 uint16_t type;
1762
1763 if (flags == 0)
1764 return 0;
1765
1766 if (__predict_true(m->m_len >= (int)sizeof(*eh))) {
1767 eh = mtod(m, struct ether_header *);
1768 type = eh->ether_type;
1769 } else
1770 m_copydata(m, offsetof(struct ether_header, ether_type),
1771 sizeof(type), &type);
1772 switch (type = htons(type)) {
1773 case ETHERTYPE_IP:
1774 case ETHERTYPE_IPV6:
1775 l3off = ETHER_HDR_LEN;
1776 break;
1777 case ETHERTYPE_VLAN:
1778 l3off = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1779 break;
1780 default:
1781 return 0;
1782 }
1783
1784 if (flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
1785 l4off = l3off + M_CSUM_DATA_IPv4_IPHL(data);
1786 if (__predict_false(l4off > URE_L4_OFFSET_MAX)) {
1787 in_undefer_cksum(m, l3off, flags);
1788 return 0;
1789 }
1790 reg |= URE_TXPKT_IPV4_CS;
1791 if (flags & M_CSUM_TCPv4)
1792 reg |= URE_TXPKT_TCP_CS;
1793 else
1794 reg |= URE_TXPKT_UDP_CS;
1795 reg |= l4off << URE_L4_OFFSET_SHIFT;
1796 }
1797 #ifdef INET6
1798 else if (flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
1799 l4off = l3off + M_CSUM_DATA_IPv6_IPHL(data);
1800 if (__predict_false(l4off > URE_L4_OFFSET_MAX)) {
1801 in6_undefer_cksum(m, l3off, flags);
1802 return 0;
1803 }
1804 reg |= URE_TXPKT_IPV6_CS;
1805 if (flags & M_CSUM_TCPv6)
1806 reg |= URE_TXPKT_TCP_CS;
1807 else
1808 reg |= URE_TXPKT_UDP_CS;
1809 reg |= l4off << URE_L4_OFFSET_SHIFT;
1810 }
1811 #endif
1812 else if (flags & M_CSUM_IPv4)
1813 reg |= URE_TXPKT_IPV4_CS;
1814
1815 return reg;
1816 }
1817