usbnet.c revision 1.65 1 /* $NetBSD: usbnet.c,v 1.65 2022/03/03 05:49:29 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 2019 Matthew R. Green
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 /*
30 * Common code shared between USB network drivers.
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: usbnet.c,v 1.65 2022/03/03 05:49:29 riastradh Exp $");
35
36 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/kmem.h>
39 #include <sys/module.h>
40 #include <sys/atomic.h>
41
42 #include <dev/usb/usbnet.h>
43 #include <dev/usb/usbhist.h>
44
/*
 * Per-device transfer-chain bookkeeping, shared between the RX and TX
 * data paths.  The chain arrays are sized by un_rx_list_cnt and
 * un_tx_list_cnt respectively (see the list_size helpers below).
 */
struct usbnet_cdata {
	struct usbnet_chain	*uncd_tx_chain;	/* TX chain array */
	struct usbnet_chain	*uncd_rx_chain;	/* RX chain array */

	int			uncd_tx_prod;	/* next TX slot to fill */
	int			uncd_tx_cnt;	/* TX transfers in flight */
};
52
/*
 * Private per-instance state, hidden from client drivers behind
 * struct usbnet's un_pri pointer.
 */
struct usbnet_private {
	/*
	 * - unp_core_lock protects most of this structure, the public one,
	 *   and the MII / media data.
	 * - unp_rxlock protects the rx path and its data
	 * - unp_txlock protects the tx path and its data
	 * - unp_detachcv handles detach vs open references
	 *
	 * the lock ordering is:
	 *	ifnet lock -> unp_core_lock -> unp_rxlock -> unp_txlock
	 * - ifnet lock is not needed for unp_core_lock, but if ifnet lock is
	 *   involved, it must be taken first
	 */
	kmutex_t		unp_core_lock;
	kmutex_t		unp_rxlock;
	kmutex_t		unp_txlock;
	kcondvar_t		unp_detachcv;	/* signalled by usbnet_unbusy() */

	struct usbnet_cdata	unp_cdata;	/* RX/TX chain bookkeeping */

	struct ethercom		unp_ec;		/* ethernet common data */
	struct mii_data		unp_mii;	/* MII/media state */
	struct usb_task		unp_mcasttask;	/* multicast filter reprogram */
	struct usb_task		unp_ticktask;	/* periodic work (process ctx) */
	struct callout		unp_stat_ch;	/* schedules unp_ticktask */
	struct usbd_pipe	*unp_ep[USBNET_ENDPT_MAX];	/* open pipes */

	bool			unp_dying;	/* device is detaching */
	bool			unp_stopping;	/* data paths shutting down */
	bool			unp_attached;	/* usbnet_attach() completed */
	bool			unp_ifp_attached;	/* if_attach() done */
	bool			unp_link;	/* link is up */

	int			unp_refcnt;	/* busy refs; see usbnet_busy() */
	int			unp_timer;	/* TX watchdog countdown (ticks) */
	unsigned short		unp_if_flags;	/* last seen ifp->if_flags */
	unsigned		unp_number;	/* unique instance number */

	krndsource_t		unp_rndsrc;	/* entropy source (TX activity) */

	/* Rate-limit state for the three error-report classes. */
	struct timeval		unp_rx_notice;
	struct timeval		unp_tx_notice;
	struct timeval		unp_intr_notice;
};
97
/* Convenience accessor for the chain bookkeeping hidden in un_pri. */
#define un_cdata(un)	(&(un)->un_pri->unp_cdata)

/* Monotonic counter used to hand each instance a unique unp_number. */
volatile unsigned usbnet_number;

static int usbnet_modcmd(modcmd_t, void *);
103
#ifdef USB_DEBUG
#ifndef USBNET_DEBUG
#define usbnetdebug 0
#else
/* Debug verbosity, adjustable at run time via sysctl hw.usbnet.debug. */
static int usbnetdebug = 0;

SYSCTL_SETUP(sysctl_hw_usbnet_setup, "sysctl hw.usbnet setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	/* Create the hw.usbnet node. */
	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "usbnet",
	    SYSCTL_DESCR("usbnet global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &usbnetdebug, sizeof(usbnetdebug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}

#endif /* USBNET_DEBUG */
#endif /* USB_DEBUG */

/* Thin wrappers around the usbhist(9) kernel-history logging macros. */
#define DPRINTF(FMT,A,B,C,D)	USBHIST_LOGN(usbnetdebug,1,FMT,A,B,C,D)
#define DPRINTFN(N,FMT,A,B,C,D)	USBHIST_LOGN(usbnetdebug,N,FMT,A,B,C,D)
#define USBNETHIST_FUNC()	USBHIST_FUNC()
#define USBNETHIST_CALLED(name)	USBHIST_CALLED(usbnetdebug)
#define USBNETHIST_CALLARGS(FMT,A,B,C,D) \
	USBHIST_CALLARGS(usbnetdebug,FMT,A,B,C,D)
#define USBNETHIST_CALLARGSN(N,FMT,A,B,C,D) \
	USBHIST_CALLARGSN(usbnetdebug,N,FMT,A,B,C,D)
149 /* Callback vectors. */
150
151 static void
152 uno_stop(struct usbnet *un, struct ifnet *ifp, int disable)
153 {
154 KASSERTMSG(!un->un_pri->unp_ifp_attached || IFNET_LOCKED(ifp),
155 "%s", ifp->if_xname);
156 usbnet_isowned_core(un);
157 if (un->un_ops->uno_stop)
158 (*un->un_ops->uno_stop)(ifp, disable);
159 }
160
161 static int
162 uno_ioctl(struct usbnet *un, struct ifnet *ifp, u_long cmd, void *data)
163 {
164 /*
165 * There are cases where IFNET_LOCK will not be held when we
166 * are called (e.g. add/delete multicast address), so we can't
167 * assert it.
168 */
169 if (un->un_ops->uno_ioctl)
170 return (*un->un_ops->uno_ioctl)(ifp, cmd, data);
171 return 0;
172 }
173
174 static int
175 uno_override_ioctl(struct usbnet *un, struct ifnet *ifp, u_long cmd, void *data)
176 {
177 /* See above. */
178 return (*un->un_ops->uno_override_ioctl)(ifp, cmd, data);
179 }
180
181 static int
182 uno_init(struct usbnet *un, struct ifnet *ifp)
183 {
184 KASSERT(IFNET_LOCKED(ifp));
185 return (*un->un_ops->uno_init)(ifp);
186 }
187
188 static int
189 uno_read_reg(struct usbnet *un, int phy, int reg, uint16_t *val)
190 {
191 usbnet_isowned_core(un);
192 return (*un->un_ops->uno_read_reg)(un, phy, reg, val);
193 }
194
195 static int
196 uno_write_reg(struct usbnet *un, int phy, int reg, uint16_t val)
197 {
198 usbnet_isowned_core(un);
199 return (*un->un_ops->uno_write_reg)(un, phy, reg, val);
200 }
201
202 static void
203 uno_mii_statchg(struct usbnet *un, struct ifnet *ifp)
204 {
205 usbnet_isowned_core(un);
206 (*un->un_ops->uno_statchg)(ifp);
207 }
208
209 static unsigned
210 uno_tx_prepare(struct usbnet *un, struct mbuf *m, struct usbnet_chain *c)
211 {
212 usbnet_isowned_tx(un);
213 return (*un->un_ops->uno_tx_prepare)(un, m, c);
214 }
215
216 static void
217 uno_rx_loop(struct usbnet *un, struct usbnet_chain *c, uint32_t total_len)
218 {
219 usbnet_isowned_rx(un);
220 (*un->un_ops->uno_rx_loop)(un, c, total_len);
221 }
222
223 static void
224 uno_tick(struct usbnet *un)
225 {
226 if (un->un_ops->uno_tick)
227 (*un->un_ops->uno_tick)(un);
228 }
229
230 static void
231 uno_intr(struct usbnet *un, usbd_status status)
232 {
233 if (un->un_ops->uno_intr)
234 (*un->un_ops->uno_intr)(un, status);
235 }
236
237 /* Interrupt handling. */
238
239 static struct mbuf *
240 usbnet_newbuf(size_t buflen)
241 {
242 struct mbuf *m;
243
244 if (buflen > MCLBYTES)
245 return NULL;
246
247 MGETHDR(m, M_DONTWAIT, MT_DATA);
248 if (m == NULL)
249 return NULL;
250
251 if (buflen > MHLEN - ETHER_ALIGN) {
252 MCLGET(m, M_DONTWAIT);
253 if (!(m->m_flags & M_EXT)) {
254 m_freem(m);
255 return NULL;
256 }
257 }
258
259 m_adj(m, ETHER_ALIGN);
260 m->m_len = m->m_pkthdr.len = buflen;
261
262 return m;
263 }
264
/*
 * usbnet_rxeof() is designed to be the "done" callback for RX
 * completion.  It provides generic setup and finalisation, and calls
 * the driver's rx_loop callback in the middle, which can use
 * usbnet_enqueue() to enqueue a packet for the higher levels (or
 * usbnet_input(), if the driver previously used the if_input() path).
 */
272 void
273 usbnet_enqueue(struct usbnet * const un, uint8_t *buf, size_t buflen,
274 int csum_flags, uint32_t csum_data, int mbuf_flags)
275 {
276 USBNETHIST_FUNC();
277 struct ifnet * const ifp = usbnet_ifp(un);
278 struct usbnet_private * const unp __unused = un->un_pri;
279 struct mbuf *m;
280
281 USBNETHIST_CALLARGSN(5, "%jd: enter: len=%ju csf %#jx mbf %#jx",
282 unp->unp_number, buflen, csum_flags, mbuf_flags);
283
284 usbnet_isowned_rx(un);
285
286 m = usbnet_newbuf(buflen);
287 if (m == NULL) {
288 DPRINTF("%jd: no memory", unp->unp_number, 0, 0, 0);
289 if_statinc(ifp, if_ierrors);
290 return;
291 }
292
293 m_set_rcvif(m, ifp);
294 m->m_pkthdr.csum_flags = csum_flags;
295 m->m_pkthdr.csum_data = csum_data;
296 m->m_flags |= mbuf_flags;
297 memcpy(mtod(m, uint8_t *), buf, buflen);
298
299 /* push the packet up */
300 if_percpuq_enqueue(ifp->if_percpuq, m);
301 }
302
303 void
304 usbnet_input(struct usbnet * const un, uint8_t *buf, size_t buflen)
305 {
306 USBNETHIST_FUNC();
307 struct ifnet * const ifp = usbnet_ifp(un);
308 struct usbnet_private * const unp __unused = un->un_pri;
309 struct mbuf *m;
310
311 USBNETHIST_CALLARGSN(5, "%jd: enter: buf %#jx len %ju",
312 unp->unp_number, (uintptr_t)buf, buflen, 0);
313
314 usbnet_isowned_rx(un);
315
316 m = usbnet_newbuf(buflen);
317 if (m == NULL) {
318 if_statinc(ifp, if_ierrors);
319 return;
320 }
321
322 m_set_rcvif(m, ifp);
323 memcpy(mtod(m, char *), buf, buflen);
324
325 /* push the packet up */
326 if_input(ifp, m);
327 }
328
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.  This is the "done" callback installed
 * on every RX transfer; on success it runs the driver's rx_loop and
 * then requeues the same xfer for the next frame.
 */
static void
usbnet_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	USBNETHIST_FUNC();
	struct usbnet_chain * const c = priv;
	struct usbnet * const un = c->unc_un;
	struct usbnet_private * const unp = un->un_pri;
	uint32_t total_len;

	USBNETHIST_CALLARGSN(5, "%jd: enter: status %#jx xfer %#jx",
	    unp->unp_number, status, (uintptr_t)xfer, 0);

	mutex_enter(&unp->unp_rxlock);

	/* Shutting down or cancelled: do not requeue, just drop the lock. */
	if (unp->unp_dying || unp->unp_stopping ||
	    status == USBD_INVAL || status == USBD_NOT_STARTED ||
	    status == USBD_CANCELLED)
		goto out;

	if (status != USBD_NORMAL_COMPLETION) {
		/* Rate-limited error report; unstall the pipe if needed. */
		if (usbd_ratecheck(&unp->unp_rx_notice))
			device_printf(un->un_dev, "usb errors on rx: %s\n",
			    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(unp->unp_ep[USBNET_ENDPT_RX]);
		/* "done" still requeues the transfer below. */
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	/* A transfer larger than our buffer indicates a confused device. */
	if (total_len > un->un_rx_bufsz) {
		aprint_error_dev(un->un_dev,
		    "rxeof: too large transfer (%u > %u)\n",
		    total_len, un->un_rx_bufsz);
		goto done;
	}

	/* Let the driver parse the buffer and enqueue the packet(s). */
	uno_rx_loop(un, c, total_len);
	/* The rx_loop callback must not have dropped the RX lock. */
	usbnet_isowned_rx(un);

done:
	/* Re-check: the flags may have changed while rx_loop ran. */
	if (unp->unp_dying || unp->unp_stopping)
		goto out;

	mutex_exit(&unp->unp_rxlock);

	/* Setup new transfer. */
	usbd_setup_xfer(xfer, c, c->unc_buf, un->un_rx_bufsz,
	    un->un_rx_xfer_flags, USBD_NO_TIMEOUT, usbnet_rxeof);
	usbd_transfer(xfer);
	return;

out:
	mutex_exit(&unp->unp_rxlock);
}
388
/*
 * TX completion callback: release the chain slot, account for the
 * result, and kick if_start again if more packets are waiting.
 */
static void
usbnet_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	USBNETHIST_FUNC(); USBNETHIST_CALLED();
	struct usbnet_chain * const c = priv;
	struct usbnet * const un = c->unc_un;
	struct usbnet_cdata * const cd = un_cdata(un);
	struct usbnet_private * const unp = un->un_pri;
	struct ifnet * const ifp = usbnet_ifp(un);

	USBNETHIST_CALLARGSN(5, "%jd: enter: status %#jx xfer %#jx",
	    unp->unp_number, status, (uintptr_t)xfer, 0);

	mutex_enter(&unp->unp_txlock);
	if (unp->unp_stopping || unp->unp_dying) {
		mutex_exit(&unp->unp_txlock);
		return;
	}

	/* One fewer transfer in flight. */
	KASSERT(cd->uncd_tx_cnt > 0);
	cd->uncd_tx_cnt--;

	/* The transfer finished one way or another: disarm the watchdog. */
	unp->unp_timer = 0;

	switch (status) {
	case USBD_NOT_STARTED:
	case USBD_CANCELLED:
		break;

	case USBD_NORMAL_COMPLETION:
		if_statinc(ifp, if_opackets);
		break;

	default:
		/* Rate-limited error report; unstall the pipe if needed. */
		if_statinc(ifp, if_oerrors);
		if (usbd_ratecheck(&unp->unp_tx_notice))
			device_printf(un->un_dev, "usb error on tx: %s\n",
			    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(unp->unp_ep[USBNET_ENDPT_TX]);
		break;
	}

	mutex_exit(&unp->unp_txlock);

	/* Restart transmission if more packets are queued. */
	if (status == USBD_NORMAL_COMPLETION && !IFQ_IS_EMPTY(&ifp->if_snd))
		(*ifp->if_start)(ifp);
}
438
/*
 * Interrupt-pipe completion callback: filter out shutdown/cancel and
 * error cases, then forward the notification to the driver's uno_intr.
 */
static void
usbnet_pipe_intr(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	USBNETHIST_FUNC();
	struct usbnet * const un = priv;
	struct usbnet_private * const unp = un->un_pri;
	struct usbnet_intr * const uni = un->un_intr;

	/* No interrupt support configured, shutting down, or cancelled. */
	if (uni == NULL || unp->unp_dying || unp->unp_stopping ||
	    status == USBD_INVAL || status == USBD_NOT_STARTED ||
	    status == USBD_CANCELLED) {
		USBNETHIST_CALLARGS("%jd: uni %#jx d/s %#jx status %#jx",
		    unp->unp_number, (uintptr_t)uni,
		    (unp->unp_dying << 8) | unp->unp_stopping, status);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		/* Rate-limited error report; unstall the pipe if needed. */
		if (usbd_ratecheck(&unp->unp_intr_notice)) {
			aprint_error_dev(un->un_dev, "usb error on intr: %s\n",
			    usbd_errstr(status));
		}
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(unp->unp_ep[USBNET_ENDPT_INTR]);
		USBNETHIST_CALLARGS("%jd: not normal status %#jx",
		    unp->unp_number, status, 0, 0);
		return;
	}

	uno_intr(un, status);
}
470
/*
 * Pull packets off the interface send queue, hand each to the driver's
 * uno_tx_prepare, and start the USB transfers, until the TX chain
 * ring fills or the queue empties.  Called with unp_txlock held.
 */
static void
usbnet_start_locked(struct ifnet *ifp)
{
	USBNETHIST_FUNC();
	struct usbnet * const un = ifp->if_softc;
	struct usbnet_cdata * const cd = un_cdata(un);
	struct usbnet_private * const unp = un->un_pri;
	struct mbuf *m;
	unsigned length;
	bool done_transmit = false;
	int idx, count;

	USBNETHIST_CALLARGS("%jd: tx_cnt %jd list_cnt %jd link %jd",
	    unp->unp_number, cd->uncd_tx_cnt, un->un_tx_list_cnt,
	    unp->unp_link);

	usbnet_isowned_tx(un);
	KASSERT(cd->uncd_tx_cnt <= un->un_tx_list_cnt);

	/* Nothing to do without link or while the interface isn't running. */
	if (!unp->unp_link || (ifp->if_flags & IFF_RUNNING) == 0) {
		DPRINTF("start called no link (%jx) or running (flags %jx)",
		    unp->unp_link, ifp->if_flags, 0, 0);
		return;
	}

	/* Every TX slot busy: a txeof completion will kick us again. */
	if (cd->uncd_tx_cnt == un->un_tx_list_cnt) {
		DPRINTF("start called, tx busy (%#jx == %#jx)",
		    cd->uncd_tx_cnt, un->un_tx_list_cnt, 0, 0);
		return;
	}

	idx = cd->uncd_tx_prod;
	count = 0;
	while (cd->uncd_tx_cnt < un->un_tx_list_cnt) {
		/* Peek only; dequeue once the transfer is safely submitted. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL) {
			DPRINTF("start called, queue empty", 0, 0, 0, 0);
			break;
		}
		KASSERT(m->m_pkthdr.len <= un->un_tx_bufsz);

		struct usbnet_chain *c = &cd->uncd_tx_chain[idx];

		/* Driver encapsulates the mbuf into c's transfer buffer. */
		length = uno_tx_prepare(un, m, c);
		if (length == 0) {
			DPRINTF("uno_tx_prepare gave zero length", 0, 0, 0, 0);
			if_statinc(ifp, if_oerrors);
			break;
		}

		if (__predict_false(c->unc_xfer == NULL)) {
			DPRINTF("unc_xfer is NULL", 0, 0, 0, 0);
			if_statinc(ifp, if_oerrors);
			break;
		}

		usbd_setup_xfer(c->unc_xfer, c, c->unc_buf, length,
		    un->un_tx_xfer_flags, 10000, usbnet_txeof);

		/* Transmit */
		usbd_status err = usbd_transfer(c->unc_xfer);
		if (err != USBD_IN_PROGRESS) {
			DPRINTF("usbd_transfer on %#jx for %ju bytes: %jd",
			    (uintptr_t)c->unc_buf, length, err, 0);
			if_statinc(ifp, if_oerrors);
			break;
		}
		done_transmit = true;

		IFQ_DEQUEUE(&ifp->if_snd, m);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m, BPF_D_OUT);
		m_freem(m);

		/* Advance the producer index around the ring. */
		idx = (idx + 1) % un->un_tx_list_cnt;
		cd->uncd_tx_cnt++;
		count++;
	}
	cd->uncd_tx_prod = idx;

	DPRINTF("finished with start; tx_cnt %jd list_cnt %jd link %jd",
	    cd->uncd_tx_cnt, un->un_tx_list_cnt, unp->unp_link, 0);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	if (done_transmit)
		unp->unp_timer = 5;

	/* Feed entropy from the amount of TX work performed. */
	if (count != 0)
		rnd_add_uint32(&unp->unp_rndsrc, count);
}
567
568 static void
569 usbnet_if_start(struct ifnet *ifp)
570 {
571 struct usbnet * const un = ifp->if_softc;
572 struct usbnet_private * const unp = un->un_pri;
573
574 USBNETHIST_FUNC();
575 USBNETHIST_CALLARGS("%jd: stopping %jd",
576 unp->unp_number, unp->unp_stopping, 0, 0);
577
578 mutex_enter(&unp->unp_txlock);
579 if (!unp->unp_stopping)
580 usbnet_start_locked(ifp);
581 mutex_exit(&unp->unp_txlock);
582 }
583
584 /*
585 * Chain management.
586 *
587 * RX and TX are identical. Keep them that way.
588 */
589
590 /* Start of common RX functions */
591
592 static size_t
593 usbnet_rx_list_size(struct usbnet_cdata * const cd, struct usbnet * const un)
594 {
595 return sizeof(*cd->uncd_rx_chain) * un->un_rx_list_cnt;
596 }
597
598 static void
599 usbnet_rx_list_alloc(struct usbnet * const un)
600 {
601 struct usbnet_cdata * const cd = un_cdata(un);
602
603 cd->uncd_rx_chain = kmem_zalloc(usbnet_rx_list_size(cd, un), KM_SLEEP);
604 }
605
606 static void
607 usbnet_rx_list_free(struct usbnet * const un)
608 {
609 struct usbnet_cdata * const cd = un_cdata(un);
610
611 if (cd->uncd_rx_chain) {
612 kmem_free(cd->uncd_rx_chain, usbnet_rx_list_size(cd, un));
613 cd->uncd_rx_chain = NULL;
614 }
615 }
616
617 static int
618 usbnet_rx_list_init(struct usbnet * const un)
619 {
620 struct usbnet_cdata * const cd = un_cdata(un);
621 struct usbnet_private * const unp = un->un_pri;
622
623 for (size_t i = 0; i < un->un_rx_list_cnt; i++) {
624 struct usbnet_chain *c = &cd->uncd_rx_chain[i];
625
626 c->unc_un = un;
627 if (c->unc_xfer == NULL) {
628 int err = usbd_create_xfer(unp->unp_ep[USBNET_ENDPT_RX],
629 un->un_rx_bufsz, un->un_rx_xfer_flags, 0,
630 &c->unc_xfer);
631 if (err)
632 return err;
633 c->unc_buf = usbd_get_buffer(c->unc_xfer);
634 }
635 }
636
637 return 0;
638 }
639
640 static void
641 usbnet_rx_list_fini(struct usbnet * const un)
642 {
643 struct usbnet_cdata * const cd = un_cdata(un);
644
645 for (size_t i = 0; i < un->un_rx_list_cnt; i++) {
646 struct usbnet_chain *c = &cd->uncd_rx_chain[i];
647
648 if (c->unc_xfer != NULL) {
649 usbd_destroy_xfer(c->unc_xfer);
650 c->unc_xfer = NULL;
651 c->unc_buf = NULL;
652 }
653 }
654 }
655
656 /* End of common RX functions */
657
658 static void
659 usbnet_rx_start_pipes(struct usbnet * const un)
660 {
661 struct usbnet_cdata * const cd = un_cdata(un);
662 struct usbnet_private * const unp = un->un_pri;
663
664 mutex_enter(&unp->unp_rxlock);
665 mutex_enter(&unp->unp_txlock);
666 unp->unp_stopping = false;
667
668 for (size_t i = 0; i < un->un_rx_list_cnt; i++) {
669 struct usbnet_chain *c = &cd->uncd_rx_chain[i];
670
671 usbd_setup_xfer(c->unc_xfer, c, c->unc_buf, un->un_rx_bufsz,
672 un->un_rx_xfer_flags, USBD_NO_TIMEOUT, usbnet_rxeof);
673 usbd_transfer(c->unc_xfer);
674 }
675
676 mutex_exit(&unp->unp_txlock);
677 mutex_exit(&unp->unp_rxlock);
678 }
679
680 /* Start of common TX functions */
681
682 static size_t
683 usbnet_tx_list_size(struct usbnet_cdata * const cd, struct usbnet * const un)
684 {
685 return sizeof(*cd->uncd_tx_chain) * un->un_tx_list_cnt;
686 }
687
688 static void
689 usbnet_tx_list_alloc(struct usbnet * const un)
690 {
691 struct usbnet_cdata * const cd = un_cdata(un);
692
693 cd->uncd_tx_chain = kmem_zalloc(usbnet_tx_list_size(cd, un), KM_SLEEP);
694 }
695
696 static void
697 usbnet_tx_list_free(struct usbnet * const un)
698 {
699 struct usbnet_cdata * const cd = un_cdata(un);
700
701 if (cd->uncd_tx_chain) {
702 kmem_free(cd->uncd_tx_chain, usbnet_tx_list_size(cd, un));
703 cd->uncd_tx_chain = NULL;
704 }
705 }
706
707 static int
708 usbnet_tx_list_init(struct usbnet * const un)
709 {
710 struct usbnet_cdata * const cd = un_cdata(un);
711 struct usbnet_private * const unp = un->un_pri;
712
713 for (size_t i = 0; i < un->un_tx_list_cnt; i++) {
714 struct usbnet_chain *c = &cd->uncd_tx_chain[i];
715
716 c->unc_un = un;
717 if (c->unc_xfer == NULL) {
718 int err = usbd_create_xfer(unp->unp_ep[USBNET_ENDPT_TX],
719 un->un_tx_bufsz, un->un_tx_xfer_flags, 0,
720 &c->unc_xfer);
721 if (err)
722 return err;
723 c->unc_buf = usbd_get_buffer(c->unc_xfer);
724 }
725 }
726
727 return 0;
728 }
729
730 static void
731 usbnet_tx_list_fini(struct usbnet * const un)
732 {
733 struct usbnet_cdata * const cd = un_cdata(un);
734
735 for (size_t i = 0; i < un->un_tx_list_cnt; i++) {
736 struct usbnet_chain *c = &cd->uncd_tx_chain[i];
737
738 if (c->unc_xfer != NULL) {
739 usbd_destroy_xfer(c->unc_xfer);
740 c->unc_xfer = NULL;
741 c->unc_buf = NULL;
742 }
743 }
744 cd->uncd_tx_prod = cd->uncd_tx_cnt = 0;
745 }
746
747 /* End of common TX functions */
748
749 /* Endpoint pipe management. */
750
751 static void
752 usbnet_ep_close_pipes(struct usbnet * const un)
753 {
754 struct usbnet_private * const unp = un->un_pri;
755
756 for (size_t i = 0; i < __arraycount(unp->unp_ep); i++) {
757 if (unp->unp_ep[i] == NULL)
758 continue;
759 usbd_status err = usbd_close_pipe(unp->unp_ep[i]);
760 if (err)
761 aprint_error_dev(un->un_dev, "close pipe %zu: %s\n", i,
762 usbd_errstr(err));
763 unp->unp_ep[i] = NULL;
764 }
765 }
766
/*
 * Open every endpoint the driver configured: the INTR endpoint uses
 * the interrupt-pipe API when un_intr was supplied, everything else a
 * plain pipe.  On any failure all pipes opened so far are closed.
 */
static usbd_status
usbnet_ep_open_pipes(struct usbnet * const un)
{
	struct usbnet_intr * const uni = un->un_intr;
	struct usbnet_private * const unp = un->un_pri;

	for (size_t i = 0; i < __arraycount(unp->unp_ep); i++) {
		usbd_status err;

		/* Skip endpoints the driver did not configure. */
		if (un->un_ed[i] == 0)
			continue;

		if (i == USBNET_ENDPT_INTR && uni) {
			err = usbd_open_pipe_intr(un->un_iface, un->un_ed[i],
			    USBD_EXCLUSIVE_USE | USBD_MPSAFE, &unp->unp_ep[i], un,
			    uni->uni_buf, uni->uni_bufsz, usbnet_pipe_intr,
			    uni->uni_interval);
		} else {
			err = usbd_open_pipe(un->un_iface, un->un_ed[i],
			    USBD_EXCLUSIVE_USE | USBD_MPSAFE, &unp->unp_ep[i]);
		}
		if (err) {
			/* Unwind any pipes opened so far. */
			usbnet_ep_close_pipes(un);
			return err;
		}
	}

	return USBD_NORMAL_COMPLETION;
}
796
797 static usbd_status
798 usbnet_ep_stop_pipes(struct usbnet * const un)
799 {
800 struct usbnet_private * const unp = un->un_pri;
801 usbd_status err = USBD_NORMAL_COMPLETION;
802
803 for (size_t i = 0; i < __arraycount(unp->unp_ep); i++) {
804 if (unp->unp_ep[i] == NULL)
805 continue;
806 usbd_status err2 = usbd_abort_pipe(unp->unp_ep[i]);
807 if (err == USBD_NORMAL_COMPLETION && err2)
808 err = err2;
809 }
810
811 return err;
812 }
813
/*
 * Open the pipes, set up the RX/TX chains, mark the interface running,
 * start the receive pipes and the periodic-tick callout.  On error,
 * all partially created state is torn down again.  Called with the
 * core lock held; returns 0 or an errno.
 */
int
usbnet_init_rx_tx(struct usbnet * const un)
{
	USBNETHIST_FUNC(); USBNETHIST_CALLED();
	struct usbnet_private * const unp = un->un_pri;
	struct ifnet * const ifp = usbnet_ifp(un);
	usbd_status err;
	int error = 0;

	KASSERTMSG(!unp->unp_ifp_attached || IFNET_LOCKED(ifp),
	    "%s", ifp->if_xname);

	usbnet_isowned_core(un);

	if (unp->unp_dying) {
		return EIO;
	}

	/* Hold a busy reference for the duration of the setup. */
	usbnet_busy(un);

	/* Open RX and TX pipes. */
	err = usbnet_ep_open_pipes(un);
	if (err) {
		aprint_error_dev(un->un_dev, "open rx/tx pipes failed: %s\n",
		    usbd_errstr(err));
		error = EIO;
		goto out;
	}

	/* Init RX ring. */
	if (usbnet_rx_list_init(un)) {
		aprint_error_dev(un->un_dev, "rx list init failed\n");
		error = ENOBUFS;
		goto out;
	}

	/* Init TX ring. */
	if (usbnet_tx_list_init(un)) {
		aprint_error_dev(un->un_dev, "tx list init failed\n");
		error = ENOBUFS;
		goto out;
	}

	/* Indicate we are up and running. */
	/* XXX urndis calls usbnet_init_rx_tx before usbnet_attach_ifp. */
	KASSERTMSG(!unp->unp_ifp_attached || IFNET_LOCKED(ifp),
	    "%s", ifp->if_xname);
	ifp->if_flags |= IFF_RUNNING;

	/* Start up the receive pipe(s). */
	usbnet_rx_start_pipes(un);

	/* Kick off the periodic tick. */
	callout_schedule(&unp->unp_stat_ch, hz);

out:
	if (error) {
		/* Tear down whatever was created above. */
		usbnet_rx_list_fini(un);
		usbnet_tx_list_fini(un);
		usbnet_ep_close_pipes(un);
	}
	usbnet_unbusy(un);

	usbnet_isowned_core(un);

	return error;
}
880
881 void
882 usbnet_busy(struct usbnet *un)
883 {
884 struct usbnet_private * const unp = un->un_pri;
885
886 usbnet_isowned_core(un);
887
888 unp->unp_refcnt++;
889 }
890
/*
 * Drop a reference taken with usbnet_busy().  Core lock required.
 *
 * NOTE(review): the wakeup fires only when the count goes negative,
 * which presumably means the detach path biases unp_refcnt below zero
 * while waiting on unp_detachcv -- confirm against usbnet_detach
 * (not visible in this chunk).
 */
void
usbnet_unbusy(struct usbnet *un)
{
	struct usbnet_private * const unp = un->un_pri;

	usbnet_isowned_core(un);

	if (--unp->unp_refcnt < 0)
		cv_broadcast(&unp->unp_detachcv);
}
901
902 /* MII management. */
903
904 int
905 usbnet_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
906 {
907 USBNETHIST_FUNC();
908 struct usbnet * const un = device_private(dev);
909 struct usbnet_private * const unp = un->un_pri;
910 int err;
911
912 /* MII layer ensures core_lock is held. */
913 usbnet_isowned_core(un);
914
915 if (unp->unp_dying) {
916 return EIO;
917 }
918
919 usbnet_busy(un);
920 err = uno_read_reg(un, phy, reg, val);
921 usbnet_unbusy(un);
922
923 if (err) {
924 USBNETHIST_CALLARGS("%jd: read PHY failed: %jd",
925 unp->unp_number, err, 0, 0);
926 return err;
927 }
928
929 return 0;
930 }
931
932 int
933 usbnet_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
934 {
935 USBNETHIST_FUNC();
936 struct usbnet * const un = device_private(dev);
937 struct usbnet_private * const unp = un->un_pri;
938 int err;
939
940 /* MII layer ensures core_lock is held. */
941 usbnet_isowned_core(un);
942
943 if (unp->unp_dying) {
944 return EIO;
945 }
946
947 usbnet_busy(un);
948 err = uno_write_reg(un, phy, reg, val);
949 usbnet_unbusy(un);
950
951 if (err) {
952 USBNETHIST_CALLARGS("%jd: write PHY failed: %jd",
953 unp->unp_number, err, 0, 0);
954 return err;
955 }
956
957 return 0;
958 }
959
960 void
961 usbnet_mii_statchg(struct ifnet *ifp)
962 {
963 USBNETHIST_FUNC(); USBNETHIST_CALLED();
964 struct usbnet * const un = ifp->if_softc;
965
966 /* MII layer ensures core_lock is held. */
967 usbnet_isowned_core(un);
968
969 usbnet_busy(un);
970 uno_mii_statchg(un, ifp);
971 usbnet_unbusy(un);
972 }
973
/*
 * ifmedia change callback: clear the cached link state, reset the
 * attached PHYs, and let ether_mediachange() renegotiate.
 */
static int
usbnet_media_upd(struct ifnet *ifp)
{
	USBNETHIST_FUNC(); USBNETHIST_CALLED();
	struct usbnet * const un = ifp->if_softc;
	struct usbnet_private * const unp = un->un_pri;
	struct mii_data * const mii = usbnet_mii(un);

	/* ifmedia layer ensures core_lock is held. */
	usbnet_isowned_core(un);

	/* ifmedia changes only with IFNET_LOCK held. */
	KASSERTMSG(IFNET_LOCKED(ifp), "%s", ifp->if_xname);

	if (unp->unp_dying)
		return EIO;

	/* Force a fresh link-state determination. */
	unp->unp_link = false;

	if (mii->mii_instance) {
		struct mii_softc *miisc;

		/* Reset every attached PHY before renegotiating. */
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}

	return ether_mediachange(ifp);
}
1002
1003 /* ioctl */
1004
1005 static int
1006 usbnet_ifflags_cb(struct ethercom *ec)
1007 {
1008 USBNETHIST_FUNC(); USBNETHIST_CALLED();
1009 struct ifnet *ifp = &ec->ec_if;
1010 struct usbnet *un = ifp->if_softc;
1011 struct usbnet_private * const unp = un->un_pri;
1012 int rv = 0;
1013
1014 KASSERTMSG(IFNET_LOCKED(ifp), "%s", ifp->if_xname);
1015
1016 mutex_enter(&unp->unp_core_lock);
1017
1018 const u_short changed = ifp->if_flags ^ unp->unp_if_flags;
1019 if ((changed & ~(IFF_CANTCHANGE | IFF_DEBUG)) == 0) {
1020 unp->unp_if_flags = ifp->if_flags;
1021 if ((changed & IFF_PROMISC) != 0)
1022 rv = ENETRESET;
1023 } else {
1024 rv = ENETRESET;
1025 }
1026
1027 mutex_exit(&unp->unp_core_lock);
1028
1029 return rv;
1030 }
1031
1032 static int
1033 usbnet_if_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1034 {
1035 USBNETHIST_FUNC();
1036 struct usbnet * const un = ifp->if_softc;
1037 struct usbnet_private * const unp __unused = un->un_pri;
1038 int error;
1039
1040 USBNETHIST_CALLARGSN(11, "%jd: enter %#jx data %#jx",
1041 unp->unp_number, cmd, (uintptr_t)data, 0);
1042
1043 if (un->un_ops->uno_override_ioctl)
1044 return uno_override_ioctl(un, ifp, cmd, data);
1045
1046 error = ether_ioctl(ifp, cmd, data);
1047 if (error == ENETRESET) {
1048 switch (cmd) {
1049 case SIOCADDMULTI:
1050 case SIOCDELMULTI:
1051 usb_add_task(un->un_udev, &unp->unp_mcasttask,
1052 USB_TASKQ_DRIVER);
1053 error = 0;
1054 break;
1055 default:
1056 error = uno_ioctl(un, ifp, cmd, data);
1057 }
1058 }
1059
1060 return error;
1061 }
1062
/*
 * usb task that reprograms the hardware multicast filter after an
 * SIOCADDMULTI/SIOCDELMULTI, in a context that may sleep for USB
 * transfers.
 */
static void
usbnet_mcast_task(void *arg)
{
	USBNETHIST_FUNC();
	struct usbnet * const un = arg;
	struct usbnet_private * const unp = un->un_pri;
	struct ifnet * const ifp = usbnet_ifp(un);
	bool dying;
	struct ifreq ifr;

	USBNETHIST_CALLARGSN(10, "%jd: enter", unp->unp_number, 0, 0, 0);

	/*
	 * If we're detaching, we must check unp_dying _before_
	 * touching IFNET_LOCK -- the ifnet may have been detached by
	 * the time this task runs.  This is racy -- unp_dying may be
	 * set immediately after we test it -- but nevertheless safe,
	 * because usbnet_detach waits for the task to complete before
	 * issuing if_detach, and necessary, so that we don't touch
	 * IFNET_LOCK after if_detach.  See usbnet_detach for details.
	 */
	mutex_enter(&unp->unp_core_lock);
	dying = unp->unp_dying;
	mutex_exit(&unp->unp_core_lock);
	if (dying)
		return;

	/*
	 * Pass a bogus ifr with SIOCDELMULTI -- the goal is to just
	 * notify the driver to reprogram any hardware multicast
	 * filter, according to what's already stored in the ethercom.
	 * None of the drivers actually examine this argument, so it
	 * doesn't change the ABI as far as they can tell.
	 */
	IFNET_LOCK(ifp);
	if (ifp->if_flags & IFF_RUNNING) {
		memset(&ifr, 0, sizeof(ifr));
		(void)uno_ioctl(un, ifp, SIOCDELMULTI, &ifr);
	}
	IFNET_UNLOCK(ifp);
}
1104
1105 /*
1106 * Generic stop network function:
1107 * - mark as stopping
1108 * - call DD routine to stop the device
1109 * - turn off running, timer, statchg callout, link
1110 * - stop transfers
1111 * - free RX and TX resources
1112 * - close pipes
1113 *
1114 * usbnet_stop() is exported for drivers to use, expects lock held.
1115 *
1116 * usbnet_if_stop() is for the if_stop handler.
1117 */
void
usbnet_stop(struct usbnet *un, struct ifnet *ifp, int disable)
{
	struct usbnet_private * const unp = un->un_pri;

	USBNETHIST_FUNC(); USBNETHIST_CALLED();

	KASSERTMSG(!unp->unp_ifp_attached || IFNET_LOCKED(ifp),
	    "%s", ifp->if_xname);
	usbnet_isowned_core(un);

	usbnet_busy(un);

	/* Tell the RX and TX paths to stop accepting new work. */
	mutex_enter(&unp->unp_rxlock);
	mutex_enter(&unp->unp_txlock);
	unp->unp_stopping = true;
	mutex_exit(&unp->unp_txlock);
	mutex_exit(&unp->unp_rxlock);

	/*
	 * Stop the timer first, then the task -- if the timer was
	 * already firing, we stop the task (or wait for it to
	 * complete) only after it last fired.  Setting unp_stopping
	 * prevents the timer task from being scheduled again.
	 */
	callout_halt(&unp->unp_stat_ch, &unp->unp_core_lock);
	usb_rem_task_wait(un->un_udev, &unp->unp_ticktask, USB_TASKQ_DRIVER,
	    &unp->unp_core_lock);

	/*
	 * Now that the software is quiescent, ask the driver to stop
	 * the hardware.  The driver's uno_stop routine now has
	 * exclusive access to any registers that might previously have
	 * been used by ifmedia, mii, or ioctl callbacks.
	 */
	uno_stop(un, ifp, disable);

	/* Clear the watchdog timer. */
	mutex_enter(&unp->unp_txlock);
	unp->unp_timer = 0;
	mutex_exit(&unp->unp_txlock);

	/* Stop transfers. */
	usbnet_ep_stop_pipes(un);

	/* Free RX/TX resources. */
	usbnet_rx_list_fini(un);
	usbnet_tx_list_fini(un);

	/* Close pipes. */
	usbnet_ep_close_pipes(un);

	/* Everything is quiesced now. */
	KASSERTMSG(!unp->unp_ifp_attached || IFNET_LOCKED(ifp),
	    "%s", ifp->if_xname);
	ifp->if_flags &= ~IFF_RUNNING;

	usbnet_unbusy(un);
}
1177
1178 static void
1179 usbnet_if_stop(struct ifnet *ifp, int disable)
1180 {
1181 struct usbnet * const un = ifp->if_softc;
1182 struct usbnet_private * const unp = un->un_pri;
1183
1184 KASSERTMSG(IFNET_LOCKED(ifp), "%s", ifp->if_xname);
1185
1186 mutex_enter(&unp->unp_core_lock);
1187 usbnet_stop(un, ifp, disable);
1188 mutex_exit(&unp->unp_core_lock);
1189 }
1190
1191 /*
1192 * Generic tick task function.
1193 *
1194 * usbnet_tick() is triggered from a callout, and triggers a call to
1195 * usbnet_tick_task() from the usb_task subsystem.
1196 */
1197 static void
1198 usbnet_tick(void *arg)
1199 {
1200 USBNETHIST_FUNC();
1201 struct usbnet * const un = arg;
1202 struct usbnet_private * const unp = un->un_pri;
1203
1204 USBNETHIST_CALLARGSN(10, "%jd: enter", unp->unp_number, 0, 0, 0);
1205
1206 /* Perform periodic stuff in process context */
1207 usb_add_task(un->un_udev, &unp->unp_ticktask, USB_TASKQ_DRIVER);
1208 }
1209
1210 static void
1211 usbnet_watchdog(struct ifnet *ifp)
1212 {
1213 USBNETHIST_FUNC(); USBNETHIST_CALLED();
1214 struct usbnet * const un = ifp->if_softc;
1215 struct usbnet_private * const unp = un->un_pri;
1216 struct usbnet_cdata * const cd = un_cdata(un);
1217 usbd_status err;
1218
1219 if_statinc(ifp, if_oerrors);
1220 device_printf(un->un_dev, "watchdog timeout\n");
1221
1222 if (cd->uncd_tx_cnt > 0) {
1223 DPRINTF("uncd_tx_cnt=%ju non zero, aborting pipe", 0, 0, 0, 0);
1224 err = usbd_abort_pipe(unp->unp_ep[USBNET_ENDPT_TX]);
1225 if (err)
1226 device_printf(un->un_dev, "pipe abort failed: %s\n",
1227 usbd_errstr(err));
1228 if (cd->uncd_tx_cnt != 0)
1229 DPRINTF("uncd_tx_cnt now %ju", cd->uncd_tx_cnt, 0, 0, 0);
1230 }
1231
1232 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1233 (*ifp->if_start)(ifp);
1234 }
1235
/*
 * Periodic work, run in USB task (thread) context while the
 * interface is up: drives the transmit watchdog, ticks the MII
 * state machine, gives the driver its uno_tick hook, and re-arms
 * the callout unless the device is stopping or dying.
 */
static void
usbnet_tick_task(void *arg)
{
	USBNETHIST_FUNC();
	struct usbnet * const un = arg;
	struct usbnet_private * const unp = un->un_pri;
	struct ifnet * const ifp = usbnet_ifp(un);
	struct mii_data * const mii = usbnet_mii(un);

	USBNETHIST_CALLARGSN(8, "%jd: enter", unp->unp_number, 0, 0, 0);

	/* Decrement the watchdog timer; fire the watchdog on 1 -> 0. */
	mutex_enter(&unp->unp_txlock);
	const bool timeout = unp->unp_timer != 0 && --unp->unp_timer == 0;
	mutex_exit(&unp->unp_txlock);
	if (timeout)
		usbnet_watchdog(ifp);

	DPRINTFN(8, "mii %#jx ifp %#jx", (uintptr_t)mii, (uintptr_t)ifp, 0, 0);
	if (mii) {
		/*
		 * Poll PHY state under the core lock; while the link
		 * is down, force a statchg call so drivers notice a
		 * link coming up.
		 */
		mutex_enter(&unp->unp_core_lock);
		mii_tick(mii);
		if (!unp->unp_link)
			(*mii->mii_statchg)(ifp);
		mutex_exit(&unp->unp_core_lock);
	}

	/* Call driver if requested. */
	uno_tick(un);

	/* Reschedule the one-second callout unless shutting down. */
	mutex_enter(&unp->unp_core_lock);
	if (!unp->unp_stopping && !unp->unp_dying)
		callout_schedule(&unp->unp_stat_ch, hz);
	mutex_exit(&unp->unp_core_lock);
}
1270
1271 static int
1272 usbnet_if_init(struct ifnet *ifp)
1273 {
1274 USBNETHIST_FUNC(); USBNETHIST_CALLED();
1275 struct usbnet * const un = ifp->if_softc;
1276 bool dying;
1277
1278 KASSERTMSG(IFNET_LOCKED(ifp), "%s", ifp->if_xname);
1279
1280 /*
1281 * Prevent anyone from bringing the interface back up once
1282 * we're detaching.
1283 */
1284 mutex_enter(&un->un_pri->unp_core_lock);
1285 dying = un->un_pri->unp_dying;
1286 mutex_exit(&un->un_pri->unp_core_lock);
1287 if (dying)
1288 return EIO;
1289
1290 return uno_init(un, ifp);
1291 }
1292
1293
1294 /* Various accessors. */
1295
/* Record the link state reported by the driver/MII. */
void
usbnet_set_link(struct usbnet *un, bool link)
{
	un->un_pri->unp_link = link;
}
1301
/* Return the network interface embedded in our ethercom. */
struct ifnet *
usbnet_ifp(struct usbnet *un)
{
	return &un->un_pri->unp_ec.ec_if;
}
1307
/* Return the ethercom for this device. */
struct ethercom *
usbnet_ec(struct usbnet *un)
{
	return &un->un_pri->unp_ec;
}
1313
/* Return the MII data, or NULL if attached without MII. */
struct mii_data *
usbnet_mii(struct usbnet *un)
{
	return un->un_pri->unp_ec.ec_mii;
}
1319
/* Return the entropy source attached for this device. */
krndsource_t *
usbnet_rndsrc(struct usbnet *un)
{
	return &un->un_pri->unp_rndsrc;
}
1325
/* Return the driver's private softc. */
void *
usbnet_softc(struct usbnet *un)
{
	return un->un_sc;
}
1331
/* True if the link is currently up. */
bool
usbnet_havelink(struct usbnet *un)
{
	return un->un_pri->unp_link;
}
1337
/* True once detach or deactivation has begun. */
bool
usbnet_isdying(struct usbnet *un)
{
	return un->un_pri->unp_dying;
}
1343
1344
1345 /* Locking. */
1346
/* Acquire the core lock on behalf of a driver. */
void
usbnet_lock_core(struct usbnet *un)
{
	mutex_enter(&un->un_pri->unp_core_lock);
}
1352
/* Release the core lock. */
void
usbnet_unlock_core(struct usbnet *un)
{
	mutex_exit(&un->un_pri->unp_core_lock);
}
1358
/* Return the core lock itself (eg, for ifmedia_init_with_lock). */
kmutex_t *
usbnet_mutex_core(struct usbnet *un)
{
	return &un->un_pri->unp_core_lock;
}
1364
/* Acquire the receive lock on behalf of a driver. */
void
usbnet_lock_rx(struct usbnet *un)
{
	mutex_enter(&un->un_pri->unp_rxlock);
}
1370
/* Release the receive lock. */
void
usbnet_unlock_rx(struct usbnet *un)
{
	mutex_exit(&un->un_pri->unp_rxlock);
}
1376
/* Return the receive lock itself. */
kmutex_t *
usbnet_mutex_rx(struct usbnet *un)
{
	return &un->un_pri->unp_rxlock;
}
1382
/* Acquire the transmit lock on behalf of a driver. */
void
usbnet_lock_tx(struct usbnet *un)
{
	mutex_enter(&un->un_pri->unp_txlock);
}
1388
/* Release the transmit lock. */
void
usbnet_unlock_tx(struct usbnet *un)
{
	mutex_exit(&un->un_pri->unp_txlock);
}
1394
/* Return the transmit lock itself. */
kmutex_t *
usbnet_mutex_tx(struct usbnet *un)
{
	return &un->un_pri->unp_txlock;
}
1400
1401 /* Autoconf management. */
1402
1403 static bool
1404 usbnet_empty_eaddr(struct usbnet * const un)
1405 {
1406 return (un->un_eaddr[0] == 0 && un->un_eaddr[1] == 0 &&
1407 un->un_eaddr[2] == 0 && un->un_eaddr[3] == 0 &&
1408 un->un_eaddr[4] == 0 && un->un_eaddr[5] == 0);
1409 }
1410
1411 /*
1412 * usbnet_attach() and usbnet_attach_ifp() perform setup of the relevant
1413 * 'usbnet'. The first is enough to enable device access (eg, endpoints
1414 * are connected and commands can be sent), and the second connects the
1415 * device to the system networking.
1416 *
 * Always call usbnet_detach(), even if usbnet_attach_ifp() is skipped.
1418 * Also usable as driver detach directly.
1419 *
1420 * To skip ethernet configuration (eg, point-to-point), make sure that
1421 * the un_eaddr[] is fully zero.
1422 */
1423
/*
 * First-stage attach: initialize the usbnet private state -- tasks,
 * callout, locks, detach cv, entropy source, and rx/tx chain lists
 * -- so that the device can be accessed.  Networking is hooked up
 * separately by usbnet_attach_ifp().  Must always be paired with
 * usbnet_detach(), even if usbnet_attach_ifp() is never called.
 */
void
usbnet_attach(struct usbnet *un,
	      const char *detname)	/* detach cv name */
{
	USBNETHIST_FUNC(); USBNETHIST_CALLED();

	/* Required inputs. */
	KASSERT(un->un_ops->uno_tx_prepare);
	KASSERT(un->un_ops->uno_rx_loop);
	KASSERT(un->un_ops->uno_init);
	KASSERT(un->un_rx_bufsz);
	KASSERT(un->un_tx_bufsz);
	KASSERT(un->un_rx_list_cnt);
	KASSERT(un->un_tx_list_cnt);

	/* Unfortunate fact. */
	KASSERT(un == device_private(un->un_dev));

	un->un_pri = kmem_zalloc(sizeof(*un->un_pri), KM_SLEEP);
	struct usbnet_private * const unp = un->un_pri;

	/* Both tasks run on the MP-safe USB driver task queue. */
	usb_init_task(&unp->unp_mcasttask, usbnet_mcast_task, un,
	    USB_TASKQ_MPSAFE);
	usb_init_task(&unp->unp_ticktask, usbnet_tick_task, un,
	    USB_TASKQ_MPSAFE);
	/* Periodic callout driving usbnet_tick(). */
	callout_init(&unp->unp_stat_ch, CALLOUT_MPSAFE);
	callout_setfunc(&unp->unp_stat_ch, usbnet_tick, un);

	/* rx/tx locks interlock with soft USB interrupts. */
	mutex_init(&unp->unp_txlock, MUTEX_DEFAULT, IPL_SOFTUSB);
	mutex_init(&unp->unp_rxlock, MUTEX_DEFAULT, IPL_SOFTUSB);
	mutex_init(&unp->unp_core_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&unp->unp_detachcv, detname);

	rnd_attach_source(&unp->unp_rndsrc, device_xname(un->un_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	usbnet_rx_list_alloc(un);
	usbnet_tx_list_alloc(un);

	/* Per-instance number used in usbhist debug output. */
	unp->unp_number = atomic_inc_uint_nv(&usbnet_number);

	unp->unp_attached = true;
}
1467
/*
 * Set up MII monitoring: hook the shared usbnet MII accessors into
 * the mii_data, initialize ifmedia under the core lock, and probe
 * for PHYs.  If no PHY is found, offer only a "none" medium.
 */
static void
usbnet_attach_mii(struct usbnet *un, const struct usbnet_mii *unm)
{
	USBNETHIST_FUNC(); USBNETHIST_CALLED();
	struct usbnet_private * const unp = un->un_pri;
	struct mii_data * const mii = &unp->unp_mii;
	struct ifnet * const ifp = usbnet_ifp(un);

	/* MII needs driver register access and a statchg callback. */
	KASSERT(un->un_ops->uno_read_reg);
	KASSERT(un->un_ops->uno_write_reg);
	KASSERT(un->un_ops->uno_statchg);

	mii->mii_ifp = ifp;
	mii->mii_readreg = usbnet_mii_readreg;
	mii->mii_writereg = usbnet_mii_writereg;
	mii->mii_statchg = usbnet_mii_statchg;
	mii->mii_flags = MIIF_AUTOTSLEEP;

	usbnet_ec(un)->ec_mii = mii;
	ifmedia_init_with_lock(&mii->mii_media, 0,
	    usbnet_media_upd, ether_mediastatus, usbnet_mutex_core(un));
	mii_attach(un->un_dev, mii, unm->un_mii_capmask, unm->un_mii_phyloc,
	    unm->un_mii_offset, unm->un_mii_flags);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY found: expose a single "none" medium. */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
}
1498
/*
 * Second-stage attach: connect the device to the network stack.
 * Fills in the ifnet handlers, attaches MII when 'unm' is given
 * (otherwise the link is treated as always up), registers the
 * interface, and runs ether_ifattach() -- or a bare bpf attach for
 * devices with an all-zero MAC address.
 */
void
usbnet_attach_ifp(struct usbnet *un,
		  unsigned if_flags,		/* additional if_flags */
		  unsigned if_extflags,	/* additional if_extflags */
		  const struct usbnet_mii *unm)	/* additional mii_attach flags */
{
	USBNETHIST_FUNC(); USBNETHIST_CALLED();
	struct usbnet_private * const unp = un->un_pri;
	struct ifnet * const ifp = usbnet_ifp(un);

	/* usbnet_attach() must have run; attach_ifp only once. */
	KASSERT(unp->unp_attached);
	KASSERT(!unp->unp_ifp_attached);

	ifp->if_softc = un;
	strlcpy(ifp->if_xname, device_xname(un->un_dev), IFNAMSIZ);
	ifp->if_flags = if_flags;
	ifp->if_extflags = IFEF_MPSAFE | if_extflags;
	ifp->if_ioctl = usbnet_if_ioctl;
	ifp->if_start = usbnet_if_start;
	ifp->if_init = usbnet_if_init;
	ifp->if_stop = usbnet_if_stop;

	if (unm)
		usbnet_attach_mii(un, unm);
	else
		unp->unp_link = true;

	/* Attach the interface. */
	if_initialize(ifp);
	if (ifp->_if_input == NULL)
		ifp->if_percpuq = if_percpuq_create(ifp);
	if_register(ifp);
	unp->unp_ifp_attached = true;

	/*
	 * If ethernet address is all zero, skip ether_ifattach() and
	 * instead attach bpf here.
	 */
	if (!usbnet_empty_eaddr(un)) {
		ether_set_ifflags_cb(&unp->unp_ec, usbnet_ifflags_cb);
		aprint_normal_dev(un->un_dev, "Ethernet address %s\n",
		    ether_sprintf(un->un_eaddr));
		ether_ifattach(ifp, un->un_eaddr);
	} else {
		if_alloc_sadl(ifp);
		bpf_attach(ifp, DLT_RAW, 0);
	}

	/* Now ready, and attached. */
	IFQ_SET_READY(&ifp->if_snd);

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, un->un_udev, un->un_dev);

	if (!pmf_device_register(un->un_dev, NULL, NULL))
		aprint_error_dev(un->un_dev, "couldn't establish power handler\n");
}
1555
/*
 * Device detach: undo both attach stages.  Safe to call even if
 * usbnet_attach_ifp() was skipped or usbnet_attach() never
 * completed.  Marks the device dying, stops the interface, waits
 * out all asynchronous activity and outstanding references, then
 * tears down the networking state and frees the private data.
 * Also usable directly as a driver's detach entry point.
 */
int
usbnet_detach(device_t self, int flags)
{
	USBNETHIST_FUNC(); USBNETHIST_CALLED();
	struct usbnet * const un = device_private(self);
	struct usbnet_private * const unp = un->un_pri;

	/* Detached before attached finished, so just bail out. */
	if (unp == NULL || !unp->unp_attached)
		return 0;

	struct ifnet * const ifp = usbnet_ifp(un);
	struct mii_data * const mii = usbnet_mii(un);

	/*
	 * Prevent new activity.  After we stop the interface, it
	 * cannot be brought back up.
	 */
	mutex_enter(&unp->unp_core_lock);
	unp->unp_dying = true;
	mutex_exit(&unp->unp_core_lock);

	/*
	 * If we're still running on the network, stop and wait for all
	 * asynchronous activity to finish.
	 */
	IFNET_LOCK(ifp);
	if (ifp->if_flags & IFF_RUNNING) {
		usbnet_if_stop(ifp, 1);
	}
	IFNET_UNLOCK(ifp);

	/*
	 * The callout and tick task can't be scheduled anew at this
	 * point, and usbnet_if_stop has waited for them to complete.
	 */
	KASSERT(!callout_pending(&unp->unp_stat_ch));
	KASSERT(!usb_task_pending(un->un_udev, &unp->unp_ticktask));

	/* First pass; see the race discussion below for the second. */
	usb_rem_task_wait(un->un_udev, &unp->unp_mcasttask, USB_TASKQ_DRIVER,
	    NULL);

	if (mii) {
		mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
		ifmedia_fini(&mii->mii_media);
	}
	if (unp->unp_ifp_attached) {
		/* Mirror the attach-time choice of ether vs bare bpf. */
		if (!usbnet_empty_eaddr(un))
			ether_ifdetach(ifp);
		else
			bpf_detach(ifp);
		if_detach(ifp);
	}
	usbnet_ec(un)->ec_mii = NULL;

	/*
	 * We have already waited for the multicast task to complete.
	 * Unfortunately, until if_detach, nothing has prevented it
	 * from running again -- another thread might issue if_mcast_op
	 * between the time of our first usb_rem_task_wait and the time
	 * we actually get around to if_detach.
	 *
	 * Fortunately, the first usb_rem_task_wait ensures that if the
	 * task is scheduled again, it will witness our setting of
	 * unp_dying to true[*].  So after that point, if the task is
	 * scheduled again, it will decline to touch IFNET_LOCK and do
	 * nothing.  But we still need to wait for it to complete.
	 *
	 * It would be nice if we could write
	 *
	 *	if_pleasestopissuingmcastopsthanks(ifp);
	 *	usb_rem_task_wait(..., &unp->unp_mcasttask, ...);
	 *	if_detach(ifp);
	 *
	 * and then we would need only one usb_rem_task_wait.
	 *
	 * Unfortunately, there is no such operation available in
	 * sys/net at the moment, and it would require a bit of
	 * coordination with if_mcast_op and doifioctl probably under a
	 * new lock.  So we'll use this kludge until that mechanism is
	 * invented.
	 *
	 * [*] This is not exactly a documented property of the API,
	 * but it is implied by the single lock in the task queue
	 * serializing changes to the task state.
	 */
	usb_rem_task_wait(un->un_udev, &unp->unp_mcasttask, USB_TASKQ_DRIVER,
	    NULL);

	/* Drop our own reference and wait for all others to drain. */
	mutex_enter(&unp->unp_core_lock);
	unp->unp_refcnt--;
	while (unp->unp_refcnt >= 0) {
		/* Wait for processes to go away */
		cv_wait(&unp->unp_detachcv, &unp->unp_core_lock);
	}
	mutex_exit(&unp->unp_core_lock);

	usbnet_rx_list_free(un);
	usbnet_tx_list_free(un);

	rnd_detach_source(&unp->unp_rndsrc);

	cv_destroy(&unp->unp_detachcv);
	mutex_destroy(&unp->unp_core_lock);
	mutex_destroy(&unp->unp_rxlock);
	mutex_destroy(&unp->unp_txlock);

	callout_destroy(&unp->unp_stat_ch);

	pmf_device_deregister(un->un_dev);

	/*
	 * Notify userland that we're going away, if we arrived in the
	 * first place.
	 */
	if (unp->unp_ifp_attached) {
		usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, un->un_udev,
		    un->un_dev);
	}

	kmem_free(unp, sizeof(*unp));
	un->un_pri = NULL;

	return 0;
}
1681
1682 int
1683 usbnet_activate(device_t self, devact_t act)
1684 {
1685 USBNETHIST_FUNC(); USBNETHIST_CALLED();
1686 struct usbnet * const un = device_private(self);
1687 struct usbnet_private * const unp = un->un_pri;
1688 struct ifnet * const ifp = usbnet_ifp(un);
1689
1690 switch (act) {
1691 case DVACT_DEACTIVATE:
1692 if_deactivate(ifp);
1693
1694 mutex_enter(&unp->unp_core_lock);
1695 unp->unp_dying = true;
1696 mutex_exit(&unp->unp_core_lock);
1697
1698 mutex_enter(&unp->unp_rxlock);
1699 mutex_enter(&unp->unp_txlock);
1700 unp->unp_stopping = true;
1701 mutex_exit(&unp->unp_txlock);
1702 mutex_exit(&unp->unp_rxlock);
1703
1704 return 0;
1705 default:
1706 return EOPNOTSUPP;
1707 }
1708 }
1709
1710 MODULE(MODULE_CLASS_MISC, usbnet, NULL);
1711
1712 static int
1713 usbnet_modcmd(modcmd_t cmd, void *arg)
1714 {
1715 switch (cmd) {
1716 case MODULE_CMD_INIT:
1717 return 0;
1718 case MODULE_CMD_FINI:
1719 return 0;
1720 case MODULE_CMD_STAT:
1721 case MODULE_CMD_AUTOUNLOAD:
1722 default:
1723 return ENOTTY;
1724 }
1725 }
1726