      1 /*	$NetBSD: if_vioif.c,v 1.58 2020/05/25 08:41:13 yamaguchi Exp $	*/
2
3 /*
4 * Copyright (c) 2010 Minoura Makoto.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.58 2020/05/25 08:41:13 yamaguchi Exp $");
30
31 #ifdef _KERNEL_OPT
32 #include "opt_net_mpsafe.h"
33 #endif
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/atomic.h>
39 #include <sys/bus.h>
40 #include <sys/condvar.h>
41 #include <sys/device.h>
42 #include <sys/intr.h>
43 #include <sys/kmem.h>
44 #include <sys/mbuf.h>
45 #include <sys/mutex.h>
46 #include <sys/sockio.h>
47 #include <sys/cpu.h>
48 #include <sys/module.h>
49 #include <sys/pcq.h>
50 #include <sys/workqueue.h>
51
52 #include <dev/pci/virtioreg.h>
53 #include <dev/pci/virtiovar.h>
54
55 #include <net/if.h>
56 #include <net/if_media.h>
57 #include <net/if_ether.h>
58
59 #include <net/bpf.h>
60
61 #include "ioconf.h"
62
63 #ifdef NET_MPSAFE
64 #define VIOIF_MPSAFE 1
65 #define VIOIF_MULTIQ 1
66 #endif
67
68 /*
69 * if_vioifreg.h:
70 */
71 /* Configuration registers */
72 #define VIRTIO_NET_CONFIG_MAC 0 /* 8bit x 6byte */
73 #define VIRTIO_NET_CONFIG_STATUS 6 /* 16bit */
74 #define VIRTIO_NET_CONFIG_MAX_VQ_PAIRS 8 /* 16bit */
75
76 /* Feature bits */
77 #define VIRTIO_NET_F_CSUM __BIT(0)
78 #define VIRTIO_NET_F_GUEST_CSUM __BIT(1)
79 #define VIRTIO_NET_F_MAC __BIT(5)
80 #define VIRTIO_NET_F_GSO __BIT(6)
81 #define VIRTIO_NET_F_GUEST_TSO4 __BIT(7)
82 #define VIRTIO_NET_F_GUEST_TSO6 __BIT(8)
83 #define VIRTIO_NET_F_GUEST_ECN __BIT(9)
84 #define VIRTIO_NET_F_GUEST_UFO __BIT(10)
85 #define VIRTIO_NET_F_HOST_TSO4 __BIT(11)
86 #define VIRTIO_NET_F_HOST_TSO6 __BIT(12)
87 #define VIRTIO_NET_F_HOST_ECN __BIT(13)
88 #define VIRTIO_NET_F_HOST_UFO __BIT(14)
89 #define VIRTIO_NET_F_MRG_RXBUF __BIT(15)
90 #define VIRTIO_NET_F_STATUS __BIT(16)
91 #define VIRTIO_NET_F_CTRL_VQ __BIT(17)
92 #define VIRTIO_NET_F_CTRL_RX __BIT(18)
93 #define VIRTIO_NET_F_CTRL_VLAN __BIT(19)
94 #define VIRTIO_NET_F_CTRL_RX_EXTRA __BIT(20)
95 #define VIRTIO_NET_F_GUEST_ANNOUNCE __BIT(21)
96 #define VIRTIO_NET_F_MQ __BIT(22)
97
98 #define VIRTIO_NET_FLAG_BITS \
99 VIRTIO_COMMON_FLAG_BITS \
100 "\x17""MQ" \
101 "\x16""GUEST_ANNOUNCE" \
102 "\x15""CTRL_RX_EXTRA" \
103 "\x14""CTRL_VLAN" \
104 "\x13""CTRL_RX" \
105 "\x12""CTRL_VQ" \
106 "\x11""STATUS" \
107 "\x10""MRG_RXBUF" \
108 "\x0f""HOST_UFO" \
109 "\x0e""HOST_ECN" \
110 "\x0d""HOST_TSO6" \
111 "\x0c""HOST_TSO4" \
112 "\x0b""GUEST_UFO" \
113 "\x0a""GUEST_ECN" \
114 "\x09""GUEST_TSO6" \
115 "\x08""GUEST_TSO4" \
116 "\x07""GSO" \
117 "\x06""MAC" \
118 "\x02""GUEST_CSUM" \
119 "\x01""CSUM"
120
121 /* Status */
122 #define VIRTIO_NET_S_LINK_UP 1
123
124 /* Packet header structure */
125 struct virtio_net_hdr {
126 uint8_t flags;
127 uint8_t gso_type;
128 uint16_t hdr_len;
129 uint16_t gso_size;
130 uint16_t csum_start;
131 uint16_t csum_offset;
132 #if 0
133 uint16_t num_buffers; /* if VIRTIO_NET_F_MRG_RXBUF enabled */
134 #endif
135 } __packed;
136
137 #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* flags */
138 #define VIRTIO_NET_HDR_GSO_NONE 0 /* gso_type */
139 #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* gso_type */
140 #define VIRTIO_NET_HDR_GSO_UDP 3 /* gso_type */
141 #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* gso_type */
142 #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* gso_type, |'ed */
143
144 #define VIRTIO_NET_MAX_GSO_LEN (65536+ETHER_HDR_LEN)
145
146 /* Control virtqueue */
147 struct virtio_net_ctrl_cmd {
148 uint8_t class;
149 uint8_t command;
150 } __packed;
151 #define VIRTIO_NET_CTRL_RX 0
152 # define VIRTIO_NET_CTRL_RX_PROMISC 0
153 # define VIRTIO_NET_CTRL_RX_ALLMULTI 1
154
155 #define VIRTIO_NET_CTRL_MAC 1
156 # define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
157
158 #define VIRTIO_NET_CTRL_VLAN 2
159 # define VIRTIO_NET_CTRL_VLAN_ADD 0
160 # define VIRTIO_NET_CTRL_VLAN_DEL 1
161
162 #define VIRTIO_NET_CTRL_MQ 4
163 # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
164 # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
165 # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
166
167 struct virtio_net_ctrl_status {
168 uint8_t ack;
169 } __packed;
170 #define VIRTIO_NET_OK 0
171 #define VIRTIO_NET_ERR 1
172
173 struct virtio_net_ctrl_rx {
174 uint8_t onoff;
175 } __packed;
176
177 struct virtio_net_ctrl_mac_tbl {
178 uint32_t nentries;
179 uint8_t macs[][ETHER_ADDR_LEN];
180 } __packed;
181
182 struct virtio_net_ctrl_vlan {
183 uint16_t id;
184 } __packed;
185
186 struct virtio_net_ctrl_mq {
187 uint16_t virtqueue_pairs;
188 } __packed;
189
190 struct vioif_ctrl_cmdspec {
191 bus_dmamap_t dmamap;
192 void *buf;
193 bus_size_t bufsize;
194 };
195
196 /*
197 * if_vioifvar.h:
198 */
199
200 /*
201 * Locking notes:
202 * + a field in vioif_txqueue is protected by txq_lock (a spin mutex), and
203 * a field in vioif_rxqueue is protected by rxq_lock (a spin mutex).
    204  *      - no more than one of these locks may be held at once
205 * + ctrlq_inuse is protected by ctrlq_wait_lock.
206 * - other fields in vioif_ctrlqueue are protected by ctrlq_inuse
207 * - txq_lock or rxq_lock cannot be held along with ctrlq_wait_lock
208 */
209
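/*
 * Deferred work item run on the driver's tx/rx workqueue; "added" is
 * nonzero while the work is queued.
 */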
210 struct vioif_work {
211 struct work cookie;
212 void (*func)(void *);
213 void *arg;
214 unsigned int added;
215 };
216
217 struct vioif_txqueue {
218 kmutex_t *txq_lock; /* lock for tx operations */
219
220 struct virtqueue *txq_vq;
221 bool txq_stopping;
222 bool txq_link_active;
223 pcq_t *txq_intrq;
224
225 struct virtio_net_hdr *txq_hdrs;
226 bus_dmamap_t *txq_hdr_dmamaps;
227
228 struct mbuf **txq_mbufs;
229 bus_dmamap_t *txq_dmamaps;
230
231 void *txq_deferred_transmit;
232 void *txq_handle_si;
233 struct vioif_work txq_work;
234 bool txq_workqueue;
235 bool txq_active;
236 };
237
238 struct vioif_rxqueue {
239 kmutex_t *rxq_lock; /* lock for rx operations */
240
241 struct virtqueue *rxq_vq;
242 bool rxq_stopping;
243
244 struct virtio_net_hdr *rxq_hdrs;
245 bus_dmamap_t *rxq_hdr_dmamaps;
246
247 struct mbuf **rxq_mbufs;
248 bus_dmamap_t *rxq_dmamaps;
249
250 void *rxq_softint;
251 void *rxq_handle_si;
252 struct vioif_work rxq_work;
253 bool rxq_workqueue;
254 bool rxq_active;
255 };
256
257 struct vioif_ctrlqueue {
258 struct virtqueue *ctrlq_vq;
259 enum {
260 FREE, INUSE, DONE
261 } ctrlq_inuse;
262 kcondvar_t ctrlq_wait;
263 kmutex_t ctrlq_wait_lock;
264 struct lwp *ctrlq_owner;
265
266 struct virtio_net_ctrl_cmd *ctrlq_cmd;
267 struct virtio_net_ctrl_status *ctrlq_status;
268 struct virtio_net_ctrl_rx *ctrlq_rx;
269 struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_uc;
270 struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_mc;
271 struct virtio_net_ctrl_mq *ctrlq_mq;
272
273 bus_dmamap_t ctrlq_cmd_dmamap;
274 bus_dmamap_t ctrlq_status_dmamap;
275 bus_dmamap_t ctrlq_rx_dmamap;
276 bus_dmamap_t ctrlq_tbl_uc_dmamap;
277 bus_dmamap_t ctrlq_tbl_mc_dmamap;
278 bus_dmamap_t ctrlq_mq_dmamap;
279 };
280
281 struct vioif_softc {
282 device_t sc_dev;
283 struct sysctllog *sc_sysctllog;
284
285 struct virtio_softc *sc_virtio;
286 struct virtqueue *sc_vqs;
287
288 int sc_max_nvq_pairs;
289 int sc_req_nvq_pairs;
290 int sc_act_nvq_pairs;
291
292 uint8_t sc_mac[ETHER_ADDR_LEN];
293 struct ethercom sc_ethercom;
294 short sc_deferred_init_done;
295 bool sc_link_active;
296
297 struct vioif_txqueue *sc_txq;
298 struct vioif_rxqueue *sc_rxq;
299
300 bool sc_has_ctrl;
301 struct vioif_ctrlqueue sc_ctrlq;
302
303 bus_dma_segment_t sc_hdr_segs[1];
304 void *sc_dmamem;
305 void *sc_kmem;
306
307 void *sc_ctl_softint;
308
309 struct workqueue *sc_txrx_workqueue;
310 bool sc_txrx_workqueue_sysctl;
311 u_int sc_tx_intr_process_limit;
312 u_int sc_tx_process_limit;
313 u_int sc_rx_intr_process_limit;
314 u_int sc_rx_process_limit;
315 };
316 #define VIRTIO_NET_TX_MAXNSEGS (16) /* XXX */
317 #define VIRTIO_NET_CTRL_MAC_MAXENTRIES (64) /* XXX */
318
319 #define VIOIF_TX_INTR_PROCESS_LIMIT 256
320 #define VIOIF_TX_PROCESS_LIMIT 256
321 #define VIOIF_RX_INTR_PROCESS_LIMIT 0U
322 #define VIOIF_RX_PROCESS_LIMIT 256
323
324 #define VIOIF_WORKQUEUE_PRI PRI_SOFTNET
325
326 /* cfattach interface functions */
327 static int vioif_match(device_t, cfdata_t, void *);
328 static void vioif_attach(device_t, device_t, void *);
329 static void vioif_deferred_init(device_t);
330 static int vioif_finalize_teardown(device_t);
331
332 /* ifnet interface functions */
333 static int vioif_init(struct ifnet *);
334 static void vioif_stop(struct ifnet *, int);
335 static void vioif_start(struct ifnet *);
336 static void vioif_start_locked(struct ifnet *, struct vioif_txqueue *);
337 static int vioif_transmit(struct ifnet *, struct mbuf *);
338 static void vioif_transmit_locked(struct ifnet *, struct vioif_txqueue *);
339 static int vioif_ioctl(struct ifnet *, u_long, void *);
340 static void vioif_watchdog(struct ifnet *);
341
342 /* rx */
343 static int vioif_add_rx_mbuf(struct vioif_rxqueue *, int);
344 static void vioif_free_rx_mbuf(struct vioif_rxqueue *, int);
345 static void vioif_populate_rx_mbufs(struct vioif_rxqueue *);
346 static void vioif_populate_rx_mbufs_locked(struct vioif_rxqueue *);
347 static void vioif_rx_queue_clear(struct vioif_rxqueue *);
348 static bool vioif_rx_deq_locked(struct vioif_softc *, struct virtio_softc *,
349 struct vioif_rxqueue *, u_int);
350 static int vioif_rx_intr(void *);
351 static void vioif_rx_handle(void *);
352 static void vioif_rx_sched_handle(struct vioif_softc *,
353 struct vioif_rxqueue *);
354 static void vioif_rx_softint(void *);
355 static void vioif_rx_drain(struct vioif_rxqueue *);
356
357 /* tx */
358 static int vioif_tx_intr(void *);
359 static void vioif_tx_handle(void *);
360 static void vioif_tx_sched_handle(struct vioif_softc *,
361 struct vioif_txqueue *);
362 static void vioif_tx_queue_clear(struct vioif_txqueue *);
363 static bool vioif_tx_deq_locked(struct vioif_softc *, struct virtio_softc *,
364 struct vioif_txqueue *, u_int);
365 static void vioif_tx_drain(struct vioif_txqueue *);
366 static void vioif_deferred_transmit(void *);
367
368 /* workqueue */
369 static struct workqueue*
370 vioif_workq_create(const char *, pri_t, int, int);
371 static void vioif_workq_destroy(struct workqueue *);
372 static void vioif_workq_work(struct work *, void *);
373 static void vioif_work_set(struct vioif_work *, void(*)(void *), void *);
374 static void vioif_work_add(struct workqueue *, struct vioif_work *);
375 static void vioif_work_wait(struct workqueue *, struct vioif_work *);
376
377 /* other control */
378 static bool vioif_is_link_up(struct vioif_softc *);
379 static void vioif_update_link_status(struct vioif_softc *);
380 static int vioif_ctrl_rx(struct vioif_softc *, int, bool);
381 static int vioif_set_promisc(struct vioif_softc *, bool);
382 static int vioif_set_allmulti(struct vioif_softc *, bool);
383 static int vioif_set_rx_filter(struct vioif_softc *);
384 static int vioif_rx_filter(struct vioif_softc *);
385 static int vioif_ctrl_intr(void *);
386 static int vioif_config_change(struct virtio_softc *);
387 static void vioif_ctl_softint(void *);
388 static int vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *, int);
389 static void vioif_enable_interrupt_vqpairs(struct vioif_softc *);
390 static void vioif_disable_interrupt_vqpairs(struct vioif_softc *);
391 static int vioif_setup_sysctl(struct vioif_softc *);
392
393 CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
394 vioif_match, vioif_attach, NULL, NULL);
395
396 static int
397 vioif_match(device_t parent, cfdata_t match, void *aux)
398 {
399 struct virtio_attach_args *va = aux;
400
401 if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
402 return 1;
403
404 return 0;
405 }
406
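/* create a dmamap, reporting the given usage string on failure */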
407 static int
408 vioif_dmamap_create(struct vioif_softc *sc, bus_dmamap_t *map,
409 bus_size_t size, int nsegs, const char *usage)
410 {
411 int r;
412
413 r = bus_dmamap_create(virtio_dmat(sc->sc_virtio), size,
414 nsegs, size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, map);
415
416 if (r != 0) {
417 aprint_error_dev(sc->sc_dev, "%s dmamap creation failed, "
418 "error code %d\n", usage, r);
419 }
420
421 return r;
422 }
423
424 static void
425 vioif_dmamap_destroy(struct vioif_softc *sc, bus_dmamap_t *map)
426 {
427
428 if (*map) {
429 bus_dmamap_destroy(virtio_dmat(sc->sc_virtio), *map);
430 *map = NULL;
431 }
432 }
433
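/* create a dmamap and load the given buffer into it */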
434 static int
435 vioif_dmamap_create_load(struct vioif_softc *sc, bus_dmamap_t *map,
436 void *buf, bus_size_t size, int nsegs, int rw, const char *usage)
437 {
438 int r;
439
440 r = vioif_dmamap_create(sc, map, size, nsegs, usage);
441 if (r != 0)
442 return 1;
443
444 r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), *map, buf,
445 size, NULL, rw | BUS_DMA_NOWAIT);
446 if (r != 0) {
447 vioif_dmamap_destroy(sc, map);
448 aprint_error_dev(sc->sc_dev, "%s dmamap load failed. "
449 "error code %d\n", usage, r);
450 }
451
452 return r;
453 }
454
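/* carve "size" bytes out of the region at *p and advance *p past it */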
455 static void *
456 vioif_assign_mem(intptr_t *p, size_t size)
457 {
458 intptr_t rv;
459
460 rv = *p;
461 *p += size;
462
463 return (void *)rv;
464 }
465
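/*
 * Allocate the rx/tx queue arrays and one virtqueue per queue, plus one
 * more virtqueue for the control channel if it is available.
 */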
466 static void
467 vioif_alloc_queues(struct vioif_softc *sc)
468 {
469 int nvq_pairs = sc->sc_max_nvq_pairs;
470 int nvqs = nvq_pairs * 2;
471 int i;
472
473 KASSERT(nvq_pairs <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX);
474
475 sc->sc_rxq = kmem_zalloc(sizeof(sc->sc_rxq[0]) * nvq_pairs,
476 KM_SLEEP);
477 sc->sc_txq = kmem_zalloc(sizeof(sc->sc_txq[0]) * nvq_pairs,
478 KM_SLEEP);
479
480 if (sc->sc_has_ctrl)
481 nvqs++;
482
483 sc->sc_vqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * nvqs, KM_SLEEP);
484 nvqs = 0;
485 for (i = 0; i < nvq_pairs; i++) {
486 sc->sc_rxq[i].rxq_vq = &sc->sc_vqs[nvqs++];
487 sc->sc_txq[i].txq_vq = &sc->sc_vqs[nvqs++];
488 }
489
490 if (sc->sc_has_ctrl)
491 sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[nvqs++];
492 }
493
494 static void
495 vioif_free_queues(struct vioif_softc *sc)
496 {
497 int nvq_pairs = sc->sc_max_nvq_pairs;
498 int nvqs = nvq_pairs * 2;
499
500 if (sc->sc_ctrlq.ctrlq_vq)
501 nvqs++;
502
503 if (sc->sc_txq) {
504 kmem_free(sc->sc_txq, sizeof(sc->sc_txq[0]) * nvq_pairs);
505 sc->sc_txq = NULL;
506 }
507
508 if (sc->sc_rxq) {
509 kmem_free(sc->sc_rxq, sizeof(sc->sc_rxq[0]) * nvq_pairs);
510 sc->sc_rxq = NULL;
511 }
512
513 if (sc->sc_vqs) {
514 kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs);
515 sc->sc_vqs = NULL;
516 }
517 }
518
519 /* allocate memory */
520 /*
521 * dma memory is used for:
522 * rxq_hdrs[slot]: metadata array for received frames (READ)
523 * txq_hdrs[slot]: metadata array for frames to be sent (WRITE)
524 * ctrlq_cmd: command to be sent via ctrl vq (WRITE)
525 * ctrlq_status: return value for a command via ctrl vq (READ)
526 * ctrlq_rx: parameter for a VIRTIO_NET_CTRL_RX class command
527 * (WRITE)
528 * ctrlq_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
529 * class command (WRITE)
530 * ctrlq_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
531 * class command (WRITE)
    532  * Only one instance of each ctrlq_* structure is allocated; they are
    533  * protected by the ctrlq_inuse variable and the ctrlq_wait condvar.
534 */
535 /*
536 * dynamically allocated memory is used for:
537 * rxq_hdr_dmamaps[slot]: bus_dmamap_t array for sc_rx_hdrs[slot]
538 * txq_hdr_dmamaps[slot]: bus_dmamap_t array for sc_tx_hdrs[slot]
539 * rxq_dmamaps[slot]: bus_dmamap_t array for received payload
540 * txq_dmamaps[slot]: bus_dmamap_t array for sent payload
541 * rxq_mbufs[slot]: mbuf pointer array for received frames
542 * txq_mbufs[slot]: mbuf pointer array for sent frames
543 */
544 static int
545 vioif_alloc_mems(struct vioif_softc *sc)
546 {
547 struct virtio_softc *vsc = sc->sc_virtio;
548 struct vioif_txqueue *txq;
549 struct vioif_rxqueue *rxq;
550 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
551 int allocsize, allocsize2, r, rsegs, i, qid;
552 void *vaddr;
553 intptr_t p;
554
555 allocsize = 0;
556 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
557 rxq = &sc->sc_rxq[qid];
558 txq = &sc->sc_txq[qid];
559
560 allocsize +=
561 sizeof(struct virtio_net_hdr) * rxq->rxq_vq->vq_num;
562 allocsize +=
563 sizeof(struct virtio_net_hdr) * txq->txq_vq->vq_num;
564 }
565 if (sc->sc_has_ctrl) {
566 allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
567 allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
568 allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
569 allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
570 + sizeof(struct virtio_net_ctrl_mac_tbl)
571 + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
572 allocsize += sizeof(struct virtio_net_ctrl_mq) * 1;
573 }
574 r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
575 &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
576 if (r != 0) {
577 aprint_error_dev(sc->sc_dev,
578 "DMA memory allocation failed, size %d, "
579 "error code %d\n", allocsize, r);
580 goto err_none;
581 }
582 r = bus_dmamem_map(virtio_dmat(vsc),
583 &sc->sc_hdr_segs[0], 1, allocsize, &vaddr, BUS_DMA_NOWAIT);
584 if (r != 0) {
585 aprint_error_dev(sc->sc_dev,
586 "DMA memory map failed, error code %d\n", r);
587 goto err_dmamem_alloc;
588 }
589
590 memset(vaddr, 0, allocsize);
591 sc->sc_dmamem = vaddr;
592 p = (intptr_t) vaddr;
593
594 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
595 rxq = &sc->sc_rxq[qid];
596 txq = &sc->sc_txq[qid];
597
598 rxq->rxq_hdrs = vioif_assign_mem(&p,
599 sizeof(rxq->rxq_hdrs[0]) * rxq->rxq_vq->vq_num);
600 txq->txq_hdrs = vioif_assign_mem(&p,
601 sizeof(txq->txq_hdrs[0]) * txq->txq_vq->vq_num);
602 }
603 if (sc->sc_has_ctrl) {
604 ctrlq->ctrlq_cmd = vioif_assign_mem(&p,
605 sizeof(*ctrlq->ctrlq_cmd));
606 ctrlq->ctrlq_status = vioif_assign_mem(&p,
607 sizeof(*ctrlq->ctrlq_status));
608 ctrlq->ctrlq_rx = vioif_assign_mem(&p,
609 sizeof(*ctrlq->ctrlq_rx));
610 ctrlq->ctrlq_mac_tbl_uc = vioif_assign_mem(&p,
611 sizeof(*ctrlq->ctrlq_mac_tbl_uc));
612 ctrlq->ctrlq_mac_tbl_mc = vioif_assign_mem(&p,
613 sizeof(*ctrlq->ctrlq_mac_tbl_mc)
614 + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES);
615 ctrlq->ctrlq_mq = vioif_assign_mem(&p, sizeof(*ctrlq->ctrlq_mq));
616 }
617
618 allocsize2 = 0;
619 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
620 int rxqsize, txqsize;
621
622 rxq = &sc->sc_rxq[qid];
623 txq = &sc->sc_txq[qid];
624 rxqsize = rxq->rxq_vq->vq_num;
625 txqsize = txq->txq_vq->vq_num;
626
627 allocsize2 += sizeof(rxq->rxq_dmamaps[0]) * rxqsize;
628 allocsize2 += sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize;
629 allocsize2 += sizeof(rxq->rxq_mbufs[0]) * rxqsize;
630
631 allocsize2 += sizeof(txq->txq_dmamaps[0]) * txqsize;
632 allocsize2 += sizeof(txq->txq_hdr_dmamaps[0]) * txqsize;
633 allocsize2 += sizeof(txq->txq_mbufs[0]) * txqsize;
634 }
635 vaddr = kmem_zalloc(allocsize2, KM_SLEEP);
636 sc->sc_kmem = vaddr;
637 p = (intptr_t) vaddr;
638
639 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
640 int rxqsize, txqsize;
641 rxq = &sc->sc_rxq[qid];
642 txq = &sc->sc_txq[qid];
643 rxqsize = rxq->rxq_vq->vq_num;
644 txqsize = txq->txq_vq->vq_num;
645
646 rxq->rxq_hdr_dmamaps = vioif_assign_mem(&p,
647 sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize);
648 txq->txq_hdr_dmamaps = vioif_assign_mem(&p,
649 sizeof(txq->txq_hdr_dmamaps[0]) * txqsize);
650 rxq->rxq_dmamaps = vioif_assign_mem(&p,
651 sizeof(rxq->rxq_dmamaps[0]) * rxqsize);
652 txq->txq_dmamaps = vioif_assign_mem(&p,
653 sizeof(txq->txq_dmamaps[0]) * txqsize);
654 rxq->rxq_mbufs = vioif_assign_mem(&p,
655 sizeof(rxq->rxq_mbufs[0]) * rxqsize);
656 txq->txq_mbufs = vioif_assign_mem(&p,
657 sizeof(txq->txq_mbufs[0]) * txqsize);
658 }
659
660 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
661 rxq = &sc->sc_rxq[qid];
662 txq = &sc->sc_txq[qid];
663
664 for (i = 0; i < rxq->rxq_vq->vq_num; i++) {
665 r = vioif_dmamap_create_load(sc, &rxq->rxq_hdr_dmamaps[i],
666 &rxq->rxq_hdrs[i], sizeof(rxq->rxq_hdrs[0]), 1,
667 BUS_DMA_READ, "rx header");
668 if (r != 0)
669 goto err_reqs;
670
671 r = vioif_dmamap_create(sc, &rxq->rxq_dmamaps[i],
672 MCLBYTES, 1, "rx payload");
673 if (r != 0)
674 goto err_reqs;
675 }
676
677 for (i = 0; i < txq->txq_vq->vq_num; i++) {
678 r = vioif_dmamap_create_load(sc, &txq->txq_hdr_dmamaps[i],
679 &txq->txq_hdrs[i], sizeof(txq->txq_hdrs[0]), 1,
    680 			    BUS_DMA_WRITE, "tx header");
681 if (r != 0)
682 goto err_reqs;
683
684 r = vioif_dmamap_create(sc, &txq->txq_dmamaps[i], ETHER_MAX_LEN,
685 VIRTIO_NET_TX_MAXNSEGS, "tx payload");
686 if (r != 0)
687 goto err_reqs;
688 }
689 }
690
691 if (sc->sc_has_ctrl) {
692 /* control vq class & command */
693 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_cmd_dmamap,
694 ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd), 1,
695 BUS_DMA_WRITE, "control command");
696 if (r != 0)
697 goto err_reqs;
698
699 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_status_dmamap,
700 ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status), 1,
701 BUS_DMA_READ, "control status");
702 if (r != 0)
703 goto err_reqs;
704
705 /* control vq rx mode command parameter */
706 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_rx_dmamap,
707 ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx), 1,
708 BUS_DMA_WRITE, "rx mode control command");
709 if (r != 0)
710 goto err_reqs;
711
712 /* multiqueue set command */
713 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_mq_dmamap,
714 ctrlq->ctrlq_mq, sizeof(*ctrlq->ctrlq_mq), 1,
715 BUS_DMA_WRITE, "multiqueue set command");
716 if (r != 0)
717 goto err_reqs;
718
719 /* control vq MAC filter table for unicast */
720 /* do not load now since its length is variable */
721 r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_uc_dmamap,
722 sizeof(*ctrlq->ctrlq_mac_tbl_uc) + 0, 1,
723 "unicast MAC address filter command");
724 if (r != 0)
725 goto err_reqs;
726
727 /* control vq MAC filter table for multicast */
728 r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_mc_dmamap,
729 sizeof(*ctrlq->ctrlq_mac_tbl_mc)
730 + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES, 1,
731 "multicast MAC address filter command");
732 }
733
734 return 0;
735
736 err_reqs:
737 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_mc_dmamap);
738 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_uc_dmamap);
739 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_rx_dmamap);
740 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_status_dmamap);
741 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_cmd_dmamap);
742 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
743 rxq = &sc->sc_rxq[qid];
744 txq = &sc->sc_txq[qid];
745
746 for (i = 0; i < txq->txq_vq->vq_num; i++) {
747 vioif_dmamap_destroy(sc, &txq->txq_dmamaps[i]);
748 vioif_dmamap_destroy(sc, &txq->txq_hdr_dmamaps[i]);
749 }
750 for (i = 0; i < rxq->rxq_vq->vq_num; i++) {
751 vioif_dmamap_destroy(sc, &rxq->rxq_dmamaps[i]);
752 vioif_dmamap_destroy(sc, &rxq->rxq_hdr_dmamaps[i]);
753 }
754 }
755 if (sc->sc_kmem) {
756 kmem_free(sc->sc_kmem, allocsize2);
757 sc->sc_kmem = NULL;
758 }
759 bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, allocsize);
760 err_dmamem_alloc:
761 bus_dmamem_free(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1);
762 err_none:
763 return -1;
764 }
765
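/*
 * Attach: negotiate features, obtain (or generate) the MAC address,
 * allocate the queue pairs, softints and workqueue, and register the
 * interface with the network stack.
 */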
766 static void
767 vioif_attach(device_t parent, device_t self, void *aux)
768 {
769 struct vioif_softc *sc = device_private(self);
770 struct virtio_softc *vsc = device_private(parent);
771 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
772 struct vioif_txqueue *txq;
773 struct vioif_rxqueue *rxq;
774 uint32_t features, req_features;
775 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
776 u_int softint_flags;
777 int r, i, nvqs=0, req_flags;
778 char xnamebuf[MAXCOMLEN];
779
780 if (virtio_child(vsc) != NULL) {
781 aprint_normal(": child already attached for %s; "
782 "something wrong...\n", device_xname(parent));
783 return;
784 }
785
786 sc->sc_dev = self;
787 sc->sc_virtio = vsc;
788 sc->sc_link_active = false;
789
790 sc->sc_max_nvq_pairs = 1;
791 sc->sc_req_nvq_pairs = 1;
792 sc->sc_act_nvq_pairs = 1;
793 sc->sc_txrx_workqueue_sysctl = true;
794 sc->sc_tx_intr_process_limit = VIOIF_TX_INTR_PROCESS_LIMIT;
795 sc->sc_tx_process_limit = VIOIF_TX_PROCESS_LIMIT;
796 sc->sc_rx_intr_process_limit = VIOIF_RX_INTR_PROCESS_LIMIT;
797 sc->sc_rx_process_limit = VIOIF_RX_PROCESS_LIMIT;
798
799 snprintf(xnamebuf, sizeof(xnamebuf), "%s_txrx", device_xname(self));
800 sc->sc_txrx_workqueue = vioif_workq_create(xnamebuf, VIOIF_WORKQUEUE_PRI,
801 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
802 if (sc->sc_txrx_workqueue == NULL)
803 goto err;
804
805 req_flags = 0;
806
807 #ifdef VIOIF_MPSAFE
808 req_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
809 #endif
810 req_flags |= VIRTIO_F_PCI_INTR_MSIX;
811
812 req_features =
813 VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
814 VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY;
815 #ifdef VIOIF_MULTIQ
816 req_features |= VIRTIO_NET_F_MQ;
817 #endif
818 virtio_child_attach_start(vsc, self, IPL_NET, NULL,
819 vioif_config_change, virtio_vq_intrhand, req_flags,
820 req_features, VIRTIO_NET_FLAG_BITS);
821
822 features = virtio_features(vsc);
823
824 if (features & VIRTIO_NET_F_MAC) {
825 for (i = 0; i < __arraycount(sc->sc_mac); i++) {
826 sc->sc_mac[i] = virtio_read_device_config_1(vsc,
827 VIRTIO_NET_CONFIG_MAC + i);
828 }
829 } else {
830 /* code stolen from sys/net/if_tap.c */
831 struct timeval tv;
832 uint32_t ui;
833 getmicrouptime(&tv);
834 ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
835 memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
836 for (i = 0; i < __arraycount(sc->sc_mac); i++) {
837 virtio_write_device_config_1(vsc,
838 VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
839 }
840 }
841
842 aprint_normal_dev(self, "Ethernet address %s\n",
843 ether_sprintf(sc->sc_mac));
844
845 if ((features & VIRTIO_NET_F_CTRL_VQ) &&
846 (features & VIRTIO_NET_F_CTRL_RX)) {
847 sc->sc_has_ctrl = true;
848
849 cv_init(&ctrlq->ctrlq_wait, "ctrl_vq");
850 mutex_init(&ctrlq->ctrlq_wait_lock, MUTEX_DEFAULT, IPL_NET);
851 ctrlq->ctrlq_inuse = FREE;
852 } else {
853 sc->sc_has_ctrl = false;
854 }
855
856 if (sc->sc_has_ctrl && (features & VIRTIO_NET_F_MQ)) {
857 sc->sc_max_nvq_pairs = virtio_read_device_config_2(vsc,
858 VIRTIO_NET_CONFIG_MAX_VQ_PAIRS);
859
860 if (sc->sc_max_nvq_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
861 goto err;
862
863 /* Limit the number of queue pairs to use */
864 sc->sc_req_nvq_pairs = MIN(sc->sc_max_nvq_pairs, ncpu);
865 }
866
867 vioif_alloc_queues(sc);
868 virtio_child_attach_set_vqs(vsc, sc->sc_vqs, sc->sc_req_nvq_pairs);
869
870 #ifdef VIOIF_MPSAFE
871 softint_flags = SOFTINT_NET | SOFTINT_MPSAFE;
872 #else
873 softint_flags = SOFTINT_NET;
874 #endif
875
    876 	/*
    877 	 * Allocate per-queue locks, softint handlers, and virtqueues.
    878 	 */
879 for (i = 0; i < sc->sc_max_nvq_pairs; i++) {
880 rxq = &sc->sc_rxq[i];
881 txq = &sc->sc_txq[i];
882 char qname[32];
883
884 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
885
886 rxq->rxq_softint = softint_establish(softint_flags,
887 vioif_rx_softint, rxq);
888 if (rxq->rxq_softint == NULL) {
889 aprint_error_dev(self, "cannot establish rx softint\n");
890 goto err;
891 }
892 rxq->rxq_handle_si = softint_establish(softint_flags,
893 vioif_rx_handle, rxq);
894 if (rxq->rxq_handle_si == NULL) {
895 aprint_error_dev(self, "cannot establish rx softint\n");
896 goto err;
897 }
898
899 snprintf(qname, sizeof(qname), "rx%d", i);
900 r = virtio_alloc_vq(vsc, rxq->rxq_vq, nvqs,
901 MCLBYTES+sizeof(struct virtio_net_hdr), 2, qname);
902 if (r != 0)
903 goto err;
904 nvqs++;
905 rxq->rxq_vq->vq_intrhand = vioif_rx_intr;
906 rxq->rxq_vq->vq_intrhand_arg = (void *)rxq;
907 rxq->rxq_stopping = true;
908 vioif_work_set(&rxq->rxq_work, vioif_rx_handle, rxq);
909
910 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
911
912 txq->txq_deferred_transmit = softint_establish(softint_flags,
913 vioif_deferred_transmit, txq);
914 if (txq->txq_deferred_transmit == NULL) {
915 aprint_error_dev(self, "cannot establish tx softint\n");
916 goto err;
917 }
918 txq->txq_handle_si = softint_establish(softint_flags,
919 vioif_tx_handle, txq);
920 if (txq->txq_handle_si == NULL) {
921 aprint_error_dev(self, "cannot establish tx softint\n");
922 goto err;
923 }
924
925 snprintf(qname, sizeof(qname), "tx%d", i);
926 r = virtio_alloc_vq(vsc, txq->txq_vq, nvqs,
927 sizeof(struct virtio_net_hdr)
928 + (ETHER_MAX_LEN - ETHER_HDR_LEN),
929 VIRTIO_NET_TX_MAXNSEGS + 1, qname);
930 if (r != 0)
931 goto err;
932 nvqs++;
933 txq->txq_vq->vq_intrhand = vioif_tx_intr;
934 txq->txq_vq->vq_intrhand_arg = (void *)txq;
935 txq->txq_link_active = sc->sc_link_active;
936 txq->txq_stopping = false;
937 txq->txq_intrq = pcq_create(txq->txq_vq->vq_num, KM_SLEEP);
938 vioif_work_set(&txq->txq_work, vioif_tx_handle, txq);
939 }
940
941 if (sc->sc_has_ctrl) {
942 /*
943 * Allocating a virtqueue for control channel
944 */
945 r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, nvqs,
946 NBPG, 1, "control");
947 if (r != 0) {
948 aprint_error_dev(self, "failed to allocate "
949 "a virtqueue for control channel, error code %d\n",
950 r);
951
952 sc->sc_has_ctrl = false;
953 cv_destroy(&ctrlq->ctrlq_wait);
954 mutex_destroy(&ctrlq->ctrlq_wait_lock);
955 } else {
956 nvqs++;
957 ctrlq->ctrlq_vq->vq_intrhand = vioif_ctrl_intr;
958 ctrlq->ctrlq_vq->vq_intrhand_arg = (void *) ctrlq;
959 }
960 }
961
962 sc->sc_ctl_softint = softint_establish(softint_flags,
963 vioif_ctl_softint, sc);
964 if (sc->sc_ctl_softint == NULL) {
965 aprint_error_dev(self, "cannot establish ctl softint\n");
966 goto err;
967 }
968
969 if (vioif_alloc_mems(sc) < 0)
970 goto err;
971
972 if (virtio_child_attach_finish(vsc) != 0)
973 goto err;
974
975 if (vioif_setup_sysctl(sc) != 0) {
976 aprint_error_dev(self, "unable to create sysctl node\n");
977 /* continue */
978 }
979
980 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
981 ifp->if_softc = sc;
982 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
983 #ifdef VIOIF_MPSAFE
984 ifp->if_extflags = IFEF_MPSAFE;
985 #endif
986 ifp->if_start = vioif_start;
987 if (sc->sc_req_nvq_pairs > 1)
988 ifp->if_transmit = vioif_transmit;
989 ifp->if_ioctl = vioif_ioctl;
990 ifp->if_init = vioif_init;
991 ifp->if_stop = vioif_stop;
992 ifp->if_capabilities = 0;
993 ifp->if_watchdog = vioif_watchdog;
994 txq = &sc->sc_txq[0];
995 IFQ_SET_MAXLEN(&ifp->if_snd, MAX(txq->txq_vq->vq_num, IFQ_MAXLEN));
996 IFQ_SET_READY(&ifp->if_snd);
997
998 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
999
1000 if_attach(ifp);
1001 if_deferred_start_init(ifp, NULL);
1002 ether_ifattach(ifp, sc->sc_mac);
1003
1004 return;
1005
1006 err:
1007 for (i = 0; i < sc->sc_max_nvq_pairs; i++) {
1008 rxq = &sc->sc_rxq[i];
1009 txq = &sc->sc_txq[i];
1010
1011 if (rxq->rxq_lock) {
1012 mutex_obj_free(rxq->rxq_lock);
1013 rxq->rxq_lock = NULL;
1014 }
1015
1016 if (rxq->rxq_softint) {
1017 softint_disestablish(rxq->rxq_softint);
1018 rxq->rxq_softint = NULL;
1019 }
1020
1021 if (rxq->rxq_handle_si) {
1022 softint_disestablish(rxq->rxq_handle_si);
1023 rxq->rxq_handle_si = NULL;
1024 }
1025
1026 if (txq->txq_lock) {
1027 mutex_obj_free(txq->txq_lock);
1028 txq->txq_lock = NULL;
1029 }
1030
1031 if (txq->txq_handle_si) {
1032 softint_disestablish(txq->txq_handle_si);
1033 txq->txq_handle_si = NULL;
1034 }
1035
1036 if (txq->txq_deferred_transmit) {
1037 softint_disestablish(txq->txq_deferred_transmit);
1038 txq->txq_deferred_transmit = NULL;
1039 }
1040
1041 if (txq->txq_intrq) {
1042 pcq_destroy(txq->txq_intrq);
1043 txq->txq_intrq = NULL;
1044 }
1045 }
1046
1047 if (sc->sc_has_ctrl) {
1048 cv_destroy(&ctrlq->ctrlq_wait);
1049 mutex_destroy(&ctrlq->ctrlq_wait_lock);
1050 }
1051
1052 while (nvqs > 0)
1053 virtio_free_vq(vsc, &sc->sc_vqs[--nvqs]);
1054
1055 vioif_free_queues(sc);
1056 virtio_child_attach_failed(vsc);
1057 config_finalize_register(self, vioif_finalize_teardown);
1058
1059 return;
1060 }
1061
1062 static int
1063 vioif_finalize_teardown(device_t self)
1064 {
1065 struct vioif_softc *sc = device_private(self);
1066
1067 if (sc->sc_txrx_workqueue != NULL) {
1068 vioif_workq_destroy(sc->sc_txrx_workqueue);
1069 sc->sc_txrx_workqueue = NULL;
1070 }
1071
1072 return 0;
1073 }
1074
   1075 /* interrupts are needed to turn promiscuous mode off */
1076 static void
1077 vioif_deferred_init(device_t self)
1078 {
1079 struct vioif_softc *sc = device_private(self);
1080 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1081 int r;
1082
1083 if (ifp->if_flags & IFF_PROMISC)
1084 return;
1085
1086 r = vioif_set_promisc(sc, false);
1087 if (r != 0)
1088 aprint_error_dev(self, "resetting promisc mode failed, "
1089 "error code %d\n", r);
1090 }
1091
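/* enable tx/rx virtqueue interrupts for all active queue pairs */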
1092 static void
1093 vioif_enable_interrupt_vqpairs(struct vioif_softc *sc)
1094 {
1095 struct virtio_softc *vsc = sc->sc_virtio;
1096 struct vioif_txqueue *txq;
1097 struct vioif_rxqueue *rxq;
1098 int i;
1099
1100 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1101 txq = &sc->sc_txq[i];
1102 rxq = &sc->sc_rxq[i];
1103
1104 virtio_start_vq_intr(vsc, txq->txq_vq);
1105 virtio_start_vq_intr(vsc, rxq->rxq_vq);
1106 }
1107 }
1108
1109 static void
1110 vioif_disable_interrupt_vqpairs(struct vioif_softc *sc)
1111 {
1112 struct virtio_softc *vsc = sc->sc_virtio;
1113 struct vioif_txqueue *txq;
1114 struct vioif_rxqueue *rxq;
1115 int i;
1116
1117 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1118 txq = &sc->sc_txq[i];
1119 rxq = &sc->sc_rxq[i];
1120
1121 virtio_stop_vq_intr(vsc, txq->txq_vq);
1122 virtio_stop_vq_intr(vsc, rxq->rxq_vq);
1123 }
1124 }
1125
1126 /*
1127 * Interface functions for ifnet
1128 */
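/*
 * Bring the interface up: reinitialize the device, repopulate the rx
 * virtqueues, negotiate the number of active queue pairs and enable
 * interrupts.
 */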
1129 static int
1130 vioif_init(struct ifnet *ifp)
1131 {
1132 struct vioif_softc *sc = ifp->if_softc;
1133 struct virtio_softc *vsc = sc->sc_virtio;
1134 struct vioif_rxqueue *rxq;
1135 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1136 int r, i;
1137
1138 vioif_stop(ifp, 0);
1139
1140 virtio_reinit_start(vsc);
1141 virtio_negotiate_features(vsc, virtio_features(vsc));
1142
1143 for (i = 0; i < sc->sc_req_nvq_pairs; i++) {
1144 rxq = &sc->sc_rxq[i];
1145
   1146 		/* rxq_stopping must be set to false before vioif_populate_rx_mbufs */
1147 rxq->rxq_stopping = false;
1148 vioif_populate_rx_mbufs(rxq);
1149 }
1150
1151 virtio_reinit_end(vsc);
1152
1153 if (sc->sc_has_ctrl)
1154 virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);
1155
1156 r = vioif_ctrl_mq_vq_pairs_set(sc, sc->sc_req_nvq_pairs);
1157 if (r == 0)
1158 sc->sc_act_nvq_pairs = sc->sc_req_nvq_pairs;
1159 else
1160 sc->sc_act_nvq_pairs = 1;
1161
1162 for (i = 0; i < sc->sc_act_nvq_pairs; i++)
1163 sc->sc_txq[i].txq_stopping = false;
1164
1165 vioif_enable_interrupt_vqpairs(sc);
1166
1167 if (!sc->sc_deferred_init_done) {
1168 sc->sc_deferred_init_done = 1;
1169 if (sc->sc_has_ctrl)
1170 vioif_deferred_init(sc->sc_dev);
1171 }
1172
1173 vioif_update_link_status(sc);
1174 ifp->if_flags |= IFF_RUNNING;
1175 ifp->if_flags &= ~IFF_OACTIVE;
1176 vioif_rx_filter(sc);
1177
1178 return 0;
1179 }
1180
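/*
 * Stop the interface: mark all queues as stopping, disable interrupts,
 * reset the device (the only way to stop its DMA), wait for the handlers
 * to finish and then clear/drain the queues.
 */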
1181 static void
1182 vioif_stop(struct ifnet *ifp, int disable)
1183 {
1184 struct vioif_softc *sc = ifp->if_softc;
1185 struct virtio_softc *vsc = sc->sc_virtio;
1186 struct vioif_txqueue *txq;
1187 struct vioif_rxqueue *rxq;
1188 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1189 int i;
1190
1191 /* Take the locks to ensure that ongoing TX/RX finish */
1192 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1193 txq = &sc->sc_txq[i];
1194 rxq = &sc->sc_rxq[i];
1195
1196 mutex_enter(txq->txq_lock);
1197 txq->txq_stopping = true;
1198 mutex_exit(txq->txq_lock);
1199
1200 mutex_enter(rxq->rxq_lock);
1201 rxq->rxq_stopping = true;
1202 mutex_exit(rxq->rxq_lock);
1203 }
1204
1205 /* disable interrupts */
1206 vioif_disable_interrupt_vqpairs(sc);
1207
1208 if (sc->sc_has_ctrl)
1209 virtio_stop_vq_intr(vsc, ctrlq->ctrlq_vq);
1210
1211 /* only way to stop I/O and DMA is resetting... */
1212 virtio_reset(vsc);
1213
   1214 	/* rendezvous for the handlers to finish */
1215 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1216 txq = &sc->sc_txq[i];
1217 rxq = &sc->sc_rxq[i];
1218
1219 mutex_enter(txq->txq_lock);
1220 mutex_exit(txq->txq_lock);
1221
1222 mutex_enter(rxq->rxq_lock);
1223 mutex_exit(rxq->rxq_lock);
1224
1225 vioif_work_wait(sc->sc_txrx_workqueue, &txq->txq_work);
1226 vioif_work_wait(sc->sc_txrx_workqueue, &rxq->rxq_work);
1227 }
1228
1229 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1230 vioif_rx_queue_clear(&sc->sc_rxq[i]);
1231 vioif_tx_queue_clear(&sc->sc_txq[i]);
1232 }
1233
1234 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1235 sc->sc_link_active = false;
1236
1237 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1238 txq = &sc->sc_txq[i];
1239 rxq = &sc->sc_rxq[i];
1240
1241 txq->txq_link_active = false;
1242
1243 if (disable)
1244 vioif_rx_drain(rxq);
1245
1246 vioif_tx_drain(txq);
1247 }
1248 }
1249
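/*
 * Common transmit path for if_start and if_transmit: dequeue mbufs,
 * load them into DMA maps (defragmenting if necessary) and enqueue
 * header/payload descriptors on the tx virtqueue.
 */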
1250 static void
1251 vioif_send_common_locked(struct ifnet *ifp, struct vioif_txqueue *txq,
1252 bool is_transmit)
1253 {
1254 struct vioif_softc *sc = ifp->if_softc;
1255 struct virtio_softc *vsc = sc->sc_virtio;
1256 struct virtqueue *vq = txq->txq_vq;
1257 struct mbuf *m;
1258 int queued = 0;
1259
1260 KASSERT(mutex_owned(txq->txq_lock));
1261
1262 if ((ifp->if_flags & IFF_RUNNING) == 0)
1263 return;
1264
1265 if (!txq->txq_link_active || txq->txq_stopping)
1266 return;
1267
1268 if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
1269 return;
1270
1271 for (;;) {
1272 int slot, r;
1273
1274 if (is_transmit)
1275 m = pcq_get(txq->txq_intrq);
1276 else
1277 IFQ_DEQUEUE(&ifp->if_snd, m);
1278
1279 if (m == NULL)
1280 break;
1281
1282 r = virtio_enqueue_prep(vsc, vq, &slot);
1283 if (r == EAGAIN) {
1284 ifp->if_flags |= IFF_OACTIVE;
1285 m_freem(m);
1286 break;
1287 }
1288 if (r != 0)
1289 panic("enqueue_prep for a tx buffer");
1290
1291 r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
1292 txq->txq_dmamaps[slot], m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1293 if (r != 0) {
1294 /* maybe just too fragmented */
1295 struct mbuf *newm;
1296
1297 newm = m_defrag(m, M_NOWAIT);
1298 if (newm == NULL) {
1299 aprint_error_dev(sc->sc_dev,
1300 "m_defrag() failed\n");
1301 goto skip;
1302 }
1303
1304 m = newm;
1305 r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
1306 txq->txq_dmamaps[slot], m,
1307 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1308 if (r != 0) {
1309 aprint_error_dev(sc->sc_dev,
1310 "tx dmamap load failed, error code %d\n",
1311 r);
1312 skip:
1313 m_freem(m);
1314 virtio_enqueue_abort(vsc, vq, slot);
1315 continue;
1316 }
1317 }
1318
1319 /* This should actually never fail */
1320 r = virtio_enqueue_reserve(vsc, vq, slot,
1321 txq->txq_dmamaps[slot]->dm_nsegs + 1);
1322 if (r != 0) {
1323 aprint_error_dev(sc->sc_dev,
1324 "virtio_enqueue_reserve failed, error code %d\n",
1325 r);
1326 bus_dmamap_unload(virtio_dmat(vsc),
1327 txq->txq_dmamaps[slot]);
1328 /* slot already freed by virtio_enqueue_reserve */
1329 m_freem(m);
1330 continue;
1331 }
1332
1333 txq->txq_mbufs[slot] = m;
1334
1335 memset(&txq->txq_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
1336 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot],
1337 0, txq->txq_dmamaps[slot]->dm_mapsize,
1338 BUS_DMASYNC_PREWRITE);
1339 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot],
1340 0, txq->txq_hdr_dmamaps[slot]->dm_mapsize,
1341 BUS_DMASYNC_PREWRITE);
1342 virtio_enqueue(vsc, vq, slot, txq->txq_hdr_dmamaps[slot], true);
1343 virtio_enqueue(vsc, vq, slot, txq->txq_dmamaps[slot], true);
1344 virtio_enqueue_commit(vsc, vq, slot, false);
1345
1346 queued++;
1347 bpf_mtap(ifp, m, BPF_D_OUT);
1348 }
1349
1350 if (queued > 0) {
1351 virtio_enqueue_commit(vsc, vq, -1, true);
1352 ifp->if_timer = 5;
1353 }
1354 }
1355
1356 static void
1357 vioif_start_locked(struct ifnet *ifp, struct vioif_txqueue *txq)
1358 {
1359
1360 /*
1361 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
1362 */
1363 vioif_send_common_locked(ifp, txq, false);
1364
1365 }
1366
1367 static void
1368 vioif_start(struct ifnet *ifp)
1369 {
1370 struct vioif_softc *sc = ifp->if_softc;
1371 struct vioif_txqueue *txq = &sc->sc_txq[0];
1372
1373 #ifdef VIOIF_MPSAFE
1374 KASSERT(if_is_mpsafe(ifp));
1375 #endif
1376
1377 mutex_enter(txq->txq_lock);
1378 if (!txq->txq_stopping)
1379 vioif_start_locked(ifp, txq);
1380 mutex_exit(txq->txq_lock);
1381 }
1382
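/* choose a tx queue based on the current CPU index */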
1383 static inline int
1384 vioif_select_txqueue(struct ifnet *ifp, struct mbuf *m)
1385 {
1386 struct vioif_softc *sc = ifp->if_softc;
1387 u_int cpuid = cpu_index(curcpu());
1388
1389 return cpuid % sc->sc_act_nvq_pairs;
1390 }
1391
1392 static void
1393 vioif_transmit_locked(struct ifnet *ifp, struct vioif_txqueue *txq)
1394 {
1395
1396 vioif_send_common_locked(ifp, txq, true);
1397 }
1398
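/*
 * if_transmit callback used when multiple queue pairs are active:
 * stash the mbuf on the selected queue's pcq and send it immediately
 * if the queue lock can be taken without blocking.
 */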
1399 static int
1400 vioif_transmit(struct ifnet *ifp, struct mbuf *m)
1401 {
1402 struct vioif_softc *sc = ifp->if_softc;
1403 struct vioif_txqueue *txq;
1404 int qid;
1405
1406 qid = vioif_select_txqueue(ifp, m);
1407 txq = &sc->sc_txq[qid];
1408
1409 if (__predict_false(!pcq_put(txq->txq_intrq, m))) {
1410 m_freem(m);
1411 return ENOBUFS;
1412 }
1413
1414 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1415 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
1416 if (m->m_flags & M_MCAST)
1417 if_statinc_ref(nsr, if_omcasts);
1418 IF_STAT_PUTREF(ifp);
1419
1420 if (mutex_tryenter(txq->txq_lock)) {
1421 if (!txq->txq_stopping)
1422 vioif_transmit_locked(ifp, txq);
1423 mutex_exit(txq->txq_lock);
1424 }
1425
1426 return 0;
1427 }
1428
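/* softint handler: retry sending mbufs left on the pcq */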
1429 static void
1430 vioif_deferred_transmit(void *arg)
1431 {
1432 struct vioif_txqueue *txq = arg;
1433 struct virtio_softc *vsc = txq->txq_vq->vq_owner;
1434 struct vioif_softc *sc = device_private(virtio_child(vsc));
1435 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1436
1437 if (mutex_tryenter(txq->txq_lock)) {
1438 vioif_send_common_locked(ifp, txq, true);
1439 mutex_exit(txq->txq_lock);
1440 }
1441 }
1442
1443 static int
1444 vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1445 {
1446 int s, r;
1447
1448 s = splnet();
1449
1450 r = ether_ioctl(ifp, cmd, data);
1451 if ((r == 0 && cmd == SIOCSIFFLAGS) ||
1452 (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
1453 if (ifp->if_flags & IFF_RUNNING)
1454 r = vioif_rx_filter(ifp->if_softc);
1455 else
1456 r = 0;
1457 }
1458
1459 splx(s);
1460
1461 return r;
1462 }
1463
   1464 static void
1465 vioif_watchdog(struct ifnet *ifp)
1466 {
1467 struct vioif_softc *sc = ifp->if_softc;
1468 int i;
1469
1470 if (ifp->if_flags & IFF_RUNNING) {
1471 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1472 vioif_tx_queue_clear(&sc->sc_txq[i]);
1473 }
1474 }
1475 }
1476
1477 /*
1478 * Receive implementation
1479 */
1480 /* allocate and initialize a mbuf for receive */
1481 static int
1482 vioif_add_rx_mbuf(struct vioif_rxqueue *rxq, int i)
1483 {
1484 struct virtio_softc *vsc = rxq->rxq_vq->vq_owner;
1485 struct mbuf *m;
1486 int r;
1487
1488 MGETHDR(m, M_DONTWAIT, MT_DATA);
1489 if (m == NULL)
1490 return ENOBUFS;
1491 MCLGET(m, M_DONTWAIT);
1492 if ((m->m_flags & M_EXT) == 0) {
1493 m_freem(m);
1494 return ENOBUFS;
1495 }
1496 rxq->rxq_mbufs[i] = m;
1497 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1498 r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
1499 rxq->rxq_dmamaps[i], m, BUS_DMA_READ | BUS_DMA_NOWAIT);
1500 if (r) {
1501 m_freem(m);
1502 rxq->rxq_mbufs[i] = NULL;
1503 return r;
1504 }
1505
1506 return 0;
1507 }
1508
1509 /* free a mbuf for receive */
1510 static void
1511 vioif_free_rx_mbuf(struct vioif_rxqueue *rxq, int i)
1512 {
1513 struct virtio_softc *vsc = rxq->rxq_vq->vq_owner;
1514
1515 bus_dmamap_unload(virtio_dmat(vsc), rxq->rxq_dmamaps[i]);
1516 m_freem(rxq->rxq_mbufs[i]);
1517 rxq->rxq_mbufs[i] = NULL;
1518 }
1519
1520 /* add mbufs for all the empty receive slots */
1521 static void
1522 vioif_populate_rx_mbufs(struct vioif_rxqueue *rxq)
1523 {
1524
1525 mutex_enter(rxq->rxq_lock);
1526 vioif_populate_rx_mbufs_locked(rxq);
1527 mutex_exit(rxq->rxq_lock);
1528 }
1529
1530 static void
1531 vioif_populate_rx_mbufs_locked(struct vioif_rxqueue *rxq)
1532 {
1533 struct virtqueue *vq = rxq->rxq_vq;
1534 struct virtio_softc *vsc = vq->vq_owner;
1535 struct vioif_softc *sc = device_private(virtio_child(vsc));
1536 int i, r, ndone = 0;
1537
1538 KASSERT(mutex_owned(rxq->rxq_lock));
1539
1540 if (rxq->rxq_stopping)
1541 return;
1542
1543 for (i = 0; i < vq->vq_num; i++) {
1544 int slot;
1545 r = virtio_enqueue_prep(vsc, vq, &slot);
1546 if (r == EAGAIN)
1547 break;
1548 if (r != 0)
1549 panic("enqueue_prep for rx buffers");
1550 if (rxq->rxq_mbufs[slot] == NULL) {
1551 r = vioif_add_rx_mbuf(rxq, slot);
1552 if (r != 0) {
1553 aprint_error_dev(sc->sc_dev,
1554 "rx mbuf allocation failed, "
1555 "error code %d\n", r);
1556 break;
1557 }
1558 }
1559 r = virtio_enqueue_reserve(vsc, vq, slot,
1560 rxq->rxq_dmamaps[slot]->dm_nsegs + 1);
1561 if (r != 0) {
1562 vioif_free_rx_mbuf(rxq, slot);
1563 break;
1564 }
1565 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot],
1566 0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
1567 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot],
1568 0, MCLBYTES, BUS_DMASYNC_PREREAD);
1569 virtio_enqueue(vsc, vq, slot, rxq->rxq_hdr_dmamaps[slot],
1570 false);
1571 virtio_enqueue(vsc, vq, slot, rxq->rxq_dmamaps[slot], false);
1572 virtio_enqueue_commit(vsc, vq, slot, false);
1573 ndone++;
1574 }
1575 if (ndone > 0)
1576 virtio_enqueue_commit(vsc, vq, -1, true);
1577 }
1578
1579 static void
1580 vioif_rx_queue_clear(struct vioif_rxqueue *rxq)
1581 {
1582 struct virtqueue *vq = rxq->rxq_vq;
1583 struct virtio_softc *vsc = vq->vq_owner;
1584 struct vioif_softc *sc = device_private(virtio_child(vsc));
1585 u_int limit = UINT_MAX;
1586 bool more;
1587
1588 KASSERT(rxq->rxq_stopping);
1589
1590 mutex_enter(rxq->rxq_lock);
1591 for (;;) {
1592 more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
1593 if (more == false)
1594 break;
1595 }
1596 mutex_exit(rxq->rxq_lock);
1597 }
1598
1599 /* dequeue received packets */
1600 static bool
1601 vioif_rx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
1602 struct vioif_rxqueue *rxq, u_int limit)
1603 {
1604 struct virtqueue *vq = rxq->rxq_vq;
1605 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1606 struct mbuf *m;
1607 int slot, len;
1608 bool more = false, dequeued = false;
1609
1610 KASSERT(mutex_owned(rxq->rxq_lock));
1611
1612 if (virtio_vq_is_enqueued(vsc, vq) == false)
1613 return false;
1614
1615 for (;;) {
1616 if (limit-- == 0) {
1617 more = true;
1618 break;
1619 }
1620
1621 if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
1622 break;
1623
1624 dequeued = true;
1625
1626 len -= sizeof(struct virtio_net_hdr);
1627 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot],
1628 0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_POSTREAD);
1629 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot],
1630 0, MCLBYTES, BUS_DMASYNC_POSTREAD);
1631 m = rxq->rxq_mbufs[slot];
1632 KASSERT(m != NULL);
1633 bus_dmamap_unload(virtio_dmat(vsc), rxq->rxq_dmamaps[slot]);
1634 rxq->rxq_mbufs[slot] = NULL;
1635 virtio_dequeue_commit(vsc, vq, slot);
1636 m_set_rcvif(m, ifp);
1637 m->m_len = m->m_pkthdr.len = len;
1638
1639 mutex_exit(rxq->rxq_lock);
1640 if_percpuq_enqueue(ifp->if_percpuq, m);
1641 mutex_enter(rxq->rxq_lock);
1642
1643 if (rxq->rxq_stopping)
1644 break;
1645 }
1646
1647 if (dequeued)
1648 softint_schedule(rxq->rxq_softint);
1649
1650 return more;
1651 }
1652
1653 /* rx interrupt; call _dequeue above and schedule a softint */
1654 static int
1655 vioif_rx_intr(void *arg)
1656 {
1657 struct vioif_rxqueue *rxq = arg;
1658 struct virtqueue *vq = rxq->rxq_vq;
1659 struct virtio_softc *vsc = vq->vq_owner;
1660 struct vioif_softc *sc = device_private(virtio_child(vsc));
1661 u_int limit;
1662 bool more;
1663
1664 limit = sc->sc_rx_intr_process_limit;
1665
1666 if (atomic_load_relaxed(&rxq->rxq_active) == true)
1667 return 1;
1668
1669 mutex_enter(rxq->rxq_lock);
1670
1671 if (!rxq->rxq_stopping) {
1672 rxq->rxq_workqueue = sc->sc_txrx_workqueue_sysctl;
1673
1674 virtio_stop_vq_intr(vsc, vq);
1675 atomic_store_relaxed(&rxq->rxq_active, true);
1676
1677 more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
1678 if (more) {
1679 vioif_rx_sched_handle(sc, rxq);
1680 } else {
1681 atomic_store_relaxed(&rxq->rxq_active, false);
1682 virtio_start_vq_intr(vsc, vq);
1683 }
1684 }
1685
1686 mutex_exit(rxq->rxq_lock);
1687 return 1;
1688 }
1689
1690 static void
1691 vioif_rx_handle(void *xrxq)
1692 {
1693 struct vioif_rxqueue *rxq = xrxq;
1694 struct virtqueue *vq = rxq->rxq_vq;
1695 struct virtio_softc *vsc = vq->vq_owner;
1696 struct vioif_softc *sc = device_private(virtio_child(vsc));
1697 u_int limit;
1698 bool more;
1699
1700 limit = sc->sc_rx_process_limit;
1701
1702 mutex_enter(rxq->rxq_lock);
1703
1704 if (!rxq->rxq_stopping) {
1705 more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
1706 if (more) {
1707 vioif_rx_sched_handle(sc, rxq);
1708 } else {
1709 atomic_store_relaxed(&rxq->rxq_active, false);
1710 virtio_start_vq_intr(vsc, rxq->rxq_vq);
1711 }
1712 }
1713
1714 mutex_exit(rxq->rxq_lock);
1715 }
1716
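/* defer further rx processing to either the workqueue or the softint */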
1717 static void
1718 vioif_rx_sched_handle(struct vioif_softc *sc, struct vioif_rxqueue *rxq)
1719 {
1720
1721 if (rxq->rxq_workqueue)
1722 vioif_work_add(sc->sc_txrx_workqueue, &rxq->rxq_work);
1723 else
1724 softint_schedule(rxq->rxq_handle_si);
1725 }
1726
1727 /* softint: enqueue receive requests for new incoming packets */
1728 static void
1729 vioif_rx_softint(void *arg)
1730 {
1731 struct vioif_rxqueue *rxq = arg;
1732
1733 vioif_populate_rx_mbufs(rxq);
1734 }
1735
1736 /* free all the mbufs; called from if_stop(disable) */
1737 static void
1738 vioif_rx_drain(struct vioif_rxqueue *rxq)
1739 {
1740 struct virtqueue *vq = rxq->rxq_vq;
1741 int i;
1742
1743 for (i = 0; i < vq->vq_num; i++) {
1744 if (rxq->rxq_mbufs[i] == NULL)
1745 continue;
1746 vioif_free_rx_mbuf(rxq, i);
1747 }
1748 }
1749
1750 /*
   1751  * Transmission implementation
1752 */
1753 /* actual transmission is done in if_start */
1754 /* tx interrupt; dequeue and free mbufs */
1755 /*
1756 * tx interrupt is actually disabled; this should be called upon
1757 * tx vq full and watchdog
1758 */
1759
1760 static int
1761 vioif_tx_intr(void *arg)
1762 {
1763 struct vioif_txqueue *txq = arg;
1764 struct virtqueue *vq = txq->txq_vq;
1765 struct virtio_softc *vsc = vq->vq_owner;
1766 struct vioif_softc *sc = device_private(virtio_child(vsc));
1767 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1768 bool more;
1769 u_int limit;
1770
1771 limit = sc->sc_tx_intr_process_limit;
1772
1773 if (atomic_load_relaxed(&txq->txq_active) == true)
1774 return 1;
1775
1776 mutex_enter(txq->txq_lock);
1777
1778 if (!txq->txq_stopping) {
1779 txq->txq_workqueue = sc->sc_txrx_workqueue_sysctl;
1780
1781 virtio_stop_vq_intr(vsc, vq);
1782 atomic_store_relaxed(&txq->txq_active, true);
1783
1784 more = vioif_tx_deq_locked(sc, vsc, txq, limit);
1785 if (more) {
1786 vioif_tx_sched_handle(sc, txq);
1787 } else {
1788 atomic_store_relaxed(&txq->txq_active, false);
1789
1790 /* for ALTQ */
1791 if (txq == &sc->sc_txq[0]) {
1792 if_schedule_deferred_start(ifp);
1793 ifp->if_flags &= ~IFF_OACTIVE;
1794 }
1795 softint_schedule(txq->txq_deferred_transmit);
1796
1797 virtio_start_vq_intr(vsc, vq);
1798 }
1799 }
1800
1801 mutex_exit(txq->txq_lock);
1802
1803 return 1;
1804 }
1805
1806 static void
1807 vioif_tx_handle(void *xtxq)
1808 {
1809 struct vioif_txqueue *txq = xtxq;
1810 struct virtqueue *vq = txq->txq_vq;
1811 struct virtio_softc *vsc = vq->vq_owner;
1812 struct vioif_softc *sc = device_private(virtio_child(vsc));
1813 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1814 u_int limit;
1815 bool more;
1816
1817 limit = sc->sc_tx_process_limit;
1818
1819 mutex_enter(txq->txq_lock);
1820
1821 if (!txq->txq_stopping) {
1822 more = vioif_tx_deq_locked(sc, vsc, txq, limit);
1823 if (more) {
1824 vioif_tx_sched_handle(sc, txq);
1825 } else {
1826 atomic_store_relaxed(&txq->txq_active, false);
1827
1828 /* for ALTQ */
1829 if (txq == &sc->sc_txq[0]) {
1830 if_schedule_deferred_start(ifp);
1831 ifp->if_flags &= ~IFF_OACTIVE;
1832 }
1833 softint_schedule(txq->txq_deferred_transmit);
1834
1835 virtio_start_vq_intr(vsc, txq->txq_vq);
1836 }
1837 }
1838
1839 mutex_exit(txq->txq_lock);
1840 }
1841
1842 static void
1843 vioif_tx_sched_handle(struct vioif_softc *sc, struct vioif_txqueue *txq)
1844 {
1845
1846 if (txq->txq_workqueue)
1847 vioif_work_add(sc->sc_txrx_workqueue, &txq->txq_work);
1848 else
1849 softint_schedule(txq->txq_handle_si);
1850 }
1851
1852 static void
1853 vioif_tx_queue_clear(struct vioif_txqueue *txq)
1854 {
1855 struct virtqueue *vq = txq->txq_vq;
1856 struct virtio_softc *vsc = vq->vq_owner;
1857 struct vioif_softc *sc = device_private(virtio_child(vsc));
1858 u_int limit = UINT_MAX;
1859 bool more;
1860
1861 mutex_enter(txq->txq_lock);
1862 for (;;) {
1863 more = vioif_tx_deq_locked(sc, vsc, txq, limit);
1864 if (more == false)
1865 break;
1866 }
1867 mutex_exit(txq->txq_lock);
1868 }
1869
1870 static bool
1871 vioif_tx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
1872 struct vioif_txqueue *txq, u_int limit)
1873 {
1874 struct virtqueue *vq = txq->txq_vq;
1875 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1876 struct mbuf *m;
1877 int slot, len;
1878 bool more = false;
1879
1880 KASSERT(mutex_owned(txq->txq_lock));
1881
1882 if (virtio_vq_is_enqueued(vsc, vq) == false)
1883 return false;
1884
1885 for (;;) {
1886 if (limit-- == 0) {
1887 more = true;
1888 break;
1889 }
1890
1891 if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
1892 break;
1893
1894 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot],
1895 0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_POSTWRITE);
1896 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot],
1897 0, txq->txq_dmamaps[slot]->dm_mapsize,
1898 BUS_DMASYNC_POSTWRITE);
1899 m = txq->txq_mbufs[slot];
1900 bus_dmamap_unload(virtio_dmat(vsc), txq->txq_dmamaps[slot]);
1901 txq->txq_mbufs[slot] = NULL;
1902 virtio_dequeue_commit(vsc, vq, slot);
1903 if_statinc(ifp, if_opackets);
1904 m_freem(m);
1905 }
1906
1907 return more;
1908 }
1909
1910 /* free all the mbufs already put on vq; called from if_stop(disable) */
1911 static void
1912 vioif_tx_drain(struct vioif_txqueue *txq)
1913 {
1914 struct virtqueue *vq = txq->txq_vq;
1915 struct virtio_softc *vsc = vq->vq_owner;
1916 int i;
1917
1918 KASSERT(txq->txq_stopping);
1919
1920 for (i = 0; i < vq->vq_num; i++) {
1921 if (txq->txq_mbufs[i] == NULL)
1922 continue;
1923 bus_dmamap_unload(virtio_dmat(vsc), txq->txq_dmamaps[i]);
1924 m_freem(txq->txq_mbufs[i]);
1925 txq->txq_mbufs[i] = NULL;
1926 }
1927 }
1928
1929 /*
1930 * Control vq
1931 */
1932 /* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
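/* get exclusive use of the control virtqueue, sleeping while it is busy */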
1933 static void
1934 vioif_ctrl_acquire(struct vioif_softc *sc)
1935 {
1936 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1937
1938 mutex_enter(&ctrlq->ctrlq_wait_lock);
1939 while (ctrlq->ctrlq_inuse != FREE)
1940 cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
1941 ctrlq->ctrlq_inuse = INUSE;
1942 ctrlq->ctrlq_owner = curlwp;
1943 mutex_exit(&ctrlq->ctrlq_wait_lock);
1944 }
1945
1946 static void
1947 vioif_ctrl_release(struct vioif_softc *sc)
1948 {
1949 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1950
1951 KASSERT(ctrlq->ctrlq_inuse != FREE);
1952 KASSERT(ctrlq->ctrlq_owner == curlwp);
1953
1954 mutex_enter(&ctrlq->ctrlq_wait_lock);
1955 ctrlq->ctrlq_inuse = FREE;
1956 ctrlq->ctrlq_owner = NULL;
1957 cv_signal(&ctrlq->ctrlq_wait);
1958 mutex_exit(&ctrlq->ctrlq_wait_lock);
1959 }
1960
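/*
 * Load the dmamap of each command buffer; on failure, unload the maps
 * loaded so far and return the error.
 */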
1961 static int
1962 vioif_ctrl_load_cmdspec(struct vioif_softc *sc,
1963 struct vioif_ctrl_cmdspec *specs, int nspecs)
1964 {
1965 struct virtio_softc *vsc = sc->sc_virtio;
1966 int i, r, loaded;
1967
1968 loaded = 0;
1969 for (i = 0; i < nspecs; i++) {
1970 r = bus_dmamap_load(virtio_dmat(vsc),
1971 specs[i].dmamap, specs[i].buf, specs[i].bufsize,
1972 NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1973 if (r) {
1974 aprint_error_dev(sc->sc_dev, "control command dmamap"
1975 " load failed, error code %d\n", r);
1976 goto err;
1977 }
1978 loaded++;
1979
1980 }
1981
1982 return r;
1983
1984 err:
1985 for (i = 0; i < loaded; i++) {
1986 bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap);
1987 }
1988
1989 return r;
1990 }
1991
1992 static void
1993 vioif_ctrl_unload_cmdspec(struct vioif_softc *sc,
1994 struct vioif_ctrl_cmdspec *specs, int nspecs)
1995 {
1996 struct virtio_softc *vsc = sc->sc_virtio;
1997 int i;
1998
1999 for (i = 0; i < nspecs; i++) {
2000 bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap);
2001 }
2002 }
2003
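/*
 * Enqueue a control command, its buffers and the status slot on the
 * control vq, then sleep until the interrupt handler marks it DONE.
 * The caller must own the control vq (vioif_ctrl_acquire()).
 */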
2004 static int
2005 vioif_ctrl_send_command(struct vioif_softc *sc, uint8_t class, uint8_t cmd,
2006 struct vioif_ctrl_cmdspec *specs, int nspecs)
2007 {
2008 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
2009 struct virtqueue *vq = ctrlq->ctrlq_vq;
2010 struct virtio_softc *vsc = sc->sc_virtio;
2011 int i, r, slot;
2012
2013 ctrlq->ctrlq_cmd->class = class;
2014 ctrlq->ctrlq_cmd->command = cmd;
2015
2016 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap,
2017 0, sizeof(struct virtio_net_ctrl_cmd), BUS_DMASYNC_PREWRITE);
2018 for (i = 0; i < nspecs; i++) {
2019 bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap,
2020 0, specs[i].bufsize, BUS_DMASYNC_PREWRITE);
2021 }
2022 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap,
2023 0, sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_PREREAD);
2024
2025 r = virtio_enqueue_prep(vsc, vq, &slot);
2026 if (r != 0)
2027 panic("%s: control vq busy!?", device_xname(sc->sc_dev));
2028 r = virtio_enqueue_reserve(vsc, vq, slot, nspecs + 2);
2029 if (r != 0)
2030 panic("%s: control vq busy!?", device_xname(sc->sc_dev));
2031 virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_cmd_dmamap, true);
2032 for (i = 0; i < nspecs; i++) {
2033 virtio_enqueue(vsc, vq, slot, specs[i].dmamap, true);
2034 }
2035 virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_status_dmamap, false);
2036 virtio_enqueue_commit(vsc, vq, slot, true);
2037
2038 	/* wait until the interrupt handler marks the command DONE */
2039 mutex_enter(&ctrlq->ctrlq_wait_lock);
2040 while (ctrlq->ctrlq_inuse != DONE)
2041 cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
2042 mutex_exit(&ctrlq->ctrlq_wait_lock);
2043 	/* already dequeued */
2044
2045 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap, 0,
2046 sizeof(struct virtio_net_ctrl_cmd), BUS_DMASYNC_POSTWRITE);
2047 for (i = 0; i < nspecs; i++) {
2048 bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap, 0,
2049 specs[i].bufsize, BUS_DMASYNC_POSTWRITE);
2050 }
2051 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap, 0,
2052 sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_POSTREAD);
2053
2054 	if (ctrlq->ctrlq_status->ack == VIRTIO_NET_OK) {
2055 		r = 0;
2056 	} else {
2057 		aprint_error_dev(sc->sc_dev, "control command failed\n");
2058 		r = EIO;
2059 	}
2060
2061 return r;
2062 }
2063
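/* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */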
2064 static int
2065 vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
2066 {
2067 struct virtio_net_ctrl_rx *rx = sc->sc_ctrlq.ctrlq_rx;
2068 struct vioif_ctrl_cmdspec specs[1];
2069 int r;
2070
2071 if (!sc->sc_has_ctrl)
2072 return ENOTSUP;
2073
2074 vioif_ctrl_acquire(sc);
2075
2076 rx->onoff = onoff;
2077 specs[0].dmamap = sc->sc_ctrlq.ctrlq_rx_dmamap;
2078 specs[0].buf = rx;
2079 specs[0].bufsize = sizeof(*rx);
2080
2081 r = vioif_ctrl_send_command(sc, VIRTIO_NET_CTRL_RX, cmd,
2082 specs, __arraycount(specs));
2083
2084 vioif_ctrl_release(sc);
2085 return r;
2086 }
2087
2088 static int
2089 vioif_set_promisc(struct vioif_softc *sc, bool onoff)
2090 {
2091 return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);
2092 }
2093
2094 static int
2095 vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
2096 {
2097 return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);
2098 }
2099
2100 /* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
2101 static int
2102 vioif_set_rx_filter(struct vioif_softc *sc)
2103 {
2104 	/* the caller has already filled ctrlq_mac_tbl_uc/mc */
2105 struct virtio_net_ctrl_mac_tbl *mac_tbl_uc, *mac_tbl_mc;
2106 struct vioif_ctrl_cmdspec specs[2];
2107 int nspecs = __arraycount(specs);
2108 int r;
2109
2110 mac_tbl_uc = sc->sc_ctrlq.ctrlq_mac_tbl_uc;
2111 mac_tbl_mc = sc->sc_ctrlq.ctrlq_mac_tbl_mc;
2112
2113 if (!sc->sc_has_ctrl)
2114 return ENOTSUP;
2115
2116 vioif_ctrl_acquire(sc);
2117
2118 specs[0].dmamap = sc->sc_ctrlq.ctrlq_tbl_uc_dmamap;
2119 specs[0].buf = mac_tbl_uc;
2120 specs[0].bufsize = sizeof(*mac_tbl_uc)
2121 + (ETHER_ADDR_LEN * mac_tbl_uc->nentries);
2122
2123 specs[1].dmamap = sc->sc_ctrlq.ctrlq_tbl_mc_dmamap;
2124 specs[1].buf = mac_tbl_mc;
2125 specs[1].bufsize = sizeof(*mac_tbl_mc)
2126 + (ETHER_ADDR_LEN * mac_tbl_mc->nentries);
2127
2128 r = vioif_ctrl_load_cmdspec(sc, specs, nspecs);
2129 if (r != 0)
2130 goto out;
2131
2132 r = vioif_ctrl_send_command(sc,
2133 VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET,
2134 specs, nspecs);
2135
2136 vioif_ctrl_unload_cmdspec(sc, specs, nspecs);
2137
2138 out:
2139 vioif_ctrl_release(sc);
2140
2141 return r;
2142 }
2143
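/* issue VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET command and wait for completion */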
2144 static int
2145 vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *sc, int nvq_pairs)
2146 {
2147 struct virtio_net_ctrl_mq *mq = sc->sc_ctrlq.ctrlq_mq;
2148 struct vioif_ctrl_cmdspec specs[1];
2149 int r;
2150
2151 if (!sc->sc_has_ctrl)
2152 return ENOTSUP;
2153
2154 if (nvq_pairs <= 1)
2155 return EINVAL;
2156
2157 vioif_ctrl_acquire(sc);
2158
2159 mq->virtqueue_pairs = nvq_pairs;
2160 specs[0].dmamap = sc->sc_ctrlq.ctrlq_mq_dmamap;
2161 specs[0].buf = mq;
2162 specs[0].bufsize = sizeof(*mq);
2163
2164 r = vioif_ctrl_send_command(sc,
2165 VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
2166 specs, __arraycount(specs));
2167
2168 vioif_ctrl_release(sc);
2169
2170 return r;
2171 }
2172
2173 /* ctrl vq interrupt; wake up the command issuer */
2174 static int
2175 vioif_ctrl_intr(void *arg)
2176 {
2177 struct vioif_ctrlqueue *ctrlq = arg;
2178 struct virtqueue *vq = ctrlq->ctrlq_vq;
2179 struct virtio_softc *vsc = vq->vq_owner;
2180 int r, slot;
2181
2182 if (virtio_vq_is_enqueued(vsc, vq) == false)
2183 return 0;
2184
2185 r = virtio_dequeue(vsc, vq, &slot, NULL);
2186 if (r == ENOENT)
2187 return 0;
2188 virtio_dequeue_commit(vsc, vq, slot);
2189
2190 mutex_enter(&ctrlq->ctrlq_wait_lock);
2191 ctrlq->ctrlq_inuse = DONE;
2192 cv_signal(&ctrlq->ctrlq_wait);
2193 mutex_exit(&ctrlq->ctrlq_wait_lock);
2194
2195 return 1;
2196 }
2197
2198 /*
2199  * If IFF_PROMISC is requested, enable promiscuous mode.
2200  * If the multicast filter is small enough (<= MAXENTRIES), program
2201  * the rx filter with the multicast addresses.
2202  * If the multicast list is too large, use ALLMULTI instead.
2203  *
2204  * If programming the rx filter fails, fall back to ALLMULTI.
2205  * If ALLMULTI fails, fall back to PROMISC.
2206  */
2207 static int
2208 vioif_rx_filter(struct vioif_softc *sc)
2209 {
2210 struct ethercom *ec = &sc->sc_ethercom;
2211 struct ifnet *ifp = &ec->ec_if;
2212 struct ether_multi *enm;
2213 struct ether_multistep step;
2214 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
2215 int nentries;
2216 int promisc = 0, allmulti = 0, rxfilter = 0;
2217 int r;
2218
2219 if (!sc->sc_has_ctrl) { /* no ctrl vq; always promisc */
2220 ifp->if_flags |= IFF_PROMISC;
2221 return 0;
2222 }
2223
2224 if (ifp->if_flags & IFF_PROMISC) {
2225 promisc = 1;
2226 goto set;
2227 }
2228
2229 nentries = -1;
2230 ETHER_LOCK(ec);
2231 ETHER_FIRST_MULTI(step, ec, enm);
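	/* the comma operator bumps nentries before each test, hence the -1 start */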
2232 while (nentries++, enm != NULL) {
2233 if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
2234 allmulti = 1;
2235 goto set_unlock;
2236 }
2237 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2238 allmulti = 1;
2239 goto set_unlock;
2240 }
2241 memcpy(ctrlq->ctrlq_mac_tbl_mc->macs[nentries],
2242 enm->enm_addrlo, ETHER_ADDR_LEN);
2243 ETHER_NEXT_MULTI(step, enm);
2244 }
2245 rxfilter = 1;
2246
2247 set_unlock:
2248 ETHER_UNLOCK(ec);
2249
2250 set:
2251 if (rxfilter) {
2252 ctrlq->ctrlq_mac_tbl_uc->nentries = 0;
2253 ctrlq->ctrlq_mac_tbl_mc->nentries = nentries;
2254 r = vioif_set_rx_filter(sc);
2255 if (r != 0) {
2256 rxfilter = 0;
2257 allmulti = 1; /* fallback */
2258 }
2259 } else {
2260 /* remove rx filter */
2261 ctrlq->ctrlq_mac_tbl_uc->nentries = 0;
2262 ctrlq->ctrlq_mac_tbl_mc->nentries = 0;
2263 r = vioif_set_rx_filter(sc);
2264 /* what to do on failure? */
2265 }
2266 if (allmulti) {
2267 r = vioif_set_allmulti(sc, true);
2268 if (r != 0) {
2269 allmulti = 0;
2270 promisc = 1; /* fallback */
2271 }
2272 } else {
2273 r = vioif_set_allmulti(sc, false);
2274 /* what to do on failure? */
2275 }
2276 if (promisc) {
2277 r = vioif_set_promisc(sc, true);
2278 } else {
2279 r = vioif_set_promisc(sc, false);
2280 }
2281
2282 return r;
2283 }
2284
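/*
 * Read the link state from device config; devices without
 * VIRTIO_NET_F_STATUS always report the link as up.
 */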
2285 static bool
2286 vioif_is_link_up(struct vioif_softc *sc)
2287 {
2288 struct virtio_softc *vsc = sc->sc_virtio;
2289 uint16_t status;
2290
2291 if (virtio_features(vsc) & VIRTIO_NET_F_STATUS)
2292 status = virtio_read_device_config_2(vsc,
2293 VIRTIO_NET_CONFIG_STATUS);
2294 else
2295 status = VIRTIO_NET_S_LINK_UP;
2296
2297 return ((status & VIRTIO_NET_S_LINK_UP) != 0);
2298 }
2299
2300 /* check the device's link state and propagate changes to the interface */
2301 static void
2302 vioif_update_link_status(struct vioif_softc *sc)
2303 {
2304 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2305 struct vioif_txqueue *txq;
2306 bool active, changed;
2307 int link, i;
2308
2309 active = vioif_is_link_up(sc);
2310 changed = false;
2311
2312 if (active) {
2313 if (!sc->sc_link_active)
2314 changed = true;
2315
2316 link = LINK_STATE_UP;
2317 sc->sc_link_active = true;
2318 } else {
2319 if (sc->sc_link_active)
2320 changed = true;
2321
2322 link = LINK_STATE_DOWN;
2323 sc->sc_link_active = false;
2324 }
2325
2326 if (changed) {
2327 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
2328 txq = &sc->sc_txq[i];
2329
2330 mutex_enter(txq->txq_lock);
2331 txq->txq_link_active = sc->sc_link_active;
2332 mutex_exit(txq->txq_lock);
2333 }
2334
2335 if_link_state_change(ifp, link);
2336 }
2337 }
2338
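/* config change interrupt; defer the handling to softint context */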
2339 static int
2340 vioif_config_change(struct virtio_softc *vsc)
2341 {
2342 struct vioif_softc *sc = device_private(virtio_child(vsc));
2343
2344 softint_schedule(sc->sc_ctl_softint);
2345 return 0;
2346 }
2347
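/* softint for config changes: update link state and restart tx */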
2348 static void
2349 vioif_ctl_softint(void *arg)
2350 {
2351 struct vioif_softc *sc = arg;
2352 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2353
2354 vioif_update_link_status(sc);
2355 vioif_start(ifp);
2356 }
2357
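/*
 * Thin wrappers around workqueue(9) used for deferred packet processing
 * (see the txrx_workqueue sysctl).
 */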
2358 static struct workqueue *
2359 vioif_workq_create(const char *name, pri_t prio, int ipl, int flags)
2360 {
2361 struct workqueue *wq;
2362 int error;
2363
2364 error = workqueue_create(&wq, name, vioif_workq_work, NULL,
2365 prio, ipl, flags);
2366
2367 if (error)
2368 return NULL;
2369
2370 return wq;
2371 }
2372
2373 static void
2374 vioif_workq_destroy(struct workqueue *wq)
2375 {
2376
2377 workqueue_destroy(wq);
2378 }
2379
2380 static void
2381 vioif_workq_work(struct work *wk, void *context)
2382 {
2383 struct vioif_work *work;
2384
2385 work = container_of(wk, struct vioif_work, cookie);
2386
2387 atomic_store_relaxed(&work->added, 0);
2388 work->func(work->arg);
2389 }
2390
2391 static void
2392 vioif_work_set(struct vioif_work *work, void (*func)(void *), void *arg)
2393 {
2394
2395 memset(work, 0, sizeof(*work));
2396 work->func = func;
2397 work->arg = arg;
2398 }
2399
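/*
 * Enqueue the work unless it is already pending; work->added is cleared
 * by vioif_workq_work() before the handler runs.
 */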
2400 static void
2401 vioif_work_add(struct workqueue *wq, struct vioif_work *work)
2402 {
2403
2404 if (atomic_load_relaxed(&work->added) != 0)
2405 return;
2406
2407 atomic_store_relaxed(&work->added, 1);
2408 kpreempt_disable();
2409 workqueue_enqueue(wq, &work->cookie, NULL);
2410 kpreempt_enable();
2411 }
2412
2413 static void
2414 vioif_work_wait(struct workqueue *wq, struct vioif_work *work)
2415 {
2416
2417 workqueue_wait(wq, &work->cookie);
2418 }
2419
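/*
 * Create the hw.<devname> sysctl subtree: the txrx_workqueue knob plus
 * per-Rx/Tx interrupt and deferred processing limits, e.g.
 * "sysctl -w hw.vioif0.tx.process_limit=..." on a vioif0 instance.
 */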
2420 static int
2421 vioif_setup_sysctl(struct vioif_softc *sc)
2422 {
2423 const char *devname;
2424 struct sysctllog **log;
2425 const struct sysctlnode *rnode, *rxnode, *txnode;
2426 int error;
2427
2428 log = &sc->sc_sysctllog;
2429 devname = device_xname(sc->sc_dev);
2430
2431 error = sysctl_createv(log, 0, NULL, &rnode,
2432 0, CTLTYPE_NODE, devname,
2433 SYSCTL_DESCR("virtio-net information and settings"),
2434 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
2435 if (error)
2436 goto out;
2437
2438 error = sysctl_createv(log, 0, &rnode, NULL,
2439 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
2440 SYSCTL_DESCR("Use workqueue for packet processing"),
2441 NULL, 0, &sc->sc_txrx_workqueue_sysctl, 0, CTL_CREATE, CTL_EOL);
2442 if (error)
2443 goto out;
2444
2445 error = sysctl_createv(log, 0, &rnode, &rxnode,
2446 0, CTLTYPE_NODE, "rx",
2447 SYSCTL_DESCR("virtio-net information and settings for Rx"),
2448 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
2449 if (error)
2450 goto out;
2451
2452 error = sysctl_createv(log, 0, &rxnode, NULL,
2453 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
2454 SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"),
2455 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
2456 if (error)
2457 goto out;
2458
2459 error = sysctl_createv(log, 0, &rxnode, NULL,
2460 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
2461 SYSCTL_DESCR("max number of Rx packets to process for deferred processing"),
2462 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
2463 if (error)
2464 goto out;
2465
2466 error = sysctl_createv(log, 0, &rnode, &txnode,
2467 0, CTLTYPE_NODE, "tx",
2468 SYSCTL_DESCR("virtio-net information and settings for Tx"),
2469 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
2470 if (error)
2471 goto out;
2472
2473 error = sysctl_createv(log, 0, &txnode, NULL,
2474 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
2475 SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"),
2476 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
2477 if (error)
2478 goto out;
2479
2480 error = sysctl_createv(log, 0, &txnode, NULL,
2481 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
2482 SYSCTL_DESCR("max number of Tx packets to process for deferred processing"),
2483 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
2484
2485 out:
2486 if (error)
2487 sysctl_teardown(log);
2488
2489 return error;
2490 }
2491
2492 MODULE(MODULE_CLASS_DRIVER, if_vioif, "virtio");
2493
2494 #ifdef _MODULE
2495 #include "ioconf.c"
2496 #endif
2497
2498 static int
2499 if_vioif_modcmd(modcmd_t cmd, void *opaque)
2500 {
2501 int error = 0;
2502
2503 #ifdef _MODULE
2504 switch (cmd) {
2505 case MODULE_CMD_INIT:
2506 error = config_init_component(cfdriver_ioconf_if_vioif,
2507 cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
2508 break;
2509 case MODULE_CMD_FINI:
2510 error = config_fini_component(cfdriver_ioconf_if_vioif,
2511 cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
2512 break;
2513 default:
2514 error = ENOTTY;
2515 break;
2516 }
2517 #endif
2518
2519 return error;
2520 }
2521