1 /* $NetBSD: if_vioif.c,v 1.62 2020/05/25 09:36:18 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2010 Minoura Makoto.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.62 2020/05/25 09:36:18 yamaguchi Exp $");
30
31 #ifdef _KERNEL_OPT
32 #include "opt_net_mpsafe.h"
33 #endif
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/atomic.h>
39 #include <sys/bus.h>
40 #include <sys/condvar.h>
41 #include <sys/device.h>
42 #include <sys/intr.h>
43 #include <sys/kmem.h>
44 #include <sys/mbuf.h>
45 #include <sys/mutex.h>
46 #include <sys/sockio.h>
47 #include <sys/cpu.h>
48 #include <sys/module.h>
49 #include <sys/pcq.h>
50 #include <sys/workqueue.h>
51
52 #include <dev/pci/virtioreg.h>
53 #include <dev/pci/virtiovar.h>
54
55 #include <net/if.h>
56 #include <net/if_media.h>
57 #include <net/if_ether.h>
58
59 #include <net/bpf.h>
60
61 #include "ioconf.h"
62
63 #ifdef NET_MPSAFE
64 #define VIOIF_MPSAFE 1
65 #define VIOIF_MULTIQ 1
66 #endif
67
68 /*
69 * if_vioifreg.h:
70 */
71 /* Configuration registers */
72 #define VIRTIO_NET_CONFIG_MAC 0 /* 8bit x 6byte */
73 #define VIRTIO_NET_CONFIG_STATUS 6 /* 16bit */
74 #define VIRTIO_NET_CONFIG_MAX_VQ_PAIRS 8 /* 16bit */
75
76 /* Feature bits */
77 #define VIRTIO_NET_F_CSUM __BIT(0)
78 #define VIRTIO_NET_F_GUEST_CSUM __BIT(1)
79 #define VIRTIO_NET_F_MAC __BIT(5)
80 #define VIRTIO_NET_F_GSO __BIT(6)
81 #define VIRTIO_NET_F_GUEST_TSO4 __BIT(7)
82 #define VIRTIO_NET_F_GUEST_TSO6 __BIT(8)
83 #define VIRTIO_NET_F_GUEST_ECN __BIT(9)
84 #define VIRTIO_NET_F_GUEST_UFO __BIT(10)
85 #define VIRTIO_NET_F_HOST_TSO4 __BIT(11)
86 #define VIRTIO_NET_F_HOST_TSO6 __BIT(12)
87 #define VIRTIO_NET_F_HOST_ECN __BIT(13)
88 #define VIRTIO_NET_F_HOST_UFO __BIT(14)
89 #define VIRTIO_NET_F_MRG_RXBUF __BIT(15)
90 #define VIRTIO_NET_F_STATUS __BIT(16)
91 #define VIRTIO_NET_F_CTRL_VQ __BIT(17)
92 #define VIRTIO_NET_F_CTRL_RX __BIT(18)
93 #define VIRTIO_NET_F_CTRL_VLAN __BIT(19)
94 #define VIRTIO_NET_F_CTRL_RX_EXTRA __BIT(20)
95 #define VIRTIO_NET_F_GUEST_ANNOUNCE __BIT(21)
96 #define VIRTIO_NET_F_MQ __BIT(22)
97
98 #define VIRTIO_NET_FLAG_BITS \
99 VIRTIO_COMMON_FLAG_BITS \
100 "\x17""MQ" \
101 "\x16""GUEST_ANNOUNCE" \
102 "\x15""CTRL_RX_EXTRA" \
103 "\x14""CTRL_VLAN" \
104 "\x13""CTRL_RX" \
105 "\x12""CTRL_VQ" \
106 "\x11""STATUS" \
107 "\x10""MRG_RXBUF" \
108 "\x0f""HOST_UFO" \
109 "\x0e""HOST_ECN" \
110 "\x0d""HOST_TSO6" \
111 "\x0c""HOST_TSO4" \
112 "\x0b""GUEST_UFO" \
113 "\x0a""GUEST_ECN" \
114 "\x09""GUEST_TSO6" \
115 "\x08""GUEST_TSO4" \
116 "\x07""GSO" \
117 "\x06""MAC" \
118 "\x02""GUEST_CSUM" \
119 "\x01""CSUM"
120
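/*
 * VIRTIO_NET_FLAG_BITS is a snprintb(3)-style bit description appended to
 * the common virtio bits: each "\xNN" is a 1-origin bit position (e.g.
 * "\x17" == 23 names __BIT(22), i.e. MQ) followed by the feature name.
 * The driver only hands the string to virtio_child_attach_start() below;
 * a minimal sketch of how such a string can be formatted (illustrative
 * only, not part of the driver, "fbuf" is hypothetical):
 */
#if 0
	char fbuf[512];

	snprintb(fbuf, sizeof(fbuf), VIRTIO_NET_FLAG_BITS,
	    virtio_features(vsc));
	aprint_normal_dev(self, "features: %s\n", fbuf);
#endif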
121 /* Status */
122 #define VIRTIO_NET_S_LINK_UP 1
123
124 /* Packet header structure */
125 struct virtio_net_hdr {
126 uint8_t flags;
127 uint8_t gso_type;
128 uint16_t hdr_len;
129 uint16_t gso_size;
130 uint16_t csum_start;
131 uint16_t csum_offset;
132 #if 0
133 uint16_t num_buffers; /* if VIRTIO_NET_F_MRG_RXBUF enabled */
134 #endif
135 } __packed;
136
137 #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* flags */
138 #define VIRTIO_NET_HDR_GSO_NONE 0 /* gso_type */
139 #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* gso_type */
140 #define VIRTIO_NET_HDR_GSO_UDP 3 /* gso_type */
141 #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* gso_type */
142 #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* gso_type, |'ed */
143
144 #define VIRTIO_NET_MAX_GSO_LEN (65536+ETHER_HDR_LEN)
145
146 /* Control virtqueue */
147 struct virtio_net_ctrl_cmd {
148 uint8_t class;
149 uint8_t command;
150 } __packed;
151 #define VIRTIO_NET_CTRL_RX 0
152 # define VIRTIO_NET_CTRL_RX_PROMISC 0
153 # define VIRTIO_NET_CTRL_RX_ALLMULTI 1
154
155 #define VIRTIO_NET_CTRL_MAC 1
156 # define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
157
158 #define VIRTIO_NET_CTRL_VLAN 2
159 # define VIRTIO_NET_CTRL_VLAN_ADD 0
160 # define VIRTIO_NET_CTRL_VLAN_DEL 1
161
162 #define VIRTIO_NET_CTRL_MQ 4
163 # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
164 # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
165 # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
166
167 struct virtio_net_ctrl_status {
168 uint8_t ack;
169 } __packed;
170 #define VIRTIO_NET_OK 0
171 #define VIRTIO_NET_ERR 1
172
173 struct virtio_net_ctrl_rx {
174 uint8_t onoff;
175 } __packed;
176
177 struct virtio_net_ctrl_mac_tbl {
178 uint32_t nentries;
179 uint8_t macs[][ETHER_ADDR_LEN];
180 } __packed;
181
182 struct virtio_net_ctrl_vlan {
183 uint16_t id;
184 } __packed;
185
186 struct virtio_net_ctrl_mq {
187 uint16_t virtqueue_pairs;
188 } __packed;
189
190 struct vioif_ctrl_cmdspec {
191 bus_dmamap_t dmamap;
192 void *buf;
193 bus_size_t bufsize;
194 };
195
196 /*
197 * if_vioifvar.h:
198 */
199
200 /*
201 * Locking notes:
202 * + a field in vioif_txqueue is protected by txq_lock (a spin mutex), and
203 * a field in vioif_rxqueue is protected by rxq_lock (a spin mutex).
204 * - more than one lock cannot be held at once
205 * + ctrlq_inuse is protected by ctrlq_wait_lock.
206 * - other fields in vioif_ctrlqueue are protected by ctrlq_inuse
207 * - txq_lock or rxq_lock cannot be held along with ctrlq_wait_lock
208 * + fields in vioif_softc except queues are protected by
209 * sc->sc_lock (an adaptive mutex)
210 * - the lock is held before acquisition of other locks
211 */
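/*
 * A minimal sketch (not part of the driver) of the ordering the notes
 * above imply: sc_lock, when needed, is taken before any queue lock,
 * tx and rx queue locks are never held at the same time, and a queue
 * lock is never held together with ctrlq_wait_lock.
 */
#if 0
	mutex_enter(&sc->sc_lock);	/* adaptive; taken before others */
	mutex_enter(txq->txq_lock);	/* spin; protects vioif_txqueue */
	/* ... touch vioif_txqueue fields ... */
	mutex_exit(txq->txq_lock);
	mutex_exit(&sc->sc_lock);
#endif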
212
213 struct vioif_work {
214 struct work cookie;
215 void (*func)(void *);
216 void *arg;
217 unsigned int added;
218 };
219
220 struct vioif_txqueue {
221 kmutex_t *txq_lock; /* lock for tx operations */
222
223 struct virtqueue *txq_vq;
224 bool txq_stopping;
225 bool txq_link_active;
226 pcq_t *txq_intrq;
227
228 struct virtio_net_hdr *txq_hdrs;
229 bus_dmamap_t *txq_hdr_dmamaps;
230
231 struct mbuf **txq_mbufs;
232 bus_dmamap_t *txq_dmamaps;
233
234 void *txq_deferred_transmit;
235 void *txq_handle_si;
236 struct vioif_work txq_work;
237 bool txq_workqueue;
238 bool txq_active;
239 };
240
241 struct vioif_rxqueue {
242 kmutex_t *rxq_lock; /* lock for rx operations */
243
244 struct virtqueue *rxq_vq;
245 bool rxq_stopping;
246
247 struct virtio_net_hdr *rxq_hdrs;
248 bus_dmamap_t *rxq_hdr_dmamaps;
249
250 struct mbuf **rxq_mbufs;
251 bus_dmamap_t *rxq_dmamaps;
252
253 void *rxq_handle_si;
254 struct vioif_work rxq_work;
255 bool rxq_workqueue;
256 bool rxq_active;
257 };
258
259 struct vioif_ctrlqueue {
260 struct virtqueue *ctrlq_vq;
261 enum {
262 FREE, INUSE, DONE
263 } ctrlq_inuse;
264 kcondvar_t ctrlq_wait;
265 kmutex_t ctrlq_wait_lock;
266 struct lwp *ctrlq_owner;
267
268 struct virtio_net_ctrl_cmd *ctrlq_cmd;
269 struct virtio_net_ctrl_status *ctrlq_status;
270 struct virtio_net_ctrl_rx *ctrlq_rx;
271 struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_uc;
272 struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_mc;
273 struct virtio_net_ctrl_mq *ctrlq_mq;
274
275 bus_dmamap_t ctrlq_cmd_dmamap;
276 bus_dmamap_t ctrlq_status_dmamap;
277 bus_dmamap_t ctrlq_rx_dmamap;
278 bus_dmamap_t ctrlq_tbl_uc_dmamap;
279 bus_dmamap_t ctrlq_tbl_mc_dmamap;
280 bus_dmamap_t ctrlq_mq_dmamap;
281 };
282
283 struct vioif_softc {
284 device_t sc_dev;
285 kmutex_t sc_lock;
286 struct sysctllog *sc_sysctllog;
287
288 struct virtio_softc *sc_virtio;
289 struct virtqueue *sc_vqs;
290
291 int sc_max_nvq_pairs;
292 int sc_req_nvq_pairs;
293 int sc_act_nvq_pairs;
294
295 uint8_t sc_mac[ETHER_ADDR_LEN];
296 struct ethercom sc_ethercom;
297 short sc_deferred_init_done;
298 bool sc_link_active;
299
300 struct vioif_txqueue *sc_txq;
301 struct vioif_rxqueue *sc_rxq;
302
303 bool sc_has_ctrl;
304 struct vioif_ctrlqueue sc_ctrlq;
305
306 bus_dma_segment_t sc_hdr_segs[1];
307 void *sc_dmamem;
308 void *sc_kmem;
309
310 void *sc_ctl_softint;
311
312 struct workqueue *sc_txrx_workqueue;
313 bool sc_txrx_workqueue_sysctl;
314 u_int sc_tx_intr_process_limit;
315 u_int sc_tx_process_limit;
316 u_int sc_rx_intr_process_limit;
317 u_int sc_rx_process_limit;
318 };
319 #define VIRTIO_NET_TX_MAXNSEGS (16) /* XXX */
320 #define VIRTIO_NET_CTRL_MAC_MAXENTRIES (64) /* XXX */
321
322 #define VIOIF_TX_INTR_PROCESS_LIMIT 256
323 #define VIOIF_TX_PROCESS_LIMIT 256
324 #define VIOIF_RX_INTR_PROCESS_LIMIT 0U
325 #define VIOIF_RX_PROCESS_LIMIT 256
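/*
 * A limit of 0 trips the "limit-- == 0" check in the dequeue loops
 * immediately, so with VIOIF_RX_INTR_PROCESS_LIMIT == 0 the rx hard
 * interrupt defers all dequeueing to the softint/workqueue handler;
 * the non-zero limits bound how many slots one pass may process
 * before rescheduling itself.
 */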
326
327 #define VIOIF_WORKQUEUE_PRI PRI_SOFTNET
328
329 /* cfattach interface functions */
330 static int vioif_match(device_t, cfdata_t, void *);
331 static void vioif_attach(device_t, device_t, void *);
332 static void vioif_deferred_init(device_t);
333 static int vioif_finalize_teardown(device_t);
334
335 /* ifnet interface functions */
336 static int vioif_init(struct ifnet *);
337 static void vioif_stop(struct ifnet *, int);
338 static void vioif_start(struct ifnet *);
339 static void vioif_start_locked(struct ifnet *, struct vioif_txqueue *);
340 static int vioif_transmit(struct ifnet *, struct mbuf *);
341 static void vioif_transmit_locked(struct ifnet *, struct vioif_txqueue *);
342 static int vioif_ioctl(struct ifnet *, u_long, void *);
343 static void vioif_watchdog(struct ifnet *);
344
345 /* rx */
346 static int vioif_add_rx_mbuf(struct vioif_rxqueue *, int);
347 static void vioif_free_rx_mbuf(struct vioif_rxqueue *, int);
348 static void vioif_populate_rx_mbufs_locked(struct vioif_rxqueue *);
349 static void vioif_rx_queue_clear(struct vioif_rxqueue *);
350 static bool vioif_rx_deq_locked(struct vioif_softc *, struct virtio_softc *,
351 struct vioif_rxqueue *, u_int);
352 static int vioif_rx_intr(void *);
353 static void vioif_rx_handle(void *);
354 static void vioif_rx_sched_handle(struct vioif_softc *,
355 struct vioif_rxqueue *);
356 static void vioif_rx_drain(struct vioif_rxqueue *);
357
358 /* tx */
359 static int vioif_tx_intr(void *);
360 static void vioif_tx_handle(void *);
361 static void vioif_tx_sched_handle(struct vioif_softc *,
362 struct vioif_txqueue *);
363 static void vioif_tx_queue_clear(struct vioif_txqueue *);
364 static bool vioif_tx_deq_locked(struct vioif_softc *, struct virtio_softc *,
365 struct vioif_txqueue *, u_int);
366 static void vioif_tx_drain(struct vioif_txqueue *);
367 static void vioif_deferred_transmit(void *);
368
369 /* workqueue */
370 static struct workqueue*
371 vioif_workq_create(const char *, pri_t, int, int);
372 static void vioif_workq_destroy(struct workqueue *);
373 static void vioif_workq_work(struct work *, void *);
374 static void vioif_work_set(struct vioif_work *, void(*)(void *), void *);
375 static void vioif_work_add(struct workqueue *, struct vioif_work *);
376 static void vioif_work_wait(struct workqueue *, struct vioif_work *);
377
378 /* other control */
379 static bool vioif_is_link_up(struct vioif_softc *);
380 static void vioif_update_link_status(struct vioif_softc *);
381 static int vioif_ctrl_rx(struct vioif_softc *, int, bool);
382 static int vioif_set_promisc(struct vioif_softc *, bool);
383 static int vioif_set_allmulti(struct vioif_softc *, bool);
384 static int vioif_set_rx_filter(struct vioif_softc *);
385 static int vioif_rx_filter(struct vioif_softc *);
386 static int vioif_ctrl_intr(void *);
387 static int vioif_config_change(struct virtio_softc *);
388 static void vioif_ctl_softint(void *);
389 static int vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *, int);
390 static void vioif_enable_interrupt_vqpairs(struct vioif_softc *);
391 static void vioif_disable_interrupt_vqpairs(struct vioif_softc *);
392 static int vioif_setup_sysctl(struct vioif_softc *);
393
394 CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
395 vioif_match, vioif_attach, NULL, NULL);
396
397 static int
398 vioif_match(device_t parent, cfdata_t match, void *aux)
399 {
400 struct virtio_attach_args *va = aux;
401
402 if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
403 return 1;
404
405 return 0;
406 }
407
408 static int
409 vioif_dmamap_create(struct vioif_softc *sc, bus_dmamap_t *map,
410 bus_size_t size, int nsegs, const char *usage)
411 {
412 int r;
413
414 r = bus_dmamap_create(virtio_dmat(sc->sc_virtio), size,
415 nsegs, size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, map);
416
417 if (r != 0) {
418 aprint_error_dev(sc->sc_dev, "%s dmamap creation failed, "
419 "error code %d\n", usage, r);
420 }
421
422 return r;
423 }
424
425 static void
426 vioif_dmamap_destroy(struct vioif_softc *sc, bus_dmamap_t *map)
427 {
428
429 if (*map) {
430 bus_dmamap_destroy(virtio_dmat(sc->sc_virtio), *map);
431 *map = NULL;
432 }
433 }
434
435 static int
436 vioif_dmamap_create_load(struct vioif_softc *sc, bus_dmamap_t *map,
437 void *buf, bus_size_t size, int nsegs, int rw, const char *usage)
438 {
439 int r;
440
441 r = vioif_dmamap_create(sc, map, size, nsegs, usage);
442 if (r != 0)
443 return 1;
444
445 r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), *map, buf,
446 size, NULL, rw | BUS_DMA_NOWAIT);
447 if (r != 0) {
448 vioif_dmamap_destroy(sc, map);
449 aprint_error_dev(sc->sc_dev, "%s dmamap load failed. "
450 "error code %d\n", usage, r);
451 }
452
453 return r;
454 }
455
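/*
 * Bump allocator: return a sub-region of the given size from the memory
 * block at *p and advance *p past it.  vioif_alloc_mems() below uses this
 * to slice both the DMA memory and the kmem block into per-queue arrays.
 */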
456 static void *
457 vioif_assign_mem(intptr_t *p, size_t size)
458 {
459 intptr_t rv;
460
461 rv = *p;
462 *p += size;
463
464 return (void *)rv;
465 }
466
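/*
 * The virtqueues are laid out as rx0, tx0, rx1, tx1, ..., with the
 * control vq (if negotiated) appended last; vioif_attach() allocates
 * them with virtio_alloc_vq() in the same index order.
 */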
467 static void
468 vioif_alloc_queues(struct vioif_softc *sc)
469 {
470 int nvq_pairs = sc->sc_max_nvq_pairs;
471 int nvqs = nvq_pairs * 2;
472 int i;
473
474 KASSERT(nvq_pairs <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX);
475
476 sc->sc_rxq = kmem_zalloc(sizeof(sc->sc_rxq[0]) * nvq_pairs,
477 KM_SLEEP);
478 sc->sc_txq = kmem_zalloc(sizeof(sc->sc_txq[0]) * nvq_pairs,
479 KM_SLEEP);
480
481 if (sc->sc_has_ctrl)
482 nvqs++;
483
484 sc->sc_vqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * nvqs, KM_SLEEP);
485 nvqs = 0;
486 for (i = 0; i < nvq_pairs; i++) {
487 sc->sc_rxq[i].rxq_vq = &sc->sc_vqs[nvqs++];
488 sc->sc_txq[i].txq_vq = &sc->sc_vqs[nvqs++];
489 }
490
491 if (sc->sc_has_ctrl)
492 sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[nvqs++];
493 }
494
495 static void
496 vioif_free_queues(struct vioif_softc *sc)
497 {
498 int nvq_pairs = sc->sc_max_nvq_pairs;
499 int nvqs = nvq_pairs * 2;
500
501 if (sc->sc_ctrlq.ctrlq_vq)
502 nvqs++;
503
504 if (sc->sc_txq) {
505 kmem_free(sc->sc_txq, sizeof(sc->sc_txq[0]) * nvq_pairs);
506 sc->sc_txq = NULL;
507 }
508
509 if (sc->sc_rxq) {
510 kmem_free(sc->sc_rxq, sizeof(sc->sc_rxq[0]) * nvq_pairs);
511 sc->sc_rxq = NULL;
512 }
513
514 if (sc->sc_vqs) {
515 kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs);
516 sc->sc_vqs = NULL;
517 }
518 }
519
520 /* allocate memory */
521 /*
522 * dma memory is used for:
523 * rxq_hdrs[slot]: metadata array for received frames (READ)
524 * txq_hdrs[slot]: metadata array for frames to be sent (WRITE)
525 * ctrlq_cmd: command to be sent via ctrl vq (WRITE)
526 * ctrlq_status: return value for a command via ctrl vq (READ)
527 * ctrlq_rx: parameter for a VIRTIO_NET_CTRL_RX class command
528 * (WRITE)
529 * ctrlq_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
530 * class command (WRITE)
531 * ctrlq_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
532 * class command (WRITE)
533 * Only one of each ctrlq_* structure is allocated; they are protected by
534 * the ctrlq_inuse variable and the ctrlq_wait condvar.
535 */
536 /*
537 * dynamically allocated memory is used for:
538 * rxq_hdr_dmamaps[slot]: bus_dmamap_t array for rxq_hdrs[slot]
539 * txq_hdr_dmamaps[slot]: bus_dmamap_t array for txq_hdrs[slot]
540 * rxq_dmamaps[slot]: bus_dmamap_t array for received payload
541 * txq_dmamaps[slot]: bus_dmamap_t array for sent payload
542 * rxq_mbufs[slot]: mbuf pointer array for received frames
543 * txq_mbufs[slot]: mbuf pointer array for sent frames
544 */
545 static int
546 vioif_alloc_mems(struct vioif_softc *sc)
547 {
548 struct virtio_softc *vsc = sc->sc_virtio;
549 struct vioif_txqueue *txq;
550 struct vioif_rxqueue *rxq;
551 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
552 int allocsize, allocsize2, r, rsegs, i, qid;
553 void *vaddr;
554 intptr_t p;
555
556 allocsize = 0;
557 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
558 rxq = &sc->sc_rxq[qid];
559 txq = &sc->sc_txq[qid];
560
561 allocsize +=
562 sizeof(struct virtio_net_hdr) * rxq->rxq_vq->vq_num;
563 allocsize +=
564 sizeof(struct virtio_net_hdr) * txq->txq_vq->vq_num;
565 }
566 if (sc->sc_has_ctrl) {
567 allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
568 allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
569 allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
570 allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
571 + sizeof(struct virtio_net_ctrl_mac_tbl)
572 + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
573 allocsize += sizeof(struct virtio_net_ctrl_mq) * 1;
574 }
575 r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
576 &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
577 if (r != 0) {
578 aprint_error_dev(sc->sc_dev,
579 "DMA memory allocation failed, size %d, "
580 "error code %d\n", allocsize, r);
581 goto err_none;
582 }
583 r = bus_dmamem_map(virtio_dmat(vsc),
584 &sc->sc_hdr_segs[0], 1, allocsize, &vaddr, BUS_DMA_NOWAIT);
585 if (r != 0) {
586 aprint_error_dev(sc->sc_dev,
587 "DMA memory map failed, error code %d\n", r);
588 goto err_dmamem_alloc;
589 }
590
591 memset(vaddr, 0, allocsize);
592 sc->sc_dmamem = vaddr;
593 p = (intptr_t) vaddr;
594
595 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
596 rxq = &sc->sc_rxq[qid];
597 txq = &sc->sc_txq[qid];
598
599 rxq->rxq_hdrs = vioif_assign_mem(&p,
600 sizeof(rxq->rxq_hdrs[0]) * rxq->rxq_vq->vq_num);
601 txq->txq_hdrs = vioif_assign_mem(&p,
602 sizeof(txq->txq_hdrs[0]) * txq->txq_vq->vq_num);
603 }
604 if (sc->sc_has_ctrl) {
605 ctrlq->ctrlq_cmd = vioif_assign_mem(&p,
606 sizeof(*ctrlq->ctrlq_cmd));
607 ctrlq->ctrlq_status = vioif_assign_mem(&p,
608 sizeof(*ctrlq->ctrlq_status));
609 ctrlq->ctrlq_rx = vioif_assign_mem(&p,
610 sizeof(*ctrlq->ctrlq_rx));
611 ctrlq->ctrlq_mac_tbl_uc = vioif_assign_mem(&p,
612 sizeof(*ctrlq->ctrlq_mac_tbl_uc));
613 ctrlq->ctrlq_mac_tbl_mc = vioif_assign_mem(&p,
614 sizeof(*ctrlq->ctrlq_mac_tbl_mc)
615 + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES);
616 ctrlq->ctrlq_mq = vioif_assign_mem(&p, sizeof(*ctrlq->ctrlq_mq));
617 }
618
619 allocsize2 = 0;
620 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
621 int rxqsize, txqsize;
622
623 rxq = &sc->sc_rxq[qid];
624 txq = &sc->sc_txq[qid];
625 rxqsize = rxq->rxq_vq->vq_num;
626 txqsize = txq->txq_vq->vq_num;
627
628 allocsize2 += sizeof(rxq->rxq_dmamaps[0]) * rxqsize;
629 allocsize2 += sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize;
630 allocsize2 += sizeof(rxq->rxq_mbufs[0]) * rxqsize;
631
632 allocsize2 += sizeof(txq->txq_dmamaps[0]) * txqsize;
633 allocsize2 += sizeof(txq->txq_hdr_dmamaps[0]) * txqsize;
634 allocsize2 += sizeof(txq->txq_mbufs[0]) * txqsize;
635 }
636 vaddr = kmem_zalloc(allocsize2, KM_SLEEP);
637 sc->sc_kmem = vaddr;
638 p = (intptr_t) vaddr;
639
640 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
641 int rxqsize, txqsize;
642 rxq = &sc->sc_rxq[qid];
643 txq = &sc->sc_txq[qid];
644 rxqsize = rxq->rxq_vq->vq_num;
645 txqsize = txq->txq_vq->vq_num;
646
647 rxq->rxq_hdr_dmamaps = vioif_assign_mem(&p,
648 sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize);
649 txq->txq_hdr_dmamaps = vioif_assign_mem(&p,
650 sizeof(txq->txq_hdr_dmamaps[0]) * txqsize);
651 rxq->rxq_dmamaps = vioif_assign_mem(&p,
652 sizeof(rxq->rxq_dmamaps[0]) * rxqsize);
653 txq->txq_dmamaps = vioif_assign_mem(&p,
654 sizeof(txq->txq_dmamaps[0]) * txqsize);
655 rxq->rxq_mbufs = vioif_assign_mem(&p,
656 sizeof(rxq->rxq_mbufs[0]) * rxqsize);
657 txq->txq_mbufs = vioif_assign_mem(&p,
658 sizeof(txq->txq_mbufs[0]) * txqsize);
659 }
660
661 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
662 rxq = &sc->sc_rxq[qid];
663 txq = &sc->sc_txq[qid];
664
665 for (i = 0; i < rxq->rxq_vq->vq_num; i++) {
666 r = vioif_dmamap_create_load(sc, &rxq->rxq_hdr_dmamaps[i],
667 &rxq->rxq_hdrs[i], sizeof(rxq->rxq_hdrs[0]), 1,
668 BUS_DMA_READ, "rx header");
669 if (r != 0)
670 goto err_reqs;
671
672 r = vioif_dmamap_create(sc, &rxq->rxq_dmamaps[i],
673 MCLBYTES, 1, "rx payload");
674 if (r != 0)
675 goto err_reqs;
676 }
677
678 for (i = 0; i < txq->txq_vq->vq_num; i++) {
679 r = vioif_dmamap_create_load(sc, &txq->txq_hdr_dmamaps[i],
680 &txq->txq_hdrs[i], sizeof(txq->txq_hdrs[0]), 1,
681 BUS_DMA_WRITE, "tx header");
682 if (r != 0)
683 goto err_reqs;
684
685 r = vioif_dmamap_create(sc, &txq->txq_dmamaps[i], ETHER_MAX_LEN,
686 VIRTIO_NET_TX_MAXNSEGS, "tx payload");
687 if (r != 0)
688 goto err_reqs;
689 }
690 }
691
692 if (sc->sc_has_ctrl) {
693 /* control vq class & command */
694 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_cmd_dmamap,
695 ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd), 1,
696 BUS_DMA_WRITE, "control command");
697 if (r != 0)
698 goto err_reqs;
699
700 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_status_dmamap,
701 ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status), 1,
702 BUS_DMA_READ, "control status");
703 if (r != 0)
704 goto err_reqs;
705
706 /* control vq rx mode command parameter */
707 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_rx_dmamap,
708 ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx), 1,
709 BUS_DMA_WRITE, "rx mode control command");
710 if (r != 0)
711 goto err_reqs;
712
713 /* multiqueue set command */
714 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_mq_dmamap,
715 ctrlq->ctrlq_mq, sizeof(*ctrlq->ctrlq_mq), 1,
716 BUS_DMA_WRITE, "multiqueue set command");
717 if (r != 0)
718 goto err_reqs;
719
720 /* control vq MAC filter table for unicast */
721 /* do not load now since its length is variable */
722 r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_uc_dmamap,
723 sizeof(*ctrlq->ctrlq_mac_tbl_uc) + 0, 1,
724 "unicast MAC address filter command");
725 if (r != 0)
726 goto err_reqs;
727
728 /* control vq MAC filter table for multicast */
729 r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_mc_dmamap,
730 sizeof(*ctrlq->ctrlq_mac_tbl_mc)
731 + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES, 1,
732 "multicast MAC address filter command");
		if (r != 0)
			goto err_reqs;
733 }
734
735 return 0;
736
737 err_reqs:
738 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_mc_dmamap);
739 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_uc_dmamap);
740 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_rx_dmamap);
741 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_status_dmamap);
742 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_cmd_dmamap);
743 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
744 rxq = &sc->sc_rxq[qid];
745 txq = &sc->sc_txq[qid];
746
747 for (i = 0; i < txq->txq_vq->vq_num; i++) {
748 vioif_dmamap_destroy(sc, &txq->txq_dmamaps[i]);
749 vioif_dmamap_destroy(sc, &txq->txq_hdr_dmamaps[i]);
750 }
751 for (i = 0; i < rxq->rxq_vq->vq_num; i++) {
752 vioif_dmamap_destroy(sc, &rxq->rxq_dmamaps[i]);
753 vioif_dmamap_destroy(sc, &rxq->rxq_hdr_dmamaps[i]);
754 }
755 }
756 if (sc->sc_kmem) {
757 kmem_free(sc->sc_kmem, allocsize2);
758 sc->sc_kmem = NULL;
759 }
760 bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, allocsize);
761 err_dmamem_alloc:
762 bus_dmamem_free(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1);
763 err_none:
764 return -1;
765 }
766
767 static void
768 vioif_attach(device_t parent, device_t self, void *aux)
769 {
770 struct vioif_softc *sc = device_private(self);
771 struct virtio_softc *vsc = device_private(parent);
772 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
773 struct vioif_txqueue *txq;
774 struct vioif_rxqueue *rxq;
775 uint32_t features, req_features;
776 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
777 u_int softint_flags;
778 int r, i, nvqs=0, req_flags;
779 char xnamebuf[MAXCOMLEN];
780
781 if (virtio_child(vsc) != NULL) {
782 aprint_normal(": child already attached for %s; "
783 "something wrong...\n", device_xname(parent));
784 return;
785 }
786
787 sc->sc_dev = self;
788 sc->sc_virtio = vsc;
789 sc->sc_link_active = false;
790
791 sc->sc_max_nvq_pairs = 1;
792 sc->sc_req_nvq_pairs = 1;
793 sc->sc_act_nvq_pairs = 1;
794 sc->sc_txrx_workqueue_sysctl = true;
795 sc->sc_tx_intr_process_limit = VIOIF_TX_INTR_PROCESS_LIMIT;
796 sc->sc_tx_process_limit = VIOIF_TX_PROCESS_LIMIT;
797 sc->sc_rx_intr_process_limit = VIOIF_RX_INTR_PROCESS_LIMIT;
798 sc->sc_rx_process_limit = VIOIF_RX_PROCESS_LIMIT;
799
800 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
801
802 snprintf(xnamebuf, sizeof(xnamebuf), "%s_txrx", device_xname(self));
803 sc->sc_txrx_workqueue = vioif_workq_create(xnamebuf, VIOIF_WORKQUEUE_PRI,
804 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
805 if (sc->sc_txrx_workqueue == NULL)
806 goto err;
807
808 req_flags = 0;
809
810 #ifdef VIOIF_MPSAFE
811 req_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
812 #endif
813 req_flags |= VIRTIO_F_PCI_INTR_MSIX;
814
815 req_features =
816 VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
817 VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY;
818 #ifdef VIOIF_MULTIQ
819 req_features |= VIRTIO_NET_F_MQ;
820 #endif
821 virtio_child_attach_start(vsc, self, IPL_NET, NULL,
822 vioif_config_change, virtio_vq_intrhand, req_flags,
823 req_features, VIRTIO_NET_FLAG_BITS);
824
825 features = virtio_features(vsc);
826
827 if (features & VIRTIO_NET_F_MAC) {
828 for (i = 0; i < __arraycount(sc->sc_mac); i++) {
829 sc->sc_mac[i] = virtio_read_device_config_1(vsc,
830 VIRTIO_NET_CONFIG_MAC + i);
831 }
832 } else {
833 /* code stolen from sys/net/if_tap.c */
834 struct timeval tv;
835 uint32_t ui;
836 getmicrouptime(&tv);
837 ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
838 memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
839 for (i = 0; i < __arraycount(sc->sc_mac); i++) {
840 virtio_write_device_config_1(vsc,
841 VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
842 }
843 }
844
845 aprint_normal_dev(self, "Ethernet address %s\n",
846 ether_sprintf(sc->sc_mac));
847
848 if ((features & VIRTIO_NET_F_CTRL_VQ) &&
849 (features & VIRTIO_NET_F_CTRL_RX)) {
850 sc->sc_has_ctrl = true;
851
852 cv_init(&ctrlq->ctrlq_wait, "ctrl_vq");
853 mutex_init(&ctrlq->ctrlq_wait_lock, MUTEX_DEFAULT, IPL_NET);
854 ctrlq->ctrlq_inuse = FREE;
855 } else {
856 sc->sc_has_ctrl = false;
857 }
858
859 if (sc->sc_has_ctrl && (features & VIRTIO_NET_F_MQ)) {
860 sc->sc_max_nvq_pairs = virtio_read_device_config_2(vsc,
861 VIRTIO_NET_CONFIG_MAX_VQ_PAIRS);
862
863 if (sc->sc_max_nvq_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
864 goto err;
865
866 /* Limit the number of queue pairs to use */
867 sc->sc_req_nvq_pairs = MIN(sc->sc_max_nvq_pairs, ncpu);
868 }
869
870 vioif_alloc_queues(sc);
871 virtio_child_attach_set_vqs(vsc, sc->sc_vqs, sc->sc_req_nvq_pairs);
872
873 #ifdef VIOIF_MPSAFE
874 softint_flags = SOFTINT_NET | SOFTINT_MPSAFE;
875 #else
876 softint_flags = SOFTINT_NET;
877 #endif
878
879 /*
880 * Allocating virtqueues
881 */
882 for (i = 0; i < sc->sc_max_nvq_pairs; i++) {
883 rxq = &sc->sc_rxq[i];
884 txq = &sc->sc_txq[i];
885 char qname[32];
886
887 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
888
889 rxq->rxq_handle_si = softint_establish(softint_flags,
890 vioif_rx_handle, rxq);
891 if (rxq->rxq_handle_si == NULL) {
892 aprint_error_dev(self, "cannot establish rx softint\n");
893 goto err;
894 }
895
896 snprintf(qname, sizeof(qname), "rx%d", i);
897 r = virtio_alloc_vq(vsc, rxq->rxq_vq, nvqs,
898 MCLBYTES+sizeof(struct virtio_net_hdr), 2, qname);
899 if (r != 0)
900 goto err;
901 nvqs++;
902 rxq->rxq_vq->vq_intrhand = vioif_rx_intr;
903 rxq->rxq_vq->vq_intrhand_arg = (void *)rxq;
904 rxq->rxq_stopping = true;
905 vioif_work_set(&rxq->rxq_work, vioif_rx_handle, rxq);
906
907 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
908
909 txq->txq_deferred_transmit = softint_establish(softint_flags,
910 vioif_deferred_transmit, txq);
911 if (txq->txq_deferred_transmit == NULL) {
912 aprint_error_dev(self, "cannot establish tx softint\n");
913 goto err;
914 }
915 txq->txq_handle_si = softint_establish(softint_flags,
916 vioif_tx_handle, txq);
917 if (txq->txq_handle_si == NULL) {
918 aprint_error_dev(self, "cannot establish tx softint\n");
919 goto err;
920 }
921
922 snprintf(qname, sizeof(qname), "tx%d", i);
923 r = virtio_alloc_vq(vsc, txq->txq_vq, nvqs,
924 sizeof(struct virtio_net_hdr)
925 + (ETHER_MAX_LEN - ETHER_HDR_LEN),
926 VIRTIO_NET_TX_MAXNSEGS + 1, qname);
927 if (r != 0)
928 goto err;
929 nvqs++;
930 txq->txq_vq->vq_intrhand = vioif_tx_intr;
931 txq->txq_vq->vq_intrhand_arg = (void *)txq;
932 txq->txq_link_active = sc->sc_link_active;
933 txq->txq_stopping = false;
934 txq->txq_intrq = pcq_create(txq->txq_vq->vq_num, KM_SLEEP);
935 vioif_work_set(&txq->txq_work, vioif_tx_handle, txq);
936 }
937
938 if (sc->sc_has_ctrl) {
939 /*
940 * Allocating a virtqueue for control channel
941 */
942 r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, nvqs,
943 NBPG, 1, "control");
944 if (r != 0) {
945 aprint_error_dev(self, "failed to allocate "
946 "a virtqueue for control channel, error code %d\n",
947 r);
948
949 sc->sc_has_ctrl = false;
950 cv_destroy(&ctrlq->ctrlq_wait);
951 mutex_destroy(&ctrlq->ctrlq_wait_lock);
952 } else {
953 nvqs++;
954 ctrlq->ctrlq_vq->vq_intrhand = vioif_ctrl_intr;
955 ctrlq->ctrlq_vq->vq_intrhand_arg = (void *) ctrlq;
956 }
957 }
958
959 sc->sc_ctl_softint = softint_establish(softint_flags,
960 vioif_ctl_softint, sc);
961 if (sc->sc_ctl_softint == NULL) {
962 aprint_error_dev(self, "cannot establish ctl softint\n");
963 goto err;
964 }
965
966 if (vioif_alloc_mems(sc) < 0)
967 goto err;
968
969 if (virtio_child_attach_finish(vsc) != 0)
970 goto err;
971
972 if (vioif_setup_sysctl(sc) != 0) {
973 aprint_error_dev(self, "unable to create sysctl node\n");
974 /* continue */
975 }
976
977 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
978 ifp->if_softc = sc;
979 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
980 #ifdef VIOIF_MPSAFE
981 ifp->if_extflags = IFEF_MPSAFE;
982 #endif
983 ifp->if_start = vioif_start;
984 if (sc->sc_req_nvq_pairs > 1)
985 ifp->if_transmit = vioif_transmit;
986 ifp->if_ioctl = vioif_ioctl;
987 ifp->if_init = vioif_init;
988 ifp->if_stop = vioif_stop;
989 ifp->if_capabilities = 0;
990 ifp->if_watchdog = vioif_watchdog;
991 txq = &sc->sc_txq[0];
992 IFQ_SET_MAXLEN(&ifp->if_snd, MAX(txq->txq_vq->vq_num, IFQ_MAXLEN));
993 IFQ_SET_READY(&ifp->if_snd);
994
995 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
996
997 if_attach(ifp);
998 if_deferred_start_init(ifp, NULL);
999 ether_ifattach(ifp, sc->sc_mac);
1000
1001 return;
1002
1003 err:
	/* sc_rxq/sc_txq may not be allocated yet when we fail early */
1004 for (i = 0; sc->sc_rxq != NULL && i < sc->sc_max_nvq_pairs; i++) {
1005 rxq = &sc->sc_rxq[i];
1006 txq = &sc->sc_txq[i];
1007
1008 if (rxq->rxq_lock) {
1009 mutex_obj_free(rxq->rxq_lock);
1010 rxq->rxq_lock = NULL;
1011 }
1012
1013 if (rxq->rxq_handle_si) {
1014 softint_disestablish(rxq->rxq_handle_si);
1015 rxq->rxq_handle_si = NULL;
1016 }
1017
1018 if (txq->txq_lock) {
1019 mutex_obj_free(txq->txq_lock);
1020 txq->txq_lock = NULL;
1021 }
1022
1023 if (txq->txq_handle_si) {
1024 softint_disestablish(txq->txq_handle_si);
1025 txq->txq_handle_si = NULL;
1026 }
1027
1028 if (txq->txq_deferred_transmit) {
1029 softint_disestablish(txq->txq_deferred_transmit);
1030 txq->txq_deferred_transmit = NULL;
1031 }
1032
1033 if (txq->txq_intrq) {
1034 pcq_destroy(txq->txq_intrq);
1035 txq->txq_intrq = NULL;
1036 }
1037 }
1038
1039 if (sc->sc_has_ctrl) {
1040 cv_destroy(&ctrlq->ctrlq_wait);
1041 mutex_destroy(&ctrlq->ctrlq_wait_lock);
1042 }
1043
1044 while (nvqs > 0)
1045 virtio_free_vq(vsc, &sc->sc_vqs[--nvqs]);
1046
1047 vioif_free_queues(sc);
1048 mutex_destroy(&sc->sc_lock);
1049 virtio_child_attach_failed(vsc);
1050 config_finalize_register(self, vioif_finalize_teardown);
1051
1052 return;
1053 }
1054
1055 static int
1056 vioif_finalize_teardown(device_t self)
1057 {
1058 struct vioif_softc *sc = device_private(self);
1059
1060 if (sc->sc_txrx_workqueue != NULL) {
1061 vioif_workq_destroy(sc->sc_txrx_workqueue);
1062 sc->sc_txrx_workqueue = NULL;
1063 }
1064
1065 return 0;
1066 }
1067
1068 /* we need interrupts to turn promiscuous mode off */
1069 static void
1070 vioif_deferred_init(device_t self)
1071 {
1072 struct vioif_softc *sc = device_private(self);
1073 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1074 int r;
1075
1076 if (ifp->if_flags & IFF_PROMISC)
1077 return;
1078
1079 r = vioif_set_promisc(sc, false);
1080 if (r != 0)
1081 aprint_error_dev(self, "resetting promisc mode failed, "
1082 "error code %d\n", r);
1083 }
1084
1085 static void
1086 vioif_enable_interrupt_vqpairs(struct vioif_softc *sc)
1087 {
1088 struct virtio_softc *vsc = sc->sc_virtio;
1089 struct vioif_txqueue *txq;
1090 struct vioif_rxqueue *rxq;
1091 int i;
1092
1093 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1094 txq = &sc->sc_txq[i];
1095 rxq = &sc->sc_rxq[i];
1096
1097 virtio_start_vq_intr(vsc, txq->txq_vq);
1098 virtio_start_vq_intr(vsc, rxq->rxq_vq);
1099 }
1100 }
1101
1102 static void
1103 vioif_disable_interrupt_vqpairs(struct vioif_softc *sc)
1104 {
1105 struct virtio_softc *vsc = sc->sc_virtio;
1106 struct vioif_txqueue *txq;
1107 struct vioif_rxqueue *rxq;
1108 int i;
1109
1110 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1111 txq = &sc->sc_txq[i];
1112 rxq = &sc->sc_rxq[i];
1113
1114 virtio_stop_vq_intr(vsc, txq->txq_vq);
1115 virtio_stop_vq_intr(vsc, rxq->rxq_vq);
1116 }
1117 }
1118
1119 /*
1120 * Interface functions for ifnet
1121 */
1122 static int
1123 vioif_init(struct ifnet *ifp)
1124 {
1125 struct vioif_softc *sc = ifp->if_softc;
1126 struct virtio_softc *vsc = sc->sc_virtio;
1127 struct vioif_rxqueue *rxq;
1128 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1129 int r, i;
1130
1131 vioif_stop(ifp, 0);
1132
1133 virtio_reinit_start(vsc);
1134 virtio_negotiate_features(vsc, virtio_features(vsc));
1135
1136 for (i = 0; i < sc->sc_req_nvq_pairs; i++) {
1137 rxq = &sc->sc_rxq[i];
1138
1139 /* Have to set rxq_stopping false before vioif_populate_rx_mbufs_locked */
1140 mutex_enter(rxq->rxq_lock);
1141 rxq->rxq_stopping = false;
1142 vioif_populate_rx_mbufs_locked(rxq);
1143 mutex_exit(rxq->rxq_lock);
1144
1145 }
1146
1147 virtio_reinit_end(vsc);
1148
1149 if (sc->sc_has_ctrl)
1150 virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);
1151
1152 r = vioif_ctrl_mq_vq_pairs_set(sc, sc->sc_req_nvq_pairs);
1153 if (r == 0)
1154 sc->sc_act_nvq_pairs = sc->sc_req_nvq_pairs;
1155 else
1156 sc->sc_act_nvq_pairs = 1;
1157
1158 for (i = 0; i < sc->sc_act_nvq_pairs; i++)
1159 sc->sc_txq[i].txq_stopping = false;
1160
1161 vioif_enable_interrupt_vqpairs(sc);
1162
1163 if (!sc->sc_deferred_init_done) {
1164 sc->sc_deferred_init_done = 1;
1165 if (sc->sc_has_ctrl)
1166 vioif_deferred_init(sc->sc_dev);
1167 }
1168
1169 vioif_update_link_status(sc);
1170 ifp->if_flags |= IFF_RUNNING;
1171 ifp->if_flags &= ~IFF_OACTIVE;
1172 vioif_rx_filter(sc);
1173
1174 return 0;
1175 }
1176
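/*
 * vioif_stop: mark every active queue as stopping, disable the vq
 * interrupts, reset the device (the only way to stop its DMA), take and
 * release each queue lock to rendezvous with handlers already running,
 * wait for pending workqueue items, then clear both queues and drain
 * them (rx mbufs are freed only when "disable" is set).
 */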
1177 static void
1178 vioif_stop(struct ifnet *ifp, int disable)
1179 {
1180 struct vioif_softc *sc = ifp->if_softc;
1181 struct virtio_softc *vsc = sc->sc_virtio;
1182 struct vioif_txqueue *txq;
1183 struct vioif_rxqueue *rxq;
1184 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1185 int i;
1186
1187 /* Take the locks to ensure that ongoing TX/RX finish */
1188 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1189 txq = &sc->sc_txq[i];
1190 rxq = &sc->sc_rxq[i];
1191
1192 mutex_enter(txq->txq_lock);
1193 txq->txq_stopping = true;
1194 mutex_exit(txq->txq_lock);
1195
1196 mutex_enter(rxq->rxq_lock);
1197 rxq->rxq_stopping = true;
1198 mutex_exit(rxq->rxq_lock);
1199 }
1200
1201 /* disable interrupts */
1202 vioif_disable_interrupt_vqpairs(sc);
1203
1204 if (sc->sc_has_ctrl)
1205 virtio_stop_vq_intr(vsc, ctrlq->ctrlq_vq);
1206
1207 /* only way to stop I/O and DMA is resetting... */
1208 virtio_reset(vsc);
1209
1210 /* rendezvous for finish of handlers */
1211 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1212 txq = &sc->sc_txq[i];
1213 rxq = &sc->sc_rxq[i];
1214
1215 mutex_enter(txq->txq_lock);
1216 mutex_exit(txq->txq_lock);
1217
1218 mutex_enter(rxq->rxq_lock);
1219 mutex_exit(rxq->rxq_lock);
1220
1221 vioif_work_wait(sc->sc_txrx_workqueue, &txq->txq_work);
1222 vioif_work_wait(sc->sc_txrx_workqueue, &rxq->rxq_work);
1223 }
1224
1225 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1226 vioif_rx_queue_clear(&sc->sc_rxq[i]);
1227 vioif_tx_queue_clear(&sc->sc_txq[i]);
1228 }
1229
1230 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1231 sc->sc_link_active = false;
1232
1233 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1234 txq = &sc->sc_txq[i];
1235 rxq = &sc->sc_rxq[i];
1236
1237 txq->txq_link_active = false;
1238
1239 if (disable)
1240 vioif_rx_drain(rxq);
1241
1242 vioif_tx_drain(txq);
1243 }
1244 }
1245
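/*
 * Common tx enqueue path for if_start (packets from if_snd) and
 * if_transmit/deferred transmit (packets from the per-queue pcq).
 * Each packet uses two descriptors: the per-slot virtio_net_hdr and
 * the dmamap covering the mbuf chain, which is defragmented with
 * m_defrag() if it does not fit the map.
 */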
1246 static void
1247 vioif_send_common_locked(struct ifnet *ifp, struct vioif_txqueue *txq,
1248 bool is_transmit)
1249 {
1250 struct vioif_softc *sc = ifp->if_softc;
1251 struct virtio_softc *vsc = sc->sc_virtio;
1252 struct virtqueue *vq = txq->txq_vq;
1253 struct mbuf *m;
1254 int queued = 0;
1255
1256 KASSERT(mutex_owned(txq->txq_lock));
1257
1258 if ((ifp->if_flags & IFF_RUNNING) == 0)
1259 return;
1260
1261 if (!txq->txq_link_active || txq->txq_stopping)
1262 return;
1263
1264 if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
1265 return;
1266
1267 for (;;) {
1268 int slot, r;
1269
1270 if (is_transmit)
1271 m = pcq_get(txq->txq_intrq);
1272 else
1273 IFQ_DEQUEUE(&ifp->if_snd, m);
1274
1275 if (m == NULL)
1276 break;
1277
1278 r = virtio_enqueue_prep(vsc, vq, &slot);
1279 if (r == EAGAIN) {
1280 ifp->if_flags |= IFF_OACTIVE;
1281 m_freem(m);
1282 break;
1283 }
1284 if (r != 0)
1285 panic("enqueue_prep for a tx buffer");
1286
1287 r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
1288 txq->txq_dmamaps[slot], m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1289 if (r != 0) {
1290 /* maybe just too fragmented */
1291 struct mbuf *newm;
1292
1293 newm = m_defrag(m, M_NOWAIT);
1294 if (newm == NULL) {
1295 aprint_error_dev(sc->sc_dev,
1296 "m_defrag() failed\n");
1297 goto skip;
1298 }
1299
1300 m = newm;
1301 r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
1302 txq->txq_dmamaps[slot], m,
1303 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1304 if (r != 0) {
1305 aprint_error_dev(sc->sc_dev,
1306 "tx dmamap load failed, error code %d\n",
1307 r);
1308 skip:
1309 m_freem(m);
1310 virtio_enqueue_abort(vsc, vq, slot);
1311 continue;
1312 }
1313 }
1314
1315 /* This should actually never fail */
1316 r = virtio_enqueue_reserve(vsc, vq, slot,
1317 txq->txq_dmamaps[slot]->dm_nsegs + 1);
1318 if (r != 0) {
1319 aprint_error_dev(sc->sc_dev,
1320 "virtio_enqueue_reserve failed, error code %d\n",
1321 r);
1322 bus_dmamap_unload(virtio_dmat(vsc),
1323 txq->txq_dmamaps[slot]);
1324 /* slot already freed by virtio_enqueue_reserve */
1325 m_freem(m);
1326 continue;
1327 }
1328
1329 txq->txq_mbufs[slot] = m;
1330
1331 memset(&txq->txq_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
1332 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot],
1333 0, txq->txq_dmamaps[slot]->dm_mapsize,
1334 BUS_DMASYNC_PREWRITE);
1335 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot],
1336 0, txq->txq_hdr_dmamaps[slot]->dm_mapsize,
1337 BUS_DMASYNC_PREWRITE);
1338 virtio_enqueue(vsc, vq, slot, txq->txq_hdr_dmamaps[slot], true);
1339 virtio_enqueue(vsc, vq, slot, txq->txq_dmamaps[slot], true);
1340 virtio_enqueue_commit(vsc, vq, slot, false);
1341
1342 queued++;
1343 bpf_mtap(ifp, m, BPF_D_OUT);
1344 }
1345
1346 if (queued > 0) {
1347 virtio_enqueue_commit(vsc, vq, -1, true);
1348 ifp->if_timer = 5;
1349 }
1350 }
1351
1352 static void
1353 vioif_start_locked(struct ifnet *ifp, struct vioif_txqueue *txq)
1354 {
1355
1356 /*
1357 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
1358 */
1359 vioif_send_common_locked(ifp, txq, false);
1360
1361 }
1362
1363 static void
1364 vioif_start(struct ifnet *ifp)
1365 {
1366 struct vioif_softc *sc = ifp->if_softc;
1367 struct vioif_txqueue *txq = &sc->sc_txq[0];
1368
1369 #ifdef VIOIF_MPSAFE
1370 KASSERT(if_is_mpsafe(ifp));
1371 #endif
1372
1373 mutex_enter(txq->txq_lock);
1374 vioif_start_locked(ifp, txq);
1375 mutex_exit(txq->txq_lock);
1376 }
1377
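/*
 * Map the sending CPU onto one of the active tx queue pairs; with
 * VIOIF_MULTIQ negotiated this spreads if_transmit() traffic across
 * the tx queues.
 */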
1378 static inline int
1379 vioif_select_txqueue(struct ifnet *ifp, struct mbuf *m)
1380 {
1381 struct vioif_softc *sc = ifp->if_softc;
1382 u_int cpuid = cpu_index(curcpu());
1383
1384 return cpuid % sc->sc_act_nvq_pairs;
1385 }
1386
1387 static void
1388 vioif_transmit_locked(struct ifnet *ifp, struct vioif_txqueue *txq)
1389 {
1390
1391 vioif_send_common_locked(ifp, txq, true);
1392 }
1393
1394 static int
1395 vioif_transmit(struct ifnet *ifp, struct mbuf *m)
1396 {
1397 struct vioif_softc *sc = ifp->if_softc;
1398 struct vioif_txqueue *txq;
1399 int qid;
1400
1401 qid = vioif_select_txqueue(ifp, m);
1402 txq = &sc->sc_txq[qid];
1403
1404 if (__predict_false(!pcq_put(txq->txq_intrq, m))) {
1405 m_freem(m);
1406 return ENOBUFS;
1407 }
1408
1409 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1410 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
1411 if (m->m_flags & M_MCAST)
1412 if_statinc_ref(nsr, if_omcasts);
1413 IF_STAT_PUTREF(ifp);
1414
1415 if (mutex_tryenter(txq->txq_lock)) {
1416 vioif_transmit_locked(ifp, txq);
1417 mutex_exit(txq->txq_lock);
1418 }
1419
1420 return 0;
1421 }
1422
1423 static void
1424 vioif_deferred_transmit(void *arg)
1425 {
1426 struct vioif_txqueue *txq = arg;
1427 struct virtio_softc *vsc = txq->txq_vq->vq_owner;
1428 struct vioif_softc *sc = device_private(virtio_child(vsc));
1429 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1430
1431 mutex_enter(txq->txq_lock);
1432 vioif_send_common_locked(ifp, txq, true);
1433 mutex_exit(txq->txq_lock);
1434 }
1435
1436 static int
1437 vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1438 {
1439 int s, r;
1440
1441 s = splnet();
1442
1443 r = ether_ioctl(ifp, cmd, data);
1444 if ((r == 0 && cmd == SIOCSIFFLAGS) ||
1445 (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
1446 if (ifp->if_flags & IFF_RUNNING)
1447 r = vioif_rx_filter(ifp->if_softc);
1448 else
1449 r = 0;
1450 }
1451
1452 splx(s);
1453
1454 return r;
1455 }
1456
1457 static void
1458 vioif_watchdog(struct ifnet *ifp)
1459 {
1460 struct vioif_softc *sc = ifp->if_softc;
1461 int i;
1462
1463 if (ifp->if_flags & IFF_RUNNING) {
1464 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1465 vioif_tx_queue_clear(&sc->sc_txq[i]);
1466 }
1467 }
1468 }
1469
1470 /*
1471 * Receive implementation
1472 */
1473 /* allocate and initialize a mbuf for receive */
1474 static int
1475 vioif_add_rx_mbuf(struct vioif_rxqueue *rxq, int i)
1476 {
1477 struct virtio_softc *vsc = rxq->rxq_vq->vq_owner;
1478 struct mbuf *m;
1479 int r;
1480
1481 MGETHDR(m, M_DONTWAIT, MT_DATA);
1482 if (m == NULL)
1483 return ENOBUFS;
1484 MCLGET(m, M_DONTWAIT);
1485 if ((m->m_flags & M_EXT) == 0) {
1486 m_freem(m);
1487 return ENOBUFS;
1488 }
1489 rxq->rxq_mbufs[i] = m;
1490 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1491 r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
1492 rxq->rxq_dmamaps[i], m, BUS_DMA_READ | BUS_DMA_NOWAIT);
1493 if (r) {
1494 m_freem(m);
1495 rxq->rxq_mbufs[i] = NULL;
1496 return r;
1497 }
1498
1499 return 0;
1500 }
1501
1502 /* free a mbuf for receive */
1503 static void
1504 vioif_free_rx_mbuf(struct vioif_rxqueue *rxq, int i)
1505 {
1506 struct virtio_softc *vsc = rxq->rxq_vq->vq_owner;
1507
1508 bus_dmamap_unload(virtio_dmat(vsc), rxq->rxq_dmamaps[i]);
1509 m_freem(rxq->rxq_mbufs[i]);
1510 rxq->rxq_mbufs[i] = NULL;
1511 }
1512
1513 /* add mbufs for all the empty receive slots */
1514 static void
1515 vioif_populate_rx_mbufs_locked(struct vioif_rxqueue *rxq)
1516 {
1517 struct virtqueue *vq = rxq->rxq_vq;
1518 struct virtio_softc *vsc = vq->vq_owner;
1519 struct vioif_softc *sc = device_private(virtio_child(vsc));
1520 int i, r, ndone = 0;
1521
1522 KASSERT(mutex_owned(rxq->rxq_lock));
1523
1524 if (rxq->rxq_stopping)
1525 return;
1526
1527 for (i = 0; i < vq->vq_num; i++) {
1528 int slot;
1529 r = virtio_enqueue_prep(vsc, vq, &slot);
1530 if (r == EAGAIN)
1531 break;
1532 if (r != 0)
1533 panic("enqueue_prep for rx buffers");
1534 if (rxq->rxq_mbufs[slot] == NULL) {
1535 r = vioif_add_rx_mbuf(rxq, slot);
1536 if (r != 0) {
1537 aprint_error_dev(sc->sc_dev,
1538 "rx mbuf allocation failed, "
1539 "error code %d\n", r);
1540 break;
1541 }
1542 }
1543 r = virtio_enqueue_reserve(vsc, vq, slot,
1544 rxq->rxq_dmamaps[slot]->dm_nsegs + 1);
1545 if (r != 0) {
1546 vioif_free_rx_mbuf(rxq, slot);
1547 break;
1548 }
1549 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot],
1550 0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
1551 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot],
1552 0, MCLBYTES, BUS_DMASYNC_PREREAD);
1553 virtio_enqueue(vsc, vq, slot, rxq->rxq_hdr_dmamaps[slot],
1554 false);
1555 virtio_enqueue(vsc, vq, slot, rxq->rxq_dmamaps[slot], false);
1556 virtio_enqueue_commit(vsc, vq, slot, false);
1557 ndone++;
1558 }
1559 if (ndone > 0)
1560 virtio_enqueue_commit(vsc, vq, -1, true);
1561 }
1562
1563 static void
1564 vioif_rx_queue_clear(struct vioif_rxqueue *rxq)
1565 {
1566 struct virtqueue *vq = rxq->rxq_vq;
1567 struct virtio_softc *vsc = vq->vq_owner;
1568 struct vioif_softc *sc = device_private(virtio_child(vsc));
1569 u_int limit = UINT_MAX;
1570 bool more;
1571
1572 KASSERT(rxq->rxq_stopping);
1573
1574 mutex_enter(rxq->rxq_lock);
1575 for (;;) {
1576 more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
1577 if (more == false)
1578 break;
1579 }
1580 mutex_exit(rxq->rxq_lock);
1581 }
1582
1583 /* dequeue received packets */
1584 static bool
1585 vioif_rx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
1586 struct vioif_rxqueue *rxq, u_int limit)
1587 {
1588 struct virtqueue *vq = rxq->rxq_vq;
1589 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1590 struct mbuf *m;
1591 int slot, len;
1592 bool more = false, dequeued = false;
1593
1594 KASSERT(mutex_owned(rxq->rxq_lock));
1595
1596 if (virtio_vq_is_enqueued(vsc, vq) == false)
1597 return false;
1598
1599 for (;;) {
1600 if (limit-- == 0) {
1601 more = true;
1602 break;
1603 }
1604
1605 if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
1606 break;
1607
1608 dequeued = true;
1609
1610 len -= sizeof(struct virtio_net_hdr);
1611 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot],
1612 0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_POSTREAD);
1613 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot],
1614 0, MCLBYTES, BUS_DMASYNC_POSTREAD);
1615 m = rxq->rxq_mbufs[slot];
1616 KASSERT(m != NULL);
1617 bus_dmamap_unload(virtio_dmat(vsc), rxq->rxq_dmamaps[slot]);
1618 rxq->rxq_mbufs[slot] = NULL;
1619 virtio_dequeue_commit(vsc, vq, slot);
1620 m_set_rcvif(m, ifp);
1621 m->m_len = m->m_pkthdr.len = len;
1622
1623 mutex_exit(rxq->rxq_lock);
1624 if_percpuq_enqueue(ifp->if_percpuq, m);
1625 mutex_enter(rxq->rxq_lock);
1626
1627 if (rxq->rxq_stopping)
1628 break;
1629 }
1630
1631 if (dequeued)
1632 vioif_populate_rx_mbufs_locked(rxq);
1633
1634 return more;
1635 }
1636
1637 /* rx interrupt; call _dequeue above and schedule a softint */
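/*
 * rxq_active is set while dequeue work has been handed off to the
 * softint/workqueue handler and makes further hard interrupts no-ops
 * until vioif_rx_handle() finishes.  When the per-call limit is
 * reached, the remaining work is rescheduled via vioif_rx_sched_handle()
 * with the vq interrupt kept disabled.
 */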
1638 static int
1639 vioif_rx_intr(void *arg)
1640 {
1641 struct vioif_rxqueue *rxq = arg;
1642 struct virtqueue *vq = rxq->rxq_vq;
1643 struct virtio_softc *vsc = vq->vq_owner;
1644 struct vioif_softc *sc = device_private(virtio_child(vsc));
1645 u_int limit;
1646 bool more;
1647
1648 limit = sc->sc_rx_intr_process_limit;
1649
1650 if (atomic_load_relaxed(&rxq->rxq_active) == true)
1651 return 1;
1652
1653 mutex_enter(rxq->rxq_lock);
1654
1655 if (!rxq->rxq_stopping) {
1656 rxq->rxq_workqueue = sc->sc_txrx_workqueue_sysctl;
1657
1658 virtio_stop_vq_intr(vsc, vq);
1659 atomic_store_relaxed(&rxq->rxq_active, true);
1660
1661 more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
1662 if (more) {
1663 vioif_rx_sched_handle(sc, rxq);
1664 } else {
1665 atomic_store_relaxed(&rxq->rxq_active, false);
1666 virtio_start_vq_intr(vsc, vq);
1667 }
1668 }
1669
1670 mutex_exit(rxq->rxq_lock);
1671 return 1;
1672 }
1673
1674 static void
1675 vioif_rx_handle(void *xrxq)
1676 {
1677 struct vioif_rxqueue *rxq = xrxq;
1678 struct virtqueue *vq = rxq->rxq_vq;
1679 struct virtio_softc *vsc = vq->vq_owner;
1680 struct vioif_softc *sc = device_private(virtio_child(vsc));
1681 u_int limit;
1682 bool more;
1683
1684 limit = sc->sc_rx_process_limit;
1685
1686 mutex_enter(rxq->rxq_lock);
1687
1688 if (!rxq->rxq_stopping) {
1689 more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
1690 if (more) {
1691 vioif_rx_sched_handle(sc, rxq);
1692 } else {
1693 atomic_store_relaxed(&rxq->rxq_active, false);
1694 virtio_start_vq_intr(vsc, rxq->rxq_vq);
1695 }
1696 }
1697
1698 mutex_exit(rxq->rxq_lock);
1699 }
1700
1701 static void
1702 vioif_rx_sched_handle(struct vioif_softc *sc, struct vioif_rxqueue *rxq)
1703 {
1704
1705 if (rxq->rxq_workqueue)
1706 vioif_work_add(sc->sc_txrx_workqueue, &rxq->rxq_work);
1707 else
1708 softint_schedule(rxq->rxq_handle_si);
1709 }
1710
1711 /* free all the mbufs; called from if_stop(disable) */
1712 static void
1713 vioif_rx_drain(struct vioif_rxqueue *rxq)
1714 {
1715 struct virtqueue *vq = rxq->rxq_vq;
1716 int i;
1717
1718 for (i = 0; i < vq->vq_num; i++) {
1719 if (rxq->rxq_mbufs[i] == NULL)
1720 continue;
1721 vioif_free_rx_mbuf(rxq, i);
1722 }
1723 }
1724
1725 /*
1726 * Transmission implementation
1727 */
1728 /* actual transmission is done in if_start */
1729 /* tx interrupt; dequeue and free mbufs */
1730 /*
1731 * the tx interrupt is actually disabled; this is expected to be
1732 * called when the tx vq becomes full and from the watchdog
1733 */
1734
1735 static int
1736 vioif_tx_intr(void *arg)
1737 {
1738 struct vioif_txqueue *txq = arg;
1739 struct virtqueue *vq = txq->txq_vq;
1740 struct virtio_softc *vsc = vq->vq_owner;
1741 struct vioif_softc *sc = device_private(virtio_child(vsc));
1742 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1743 bool more;
1744 u_int limit;
1745
1746 limit = sc->sc_tx_intr_process_limit;
1747
1748 if (atomic_load_relaxed(&txq->txq_active) == true)
1749 return 1;
1750
1751 mutex_enter(txq->txq_lock);
1752
1753 if (!txq->txq_stopping) {
1754 txq->txq_workqueue = sc->sc_txrx_workqueue_sysctl;
1755
1756 virtio_stop_vq_intr(vsc, vq);
1757 atomic_store_relaxed(&txq->txq_active, true);
1758
1759 more = vioif_tx_deq_locked(sc, vsc, txq, limit);
1760 if (more) {
1761 vioif_tx_sched_handle(sc, txq);
1762 } else {
1763 atomic_store_relaxed(&txq->txq_active, false);
1764
1765 /* for ALTQ */
1766 if (txq == &sc->sc_txq[0]) {
1767 if_schedule_deferred_start(ifp);
1768 ifp->if_flags &= ~IFF_OACTIVE;
1769 }
1770 softint_schedule(txq->txq_deferred_transmit);
1771
1772 virtio_start_vq_intr(vsc, vq);
1773 }
1774 }
1775
1776 mutex_exit(txq->txq_lock);
1777
1778 return 1;
1779 }
1780
1781 static void
1782 vioif_tx_handle(void *xtxq)
1783 {
1784 struct vioif_txqueue *txq = xtxq;
1785 struct virtqueue *vq = txq->txq_vq;
1786 struct virtio_softc *vsc = vq->vq_owner;
1787 struct vioif_softc *sc = device_private(virtio_child(vsc));
1788 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1789 u_int limit;
1790 bool more;
1791
1792 limit = sc->sc_tx_process_limit;
1793
1794 mutex_enter(txq->txq_lock);
1795
1796 if (!txq->txq_stopping) {
1797 more = vioif_tx_deq_locked(sc, vsc, txq, limit);
1798 if (more) {
1799 vioif_tx_sched_handle(sc, txq);
1800 } else {
1801 atomic_store_relaxed(&txq->txq_active, false);
1802
1803 /* for ALTQ */
1804 if (txq == &sc->sc_txq[0]) {
1805 if_schedule_deferred_start(ifp);
1806 ifp->if_flags &= ~IFF_OACTIVE;
1807 }
1808 softint_schedule(txq->txq_deferred_transmit);
1809
1810 virtio_start_vq_intr(vsc, txq->txq_vq);
1811 }
1812 }
1813
1814 mutex_exit(txq->txq_lock);
1815 }
1816
1817 static void
1818 vioif_tx_sched_handle(struct vioif_softc *sc, struct vioif_txqueue *txq)
1819 {
1820
1821 if (txq->txq_workqueue)
1822 vioif_work_add(sc->sc_txrx_workqueue, &txq->txq_work);
1823 else
1824 softint_schedule(txq->txq_handle_si);
1825 }
1826
1827 static void
1828 vioif_tx_queue_clear(struct vioif_txqueue *txq)
1829 {
1830 struct virtqueue *vq = txq->txq_vq;
1831 struct virtio_softc *vsc = vq->vq_owner;
1832 struct vioif_softc *sc = device_private(virtio_child(vsc));
1833 u_int limit = UINT_MAX;
1834 bool more;
1835
1836 mutex_enter(txq->txq_lock);
1837 for (;;) {
1838 more = vioif_tx_deq_locked(sc, vsc, txq, limit);
1839 if (more == false)
1840 break;
1841 }
1842 mutex_exit(txq->txq_lock);
1843 }
1844
1845 static bool
1846 vioif_tx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
1847 struct vioif_txqueue *txq, u_int limit)
1848 {
1849 struct virtqueue *vq = txq->txq_vq;
1850 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1851 struct mbuf *m;
1852 int slot, len;
1853 bool more = false;
1854
1855 KASSERT(mutex_owned(txq->txq_lock));
1856
1857 if (virtio_vq_is_enqueued(vsc, vq) == false)
1858 return false;
1859
1860 for (;;) {
1861 if (limit-- == 0) {
1862 more = true;
1863 break;
1864 }
1865
1866 if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
1867 break;
1868
1869 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot],
1870 0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_POSTWRITE);
1871 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot],
1872 0, txq->txq_dmamaps[slot]->dm_mapsize,
1873 BUS_DMASYNC_POSTWRITE);
1874 m = txq->txq_mbufs[slot];
1875 bus_dmamap_unload(virtio_dmat(vsc), txq->txq_dmamaps[slot]);
1876 txq->txq_mbufs[slot] = NULL;
1877 virtio_dequeue_commit(vsc, vq, slot);
1878 if_statinc(ifp, if_opackets);
1879 m_freem(m);
1880 }
1881
1882 return more;
1883 }
1884
1885 /* free all the mbufs already put on vq; called from if_stop(disable) */
1886 static void
1887 vioif_tx_drain(struct vioif_txqueue *txq)
1888 {
1889 struct virtqueue *vq = txq->txq_vq;
1890 struct virtio_softc *vsc = vq->vq_owner;
1891 int i;
1892
1893 KASSERT(txq->txq_stopping);
1894
1895 for (i = 0; i < vq->vq_num; i++) {
1896 if (txq->txq_mbufs[i] == NULL)
1897 continue;
1898 bus_dmamap_unload(virtio_dmat(vsc), txq->txq_dmamaps[i]);
1899 m_freem(txq->txq_mbufs[i]);
1900 txq->txq_mbufs[i] = NULL;
1901 }
1902 }
1903
1904 /*
1905 * Control vq
1906 */
1907 /* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
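/*
 * Only one control command can be in flight at a time:
 * vioif_ctrl_acquire() moves ctrlq_inuse from FREE to INUSE under
 * ctrlq_wait_lock, the command is enqueued and the caller sleeps on
 * ctrlq_wait until the ctrl vq interrupt marks the state DONE, and
 * vioif_ctrl_release() puts it back to FREE and wakes the next waiter.
 */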
1908 static void
1909 vioif_ctrl_acquire(struct vioif_softc *sc)
1910 {
1911 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1912
1913 mutex_enter(&ctrlq->ctrlq_wait_lock);
1914 while (ctrlq->ctrlq_inuse != FREE)
1915 cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
1916 ctrlq->ctrlq_inuse = INUSE;
1917 ctrlq->ctrlq_owner = curlwp;
1918 mutex_exit(&ctrlq->ctrlq_wait_lock);
1919 }
1920
1921 static void
1922 vioif_ctrl_release(struct vioif_softc *sc)
1923 {
1924 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1925
1926 KASSERT(ctrlq->ctrlq_inuse != FREE);
1927 KASSERT(ctrlq->ctrlq_owner == curlwp);
1928
1929 mutex_enter(&ctrlq->ctrlq_wait_lock);
1930 ctrlq->ctrlq_inuse = FREE;
1931 ctrlq->ctrlq_owner = NULL;
1932 cv_signal(&ctrlq->ctrlq_wait);
1933 mutex_exit(&ctrlq->ctrlq_wait_lock);
1934 }
1935
1936 static int
1937 vioif_ctrl_load_cmdspec(struct vioif_softc *sc,
1938 struct vioif_ctrl_cmdspec *specs, int nspecs)
1939 {
1940 struct virtio_softc *vsc = sc->sc_virtio;
1941 int i, r, loaded;
1942
1943 loaded = 0;
1944 for (i = 0; i < nspecs; i++) {
1945 r = bus_dmamap_load(virtio_dmat(vsc),
1946 specs[i].dmamap, specs[i].buf, specs[i].bufsize,
1947 NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1948 if (r) {
1949 aprint_error_dev(sc->sc_dev, "control command dmamap"
1950 " load failed, error code %d\n", r);
1951 goto err;
1952 }
1953 loaded++;
1954
1955 }
1956
1957 return r;
1958
1959 err:
1960 for (i = 0; i < loaded; i++) {
1961 bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap);
1962 }
1963
1964 return r;
1965 }
1966
1967 static void
1968 vioif_ctrl_unload_cmdspec(struct vioif_softc *sc,
1969 struct vioif_ctrl_cmdspec *specs, int nspecs)
1970 {
1971 struct virtio_softc *vsc = sc->sc_virtio;
1972 int i;
1973
1974 for (i = 0; i < nspecs; i++) {
1975 bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap);
1976 }
1977 }
1978
1979 static int
1980 vioif_ctrl_send_command(struct vioif_softc *sc, uint8_t class, uint8_t cmd,
1981 struct vioif_ctrl_cmdspec *specs, int nspecs)
1982 {
1983 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1984 struct virtqueue *vq = ctrlq->ctrlq_vq;
1985 struct virtio_softc *vsc = sc->sc_virtio;
1986 int i, r, slot;
1987
1988 ctrlq->ctrlq_cmd->class = class;
1989 ctrlq->ctrlq_cmd->command = cmd;
1990
1991 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap,
1992 0, sizeof(struct virtio_net_ctrl_cmd), BUS_DMASYNC_PREWRITE);
1993 for (i = 0; i < nspecs; i++) {
1994 bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap,
1995 0, specs[i].bufsize, BUS_DMASYNC_PREWRITE);
1996 }
1997 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap,
1998 0, sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_PREREAD);
1999
2000 r = virtio_enqueue_prep(vsc, vq, &slot);
2001 if (r != 0)
2002 panic("%s: control vq busy!?", device_xname(sc->sc_dev));
2003 r = virtio_enqueue_reserve(vsc, vq, slot, nspecs + 2);
2004 if (r != 0)
2005 panic("%s: control vq busy!?", device_xname(sc->sc_dev));
2006 virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_cmd_dmamap, true);
2007 for (i = 0; i < nspecs; i++) {
2008 virtio_enqueue(vsc, vq, slot, specs[i].dmamap, true);
2009 }
2010 virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_status_dmamap, false);
2011 virtio_enqueue_commit(vsc, vq, slot, true);
2012
2013 /* wait for done */
2014 mutex_enter(&ctrlq->ctrlq_wait_lock);
2015 while (ctrlq->ctrlq_inuse != DONE)
2016 cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
2017 mutex_exit(&ctrlq->ctrlq_wait_lock);
2018 	/* already dequeued */
2019
2020 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap, 0,
2021 sizeof(struct virtio_net_ctrl_cmd), BUS_DMASYNC_POSTWRITE);
2022 for (i = 0; i < nspecs; i++) {
2023 bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap, 0,
2024 specs[i].bufsize, BUS_DMASYNC_POSTWRITE);
2025 }
2026 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap, 0,
2027 sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_POSTREAD);
2028
2029 if (ctrlq->ctrlq_status->ack == VIRTIO_NET_OK)
2030 r = 0;
2031 else {
2032 		aprint_error_dev(sc->sc_dev, "control command failed\n");
2033 r = EIO;
2034 }
2035
2036 return r;
2037 }
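
/*
 * For reference, the descriptor chain built above (reserved as nspecs + 2
 * slots) looks like:
 *
 *	[ ctrl_cmd           ]	device-readable  (PREWRITE/POSTWRITE)
 *	[ specs[0..nspecs-1] ]	device-readable
 *	[ ctrl_status        ]	device-writable  (PREREAD/POSTREAD)
 *
 * which is why the status dmamap is the only one enqueued with
 * write == false.
 */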
2038
2039 static int
2040 vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
2041 {
2042 struct virtio_net_ctrl_rx *rx = sc->sc_ctrlq.ctrlq_rx;
2043 struct vioif_ctrl_cmdspec specs[1];
2044 int r;
2045
2046 if (!sc->sc_has_ctrl)
2047 return ENOTSUP;
2048
2049 vioif_ctrl_acquire(sc);
2050
2051 rx->onoff = onoff;
2052 specs[0].dmamap = sc->sc_ctrlq.ctrlq_rx_dmamap;
2053 specs[0].buf = rx;
2054 specs[0].bufsize = sizeof(*rx);
2055
2056 r = vioif_ctrl_send_command(sc, VIRTIO_NET_CTRL_RX, cmd,
2057 specs, __arraycount(specs));
2058
2059 vioif_ctrl_release(sc);
2060 return r;
2061 }
2062
2063 static int
2064 vioif_set_promisc(struct vioif_softc *sc, bool onoff)
2065 {
2066 return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);
2067 }
2068
2069 static int
2070 vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
2071 {
2072 return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);
2073 }
2074
2075 /* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
2076 static int
2077 vioif_set_rx_filter(struct vioif_softc *sc)
2078 {
2079 /* filter already set in ctrlq->ctrlq_mac_tbl */
2080 struct virtio_net_ctrl_mac_tbl *mac_tbl_uc, *mac_tbl_mc;
2081 struct vioif_ctrl_cmdspec specs[2];
2082 int nspecs = __arraycount(specs);
2083 int r;
2084
2085 mac_tbl_uc = sc->sc_ctrlq.ctrlq_mac_tbl_uc;
2086 mac_tbl_mc = sc->sc_ctrlq.ctrlq_mac_tbl_mc;
2087
2088 if (!sc->sc_has_ctrl)
2089 return ENOTSUP;
2090
2091 vioif_ctrl_acquire(sc);
2092
2093 specs[0].dmamap = sc->sc_ctrlq.ctrlq_tbl_uc_dmamap;
2094 specs[0].buf = mac_tbl_uc;
2095 specs[0].bufsize = sizeof(*mac_tbl_uc)
2096 + (ETHER_ADDR_LEN * mac_tbl_uc->nentries);
2097
2098 specs[1].dmamap = sc->sc_ctrlq.ctrlq_tbl_mc_dmamap;
2099 specs[1].buf = mac_tbl_mc;
2100 specs[1].bufsize = sizeof(*mac_tbl_mc)
2101 + (ETHER_ADDR_LEN * mac_tbl_mc->nentries);
2102
2103 r = vioif_ctrl_load_cmdspec(sc, specs, nspecs);
2104 if (r != 0)
2105 goto out;
2106
2107 r = vioif_ctrl_send_command(sc,
2108 VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET,
2109 specs, nspecs);
2110
2111 vioif_ctrl_unload_cmdspec(sc, specs, nspecs);
2112
2113 out:
2114 vioif_ctrl_release(sc);
2115
2116 return r;
2117 }
2118
2119 static int
2120 vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *sc, int nvq_pairs)
2121 {
2122 struct virtio_net_ctrl_mq *mq = sc->sc_ctrlq.ctrlq_mq;
2123 struct vioif_ctrl_cmdspec specs[1];
2124 int r;
2125
2126 if (!sc->sc_has_ctrl)
2127 return ENOTSUP;
2128
2129 if (nvq_pairs <= 1)
2130 return EINVAL;
2131
2132 vioif_ctrl_acquire(sc);
2133
2134 mq->virtqueue_pairs = nvq_pairs;
2135 specs[0].dmamap = sc->sc_ctrlq.ctrlq_mq_dmamap;
2136 specs[0].buf = mq;
2137 specs[0].bufsize = sizeof(*mq);
2138
2139 r = vioif_ctrl_send_command(sc,
2140 VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
2141 specs, __arraycount(specs));
2142
2143 vioif_ctrl_release(sc);
2144
2145 return r;
2146 }
2147
2148 /* ctrl vq interrupt; wake up the command issuer */
2149 static int
2150 vioif_ctrl_intr(void *arg)
2151 {
2152 struct vioif_ctrlqueue *ctrlq = arg;
2153 struct virtqueue *vq = ctrlq->ctrlq_vq;
2154 struct virtio_softc *vsc = vq->vq_owner;
2155 int r, slot;
2156
2157 if (virtio_vq_is_enqueued(vsc, vq) == false)
2158 return 0;
2159
2160 r = virtio_dequeue(vsc, vq, &slot, NULL);
2161 if (r == ENOENT)
2162 return 0;
2163 virtio_dequeue_commit(vsc, vq, slot);
2164
2165 mutex_enter(&ctrlq->ctrlq_wait_lock);
2166 ctrlq->ctrlq_inuse = DONE;
2167 cv_signal(&ctrlq->ctrlq_wait);
2168 mutex_exit(&ctrlq->ctrlq_wait_lock);
2169
2170 return 1;
2171 }
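
/*
 * ctrlq_inuse therefore moves FREE -> INUSE in vioif_ctrl_acquire(),
 * INUSE -> DONE here in interrupt context (waking the issuer sleeping in
 * vioif_ctrl_send_command()), and DONE -> FREE in vioif_ctrl_release():
 *
 *	issuer					interrupt handler
 *	acquire: FREE -> INUSE
 *	enqueue command, cv_wait for DONE
 *						dequeue, INUSE -> DONE,
 *						cv_signal
 *	read status, release: DONE -> FREE
 */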
2172
2173 /*
2174  * If IFF_PROMISC is requested, enable promiscuous mode.
2175  * If the multicast filter is small enough (<= MAXENTRIES), program
2176  * the rx filter; if a larger multicast filter exists, use ALLMULTI
2177  * instead.
2178  *
2179  * If programming the rx filter fails, fall back to ALLMULTI;
2180  * if ALLMULTI fails, fall back to PROMISC.
2181  */
2182 static int
2183 vioif_rx_filter(struct vioif_softc *sc)
2184 {
2185 struct ethercom *ec = &sc->sc_ethercom;
2186 struct ifnet *ifp = &ec->ec_if;
2187 struct ether_multi *enm;
2188 struct ether_multistep step;
2189 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
2190 int nentries;
2191 int promisc = 0, allmulti = 0, rxfilter = 0;
2192 int r;
2193
2194 if (!sc->sc_has_ctrl) { /* no ctrl vq; always promisc */
2195 ifp->if_flags |= IFF_PROMISC;
2196 return 0;
2197 }
2198
2199 if (ifp->if_flags & IFF_PROMISC) {
2200 promisc = 1;
2201 goto set;
2202 }
2203
2204 nentries = -1;
2205 ETHER_LOCK(ec);
2206 ETHER_FIRST_MULTI(step, ec, enm);
2207 while (nentries++, enm != NULL) {
2208 if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
2209 allmulti = 1;
2210 goto set_unlock;
2211 }
2212 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2213 allmulti = 1;
2214 goto set_unlock;
2215 }
2216 memcpy(ctrlq->ctrlq_mac_tbl_mc->macs[nentries],
2217 enm->enm_addrlo, ETHER_ADDR_LEN);
2218 ETHER_NEXT_MULTI(step, enm);
2219 }
2220 rxfilter = 1;
2221
2222 set_unlock:
2223 ETHER_UNLOCK(ec);
2224
2225 set:
2226 if (rxfilter) {
2227 ctrlq->ctrlq_mac_tbl_uc->nentries = 0;
2228 ctrlq->ctrlq_mac_tbl_mc->nentries = nentries;
2229 r = vioif_set_rx_filter(sc);
2230 if (r != 0) {
2231 rxfilter = 0;
2232 allmulti = 1; /* fallback */
2233 }
2234 } else {
2235 /* remove rx filter */
2236 ctrlq->ctrlq_mac_tbl_uc->nentries = 0;
2237 ctrlq->ctrlq_mac_tbl_mc->nentries = 0;
2238 r = vioif_set_rx_filter(sc);
2239 /* what to do on failure? */
2240 }
2241 if (allmulti) {
2242 r = vioif_set_allmulti(sc, true);
2243 if (r != 0) {
2244 allmulti = 0;
2245 promisc = 1; /* fallback */
2246 }
2247 } else {
2248 r = vioif_set_allmulti(sc, false);
2249 /* what to do on failure? */
2250 }
2251 if (promisc) {
2252 r = vioif_set_promisc(sc, true);
2253 } else {
2254 r = vioif_set_promisc(sc, false);
2255 }
2256
2257 return r;
2258 }
2259
2260 static bool
2261 vioif_is_link_up(struct vioif_softc *sc)
2262 {
2263 struct virtio_softc *vsc = sc->sc_virtio;
2264 uint16_t status;
2265
2266 if (virtio_features(vsc) & VIRTIO_NET_F_STATUS)
2267 status = virtio_read_device_config_2(vsc,
2268 VIRTIO_NET_CONFIG_STATUS);
2269 else
2270 status = VIRTIO_NET_S_LINK_UP;
2271
2272 return ((status & VIRTIO_NET_S_LINK_UP) != 0);
2273 }
2274
2275 /* re-read the link status and propagate any change to the stack */
2276 static void
2277 vioif_update_link_status(struct vioif_softc *sc)
2278 {
2279 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2280 struct vioif_txqueue *txq;
2281 bool active, changed;
2282 int link, i;
2283
2284 mutex_enter(&sc->sc_lock);
2285
2286 active = vioif_is_link_up(sc);
2287 changed = false;
2288
2289 if (active) {
2290 if (!sc->sc_link_active)
2291 changed = true;
2292
2293 link = LINK_STATE_UP;
2294 sc->sc_link_active = true;
2295 } else {
2296 if (sc->sc_link_active)
2297 changed = true;
2298
2299 link = LINK_STATE_DOWN;
2300 sc->sc_link_active = false;
2301 }
2302
2303 if (changed) {
2304 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
2305 txq = &sc->sc_txq[i];
2306
2307 mutex_enter(txq->txq_lock);
2308 txq->txq_link_active = sc->sc_link_active;
2309 mutex_exit(txq->txq_lock);
2310 }
2311
2312 if_link_state_change(ifp, link);
2313 }
2314
2315 mutex_exit(&sc->sc_lock);
2316 }
2317
2318 static int
2319 vioif_config_change(struct virtio_softc *vsc)
2320 {
2321 struct vioif_softc *sc = device_private(virtio_child(vsc));
2322
2323 softint_schedule(sc->sc_ctl_softint);
2324 return 0;
2325 }
2326
2327 static void
2328 vioif_ctl_softint(void *arg)
2329 {
2330 struct vioif_softc *sc = arg;
2331 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2332
2333 vioif_update_link_status(sc);
2334 vioif_start(ifp);
2335 }
2336
2337 static struct workqueue *
2338 vioif_workq_create(const char *name, pri_t prio, int ipl, int flags)
2339 {
2340 struct workqueue *wq;
2341 int error;
2342
2343 error = workqueue_create(&wq, name, vioif_workq_work, NULL,
2344 prio, ipl, flags);
2345
2346 if (error)
2347 return NULL;
2348
2349 return wq;
2350 }
2351
2352 static void
2353 vioif_workq_destroy(struct workqueue *wq)
2354 {
2355
2356 workqueue_destroy(wq);
2357 }
2358
2359 static void
2360 vioif_workq_work(struct work *wk, void *context)
2361 {
2362 struct vioif_work *work;
2363
2364 work = container_of(wk, struct vioif_work, cookie);
2365
2366 atomic_store_relaxed(&work->added, 0);
2367 work->func(work->arg);
2368 }
2369
2370 static void
2371 vioif_work_set(struct vioif_work *work, void (*func)(void *), void *arg)
2372 {
2373
2374 memset(work, 0, sizeof(*work));
2375 work->func = func;
2376 work->arg = arg;
2377 }
2378
2379 static void
2380 vioif_work_add(struct workqueue *wq, struct vioif_work *work)
2381 {
2382
2383 if (atomic_load_relaxed(&work->added) != 0)
2384 return;
2385
2386 atomic_store_relaxed(&work->added, 1);
2387 kpreempt_disable();
2388 workqueue_enqueue(wq, &work->cookie, NULL);
2389 kpreempt_enable();
2390 }
2391
2392 static void
2393 vioif_work_wait(struct workqueue *wq, struct vioif_work *work)
2394 {
2395
2396 workqueue_wait(wq, &work->cookie);
2397 }
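
/*
 * The 'added' flag makes vioif_work_add() idempotent: a work item is
 * enqueued at most once until vioif_workq_work() clears the flag and runs
 * the handler.  A minimal usage sketch (the wq/work field names are
 * illustrative, not necessarily the driver's):
 *
 *	vioif_work_set(&q->q_work, handler, q);		at setup time
 *	vioif_work_add(sc->sc_wq, &q->q_work);		from interrupt context
 *	vioif_work_wait(sc->sc_wq, &q->q_work);		before stop/detach
 */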
2398
2399 static int
2400 vioif_setup_sysctl(struct vioif_softc *sc)
2401 {
2402 const char *devname;
2403 struct sysctllog **log;
2404 const struct sysctlnode *rnode, *rxnode, *txnode;
2405 int error;
2406
2407 log = &sc->sc_sysctllog;
2408 devname = device_xname(sc->sc_dev);
2409
2410 error = sysctl_createv(log, 0, NULL, &rnode,
2411 0, CTLTYPE_NODE, devname,
2412 SYSCTL_DESCR("virtio-net information and settings"),
2413 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
2414 if (error)
2415 goto out;
2416
2417 error = sysctl_createv(log, 0, &rnode, NULL,
2418 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
2419 SYSCTL_DESCR("Use workqueue for packet processing"),
2420 NULL, 0, &sc->sc_txrx_workqueue_sysctl, 0, CTL_CREATE, CTL_EOL);
2421 if (error)
2422 goto out;
2423
2424 error = sysctl_createv(log, 0, &rnode, &rxnode,
2425 0, CTLTYPE_NODE, "rx",
2426 SYSCTL_DESCR("virtio-net information and settings for Rx"),
2427 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
2428 if (error)
2429 goto out;
2430
2431 error = sysctl_createv(log, 0, &rxnode, NULL,
2432 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
2433 SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"),
2434 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
2435 if (error)
2436 goto out;
2437
2438 error = sysctl_createv(log, 0, &rxnode, NULL,
2439 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
2440 SYSCTL_DESCR("max number of Rx packets to process for deferred processing"),
2441 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
2442 if (error)
2443 goto out;
2444
2445 error = sysctl_createv(log, 0, &rnode, &txnode,
2446 0, CTLTYPE_NODE, "tx",
2447 SYSCTL_DESCR("virtio-net information and settings for Tx"),
2448 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
2449 if (error)
2450 goto out;
2451
2452 error = sysctl_createv(log, 0, &txnode, NULL,
2453 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
2454 SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"),
2455 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
2456 if (error)
2457 goto out;
2458
2459 error = sysctl_createv(log, 0, &txnode, NULL,
2460 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
2461 SYSCTL_DESCR("max number of Tx packets to process for deferred processing"),
2462 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
2463
2464 out:
2465 if (error)
2466 sysctl_teardown(log);
2467
2468 return error;
2469 }
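
/*
 * The nodes created above live under hw.<devname>; for the first device
 * the tree would look roughly like (values are placeholders):
 *
 *	hw.vioif0.txrx_workqueue
 *	hw.vioif0.rx.intr_process_limit
 *	hw.vioif0.rx.process_limit
 *	hw.vioif0.tx.intr_process_limit
 *	hw.vioif0.tx.process_limit
 *
 * All of them are CTLFLAG_READWRITE, so they can be tuned at run time,
 * e.g. "sysctl -w hw.vioif0.rx.process_limit=<n>".
 */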
2470
2471 MODULE(MODULE_CLASS_DRIVER, if_vioif, "virtio");
2472
2473 #ifdef _MODULE
2474 #include "ioconf.c"
2475 #endif
2476
2477 static int
2478 if_vioif_modcmd(modcmd_t cmd, void *opaque)
2479 {
2480 int error = 0;
2481
2482 #ifdef _MODULE
2483 switch (cmd) {
2484 case MODULE_CMD_INIT:
2485 error = config_init_component(cfdriver_ioconf_if_vioif,
2486 cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
2487 break;
2488 case MODULE_CMD_FINI:
2489 error = config_fini_component(cfdriver_ioconf_if_vioif,
2490 cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
2491 break;
2492 default:
2493 error = ENOTTY;
2494 break;
2495 }
2496 #endif
2497
2498 return error;
2499 }
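
/*
 * When built as a module, the ioconf glue above attaches the driver to
 * autoconf at load time; loading and unloading would use the standard
 * tools, e.g. "modload if_vioif" and "modunload if_vioif" (the module name
 * comes from the MODULE() declaration above).
 */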
2500