1 /* $NetBSD: if_vioif.c,v 1.63 2020/05/25 09:41:27 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2010 Minoura Makoto.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.63 2020/05/25 09:41:27 yamaguchi Exp $");
30
31 #ifdef _KERNEL_OPT
32 #include "opt_net_mpsafe.h"
33 #endif
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/atomic.h>
39 #include <sys/bus.h>
40 #include <sys/condvar.h>
41 #include <sys/device.h>
42 #include <sys/evcnt.h>
43 #include <sys/intr.h>
44 #include <sys/kmem.h>
45 #include <sys/mbuf.h>
46 #include <sys/mutex.h>
47 #include <sys/sockio.h>
48 #include <sys/cpu.h>
49 #include <sys/module.h>
50 #include <sys/pcq.h>
51 #include <sys/workqueue.h>
52
53 #include <dev/pci/virtioreg.h>
54 #include <dev/pci/virtiovar.h>
55
56 #include <net/if.h>
57 #include <net/if_media.h>
58 #include <net/if_ether.h>
59
60 #include <net/bpf.h>
61
62 #include "ioconf.h"
63
64 #ifdef NET_MPSAFE
65 #define VIOIF_MPSAFE 1
66 #define VIOIF_MULTIQ 1
67 #endif
68
69 /*
70 * if_vioifreg.h:
71 */
72 /* Configuration registers */
73 #define VIRTIO_NET_CONFIG_MAC 0 /* 8bit x 6byte */
74 #define VIRTIO_NET_CONFIG_STATUS 6 /* 16bit */
75 #define VIRTIO_NET_CONFIG_MAX_VQ_PAIRS 8 /* 16bit */
76
77 /* Feature bits */
78 #define VIRTIO_NET_F_CSUM __BIT(0)
79 #define VIRTIO_NET_F_GUEST_CSUM __BIT(1)
80 #define VIRTIO_NET_F_MAC __BIT(5)
81 #define VIRTIO_NET_F_GSO __BIT(6)
82 #define VIRTIO_NET_F_GUEST_TSO4 __BIT(7)
83 #define VIRTIO_NET_F_GUEST_TSO6 __BIT(8)
84 #define VIRTIO_NET_F_GUEST_ECN __BIT(9)
85 #define VIRTIO_NET_F_GUEST_UFO __BIT(10)
86 #define VIRTIO_NET_F_HOST_TSO4 __BIT(11)
87 #define VIRTIO_NET_F_HOST_TSO6 __BIT(12)
88 #define VIRTIO_NET_F_HOST_ECN __BIT(13)
89 #define VIRTIO_NET_F_HOST_UFO __BIT(14)
90 #define VIRTIO_NET_F_MRG_RXBUF __BIT(15)
91 #define VIRTIO_NET_F_STATUS __BIT(16)
92 #define VIRTIO_NET_F_CTRL_VQ __BIT(17)
93 #define VIRTIO_NET_F_CTRL_RX __BIT(18)
94 #define VIRTIO_NET_F_CTRL_VLAN __BIT(19)
95 #define VIRTIO_NET_F_CTRL_RX_EXTRA __BIT(20)
96 #define VIRTIO_NET_F_GUEST_ANNOUNCE __BIT(21)
97 #define VIRTIO_NET_F_MQ __BIT(22)
98
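/*
 * Bit-description string used when the negotiated features are printed:
 * each "\xNN" byte is the 1-origin bit position of the feature named by the
 * string that follows it (e.g. "\x17" is bit 22, VIRTIO_NET_F_MQ).
 */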
99 #define VIRTIO_NET_FLAG_BITS \
100 VIRTIO_COMMON_FLAG_BITS \
101 "\x17""MQ" \
102 "\x16""GUEST_ANNOUNCE" \
103 "\x15""CTRL_RX_EXTRA" \
104 "\x14""CTRL_VLAN" \
105 "\x13""CTRL_RX" \
106 "\x12""CTRL_VQ" \
107 "\x11""STATUS" \
108 "\x10""MRG_RXBUF" \
109 "\x0f""HOST_UFO" \
110 "\x0e""HOST_ECN" \
111 "\x0d""HOST_TSO6" \
112 "\x0c""HOST_TSO4" \
113 "\x0b""GUEST_UFO" \
114 "\x0a""GUEST_ECN" \
115 "\x09""GUEST_TSO6" \
116 "\x08""GUEST_TSO4" \
117 "\x07""GSO" \
118 "\x06""MAC" \
119 "\x02""GUEST_CSUM" \
120 "\x01""CSUM"
121
122 /* Status */
123 #define VIRTIO_NET_S_LINK_UP 1
124
125 /* Packet header structure */
126 struct virtio_net_hdr {
127 uint8_t flags;
128 uint8_t gso_type;
129 uint16_t hdr_len;
130 uint16_t gso_size;
131 uint16_t csum_start;
132 uint16_t csum_offset;
133 #if 0
134 uint16_t num_buffers; /* if VIRTIO_NET_F_MRG_RXBUF enabled */
135 #endif
136 } __packed;
137
138 #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* flags */
139 #define VIRTIO_NET_HDR_GSO_NONE 0 /* gso_type */
140 #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* gso_type */
141 #define VIRTIO_NET_HDR_GSO_UDP 3 /* gso_type */
142 #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* gso_type */
143 #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* gso_type, |'ed */
144
145 #define VIRTIO_NET_MAX_GSO_LEN (65536+ETHER_HDR_LEN)
146
147 /* Control virtqueue */
148 struct virtio_net_ctrl_cmd {
149 uint8_t class;
150 uint8_t command;
151 } __packed;
152 #define VIRTIO_NET_CTRL_RX 0
153 # define VIRTIO_NET_CTRL_RX_PROMISC 0
154 # define VIRTIO_NET_CTRL_RX_ALLMULTI 1
155
156 #define VIRTIO_NET_CTRL_MAC 1
157 # define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
158
159 #define VIRTIO_NET_CTRL_VLAN 2
160 # define VIRTIO_NET_CTRL_VLAN_ADD 0
161 # define VIRTIO_NET_CTRL_VLAN_DEL 1
162
163 #define VIRTIO_NET_CTRL_MQ 4
164 # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
165 # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
166 # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
167
168 struct virtio_net_ctrl_status {
169 uint8_t ack;
170 } __packed;
171 #define VIRTIO_NET_OK 0
172 #define VIRTIO_NET_ERR 1
173
174 struct virtio_net_ctrl_rx {
175 uint8_t onoff;
176 } __packed;
177
178 struct virtio_net_ctrl_mac_tbl {
179 uint32_t nentries;
180 uint8_t macs[][ETHER_ADDR_LEN];
181 } __packed;
182
183 struct virtio_net_ctrl_vlan {
184 uint16_t id;
185 } __packed;
186
187 struct virtio_net_ctrl_mq {
188 uint16_t virtqueue_pairs;
189 } __packed;
190
191 struct vioif_ctrl_cmdspec {
192 bus_dmamap_t dmamap;
193 void *buf;
194 bus_size_t bufsize;
195 };
196
197 /*
198 * if_vioifvar.h:
199 */
200
201 /*
202 * Locking notes:
203 * + a field in vioif_txqueue is protected by txq_lock (a spin mutex), and
204 * a field in vioif_rxqueue is protected by rxq_lock (a spin mutex).
205 * - no more than one of these locks may be held at once
206 * + ctrlq_inuse is protected by ctrlq_wait_lock.
207 * - other fields in vioif_ctrlqueue are protected by ctrlq_inuse
208 * - txq_lock or rxq_lock cannot be held along with ctrlq_wait_lock
209 * + fields in vioif_softc except queues are protected by
210 *    sc->sc_lock (an adaptive mutex)
211 *      - this lock is acquired before any of the other locks
212 */
213
214 struct vioif_work {
215 struct work cookie;
216 void (*func)(void *);
217 void *arg;
218 unsigned int added;
219 };
220
221 struct vioif_txqueue {
222 kmutex_t *txq_lock; /* lock for tx operations */
223
224 struct virtqueue *txq_vq;
225 bool txq_stopping;
226 bool txq_link_active;
227 pcq_t *txq_intrq;
228
229 struct virtio_net_hdr *txq_hdrs;
230 bus_dmamap_t *txq_hdr_dmamaps;
231
232 struct mbuf **txq_mbufs;
233 bus_dmamap_t *txq_dmamaps;
234
235 void *txq_deferred_transmit;
236 void *txq_handle_si;
237 struct vioif_work txq_work;
238 bool txq_workqueue;
239 bool txq_active;
240
241 struct evcnt txq_defrag_failed;
242 struct evcnt txq_mbuf_load_failed;
243 struct evcnt txq_enqueue_reserve_failed;
244 };
245
246 struct vioif_rxqueue {
247 kmutex_t *rxq_lock; /* lock for rx operations */
248
249 struct virtqueue *rxq_vq;
250 bool rxq_stopping;
251
252 struct virtio_net_hdr *rxq_hdrs;
253 bus_dmamap_t *rxq_hdr_dmamaps;
254
255 struct mbuf **rxq_mbufs;
256 bus_dmamap_t *rxq_dmamaps;
257
258 void *rxq_handle_si;
259 struct vioif_work rxq_work;
260 bool rxq_workqueue;
261 bool rxq_active;
262
263 struct evcnt rxq_mbuf_add_failed;
264 };
265
266 struct vioif_ctrlqueue {
267 struct virtqueue *ctrlq_vq;
268 enum {
269 FREE, INUSE, DONE
270 } ctrlq_inuse;
271 kcondvar_t ctrlq_wait;
272 kmutex_t ctrlq_wait_lock;
273 struct lwp *ctrlq_owner;
274
275 struct virtio_net_ctrl_cmd *ctrlq_cmd;
276 struct virtio_net_ctrl_status *ctrlq_status;
277 struct virtio_net_ctrl_rx *ctrlq_rx;
278 struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_uc;
279 struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_mc;
280 struct virtio_net_ctrl_mq *ctrlq_mq;
281
282 bus_dmamap_t ctrlq_cmd_dmamap;
283 bus_dmamap_t ctrlq_status_dmamap;
284 bus_dmamap_t ctrlq_rx_dmamap;
285 bus_dmamap_t ctrlq_tbl_uc_dmamap;
286 bus_dmamap_t ctrlq_tbl_mc_dmamap;
287 bus_dmamap_t ctrlq_mq_dmamap;
288
289 struct evcnt ctrlq_cmd_load_failed;
290 struct evcnt ctrlq_cmd_failed;
291 };
292
293 struct vioif_softc {
294 device_t sc_dev;
295 kmutex_t sc_lock;
296 struct sysctllog *sc_sysctllog;
297
298 struct virtio_softc *sc_virtio;
299 struct virtqueue *sc_vqs;
300
301 int sc_max_nvq_pairs;
302 int sc_req_nvq_pairs;
303 int sc_act_nvq_pairs;
304
305 uint8_t sc_mac[ETHER_ADDR_LEN];
306 struct ethercom sc_ethercom;
307 short sc_deferred_init_done;
308 bool sc_link_active;
309
310 struct vioif_txqueue *sc_txq;
311 struct vioif_rxqueue *sc_rxq;
312
313 bool sc_has_ctrl;
314 struct vioif_ctrlqueue sc_ctrlq;
315
316 bus_dma_segment_t sc_hdr_segs[1];
317 void *sc_dmamem;
318 void *sc_kmem;
319
320 void *sc_ctl_softint;
321
322 struct workqueue *sc_txrx_workqueue;
323 bool sc_txrx_workqueue_sysctl;
324 u_int sc_tx_intr_process_limit;
325 u_int sc_tx_process_limit;
326 u_int sc_rx_intr_process_limit;
327 u_int sc_rx_process_limit;
328 };
329 #define VIRTIO_NET_TX_MAXNSEGS (16) /* XXX */
330 #define VIRTIO_NET_CTRL_MAC_MAXENTRIES (64) /* XXX */
331
332 #define VIOIF_TX_INTR_PROCESS_LIMIT 256
333 #define VIOIF_TX_PROCESS_LIMIT 256
334 #define VIOIF_RX_INTR_PROCESS_LIMIT 0U
335 #define VIOIF_RX_PROCESS_LIMIT 256
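/*
 * Dequeue budgets: the *_INTR_PROCESS_LIMIT values bound how many slots are
 * handled directly in the interrupt handler, the others bound one
 * softint/workqueue pass.  A limit of 0 (as for rx) makes the interrupt
 * handler defer all dequeue work to softint/workqueue context.
 */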
336
337 #define VIOIF_WORKQUEUE_PRI PRI_SOFTNET
338
339 /* cfattach interface functions */
340 static int vioif_match(device_t, cfdata_t, void *);
341 static void vioif_attach(device_t, device_t, void *);
342 static void vioif_deferred_init(device_t);
343 static int vioif_finalize_teardown(device_t);
344
345 /* ifnet interface functions */
346 static int vioif_init(struct ifnet *);
347 static void vioif_stop(struct ifnet *, int);
348 static void vioif_start(struct ifnet *);
349 static void vioif_start_locked(struct ifnet *, struct vioif_txqueue *);
350 static int vioif_transmit(struct ifnet *, struct mbuf *);
351 static void vioif_transmit_locked(struct ifnet *, struct vioif_txqueue *);
352 static int vioif_ioctl(struct ifnet *, u_long, void *);
353 static void vioif_watchdog(struct ifnet *);
354
355 /* rx */
356 static int vioif_add_rx_mbuf(struct vioif_rxqueue *, int);
357 static void vioif_free_rx_mbuf(struct vioif_rxqueue *, int);
358 static void vioif_populate_rx_mbufs_locked(struct vioif_rxqueue *);
359 static void vioif_rx_queue_clear(struct vioif_rxqueue *);
360 static bool vioif_rx_deq_locked(struct vioif_softc *, struct virtio_softc *,
361 struct vioif_rxqueue *, u_int);
362 static int vioif_rx_intr(void *);
363 static void vioif_rx_handle(void *);
364 static void vioif_rx_sched_handle(struct vioif_softc *,
365 struct vioif_rxqueue *);
366 static void vioif_rx_drain(struct vioif_rxqueue *);
367
368 /* tx */
369 static int vioif_tx_intr(void *);
370 static void vioif_tx_handle(void *);
371 static void vioif_tx_sched_handle(struct vioif_softc *,
372 struct vioif_txqueue *);
373 static void vioif_tx_queue_clear(struct vioif_txqueue *);
374 static bool vioif_tx_deq_locked(struct vioif_softc *, struct virtio_softc *,
375 struct vioif_txqueue *, u_int);
376 static void vioif_tx_drain(struct vioif_txqueue *);
377 static void vioif_deferred_transmit(void *);
378
379 /* workqueue */
380 static struct workqueue*
381 vioif_workq_create(const char *, pri_t, int, int);
382 static void vioif_workq_destroy(struct workqueue *);
383 static void vioif_workq_work(struct work *, void *);
384 static void vioif_work_set(struct vioif_work *, void(*)(void *), void *);
385 static void vioif_work_add(struct workqueue *, struct vioif_work *);
386 static void vioif_work_wait(struct workqueue *, struct vioif_work *);
387
388 /* other control */
389 static bool vioif_is_link_up(struct vioif_softc *);
390 static void vioif_update_link_status(struct vioif_softc *);
391 static int vioif_ctrl_rx(struct vioif_softc *, int, bool);
392 static int vioif_set_promisc(struct vioif_softc *, bool);
393 static int vioif_set_allmulti(struct vioif_softc *, bool);
394 static int vioif_set_rx_filter(struct vioif_softc *);
395 static int vioif_rx_filter(struct vioif_softc *);
396 static int vioif_ctrl_intr(void *);
397 static int vioif_config_change(struct virtio_softc *);
398 static void vioif_ctl_softint(void *);
399 static int vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *, int);
400 static void vioif_enable_interrupt_vqpairs(struct vioif_softc *);
401 static void vioif_disable_interrupt_vqpairs(struct vioif_softc *);
402 static int vioif_setup_sysctl(struct vioif_softc *);
403 static void vioif_setup_stats(struct vioif_softc *);
404
405 CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
406 vioif_match, vioif_attach, NULL, NULL);
407
408 static int
409 vioif_match(device_t parent, cfdata_t match, void *aux)
410 {
411 struct virtio_attach_args *va = aux;
412
413 if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
414 return 1;
415
416 return 0;
417 }
418
419 static int
420 vioif_dmamap_create(struct vioif_softc *sc, bus_dmamap_t *map,
421 bus_size_t size, int nsegs, const char *usage)
422 {
423 int r;
424
425 r = bus_dmamap_create(virtio_dmat(sc->sc_virtio), size,
426 nsegs, size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, map);
427
428 if (r != 0) {
429 aprint_error_dev(sc->sc_dev, "%s dmamap creation failed, "
430 "error code %d\n", usage, r);
431 }
432
433 return r;
434 }
435
436 static void
437 vioif_dmamap_destroy(struct vioif_softc *sc, bus_dmamap_t *map)
438 {
439
440 if (*map) {
441 bus_dmamap_destroy(virtio_dmat(sc->sc_virtio), *map);
442 *map = NULL;
443 }
444 }
445
446 static int
447 vioif_dmamap_create_load(struct vioif_softc *sc, bus_dmamap_t *map,
448 void *buf, bus_size_t size, int nsegs, int rw, const char *usage)
449 {
450 int r;
451
452 r = vioif_dmamap_create(sc, map, size, nsegs, usage);
453 if (r != 0)
454 return 1;
455
456 r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), *map, buf,
457 size, NULL, rw | BUS_DMA_NOWAIT);
458 if (r != 0) {
459 vioif_dmamap_destroy(sc, map);
460 aprint_error_dev(sc->sc_dev, "%s dmamap load failed. "
461 "error code %d\n", usage, r);
462 }
463
464 return r;
465 }
466
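/*
 * Carve the next "size" bytes out of a region allocated up front
 * (sc_dmamem or sc_kmem); *p is a simple bump pointer advanced on each call.
 */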
467 static void *
468 vioif_assign_mem(intptr_t *p, size_t size)
469 {
470 intptr_t rv;
471
472 rv = *p;
473 *p += size;
474
475 return (void *)rv;
476 }
477
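/*
 * sc_vqs[] is laid out as rx0, tx0, rx1, tx1, ..., with the control
 * virtqueue, if negotiated, placed last.
 */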
478 static void
479 vioif_alloc_queues(struct vioif_softc *sc)
480 {
481 int nvq_pairs = sc->sc_max_nvq_pairs;
482 int nvqs = nvq_pairs * 2;
483 int i;
484
485 KASSERT(nvq_pairs <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX);
486
487 sc->sc_rxq = kmem_zalloc(sizeof(sc->sc_rxq[0]) * nvq_pairs,
488 KM_SLEEP);
489 sc->sc_txq = kmem_zalloc(sizeof(sc->sc_txq[0]) * nvq_pairs,
490 KM_SLEEP);
491
492 if (sc->sc_has_ctrl)
493 nvqs++;
494
495 sc->sc_vqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * nvqs, KM_SLEEP);
496 nvqs = 0;
497 for (i = 0; i < nvq_pairs; i++) {
498 sc->sc_rxq[i].rxq_vq = &sc->sc_vqs[nvqs++];
499 sc->sc_txq[i].txq_vq = &sc->sc_vqs[nvqs++];
500 }
501
502 if (sc->sc_has_ctrl)
503 sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[nvqs++];
504 }
505
506 static void
507 vioif_free_queues(struct vioif_softc *sc)
508 {
509 int nvq_pairs = sc->sc_max_nvq_pairs;
510 int nvqs = nvq_pairs * 2;
511
512 if (sc->sc_ctrlq.ctrlq_vq)
513 nvqs++;
514
515 if (sc->sc_txq) {
516 kmem_free(sc->sc_txq, sizeof(sc->sc_txq[0]) * nvq_pairs);
517 sc->sc_txq = NULL;
518 }
519
520 if (sc->sc_rxq) {
521 kmem_free(sc->sc_rxq, sizeof(sc->sc_rxq[0]) * nvq_pairs);
522 sc->sc_rxq = NULL;
523 }
524
525 if (sc->sc_vqs) {
526 kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs);
527 sc->sc_vqs = NULL;
528 }
529 }
530
531 /* allocate memory */
532 /*
533 * dma memory is used for:
534 * rxq_hdrs[slot]: metadata array for received frames (READ)
535 * txq_hdrs[slot]: metadata array for frames to be sent (WRITE)
536 * ctrlq_cmd: command to be sent via ctrl vq (WRITE)
537 * ctrlq_status: return value for a command via ctrl vq (READ)
538 * ctrlq_rx: parameter for a VIRTIO_NET_CTRL_RX class command
539 * (WRITE)
540 * ctrlq_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
541 * class command (WRITE)
542 * ctrlq_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
543 * class command (WRITE)
544 * Only one instance of each ctrlq_* structure is allocated; they are
545 * protected by the ctrlq_inuse variable and the ctrlq_wait condvar.
546 */
547 /*
548 * dynamically allocated memory is used for:
549 * rxq_hdr_dmamaps[slot]: bus_dmamap_t array for rxq_hdrs[slot]
550 * txq_hdr_dmamaps[slot]: bus_dmamap_t array for txq_hdrs[slot]
551 * rxq_dmamaps[slot]: bus_dmamap_t array for received payload
552 * txq_dmamaps[slot]: bus_dmamap_t array for sent payload
553 * rxq_mbufs[slot]: mbuf pointer array for received frames
554 * txq_mbufs[slot]: mbuf pointer array for sent frames
555 */
556 static int
557 vioif_alloc_mems(struct vioif_softc *sc)
558 {
559 struct virtio_softc *vsc = sc->sc_virtio;
560 struct vioif_txqueue *txq;
561 struct vioif_rxqueue *rxq;
562 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
563 int allocsize, allocsize2, r, rsegs, i, qid;
564 void *vaddr;
565 intptr_t p;
566
567 allocsize = 0;
568 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
569 rxq = &sc->sc_rxq[qid];
570 txq = &sc->sc_txq[qid];
571
572 allocsize +=
573 sizeof(struct virtio_net_hdr) * rxq->rxq_vq->vq_num;
574 allocsize +=
575 sizeof(struct virtio_net_hdr) * txq->txq_vq->vq_num;
576 }
577 if (sc->sc_has_ctrl) {
578 allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
579 allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
580 allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
581 allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
582 + sizeof(struct virtio_net_ctrl_mac_tbl)
583 + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
584 allocsize += sizeof(struct virtio_net_ctrl_mq) * 1;
585 }
586 r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
587 &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
588 if (r != 0) {
589 aprint_error_dev(sc->sc_dev,
590 "DMA memory allocation failed, size %d, "
591 "error code %d\n", allocsize, r);
592 goto err_none;
593 }
594 r = bus_dmamem_map(virtio_dmat(vsc),
595 &sc->sc_hdr_segs[0], 1, allocsize, &vaddr, BUS_DMA_NOWAIT);
596 if (r != 0) {
597 aprint_error_dev(sc->sc_dev,
598 "DMA memory map failed, error code %d\n", r);
599 goto err_dmamem_alloc;
600 }
601
602 memset(vaddr, 0, allocsize);
603 sc->sc_dmamem = vaddr;
604 p = (intptr_t) vaddr;
605
606 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
607 rxq = &sc->sc_rxq[qid];
608 txq = &sc->sc_txq[qid];
609
610 rxq->rxq_hdrs = vioif_assign_mem(&p,
611 sizeof(rxq->rxq_hdrs[0]) * rxq->rxq_vq->vq_num);
612 txq->txq_hdrs = vioif_assign_mem(&p,
613 sizeof(txq->txq_hdrs[0]) * txq->txq_vq->vq_num);
614 }
615 if (sc->sc_has_ctrl) {
616 ctrlq->ctrlq_cmd = vioif_assign_mem(&p,
617 sizeof(*ctrlq->ctrlq_cmd));
618 ctrlq->ctrlq_status = vioif_assign_mem(&p,
619 sizeof(*ctrlq->ctrlq_status));
620 ctrlq->ctrlq_rx = vioif_assign_mem(&p,
621 sizeof(*ctrlq->ctrlq_rx));
622 ctrlq->ctrlq_mac_tbl_uc = vioif_assign_mem(&p,
623 sizeof(*ctrlq->ctrlq_mac_tbl_uc));
624 ctrlq->ctrlq_mac_tbl_mc = vioif_assign_mem(&p,
625 sizeof(*ctrlq->ctrlq_mac_tbl_mc)
626 + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES);
627 ctrlq->ctrlq_mq = vioif_assign_mem(&p, sizeof(*ctrlq->ctrlq_mq));
628 }
629
630 allocsize2 = 0;
631 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
632 int rxqsize, txqsize;
633
634 rxq = &sc->sc_rxq[qid];
635 txq = &sc->sc_txq[qid];
636 rxqsize = rxq->rxq_vq->vq_num;
637 txqsize = txq->txq_vq->vq_num;
638
639 allocsize2 += sizeof(rxq->rxq_dmamaps[0]) * rxqsize;
640 allocsize2 += sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize;
641 allocsize2 += sizeof(rxq->rxq_mbufs[0]) * rxqsize;
642
643 allocsize2 += sizeof(txq->txq_dmamaps[0]) * txqsize;
644 allocsize2 += sizeof(txq->txq_hdr_dmamaps[0]) * txqsize;
645 allocsize2 += sizeof(txq->txq_mbufs[0]) * txqsize;
646 }
647 vaddr = kmem_zalloc(allocsize2, KM_SLEEP);
648 sc->sc_kmem = vaddr;
649 p = (intptr_t) vaddr;
650
651 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
652 int rxqsize, txqsize;
653 rxq = &sc->sc_rxq[qid];
654 txq = &sc->sc_txq[qid];
655 rxqsize = rxq->rxq_vq->vq_num;
656 txqsize = txq->txq_vq->vq_num;
657
658 rxq->rxq_hdr_dmamaps = vioif_assign_mem(&p,
659 sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize);
660 txq->txq_hdr_dmamaps = vioif_assign_mem(&p,
661 sizeof(txq->txq_hdr_dmamaps[0]) * txqsize);
662 rxq->rxq_dmamaps = vioif_assign_mem(&p,
663 sizeof(rxq->rxq_dmamaps[0]) * rxqsize);
664 txq->txq_dmamaps = vioif_assign_mem(&p,
665 sizeof(txq->txq_dmamaps[0]) * txqsize);
666 rxq->rxq_mbufs = vioif_assign_mem(&p,
667 sizeof(rxq->rxq_mbufs[0]) * rxqsize);
668 txq->txq_mbufs = vioif_assign_mem(&p,
669 sizeof(txq->txq_mbufs[0]) * txqsize);
670 }
671
672 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
673 rxq = &sc->sc_rxq[qid];
674 txq = &sc->sc_txq[qid];
675
676 for (i = 0; i < rxq->rxq_vq->vq_num; i++) {
677 r = vioif_dmamap_create_load(sc, &rxq->rxq_hdr_dmamaps[i],
678 &rxq->rxq_hdrs[i], sizeof(rxq->rxq_hdrs[0]), 1,
679 BUS_DMA_READ, "rx header");
680 if (r != 0)
681 goto err_reqs;
682
683 r = vioif_dmamap_create(sc, &rxq->rxq_dmamaps[i],
684 MCLBYTES, 1, "rx payload");
685 if (r != 0)
686 goto err_reqs;
687 }
688
689 for (i = 0; i < txq->txq_vq->vq_num; i++) {
690 r = vioif_dmamap_create_load(sc, &txq->txq_hdr_dmamaps[i],
691 &txq->txq_hdrs[i], sizeof(txq->txq_hdrs[0]), 1,
692 BUS_DMA_READ, "tx header");
693 if (r != 0)
694 goto err_reqs;
695
696 r = vioif_dmamap_create(sc, &txq->txq_dmamaps[i], ETHER_MAX_LEN,
697 VIRTIO_NET_TX_MAXNSEGS, "tx payload");
698 if (r != 0)
699 goto err_reqs;
700 }
701 }
702
703 if (sc->sc_has_ctrl) {
704 /* control vq class & command */
705 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_cmd_dmamap,
706 ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd), 1,
707 BUS_DMA_WRITE, "control command");
708 if (r != 0)
709 goto err_reqs;
710
711 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_status_dmamap,
712 ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status), 1,
713 BUS_DMA_READ, "control status");
714 if (r != 0)
715 goto err_reqs;
716
717 /* control vq rx mode command parameter */
718 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_rx_dmamap,
719 ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx), 1,
720 BUS_DMA_WRITE, "rx mode control command");
721 if (r != 0)
722 goto err_reqs;
723
724 /* multiqueue set command */
725 r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_mq_dmamap,
726 ctrlq->ctrlq_mq, sizeof(*ctrlq->ctrlq_mq), 1,
727 BUS_DMA_WRITE, "multiqueue set command");
728 if (r != 0)
729 goto err_reqs;
730
731 /* control vq MAC filter table for unicast */
732 /* do not load now since its length is variable */
733 r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_uc_dmamap,
734 sizeof(*ctrlq->ctrlq_mac_tbl_uc) + 0, 1,
735 "unicast MAC address filter command");
736 if (r != 0)
737 goto err_reqs;
738
739 /* control vq MAC filter table for multicast */
740 r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_mc_dmamap,
741 sizeof(*ctrlq->ctrlq_mac_tbl_mc)
742 + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES, 1,
743 "multicast MAC address filter command");
if (r != 0)
goto err_reqs;
744 }
745
746 return 0;
747
748 err_reqs:
749 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_mc_dmamap);
750 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_uc_dmamap);
751 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_rx_dmamap);
752 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_status_dmamap);
753 vioif_dmamap_destroy(sc, &ctrlq->ctrlq_cmd_dmamap);
754 for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
755 rxq = &sc->sc_rxq[qid];
756 txq = &sc->sc_txq[qid];
757
758 for (i = 0; i < txq->txq_vq->vq_num; i++) {
759 vioif_dmamap_destroy(sc, &txq->txq_dmamaps[i]);
760 vioif_dmamap_destroy(sc, &txq->txq_hdr_dmamaps[i]);
761 }
762 for (i = 0; i < rxq->rxq_vq->vq_num; i++) {
763 vioif_dmamap_destroy(sc, &rxq->rxq_dmamaps[i]);
764 vioif_dmamap_destroy(sc, &rxq->rxq_hdr_dmamaps[i]);
765 }
766 }
767 if (sc->sc_kmem) {
768 kmem_free(sc->sc_kmem, allocsize2);
769 sc->sc_kmem = NULL;
770 }
771 bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, allocsize);
772 err_dmamem_alloc:
773 bus_dmamem_free(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1);
774 err_none:
775 return -1;
776 }
777
778 static void
779 vioif_attach(device_t parent, device_t self, void *aux)
780 {
781 struct vioif_softc *sc = device_private(self);
782 struct virtio_softc *vsc = device_private(parent);
783 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
784 struct vioif_txqueue *txq;
785 struct vioif_rxqueue *rxq;
786 uint32_t features, req_features;
787 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
788 u_int softint_flags;
789 int r, i, nvqs=0, req_flags;
790 char xnamebuf[MAXCOMLEN];
791
792 if (virtio_child(vsc) != NULL) {
793 aprint_normal(": child already attached for %s; "
794 "something wrong...\n", device_xname(parent));
795 return;
796 }
797
798 sc->sc_dev = self;
799 sc->sc_virtio = vsc;
800 sc->sc_link_active = false;
801
802 sc->sc_max_nvq_pairs = 1;
803 sc->sc_req_nvq_pairs = 1;
804 sc->sc_act_nvq_pairs = 1;
805 sc->sc_txrx_workqueue_sysctl = true;
806 sc->sc_tx_intr_process_limit = VIOIF_TX_INTR_PROCESS_LIMIT;
807 sc->sc_tx_process_limit = VIOIF_TX_PROCESS_LIMIT;
808 sc->sc_rx_intr_process_limit = VIOIF_RX_INTR_PROCESS_LIMIT;
809 sc->sc_rx_process_limit = VIOIF_RX_PROCESS_LIMIT;
810
811 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
812
813 snprintf(xnamebuf, sizeof(xnamebuf), "%s_txrx", device_xname(self));
814 sc->sc_txrx_workqueue = vioif_workq_create(xnamebuf, VIOIF_WORKQUEUE_PRI,
815 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
816 if (sc->sc_txrx_workqueue == NULL)
817 goto err;
818
819 req_flags = 0;
820
821 #ifdef VIOIF_MPSAFE
822 req_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
823 #endif
824 req_flags |= VIRTIO_F_PCI_INTR_MSIX;
825
826 req_features =
827 VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
828 VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY;
829 #ifdef VIOIF_MULTIQ
830 req_features |= VIRTIO_NET_F_MQ;
831 #endif
832 virtio_child_attach_start(vsc, self, IPL_NET, NULL,
833 vioif_config_change, virtio_vq_intrhand, req_flags,
834 req_features, VIRTIO_NET_FLAG_BITS);
835
836 features = virtio_features(vsc);
837
838 if (features & VIRTIO_NET_F_MAC) {
839 for (i = 0; i < __arraycount(sc->sc_mac); i++) {
840 sc->sc_mac[i] = virtio_read_device_config_1(vsc,
841 VIRTIO_NET_CONFIG_MAC + i);
842 }
843 } else {
844 /* code stolen from sys/net/if_tap.c */
845 struct timeval tv;
846 uint32_t ui;
847 getmicrouptime(&tv);
848 ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
849 memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
850 for (i = 0; i < __arraycount(sc->sc_mac); i++) {
851 virtio_write_device_config_1(vsc,
852 VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
853 }
854 }
855
856 aprint_normal_dev(self, "Ethernet address %s\n",
857 ether_sprintf(sc->sc_mac));
858
859 if ((features & VIRTIO_NET_F_CTRL_VQ) &&
860 (features & VIRTIO_NET_F_CTRL_RX)) {
861 sc->sc_has_ctrl = true;
862
863 cv_init(&ctrlq->ctrlq_wait, "ctrl_vq");
864 mutex_init(&ctrlq->ctrlq_wait_lock, MUTEX_DEFAULT, IPL_NET);
865 ctrlq->ctrlq_inuse = FREE;
866 } else {
867 sc->sc_has_ctrl = false;
868 }
869
870 if (sc->sc_has_ctrl && (features & VIRTIO_NET_F_MQ)) {
871 sc->sc_max_nvq_pairs = virtio_read_device_config_2(vsc,
872 VIRTIO_NET_CONFIG_MAX_VQ_PAIRS);
873
874 if (sc->sc_max_nvq_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
875 goto err;
876
877 /* Limit the number of queue pairs to use */
878 sc->sc_req_nvq_pairs = MIN(sc->sc_max_nvq_pairs, ncpu);
879 }
880
881 vioif_alloc_queues(sc);
882 virtio_child_attach_set_vqs(vsc, sc->sc_vqs, sc->sc_req_nvq_pairs);
883
884 #ifdef VIOIF_MPSAFE
885 softint_flags = SOFTINT_NET | SOFTINT_MPSAFE;
886 #else
887 softint_flags = SOFTINT_NET;
888 #endif
889
890 /*
891 * Allocating virtqueues
892 */
893 for (i = 0; i < sc->sc_max_nvq_pairs; i++) {
894 rxq = &sc->sc_rxq[i];
895 txq = &sc->sc_txq[i];
896 char qname[32];
897
898 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
899
900 rxq->rxq_handle_si = softint_establish(softint_flags,
901 vioif_rx_handle, rxq);
902 if (rxq->rxq_handle_si == NULL) {
903 aprint_error_dev(self, "cannot establish rx softint\n");
904 goto err;
905 }
906
907 snprintf(qname, sizeof(qname), "rx%d", i);
908 r = virtio_alloc_vq(vsc, rxq->rxq_vq, nvqs,
909 MCLBYTES+sizeof(struct virtio_net_hdr), 2, qname);
910 if (r != 0)
911 goto err;
912 nvqs++;
913 rxq->rxq_vq->vq_intrhand = vioif_rx_intr;
914 rxq->rxq_vq->vq_intrhand_arg = (void *)rxq;
915 rxq->rxq_stopping = true;
916 vioif_work_set(&rxq->rxq_work, vioif_rx_handle, rxq);
917
918 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
919
920 txq->txq_deferred_transmit = softint_establish(softint_flags,
921 vioif_deferred_transmit, txq);
922 if (txq->txq_deferred_transmit == NULL) {
923 aprint_error_dev(self, "cannot establish tx softint\n");
924 goto err;
925 }
926 txq->txq_handle_si = softint_establish(softint_flags,
927 vioif_tx_handle, txq);
928 if (txq->txq_handle_si == NULL) {
929 aprint_error_dev(self, "cannot establish tx softint\n");
930 goto err;
931 }
932
933 snprintf(qname, sizeof(qname), "tx%d", i);
934 r = virtio_alloc_vq(vsc, txq->txq_vq, nvqs,
935 sizeof(struct virtio_net_hdr)
936 + (ETHER_MAX_LEN - ETHER_HDR_LEN),
937 VIRTIO_NET_TX_MAXNSEGS + 1, qname);
938 if (r != 0)
939 goto err;
940 nvqs++;
941 txq->txq_vq->vq_intrhand = vioif_tx_intr;
942 txq->txq_vq->vq_intrhand_arg = (void *)txq;
943 txq->txq_link_active = sc->sc_link_active;
944 txq->txq_stopping = false;
945 txq->txq_intrq = pcq_create(txq->txq_vq->vq_num, KM_SLEEP);
946 vioif_work_set(&txq->txq_work, vioif_tx_handle, txq);
947 }
948
949 if (sc->sc_has_ctrl) {
950 /*
951 * Allocating a virtqueue for control channel
952 */
953 r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, nvqs,
954 NBPG, 1, "control");
955 if (r != 0) {
956 aprint_error_dev(self, "failed to allocate "
957 "a virtqueue for control channel, error code %d\n",
958 r);
959
960 sc->sc_has_ctrl = false;
961 cv_destroy(&ctrlq->ctrlq_wait);
962 mutex_destroy(&ctrlq->ctrlq_wait_lock);
963 } else {
964 nvqs++;
965 ctrlq->ctrlq_vq->vq_intrhand = vioif_ctrl_intr;
966 ctrlq->ctrlq_vq->vq_intrhand_arg = (void *) ctrlq;
967 }
968 }
969
970 sc->sc_ctl_softint = softint_establish(softint_flags,
971 vioif_ctl_softint, sc);
972 if (sc->sc_ctl_softint == NULL) {
973 aprint_error_dev(self, "cannot establish ctl softint\n");
974 goto err;
975 }
976
977 if (vioif_alloc_mems(sc) < 0)
978 goto err;
979
980 if (virtio_child_attach_finish(vsc) != 0)
981 goto err;
982
983 if (vioif_setup_sysctl(sc) != 0) {
984 aprint_error_dev(self, "unable to create sysctl node\n");
985 /* continue */
986 }
987
988 vioif_setup_stats(sc);
989
990 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
991 ifp->if_softc = sc;
992 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
993 #ifdef VIOIF_MPSAFE
994 ifp->if_extflags = IFEF_MPSAFE;
995 #endif
996 ifp->if_start = vioif_start;
997 if (sc->sc_req_nvq_pairs > 1)
998 ifp->if_transmit = vioif_transmit;
999 ifp->if_ioctl = vioif_ioctl;
1000 ifp->if_init = vioif_init;
1001 ifp->if_stop = vioif_stop;
1002 ifp->if_capabilities = 0;
1003 ifp->if_watchdog = vioif_watchdog;
1004 txq = &sc->sc_txq[0];
1005 IFQ_SET_MAXLEN(&ifp->if_snd, MAX(txq->txq_vq->vq_num, IFQ_MAXLEN));
1006 IFQ_SET_READY(&ifp->if_snd);
1007
1008 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
1009
1010 if_attach(ifp);
1011 if_deferred_start_init(ifp, NULL);
1012 ether_ifattach(ifp, sc->sc_mac);
1013
1014 return;
1015
1016 err:
1017 for (i = 0; i < sc->sc_max_nvq_pairs; i++) {
1018 rxq = &sc->sc_rxq[i];
1019 txq = &sc->sc_txq[i];
1020
1021 if (rxq->rxq_lock) {
1022 mutex_obj_free(rxq->rxq_lock);
1023 rxq->rxq_lock = NULL;
1024 }
1025
1026 if (rxq->rxq_handle_si) {
1027 softint_disestablish(rxq->rxq_handle_si);
1028 rxq->rxq_handle_si = NULL;
1029 }
1030
1031 if (txq->txq_lock) {
1032 mutex_obj_free(txq->txq_lock);
1033 txq->txq_lock = NULL;
1034 }
1035
1036 if (txq->txq_handle_si) {
1037 softint_disestablish(txq->txq_handle_si);
1038 txq->txq_handle_si = NULL;
1039 }
1040
1041 if (txq->txq_deferred_transmit) {
1042 softint_disestablish(txq->txq_deferred_transmit);
1043 txq->txq_deferred_transmit = NULL;
1044 }
1045
1046 if (txq->txq_intrq) {
1047 pcq_destroy(txq->txq_intrq);
1048 txq->txq_intrq = NULL;
1049 }
1050 }
1051
1052 if (sc->sc_has_ctrl) {
1053 cv_destroy(&ctrlq->ctrlq_wait);
1054 mutex_destroy(&ctrlq->ctrlq_wait_lock);
1055 }
1056
1057 while (nvqs > 0)
1058 virtio_free_vq(vsc, &sc->sc_vqs[--nvqs]);
1059
1060 vioif_free_queues(sc);
1061 mutex_destroy(&sc->sc_lock);
1062 virtio_child_attach_failed(vsc);
1063 config_finalize_register(self, vioif_finalize_teardown);
1064
1065 return;
1066 }
1067
1068 static int
1069 vioif_finalize_teardown(device_t self)
1070 {
1071 struct vioif_softc *sc = device_private(self);
1072
1073 if (sc->sc_txrx_workqueue != NULL) {
1074 vioif_workq_destroy(sc->sc_txrx_workqueue);
1075 sc->sc_txrx_workqueue = NULL;
1076 }
1077
1078 return 0;
1079 }
1080
1081 /* we need interrupts to turn promiscuous mode off */
1082 static void
1083 vioif_deferred_init(device_t self)
1084 {
1085 struct vioif_softc *sc = device_private(self);
1086 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1087 int r;
1088
1089 if (ifp->if_flags & IFF_PROMISC)
1090 return;
1091
1092 r = vioif_set_promisc(sc, false);
1093 if (r != 0)
1094 aprint_error_dev(self, "resetting promisc mode failed, "
1095 "error code %d\n", r);
1096 }
1097
1098 static void
1099 vioif_enable_interrupt_vqpairs(struct vioif_softc *sc)
1100 {
1101 struct virtio_softc *vsc = sc->sc_virtio;
1102 struct vioif_txqueue *txq;
1103 struct vioif_rxqueue *rxq;
1104 int i;
1105
1106 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1107 txq = &sc->sc_txq[i];
1108 rxq = &sc->sc_rxq[i];
1109
1110 virtio_start_vq_intr(vsc, txq->txq_vq);
1111 virtio_start_vq_intr(vsc, rxq->rxq_vq);
1112 }
1113 }
1114
1115 static void
1116 vioif_disable_interrupt_vqpairs(struct vioif_softc *sc)
1117 {
1118 struct virtio_softc *vsc = sc->sc_virtio;
1119 struct vioif_txqueue *txq;
1120 struct vioif_rxqueue *rxq;
1121 int i;
1122
1123 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1124 txq = &sc->sc_txq[i];
1125 rxq = &sc->sc_rxq[i];
1126
1127 virtio_stop_vq_intr(vsc, txq->txq_vq);
1128 virtio_stop_vq_intr(vsc, rxq->rxq_vq);
1129 }
1130 }
1131
1132 /*
1133 * Interface functions for ifnet
1134 */
1135 static int
1136 vioif_init(struct ifnet *ifp)
1137 {
1138 struct vioif_softc *sc = ifp->if_softc;
1139 struct virtio_softc *vsc = sc->sc_virtio;
1140 struct vioif_rxqueue *rxq;
1141 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1142 int r, i;
1143
1144 vioif_stop(ifp, 0);
1145
1146 virtio_reinit_start(vsc);
1147 virtio_negotiate_features(vsc, virtio_features(vsc));
1148
1149 for (i = 0; i < sc->sc_req_nvq_pairs; i++) {
1150 rxq = &sc->sc_rxq[i];
1151
1152 /* rxq_stopping must be cleared before calling vioif_populate_rx_mbufs_locked */
1153 mutex_enter(rxq->rxq_lock);
1154 rxq->rxq_stopping = false;
1155 vioif_populate_rx_mbufs_locked(rxq);
1156 mutex_exit(rxq->rxq_lock);
1157
1158 }
1159
1160 virtio_reinit_end(vsc);
1161
1162 if (sc->sc_has_ctrl)
1163 virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);
1164
1165 r = vioif_ctrl_mq_vq_pairs_set(sc, sc->sc_req_nvq_pairs);
1166 if (r == 0)
1167 sc->sc_act_nvq_pairs = sc->sc_req_nvq_pairs;
1168 else
1169 sc->sc_act_nvq_pairs = 1;
1170
1171 for (i = 0; i < sc->sc_act_nvq_pairs; i++)
1172 sc->sc_txq[i].txq_stopping = false;
1173
1174 vioif_enable_interrupt_vqpairs(sc);
1175
1176 if (!sc->sc_deferred_init_done) {
1177 sc->sc_deferred_init_done = 1;
1178 if (sc->sc_has_ctrl)
1179 vioif_deferred_init(sc->sc_dev);
1180 }
1181
1182 vioif_update_link_status(sc);
1183 ifp->if_flags |= IFF_RUNNING;
1184 ifp->if_flags &= ~IFF_OACTIVE;
1185 vioif_rx_filter(sc);
1186
1187 return 0;
1188 }
1189
1190 static void
1191 vioif_stop(struct ifnet *ifp, int disable)
1192 {
1193 struct vioif_softc *sc = ifp->if_softc;
1194 struct virtio_softc *vsc = sc->sc_virtio;
1195 struct vioif_txqueue *txq;
1196 struct vioif_rxqueue *rxq;
1197 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1198 int i;
1199
1200 /* Take the locks to ensure that ongoing TX/RX finish */
1201 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1202 txq = &sc->sc_txq[i];
1203 rxq = &sc->sc_rxq[i];
1204
1205 mutex_enter(txq->txq_lock);
1206 txq->txq_stopping = true;
1207 mutex_exit(txq->txq_lock);
1208
1209 mutex_enter(rxq->rxq_lock);
1210 rxq->rxq_stopping = true;
1211 mutex_exit(rxq->rxq_lock);
1212 }
1213
1214 /* disable interrupts */
1215 vioif_disable_interrupt_vqpairs(sc);
1216
1217 if (sc->sc_has_ctrl)
1218 virtio_stop_vq_intr(vsc, ctrlq->ctrlq_vq);
1219
1220 /* only way to stop I/O and DMA is resetting... */
1221 virtio_reset(vsc);
1222
1223 /* rendezvous for finish of handlers */
1224 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1225 txq = &sc->sc_txq[i];
1226 rxq = &sc->sc_rxq[i];
1227
1228 mutex_enter(txq->txq_lock);
1229 mutex_exit(txq->txq_lock);
1230
1231 mutex_enter(rxq->rxq_lock);
1232 mutex_exit(rxq->rxq_lock);
1233
1234 vioif_work_wait(sc->sc_txrx_workqueue, &txq->txq_work);
1235 vioif_work_wait(sc->sc_txrx_workqueue, &rxq->rxq_work);
1236 }
1237
1238 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1239 vioif_rx_queue_clear(&sc->sc_rxq[i]);
1240 vioif_tx_queue_clear(&sc->sc_txq[i]);
1241 }
1242
1243 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1244 sc->sc_link_active = false;
1245
1246 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1247 txq = &sc->sc_txq[i];
1248 rxq = &sc->sc_rxq[i];
1249
1250 txq->txq_link_active = false;
1251
1252 if (disable)
1253 vioif_rx_drain(rxq);
1254
1255 vioif_tx_drain(txq);
1256 }
1257 }
1258
1259 static void
1260 vioif_send_common_locked(struct ifnet *ifp, struct vioif_txqueue *txq,
1261 bool is_transmit)
1262 {
1263 struct vioif_softc *sc = ifp->if_softc;
1264 struct virtio_softc *vsc = sc->sc_virtio;
1265 struct virtqueue *vq = txq->txq_vq;
1266 struct mbuf *m;
1267 int queued = 0;
1268
1269 KASSERT(mutex_owned(txq->txq_lock));
1270
1271 if ((ifp->if_flags & IFF_RUNNING) == 0)
1272 return;
1273
1274 if (!txq->txq_link_active || txq->txq_stopping)
1275 return;
1276
1277 if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
1278 return;
1279
1280 for (;;) {
1281 int slot, r;
1282
1283 if (is_transmit)
1284 m = pcq_get(txq->txq_intrq);
1285 else
1286 IFQ_DEQUEUE(&ifp->if_snd, m);
1287
1288 if (m == NULL)
1289 break;
1290
1291 r = virtio_enqueue_prep(vsc, vq, &slot);
1292 if (r == EAGAIN) {
1293 ifp->if_flags |= IFF_OACTIVE;
1294 m_freem(m);
1295 break;
1296 }
1297 if (r != 0)
1298 panic("enqueue_prep for a tx buffer");
1299
1300 r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
1301 txq->txq_dmamaps[slot], m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1302 if (r != 0) {
1303 /* maybe just too fragmented */
1304 struct mbuf *newm;
1305
1306 newm = m_defrag(m, M_NOWAIT);
1307 if (newm == NULL) {
1308 txq->txq_defrag_failed.ev_count++;
1309 goto skip;
1310 }
1311
1312 m = newm;
1313 r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
1314 txq->txq_dmamaps[slot], m,
1315 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1316 if (r != 0) {
1317 txq->txq_mbuf_load_failed.ev_count++;
1318 skip:
1319 m_freem(m);
1320 virtio_enqueue_abort(vsc, vq, slot);
1321 continue;
1322 }
1323 }
1324
1325 /* This should actually never fail */
1326 r = virtio_enqueue_reserve(vsc, vq, slot,
1327 txq->txq_dmamaps[slot]->dm_nsegs + 1);
1328 if (r != 0) {
1329 txq->txq_enqueue_reserve_failed.ev_count++;
1330 bus_dmamap_unload(virtio_dmat(vsc),
1331 txq->txq_dmamaps[slot]);
1332 /* slot already freed by virtio_enqueue_reserve */
1333 m_freem(m);
1334 continue;
1335 }
1336
1337 txq->txq_mbufs[slot] = m;
1338
1339 memset(&txq->txq_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
1340 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot],
1341 0, txq->txq_dmamaps[slot]->dm_mapsize,
1342 BUS_DMASYNC_PREWRITE);
1343 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot],
1344 0, txq->txq_hdr_dmamaps[slot]->dm_mapsize,
1345 BUS_DMASYNC_PREWRITE);
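/* descriptor chain: the virtio_net_hdr first, then the payload segments */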
1346 virtio_enqueue(vsc, vq, slot, txq->txq_hdr_dmamaps[slot], true);
1347 virtio_enqueue(vsc, vq, slot, txq->txq_dmamaps[slot], true);
1348 virtio_enqueue_commit(vsc, vq, slot, false);
1349
1350 queued++;
1351 bpf_mtap(ifp, m, BPF_D_OUT);
1352 }
1353
1354 if (queued > 0) {
1355 virtio_enqueue_commit(vsc, vq, -1, true);
1356 ifp->if_timer = 5;
1357 }
1358 }
1359
1360 static void
1361 vioif_start_locked(struct ifnet *ifp, struct vioif_txqueue *txq)
1362 {
1363
1364 /*
1365 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
1366 */
1367 vioif_send_common_locked(ifp, txq, false);
1368
1369 }
1370
1371 static void
1372 vioif_start(struct ifnet *ifp)
1373 {
1374 struct vioif_softc *sc = ifp->if_softc;
1375 struct vioif_txqueue *txq = &sc->sc_txq[0];
1376
1377 #ifdef VIOIF_MPSAFE
1378 KASSERT(if_is_mpsafe(ifp));
1379 #endif
1380
1381 mutex_enter(txq->txq_lock);
1382 vioif_start_locked(ifp, txq);
1383 mutex_exit(txq->txq_lock);
1384 }
1385
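/* pick a tx queue for the current CPU; spreads traffic over the active vq pairs */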
1386 static inline int
1387 vioif_select_txqueue(struct ifnet *ifp, struct mbuf *m)
1388 {
1389 struct vioif_softc *sc = ifp->if_softc;
1390 u_int cpuid = cpu_index(curcpu());
1391
1392 return cpuid % sc->sc_act_nvq_pairs;
1393 }
1394
1395 static void
1396 vioif_transmit_locked(struct ifnet *ifp, struct vioif_txqueue *txq)
1397 {
1398
1399 vioif_send_common_locked(ifp, txq, true);
1400 }
1401
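/*
 * if_transmit entry point: packets are staged on the per-queue pcq; if the
 * tx lock cannot be taken here they are drained later, e.g. by the
 * deferred-transmit softint scheduled from the tx interrupt.
 */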
1402 static int
1403 vioif_transmit(struct ifnet *ifp, struct mbuf *m)
1404 {
1405 struct vioif_softc *sc = ifp->if_softc;
1406 struct vioif_txqueue *txq;
1407 int qid;
1408
1409 qid = vioif_select_txqueue(ifp, m);
1410 txq = &sc->sc_txq[qid];
1411
1412 if (__predict_false(!pcq_put(txq->txq_intrq, m))) {
1413 m_freem(m);
1414 return ENOBUFS;
1415 }
1416
1417 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1418 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
1419 if (m->m_flags & M_MCAST)
1420 if_statinc_ref(nsr, if_omcasts);
1421 IF_STAT_PUTREF(ifp);
1422
1423 if (mutex_tryenter(txq->txq_lock)) {
1424 vioif_transmit_locked(ifp, txq);
1425 mutex_exit(txq->txq_lock);
1426 }
1427
1428 return 0;
1429 }
1430
1431 static void
1432 vioif_deferred_transmit(void *arg)
1433 {
1434 struct vioif_txqueue *txq = arg;
1435 struct virtio_softc *vsc = txq->txq_vq->vq_owner;
1436 struct vioif_softc *sc = device_private(virtio_child(vsc));
1437 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1438
1439 mutex_enter(txq->txq_lock);
1440 vioif_send_common_locked(ifp, txq, true);
1441 mutex_exit(txq->txq_lock);
1442 }
1443
1444 static int
1445 vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1446 {
1447 int s, r;
1448
1449 s = splnet();
1450
1451 r = ether_ioctl(ifp, cmd, data);
1452 if ((r == 0 && cmd == SIOCSIFFLAGS) ||
1453 (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
1454 if (ifp->if_flags & IFF_RUNNING)
1455 r = vioif_rx_filter(ifp->if_softc);
1456 else
1457 r = 0;
1458 }
1459
1460 splx(s);
1461
1462 return r;
1463 }
1464
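/* watchdog: reclaim completed tx slots on all active queues */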
1465 static void
1466 vioif_watchdog(struct ifnet *ifp)
1467 {
1468 struct vioif_softc *sc = ifp->if_softc;
1469 int i;
1470
1471 if (ifp->if_flags & IFF_RUNNING) {
1472 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
1473 vioif_tx_queue_clear(&sc->sc_txq[i]);
1474 }
1475 }
1476 }
1477
1478 /*
1479 * Receive implementation
1480 */
1481 /* allocate and initialize a mbuf for receive */
1482 static int
1483 vioif_add_rx_mbuf(struct vioif_rxqueue *rxq, int i)
1484 {
1485 struct virtio_softc *vsc = rxq->rxq_vq->vq_owner;
1486 struct mbuf *m;
1487 int r;
1488
1489 MGETHDR(m, M_DONTWAIT, MT_DATA);
1490 if (m == NULL)
1491 return ENOBUFS;
1492 MCLGET(m, M_DONTWAIT);
1493 if ((m->m_flags & M_EXT) == 0) {
1494 m_freem(m);
1495 return ENOBUFS;
1496 }
1497 rxq->rxq_mbufs[i] = m;
1498 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1499 r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
1500 rxq->rxq_dmamaps[i], m, BUS_DMA_READ | BUS_DMA_NOWAIT);
1501 if (r) {
1502 m_freem(m);
1503 rxq->rxq_mbufs[i] = NULL;
1504 return r;
1505 }
1506
1507 return 0;
1508 }
1509
1510 /* free a mbuf for receive */
1511 static void
1512 vioif_free_rx_mbuf(struct vioif_rxqueue *rxq, int i)
1513 {
1514 struct virtio_softc *vsc = rxq->rxq_vq->vq_owner;
1515
1516 bus_dmamap_unload(virtio_dmat(vsc), rxq->rxq_dmamaps[i]);
1517 m_freem(rxq->rxq_mbufs[i]);
1518 rxq->rxq_mbufs[i] = NULL;
1519 }
1520
1521 /* add mbufs for all the empty receive slots */
1522 static void
1523 vioif_populate_rx_mbufs_locked(struct vioif_rxqueue *rxq)
1524 {
1525 struct virtqueue *vq = rxq->rxq_vq;
1526 struct virtio_softc *vsc = vq->vq_owner;
1527 int i, r, ndone = 0;
1528
1529 KASSERT(mutex_owned(rxq->rxq_lock));
1530
1531 if (rxq->rxq_stopping)
1532 return;
1533
1534 for (i = 0; i < vq->vq_num; i++) {
1535 int slot;
1536 r = virtio_enqueue_prep(vsc, vq, &slot);
1537 if (r == EAGAIN)
1538 break;
1539 if (r != 0)
1540 panic("enqueue_prep for rx buffers");
1541 if (rxq->rxq_mbufs[slot] == NULL) {
1542 r = vioif_add_rx_mbuf(rxq, slot);
1543 if (r != 0) {
1544 rxq->rxq_mbuf_add_failed.ev_count++;
1545 break;
1546 }
1547 }
1548 r = virtio_enqueue_reserve(vsc, vq, slot,
1549 rxq->rxq_dmamaps[slot]->dm_nsegs + 1);
1550 if (r != 0) {
1551 vioif_free_rx_mbuf(rxq, slot);
1552 break;
1553 }
1554 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot],
1555 0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
1556 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot],
1557 0, MCLBYTES, BUS_DMASYNC_PREREAD);
1558 virtio_enqueue(vsc, vq, slot, rxq->rxq_hdr_dmamaps[slot],
1559 false);
1560 virtio_enqueue(vsc, vq, slot, rxq->rxq_dmamaps[slot], false);
1561 virtio_enqueue_commit(vsc, vq, slot, false);
1562 ndone++;
1563 }
1564 if (ndone > 0)
1565 virtio_enqueue_commit(vsc, vq, -1, true);
1566 }
1567
1568 static void
1569 vioif_rx_queue_clear(struct vioif_rxqueue *rxq)
1570 {
1571 struct virtqueue *vq = rxq->rxq_vq;
1572 struct virtio_softc *vsc = vq->vq_owner;
1573 struct vioif_softc *sc = device_private(virtio_child(vsc));
1574 u_int limit = UINT_MAX;
1575 bool more;
1576
1577 KASSERT(rxq->rxq_stopping);
1578
1579 mutex_enter(rxq->rxq_lock);
1580 for (;;) {
1581 more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
1582 if (more == false)
1583 break;
1584 }
1585 mutex_exit(rxq->rxq_lock);
1586 }
1587
1588 /* dequeue received packets */
1589 static bool
1590 vioif_rx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
1591 struct vioif_rxqueue *rxq, u_int limit)
1592 {
1593 struct virtqueue *vq = rxq->rxq_vq;
1594 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1595 struct mbuf *m;
1596 int slot, len;
1597 bool more = false, dequeued = false;
1598
1599 KASSERT(mutex_owned(rxq->rxq_lock));
1600
1601 if (virtio_vq_is_enqueued(vsc, vq) == false)
1602 return false;
1603
1604 for (;;) {
1605 if (limit-- == 0) {
1606 more = true;
1607 break;
1608 }
1609
1610 if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
1611 break;
1612
1613 dequeued = true;
1614
1615 len -= sizeof(struct virtio_net_hdr);
1616 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot],
1617 0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_POSTREAD);
1618 bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot],
1619 0, MCLBYTES, BUS_DMASYNC_POSTREAD);
1620 m = rxq->rxq_mbufs[slot];
1621 KASSERT(m != NULL);
1622 bus_dmamap_unload(virtio_dmat(vsc), rxq->rxq_dmamaps[slot]);
1623 rxq->rxq_mbufs[slot] = NULL;
1624 virtio_dequeue_commit(vsc, vq, slot);
1625 m_set_rcvif(m, ifp);
1626 m->m_len = m->m_pkthdr.len = len;
1627
1628 mutex_exit(rxq->rxq_lock);
1629 if_percpuq_enqueue(ifp->if_percpuq, m);
1630 mutex_enter(rxq->rxq_lock);
1631
1632 if (rxq->rxq_stopping)
1633 break;
1634 }
1635
1636 if (dequeued)
1637 vioif_populate_rx_mbufs_locked(rxq);
1638
1639 return more;
1640 }
1641
1642 /* rx interrupt; call _dequeue above and schedule a softint */
1643 static int
1644 vioif_rx_intr(void *arg)
1645 {
1646 struct vioif_rxqueue *rxq = arg;
1647 struct virtqueue *vq = rxq->rxq_vq;
1648 struct virtio_softc *vsc = vq->vq_owner;
1649 struct vioif_softc *sc = device_private(virtio_child(vsc));
1650 u_int limit;
1651 bool more;
1652
1653 limit = sc->sc_rx_intr_process_limit;
1654
1655 if (atomic_load_relaxed(&rxq->rxq_active) == true)
1656 return 1;
1657
1658 mutex_enter(rxq->rxq_lock);
1659
1660 if (!rxq->rxq_stopping) {
1661 rxq->rxq_workqueue = sc->sc_txrx_workqueue_sysctl;
1662
1663 virtio_stop_vq_intr(vsc, vq);
1664 atomic_store_relaxed(&rxq->rxq_active, true);
1665
1666 more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
1667 if (more) {
1668 vioif_rx_sched_handle(sc, rxq);
1669 } else {
1670 atomic_store_relaxed(&rxq->rxq_active, false);
1671 virtio_start_vq_intr(vsc, vq);
1672 }
1673 }
1674
1675 mutex_exit(rxq->rxq_lock);
1676 return 1;
1677 }
1678
1679 static void
1680 vioif_rx_handle(void *xrxq)
1681 {
1682 struct vioif_rxqueue *rxq = xrxq;
1683 struct virtqueue *vq = rxq->rxq_vq;
1684 struct virtio_softc *vsc = vq->vq_owner;
1685 struct vioif_softc *sc = device_private(virtio_child(vsc));
1686 u_int limit;
1687 bool more;
1688
1689 limit = sc->sc_rx_process_limit;
1690
1691 mutex_enter(rxq->rxq_lock);
1692
1693 if (!rxq->rxq_stopping) {
1694 more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
1695 if (more) {
1696 vioif_rx_sched_handle(sc, rxq);
1697 } else {
1698 atomic_store_relaxed(&rxq->rxq_active, false);
1699 virtio_start_vq_intr(vsc, rxq->rxq_vq);
1700 }
1701 }
1702
1703 mutex_exit(rxq->rxq_lock);
1704 }
1705
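/*
 * defer the remaining rx work either to the per-cpu workqueue or to the rx
 * softint, depending on the txrx_workqueue sysctl snapshot in rxq_workqueue
 */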
1706 static void
1707 vioif_rx_sched_handle(struct vioif_softc *sc, struct vioif_rxqueue *rxq)
1708 {
1709
1710 if (rxq->rxq_workqueue)
1711 vioif_work_add(sc->sc_txrx_workqueue, &rxq->rxq_work);
1712 else
1713 softint_schedule(rxq->rxq_handle_si);
1714 }
1715
1716 /* free all the mbufs; called from if_stop(disable) */
1717 static void
1718 vioif_rx_drain(struct vioif_rxqueue *rxq)
1719 {
1720 struct virtqueue *vq = rxq->rxq_vq;
1721 int i;
1722
1723 for (i = 0; i < vq->vq_num; i++) {
1724 if (rxq->rxq_mbufs[i] == NULL)
1725 continue;
1726 vioif_free_rx_mbuf(rxq, i);
1727 }
1728 }
1729
1730 /*
1731 * Transmission implementation
1732 */
1733 /* actual transmission is done in if_start */
1734 /* tx interrupt; dequeue and free mbufs */
1735 /*
1736 * the tx interrupt is masked while a dequeue pass is in progress and
1737 * re-enabled once the ring is drained; the watchdog also reclaims slots
1738 */
1739
1740 static int
1741 vioif_tx_intr(void *arg)
1742 {
1743 struct vioif_txqueue *txq = arg;
1744 struct virtqueue *vq = txq->txq_vq;
1745 struct virtio_softc *vsc = vq->vq_owner;
1746 struct vioif_softc *sc = device_private(virtio_child(vsc));
1747 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1748 bool more;
1749 u_int limit;
1750
1751 limit = sc->sc_tx_intr_process_limit;
1752
1753 if (atomic_load_relaxed(&txq->txq_active) == true)
1754 return 1;
1755
1756 mutex_enter(txq->txq_lock);
1757
1758 if (!txq->txq_stopping) {
1759 txq->txq_workqueue = sc->sc_txrx_workqueue_sysctl;
1760
1761 virtio_stop_vq_intr(vsc, vq);
1762 atomic_store_relaxed(&txq->txq_active, true);
1763
1764 more = vioif_tx_deq_locked(sc, vsc, txq, limit);
1765 if (more) {
1766 vioif_tx_sched_handle(sc, txq);
1767 } else {
1768 atomic_store_relaxed(&txq->txq_active, false);
1769
1770 /* for ALTQ */
1771 if (txq == &sc->sc_txq[0]) {
1772 if_schedule_deferred_start(ifp);
1773 ifp->if_flags &= ~IFF_OACTIVE;
1774 }
1775 softint_schedule(txq->txq_deferred_transmit);
1776
1777 virtio_start_vq_intr(vsc, vq);
1778 }
1779 }
1780
1781 mutex_exit(txq->txq_lock);
1782
1783 return 1;
1784 }
1785
1786 static void
1787 vioif_tx_handle(void *xtxq)
1788 {
1789 struct vioif_txqueue *txq = xtxq;
1790 struct virtqueue *vq = txq->txq_vq;
1791 struct virtio_softc *vsc = vq->vq_owner;
1792 struct vioif_softc *sc = device_private(virtio_child(vsc));
1793 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1794 u_int limit;
1795 bool more;
1796
1797 limit = sc->sc_tx_process_limit;
1798
1799 mutex_enter(txq->txq_lock);
1800
1801 if (!txq->txq_stopping) {
1802 more = vioif_tx_deq_locked(sc, vsc, txq, limit);
1803 if (more) {
1804 vioif_tx_sched_handle(sc, txq);
1805 } else {
1806 atomic_store_relaxed(&txq->txq_active, false);
1807
1808 /* for ALTQ */
1809 if (txq == &sc->sc_txq[0]) {
1810 if_schedule_deferred_start(ifp);
1811 ifp->if_flags &= ~IFF_OACTIVE;
1812 }
1813 softint_schedule(txq->txq_deferred_transmit);
1814
1815 virtio_start_vq_intr(vsc, txq->txq_vq);
1816 }
1817 }
1818
1819 mutex_exit(txq->txq_lock);
1820 }
1821
1822 static void
1823 vioif_tx_sched_handle(struct vioif_softc *sc, struct vioif_txqueue *txq)
1824 {
1825
1826 if (txq->txq_workqueue)
1827 vioif_work_add(sc->sc_txrx_workqueue, &txq->txq_work);
1828 else
1829 softint_schedule(txq->txq_handle_si);
1830 }
1831
1832 static void
1833 vioif_tx_queue_clear(struct vioif_txqueue *txq)
1834 {
1835 struct virtqueue *vq = txq->txq_vq;
1836 struct virtio_softc *vsc = vq->vq_owner;
1837 struct vioif_softc *sc = device_private(virtio_child(vsc));
1838 u_int limit = UINT_MAX;
1839 bool more;
1840
1841 mutex_enter(txq->txq_lock);
1842 for (;;) {
1843 more = vioif_tx_deq_locked(sc, vsc, txq, limit);
1844 if (more == false)
1845 break;
1846 }
1847 mutex_exit(txq->txq_lock);
1848 }
1849
1850 static bool
1851 vioif_tx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
1852 struct vioif_txqueue *txq, u_int limit)
1853 {
1854 struct virtqueue *vq = txq->txq_vq;
1855 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1856 struct mbuf *m;
1857 int slot, len;
1858 bool more = false;
1859
1860 KASSERT(mutex_owned(txq->txq_lock));
1861
1862 if (virtio_vq_is_enqueued(vsc, vq) == false)
1863 return false;
1864
1865 for (;;) {
1866 if (limit-- == 0) {
1867 more = true;
1868 break;
1869 }
1870
1871 if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
1872 break;
1873
1874 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot],
1875 0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_POSTWRITE);
1876 bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot],
1877 0, txq->txq_dmamaps[slot]->dm_mapsize,
1878 BUS_DMASYNC_POSTWRITE);
1879 m = txq->txq_mbufs[slot];
1880 bus_dmamap_unload(virtio_dmat(vsc), txq->txq_dmamaps[slot]);
1881 txq->txq_mbufs[slot] = NULL;
1882 virtio_dequeue_commit(vsc, vq, slot);
1883 if_statinc(ifp, if_opackets);
1884 m_freem(m);
1885 }
1886
1887 return more;
1888 }
1889
1890 /* free all the mbufs already put on vq; called from if_stop(disable) */
1891 static void
1892 vioif_tx_drain(struct vioif_txqueue *txq)
1893 {
1894 struct virtqueue *vq = txq->txq_vq;
1895 struct virtio_softc *vsc = vq->vq_owner;
1896 int i;
1897
1898 KASSERT(txq->txq_stopping);
1899
1900 for (i = 0; i < vq->vq_num; i++) {
1901 if (txq->txq_mbufs[i] == NULL)
1902 continue;
1903 bus_dmamap_unload(virtio_dmat(vsc), txq->txq_dmamaps[i]);
1904 m_freem(txq->txq_mbufs[i]);
1905 txq->txq_mbufs[i] = NULL;
1906 }
1907 }
1908
1909 /*
1910 * Control vq
1911 */
1912 /* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
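/*
 * There is only one set of ctrlq_* buffers, so concurrent commands are
 * serialized here: ctrlq_inuse cycles FREE -> INUSE -> DONE under
 * ctrlq_wait_lock and waiters sleep on ctrlq_wait.
 */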
1913 static void
1914 vioif_ctrl_acquire(struct vioif_softc *sc)
1915 {
1916 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1917
1918 mutex_enter(&ctrlq->ctrlq_wait_lock);
1919 while (ctrlq->ctrlq_inuse != FREE)
1920 cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
1921 ctrlq->ctrlq_inuse = INUSE;
1922 ctrlq->ctrlq_owner = curlwp;
1923 mutex_exit(&ctrlq->ctrlq_wait_lock);
1924 }
1925
1926 static void
1927 vioif_ctrl_release(struct vioif_softc *sc)
1928 {
1929 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1930
1931 KASSERT(ctrlq->ctrlq_inuse != FREE);
1932 KASSERT(ctrlq->ctrlq_owner == curlwp);
1933
1934 mutex_enter(&ctrlq->ctrlq_wait_lock);
1935 ctrlq->ctrlq_inuse = FREE;
1936 ctrlq->ctrlq_owner = NULL;
1937 cv_signal(&ctrlq->ctrlq_wait);
1938 mutex_exit(&ctrlq->ctrlq_wait_lock);
1939 }
1940
1941 static int
1942 vioif_ctrl_load_cmdspec(struct vioif_softc *sc,
1943 struct vioif_ctrl_cmdspec *specs, int nspecs)
1944 {
1945 struct virtio_softc *vsc = sc->sc_virtio;
1946 int i, r, loaded;
1947
1948 loaded = 0;
1949 for (i = 0; i < nspecs; i++) {
1950 r = bus_dmamap_load(virtio_dmat(vsc),
1951 specs[i].dmamap, specs[i].buf, specs[i].bufsize,
1952 NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1953 if (r) {
1954 sc->sc_ctrlq.ctrlq_cmd_load_failed.ev_count++;
1955 goto err;
1956 }
1957 loaded++;
1958
1959 }
1960
1961 return r;
1962
1963 err:
1964 for (i = 0; i < loaded; i++) {
1965 bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap);
1966 }
1967
1968 return r;
1969 }
1970
1971 static void
1972 vioif_ctrl_unload_cmdspec(struct vioif_softc *sc,
1973 struct vioif_ctrl_cmdspec *specs, int nspecs)
1974 {
1975 struct virtio_softc *vsc = sc->sc_virtio;
1976 int i;
1977
1978 for (i = 0; i < nspecs; i++) {
1979 bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap);
1980 }
1981 }
1982
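/*
 * enqueue one command: the class/command header and the caller's cmdspec
 * buffers are device-readable, the status buffer is device-writable; the
 * caller then sleeps until the control-vq interrupt handler marks the
 * request DONE.
 */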
1983 static int
1984 vioif_ctrl_send_command(struct vioif_softc *sc, uint8_t class, uint8_t cmd,
1985 struct vioif_ctrl_cmdspec *specs, int nspecs)
1986 {
1987 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
1988 struct virtqueue *vq = ctrlq->ctrlq_vq;
1989 struct virtio_softc *vsc = sc->sc_virtio;
1990 int i, r, slot;
1991
1992 ctrlq->ctrlq_cmd->class = class;
1993 ctrlq->ctrlq_cmd->command = cmd;
1994
1995 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap,
1996 0, sizeof(struct virtio_net_ctrl_cmd), BUS_DMASYNC_PREWRITE);
1997 for (i = 0; i < nspecs; i++) {
1998 bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap,
1999 0, specs[i].bufsize, BUS_DMASYNC_PREWRITE);
2000 }
2001 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap,
2002 0, sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_PREREAD);
2003
2004 r = virtio_enqueue_prep(vsc, vq, &slot);
2005 if (r != 0)
2006 panic("%s: control vq busy!?", device_xname(sc->sc_dev));
2007 r = virtio_enqueue_reserve(vsc, vq, slot, nspecs + 2);
2008 if (r != 0)
2009 panic("%s: control vq busy!?", device_xname(sc->sc_dev));
2010 virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_cmd_dmamap, true);
2011 for (i = 0; i < nspecs; i++) {
2012 virtio_enqueue(vsc, vq, slot, specs[i].dmamap, true);
2013 }
2014 virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_status_dmamap, false);
2015 virtio_enqueue_commit(vsc, vq, slot, true);
2016
2017 /* wait for done */
2018 mutex_enter(&ctrlq->ctrlq_wait_lock);
2019 while (ctrlq->ctrlq_inuse != DONE)
2020 cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
2021 mutex_exit(&ctrlq->ctrlq_wait_lock);
2022 /* already dequeued */
2023
2024 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap, 0,
2025 sizeof(struct virtio_net_ctrl_cmd), BUS_DMASYNC_POSTWRITE);
2026 for (i = 0; i < nspecs; i++) {
2027 bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap, 0,
2028 specs[i].bufsize, BUS_DMASYNC_POSTWRITE);
2029 }
2030 bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap, 0,
2031 sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_POSTREAD);
2032
2033 if (ctrlq->ctrlq_status->ack == VIRTIO_NET_OK)
2034 r = 0;
2035 else {
2036 device_printf(sc->sc_dev, "control command failed\n");
2037 sc->sc_ctrlq.ctrlq_cmd_failed.ev_count++;
2038 r = EIO;
2039 }
2040
2041 return r;
2042 }
2043
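/* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */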
2044 static int
2045 vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
2046 {
2047 struct virtio_net_ctrl_rx *rx = sc->sc_ctrlq.ctrlq_rx;
2048 struct vioif_ctrl_cmdspec specs[1];
2049 int r;
2050
2051 if (!sc->sc_has_ctrl)
2052 return ENOTSUP;
2053
2054 vioif_ctrl_acquire(sc);
2055
2056 rx->onoff = onoff;
2057 specs[0].dmamap = sc->sc_ctrlq.ctrlq_rx_dmamap;
2058 specs[0].buf = rx;
2059 specs[0].bufsize = sizeof(*rx);
2060
2061 r = vioif_ctrl_send_command(sc, VIRTIO_NET_CTRL_RX, cmd,
2062 specs, __arraycount(specs));
2063
2064 vioif_ctrl_release(sc);
2065 return r;
2066 }
2067
2068 static int
2069 vioif_set_promisc(struct vioif_softc *sc, bool onoff)
2070 {
2071 return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);
2072 }
2073
2074 static int
2075 vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
2076 {
2077 return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);
2078 }
2079
2080 /* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
2081 static int
2082 vioif_set_rx_filter(struct vioif_softc *sc)
2083 {
2084 /* filter already set in ctrlq->ctrlq_mac_tbl */
2085 struct virtio_net_ctrl_mac_tbl *mac_tbl_uc, *mac_tbl_mc;
2086 struct vioif_ctrl_cmdspec specs[2];
2087 int nspecs = __arraycount(specs);
2088 int r;
2089
2090 mac_tbl_uc = sc->sc_ctrlq.ctrlq_mac_tbl_uc;
2091 mac_tbl_mc = sc->sc_ctrlq.ctrlq_mac_tbl_mc;
2092
2093 if (!sc->sc_has_ctrl)
2094 return ENOTSUP;
2095
2096 vioif_ctrl_acquire(sc);
2097
2098 specs[0].dmamap = sc->sc_ctrlq.ctrlq_tbl_uc_dmamap;
2099 specs[0].buf = mac_tbl_uc;
2100 specs[0].bufsize = sizeof(*mac_tbl_uc)
2101 + (ETHER_ADDR_LEN * mac_tbl_uc->nentries);
2102
2103 specs[1].dmamap = sc->sc_ctrlq.ctrlq_tbl_mc_dmamap;
2104 specs[1].buf = mac_tbl_mc;
2105 specs[1].bufsize = sizeof(*mac_tbl_mc)
2106 + (ETHER_ADDR_LEN * mac_tbl_mc->nentries);
2107
2108 r = vioif_ctrl_load_cmdspec(sc, specs, nspecs);
2109 if (r != 0)
2110 goto out;
2111
2112 r = vioif_ctrl_send_command(sc,
2113 VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET,
2114 specs, nspecs);
2115
2116 vioif_ctrl_unload_cmdspec(sc, specs, nspecs);
2117
2118 out:
2119 vioif_ctrl_release(sc);
2120
2121 return r;
2122 }
2123
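/* issue VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET to set the number of active vq pairs */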
2124 static int
2125 vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *sc, int nvq_pairs)
2126 {
2127 struct virtio_net_ctrl_mq *mq = sc->sc_ctrlq.ctrlq_mq;
2128 struct vioif_ctrl_cmdspec specs[1];
2129 int r;
2130
2131 if (!sc->sc_has_ctrl)
2132 return ENOTSUP;
2133
2134 if (nvq_pairs <= 1)
2135 return EINVAL;
2136
2137 vioif_ctrl_acquire(sc);
2138
2139 mq->virtqueue_pairs = nvq_pairs;
2140 specs[0].dmamap = sc->sc_ctrlq.ctrlq_mq_dmamap;
2141 specs[0].buf = mq;
2142 specs[0].bufsize = sizeof(*mq);
2143
2144 r = vioif_ctrl_send_command(sc,
2145 VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
2146 specs, __arraycount(specs));
2147
2148 vioif_ctrl_release(sc);
2149
2150 return r;
2151 }
2152
2153 /* ctrl vq interrupt; wake up the command issuer */
2154 static int
2155 vioif_ctrl_intr(void *arg)
2156 {
2157 struct vioif_ctrlqueue *ctrlq = arg;
2158 struct virtqueue *vq = ctrlq->ctrlq_vq;
2159 struct virtio_softc *vsc = vq->vq_owner;
2160 int r, slot;
2161
2162 if (virtio_vq_is_enqueued(vsc, vq) == false)
2163 return 0;
2164
2165 r = virtio_dequeue(vsc, vq, &slot, NULL);
2166 if (r == ENOENT)
2167 return 0;
2168 virtio_dequeue_commit(vsc, vq, slot);
2169
2170 mutex_enter(&ctrlq->ctrlq_wait_lock);
2171 ctrlq->ctrlq_inuse = DONE;
2172 cv_signal(&ctrlq->ctrlq_wait);
2173 mutex_exit(&ctrlq->ctrlq_wait_lock);
2174
2175 return 1;
2176 }
2177
2178 /*
2179  * If IFF_PROMISC is requested, enable promiscuous mode.
2180  * If the multicast filter fits (<= MAXENTRIES entries), program the rx
2181  * filter; if it is too large, use ALLMULTI instead.
2182  * If programming the rx filter fails, fall back to ALLMULTI;
2183  * if ALLMULTI fails, fall back to PROMISC.
2184  */
2187 static int
2188 vioif_rx_filter(struct vioif_softc *sc)
2189 {
2190 struct ethercom *ec = &sc->sc_ethercom;
2191 struct ifnet *ifp = &ec->ec_if;
2192 struct ether_multi *enm;
2193 struct ether_multistep step;
2194 struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
2195 int nentries;
2196 int promisc = 0, allmulti = 0, rxfilter = 0;
2197 int r;
2198
2199 if (!sc->sc_has_ctrl) { /* no ctrl vq; always promisc */
2200 ifp->if_flags |= IFF_PROMISC;
2201 return 0;
2202 }
2203
2204 if (ifp->if_flags & IFF_PROMISC) {
2205 promisc = 1;
2206 goto set;
2207 }
2208
2209 nentries = -1;
2210 ETHER_LOCK(ec);
2211 ETHER_FIRST_MULTI(step, ec, enm);
2212 while (nentries++, enm != NULL) {
2213 if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
2214 allmulti = 1;
2215 goto set_unlock;
2216 }
2217 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2218 allmulti = 1;
2219 goto set_unlock;
2220 }
2221 memcpy(ctrlq->ctrlq_mac_tbl_mc->macs[nentries],
2222 enm->enm_addrlo, ETHER_ADDR_LEN);
2223 ETHER_NEXT_MULTI(step, enm);
2224 }
2225 rxfilter = 1;
2226
2227 set_unlock:
2228 ETHER_UNLOCK(ec);
2229
2230 set:
2231 if (rxfilter) {
2232 ctrlq->ctrlq_mac_tbl_uc->nentries = 0;
2233 ctrlq->ctrlq_mac_tbl_mc->nentries = nentries;
2234 r = vioif_set_rx_filter(sc);
2235 if (r != 0) {
2236 rxfilter = 0;
2237 allmulti = 1; /* fallback */
2238 }
2239 } else {
2240 /* remove rx filter */
2241 ctrlq->ctrlq_mac_tbl_uc->nentries = 0;
2242 ctrlq->ctrlq_mac_tbl_mc->nentries = 0;
2243 r = vioif_set_rx_filter(sc);
2244 /* what to do on failure? */
2245 }
2246 if (allmulti) {
2247 r = vioif_set_allmulti(sc, true);
2248 if (r != 0) {
2249 allmulti = 0;
2250 promisc = 1; /* fallback */
2251 }
2252 } else {
2253 r = vioif_set_allmulti(sc, false);
2254 /* what to do on failure? */
2255 }
2256 if (promisc) {
2257 r = vioif_set_promisc(sc, true);
2258 } else {
2259 r = vioif_set_promisc(sc, false);
2260 }
2261
2262 return r;
2263 }
2264
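/*
 * Read the link status from the device config space; if the device did not
 * negotiate VIRTIO_NET_F_STATUS, the link is assumed to be up.
 */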
2265 static bool
2266 vioif_is_link_up(struct vioif_softc *sc)
2267 {
2268 struct virtio_softc *vsc = sc->sc_virtio;
2269 uint16_t status;
2270
2271 if (virtio_features(vsc) & VIRTIO_NET_F_STATUS)
2272 status = virtio_read_device_config_2(vsc,
2273 VIRTIO_NET_CONFIG_STATUS);
2274 else
2275 status = VIRTIO_NET_S_LINK_UP;
2276
2277 return ((status & VIRTIO_NET_S_LINK_UP) != 0);
2278 }
2279
2280 /* update link state and propagate changes to the tx queues */
2281 static void
2282 vioif_update_link_status(struct vioif_softc *sc)
2283 {
2284 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2285 struct vioif_txqueue *txq;
2286 bool active, changed;
2287 int link, i;
2288
2289 mutex_enter(&sc->sc_lock);
2290
2291 active = vioif_is_link_up(sc);
2292 changed = false;
2293
2294 if (active) {
2295 if (!sc->sc_link_active)
2296 changed = true;
2297
2298 link = LINK_STATE_UP;
2299 sc->sc_link_active = true;
2300 } else {
2301 if (sc->sc_link_active)
2302 changed = true;
2303
2304 link = LINK_STATE_DOWN;
2305 sc->sc_link_active = false;
2306 }
2307
2308 if (changed) {
2309 for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
2310 txq = &sc->sc_txq[i];
2311
2312 mutex_enter(txq->txq_lock);
2313 txq->txq_link_active = sc->sc_link_active;
2314 mutex_exit(txq->txq_lock);
2315 }
2316
2317 if_link_state_change(ifp, link);
2318 }
2319
2320 mutex_exit(&sc->sc_lock);
2321 }
2322
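/* config change interrupt; defer the handling to softint context */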
2323 static int
2324 vioif_config_change(struct virtio_softc *vsc)
2325 {
2326 struct vioif_softc *sc = device_private(virtio_child(vsc));
2327
2328 softint_schedule(sc->sc_ctl_softint);
2329 return 0;
2330 }
2331
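/* softint handler for config changes: refresh link state and restart tx */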
2332 static void
2333 vioif_ctl_softint(void *arg)
2334 {
2335 struct vioif_softc *sc = arg;
2336 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2337
2338 vioif_update_link_status(sc);
2339 vioif_start(ifp);
2340 }
2341
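/*
 * Workqueue wrappers used for deferred rx/tx packet processing
 * (enabled via the txrx_workqueue sysctl below).
 */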
2342 static struct workqueue *
2343 vioif_workq_create(const char *name, pri_t prio, int ipl, int flags)
2344 {
2345 struct workqueue *wq;
2346 int error;
2347
2348 error = workqueue_create(&wq, name, vioif_workq_work, NULL,
2349 prio, ipl, flags);
2350
2351 if (error)
2352 return NULL;
2353
2354 return wq;
2355 }
2356
2357 static void
2358 vioif_workq_destroy(struct workqueue *wq)
2359 {
2360
2361 workqueue_destroy(wq);
2362 }
2363
2364 static void
2365 vioif_workq_work(struct work *wk, void *context)
2366 {
2367 struct vioif_work *work;
2368
2369 work = container_of(wk, struct vioif_work, cookie);
2370
2371 atomic_store_relaxed(&work->added, 0);
2372 work->func(work->arg);
2373 }
2374
2375 static void
2376 vioif_work_set(struct vioif_work *work, void (*func)(void *), void *arg)
2377 {
2378
2379 memset(work, 0, sizeof(*work));
2380 work->func = func;
2381 work->arg = arg;
2382 }
2383
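/*
 * Enqueue the work unless it is already pending; the added flag is cleared
 * by vioif_workq_work() just before the work function runs.
 */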
2384 static void
2385 vioif_work_add(struct workqueue *wq, struct vioif_work *work)
2386 {
2387
2388 if (atomic_load_relaxed(&work->added) != 0)
2389 return;
2390
2391 atomic_store_relaxed(&work->added, 1);
2392 kpreempt_disable();
2393 workqueue_enqueue(wq, &work->cookie, NULL);
2394 kpreempt_enable();
2395 }
2396
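/* wait until a pending instance of the work has finished running */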
2397 static void
2398 vioif_work_wait(struct workqueue *wq, struct vioif_work *work)
2399 {
2400
2401 workqueue_wait(wq, &work->cookie);
2402 }
2403
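/*
 * Create the sysctl tree for this device: the txrx_workqueue switch and the
 * rx/tx packet processing limits.
 */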
2404 static int
2405 vioif_setup_sysctl(struct vioif_softc *sc)
2406 {
2407 const char *devname;
2408 struct sysctllog **log;
2409 const struct sysctlnode *rnode, *rxnode, *txnode;
2410 int error;
2411
2412 log = &sc->sc_sysctllog;
2413 devname = device_xname(sc->sc_dev);
2414
2415 error = sysctl_createv(log, 0, NULL, &rnode,
2416 0, CTLTYPE_NODE, devname,
2417 SYSCTL_DESCR("virtio-net information and settings"),
2418 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
2419 if (error)
2420 goto out;
2421
2422 error = sysctl_createv(log, 0, &rnode, NULL,
2423 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
2424 SYSCTL_DESCR("Use workqueue for packet processing"),
2425 NULL, 0, &sc->sc_txrx_workqueue_sysctl, 0, CTL_CREATE, CTL_EOL);
2426 if (error)
2427 goto out;
2428
2429 error = sysctl_createv(log, 0, &rnode, &rxnode,
2430 0, CTLTYPE_NODE, "rx",
2431 SYSCTL_DESCR("virtio-net information and settings for Rx"),
2432 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
2433 if (error)
2434 goto out;
2435
2436 error = sysctl_createv(log, 0, &rxnode, NULL,
2437 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
2438 SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"),
2439 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
2440 if (error)
2441 goto out;
2442
2443 error = sysctl_createv(log, 0, &rxnode, NULL,
2444 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
2445 SYSCTL_DESCR("max number of Rx packets to process for deferred processing"),
2446 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
2447 if (error)
2448 goto out;
2449
2450 error = sysctl_createv(log, 0, &rnode, &txnode,
2451 0, CTLTYPE_NODE, "tx",
2452 SYSCTL_DESCR("virtio-net information and settings for Tx"),
2453 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
2454 if (error)
2455 goto out;
2456
2457 error = sysctl_createv(log, 0, &txnode, NULL,
2458 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
2459 SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"),
2460 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
2461 if (error)
2462 goto out;
2463
2464 error = sysctl_createv(log, 0, &txnode, NULL,
2465 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
2466 SYSCTL_DESCR("max number of Tx packets to process for deferred processing"),
2467 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
2468
2469 out:
2470 if (error)
2471 sysctl_teardown(log);
2472
2473 return error;
2474 }
2475
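/* attach event counters for per-queue and control queue failures */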
2476 static void
2477 vioif_setup_stats(struct vioif_softc *sc)
2478 {
2479 struct vioif_rxqueue *rxq;
2480 struct vioif_txqueue *txq;
2481
2482 char namebuf[16];
2483 int i;
2484
2485 for (i = 0; i < sc->sc_max_nvq_pairs; i++) {
2486 rxq = &sc->sc_rxq[i];
2487 txq = &sc->sc_txq[i];
2488
2489 snprintf(namebuf, sizeof(namebuf), "%s-TX%d",
2490 device_xname(sc->sc_dev), i);
2491 evcnt_attach_dynamic(&txq->txq_defrag_failed, EVCNT_TYPE_MISC,
2492 NULL, namebuf, "tx m_defrag() failed");
2493 evcnt_attach_dynamic(&txq->txq_mbuf_load_failed, EVCNT_TYPE_MISC,
2494 NULL, namebuf, "tx dmamap load failed");
2495 evcnt_attach_dynamic(&txq->txq_enqueue_reserve_failed, EVCNT_TYPE_MISC,
2496 NULL, namebuf, "virtio_enqueue_reserve failed");
2497
2498 snprintf(namebuf, sizeof(namebuf), "%s-RX%d",
2499 device_xname(sc->sc_dev), i);
2500 evcnt_attach_dynamic(&rxq->rxq_mbuf_add_failed, EVCNT_TYPE_MISC,
2501 NULL, namebuf, "rx mbuf allocation failed");
2502 }
2503
2504 snprintf(namebuf, sizeof(namebuf), "%s-CTRL", device_xname(sc->sc_dev));
2505 evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_load_failed, EVCNT_TYPE_MISC,
2506 NULL, namebuf, "control command dmamap load failed");
2507 evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_failed, EVCNT_TYPE_MISC,
2508 NULL, namebuf, "control command failed");
2509 }
2510
2511 MODULE(MODULE_CLASS_DRIVER, if_vioif, "virtio");
2512
2513 #ifdef _MODULE
2514 #include "ioconf.c"
2515 #endif
2516
2517 static int
2518 if_vioif_modcmd(modcmd_t cmd, void *opaque)
2519 {
2520 int error = 0;
2521
2522 #ifdef _MODULE
2523 switch (cmd) {
2524 case MODULE_CMD_INIT:
2525 error = config_init_component(cfdriver_ioconf_if_vioif,
2526 cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
2527 break;
2528 case MODULE_CMD_FINI:
2529 error = config_fini_component(cfdriver_ioconf_if_vioif,
2530 cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
2531 break;
2532 default:
2533 error = ENOTTY;
2534 break;
2535 }
2536 #endif
2537
2538 return error;
2539 }
2540