/*	$NetBSD: if_vioif.c,v 1.43 2019/01/14 14:35:52 yamaguchi Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.43 2019/01/14 14:35:52 yamaguchi Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/cpu.h>
#include <sys/module.h>

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include "ioconf.h"

#ifdef NET_MPSAFE
#define VIOIF_MPSAFE	1
#endif

#ifdef SOFTINT_INTR
#define VIOIF_SOFTINT_INTR	1
#endif

/*
 * if_vioifreg.h:
 */
/* Configuration registers */
#define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
#define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */

/* Feature bits */
#define VIRTIO_NET_F_CSUM	(1<<0)
#define VIRTIO_NET_F_GUEST_CSUM	(1<<1)
#define VIRTIO_NET_F_MAC	(1<<5)
#define VIRTIO_NET_F_GSO	(1<<6)
#define VIRTIO_NET_F_GUEST_TSO4	(1<<7)
#define VIRTIO_NET_F_GUEST_TSO6	(1<<8)
#define VIRTIO_NET_F_GUEST_ECN	(1<<9)
#define VIRTIO_NET_F_GUEST_UFO	(1<<10)
#define VIRTIO_NET_F_HOST_TSO4	(1<<11)
#define VIRTIO_NET_F_HOST_TSO6	(1<<12)
#define VIRTIO_NET_F_HOST_ECN	(1<<13)
#define VIRTIO_NET_F_HOST_UFO	(1<<14)
#define VIRTIO_NET_F_MRG_RXBUF	(1<<15)
#define VIRTIO_NET_F_STATUS	(1<<16)
#define VIRTIO_NET_F_CTRL_VQ	(1<<17)
#define VIRTIO_NET_F_CTRL_RX	(1<<18)
#define VIRTIO_NET_F_CTRL_VLAN	(1<<19)

#define VIRTIO_NET_FLAG_BITS \
	VIRTIO_COMMON_FLAG_BITS \
	"\x14""CTRL_VLAN" \
	"\x13""CTRL_RX" \
	"\x12""CTRL_VQ" \
	"\x11""STATUS" \
	"\x10""MRG_RXBUF" \
	"\x0f""HOST_UFO" \
	"\x0e""HOST_ECN" \
	"\x0d""HOST_TSO6" \
	"\x0c""HOST_TSO4" \
	"\x0b""GUEST_UFO" \
	"\x0a""GUEST_ECN" \
	"\x09""GUEST_TSO6" \
	"\x08""GUEST_TSO4" \
	"\x07""GSO" \
	"\x06""MAC" \
	"\x02""GUEST_CSUM" \
	"\x01""CSUM"
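/*
 * The string above is a snprintb(9) bit description: each "\xNN" byte is
 * the 1-indexed position of the feature bit named by the string that
 * follows it, and VIRTIO_COMMON_FLAG_BITS (from virtioreg.h) supplies the
 * leading format byte plus the transport-level feature names.  A minimal
 * sketch of how the negotiated features could be rendered with it:
 *
 *	char buf[256];
 *	snprintb(buf, sizeof(buf), VIRTIO_NET_FLAG_BITS,
 *	    virtio_features(vsc));
 *	aprint_normal_dev(self, "features: %s\n", buf);
 */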

/* Status */
#define VIRTIO_NET_S_LINK_UP	1

/* Packet header structure */
struct virtio_net_hdr {
	uint8_t		flags;
	uint8_t		gso_type;
	uint16_t	hdr_len;
	uint16_t	gso_size;
	uint16_t	csum_start;
	uint16_t	csum_offset;
#if 0
	uint16_t	num_buffers; /* if VIRTIO_NET_F_MRG_RXBUF enabled */
#endif
} __packed;

#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* flags */
#define VIRTIO_NET_HDR_GSO_NONE		0	/* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* gso_type */
#define VIRTIO_NET_HDR_GSO_UDP		3	/* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV6	4	/* gso_type */
#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* gso_type, |'ed */

#define VIRTIO_NET_MAX_GSO_LEN		(65536+ETHER_HDR_LEN)

/* Control virtqueue */
struct virtio_net_ctrl_cmd {
	uint8_t	class;
	uint8_t	command;
} __packed;
#define VIRTIO_NET_CTRL_RX		0
# define VIRTIO_NET_CTRL_RX_PROMISC	0
# define VIRTIO_NET_CTRL_RX_ALLMULTI	1

#define VIRTIO_NET_CTRL_MAC		1
# define VIRTIO_NET_CTRL_MAC_TABLE_SET	0

#define VIRTIO_NET_CTRL_VLAN		2
# define VIRTIO_NET_CTRL_VLAN_ADD	0
# define VIRTIO_NET_CTRL_VLAN_DEL	1

struct virtio_net_ctrl_status {
	uint8_t	ack;
} __packed;
#define VIRTIO_NET_OK			0
#define VIRTIO_NET_ERR			1

struct virtio_net_ctrl_rx {
	uint8_t	onoff;
} __packed;

struct virtio_net_ctrl_mac_tbl {
	uint32_t nentries;
	uint8_t macs[][ETHER_ADDR_LEN];
} __packed;
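/*
 * The MAC filter table is variable length on the wire: nentries is
 * followed by that many 6-byte addresses, so a table holding two entries
 * occupies sizeof(struct virtio_net_ctrl_mac_tbl) + 2 * ETHER_ADDR_LEN
 * bytes.
 */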

struct virtio_net_ctrl_vlan {
	uint16_t id;
} __packed;


/*
 * if_vioifvar.h:
 */

/*
 * Locking notes:
 * + a field in vioif_txqueue is protected by txq_lock (a spin mutex), and
 *   a field in vioif_rxqueue is protected by rxq_lock (a spin mutex).
 *      - no two of these locks may be held at once
 * + ctrlq_inuse is protected by ctrlq_wait_lock.
 *      - other fields in vioif_ctrlqueue are protected by ctrlq_inuse
 *      - txq_lock or rxq_lock cannot be held along with ctrlq_wait_lock
 */
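/*
 * A minimal sketch of the resulting discipline (inferred from the notes
 * above, not an exhaustive rule set):
 *
 *	VIOIF_TXQ_LOCK(txq);			// never together with
 *	... touch vioif_txqueue fields ...	// rxq_lock or
 *	VIOIF_TXQ_UNLOCK(txq);			// ctrlq_wait_lock
 *
 *	mutex_enter(&ctrlq->ctrlq_wait_lock);	// claim ctrlq_inuse here;
 *	... ctrlq_inuse: FREE -> INUSE ...	// then the other ctrlq_*
 *	mutex_exit(&ctrlq->ctrlq_wait_lock);	// fields may be used
 */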

struct vioif_txqueue {
	kmutex_t		*txq_lock;	/* lock for tx operations */

	struct virtqueue	*txq_vq;
	bool			txq_stopping;
	bool			txq_link_active;

	struct virtio_net_hdr	*txq_hdrs;
	bus_dmamap_t		*txq_hdr_dmamaps;

	struct mbuf		**txq_mbufs;
	bus_dmamap_t		*txq_dmamaps;
};

struct vioif_rxqueue {
	kmutex_t		*rxq_lock;	/* lock for rx operations */

	struct virtqueue	*rxq_vq;
	bool			rxq_stopping;

	struct virtio_net_hdr	*rxq_hdrs;
	bus_dmamap_t		*rxq_hdr_dmamaps;

	struct mbuf		**rxq_mbufs;
	bus_dmamap_t		*rxq_dmamaps;

	void			*rxq_softint;
};

struct vioif_ctrlqueue {
	struct virtqueue		*ctrlq_vq;
	enum {
		FREE, INUSE, DONE
	}				ctrlq_inuse;
	kcondvar_t			ctrlq_wait;
	kmutex_t			ctrlq_wait_lock;

	struct virtio_net_ctrl_cmd	*ctrlq_cmd;
	struct virtio_net_ctrl_status	*ctrlq_status;
	struct virtio_net_ctrl_rx	*ctrlq_rx;
	struct virtio_net_ctrl_mac_tbl	*ctrlq_mac_tbl_uc;
	struct virtio_net_ctrl_mac_tbl	*ctrlq_mac_tbl_mc;

	bus_dmamap_t			ctrlq_cmd_dmamap;
	bus_dmamap_t			ctrlq_status_dmamap;
	bus_dmamap_t			ctrlq_rx_dmamap;
	bus_dmamap_t			ctrlq_tbl_uc_dmamap;
	bus_dmamap_t			ctrlq_tbl_mc_dmamap;
};

struct vioif_softc {
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[3];
#define VQ_RX	0
#define VQ_TX	1
#define VQ_CTRL	2

	uint8_t			sc_mac[ETHER_ADDR_LEN];
	struct ethercom		sc_ethercom;
	short			sc_deferred_init_done;
	bool			sc_link_active;

	struct vioif_txqueue	sc_txq;
	struct vioif_rxqueue	sc_rxq;

	bool			sc_has_ctrl;
	struct vioif_ctrlqueue	sc_ctrlq;

	bus_dma_segment_t	sc_hdr_segs[1];
	void			*sc_dmamem;
	void			*sc_kmem;

	void			*sc_ctl_softint;
};
#define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */

#define VIOIF_TXQ_LOCK(_q)	mutex_enter((_q)->txq_lock)
#define VIOIF_TXQ_UNLOCK(_q)	mutex_exit((_q)->txq_lock)
#define VIOIF_TXQ_LOCKED(_q)	mutex_owned((_q)->txq_lock)

#define VIOIF_RXQ_LOCK(_q)	mutex_enter((_q)->rxq_lock)
#define VIOIF_RXQ_UNLOCK(_q)	mutex_exit((_q)->rxq_lock)
#define VIOIF_RXQ_LOCKED(_q)	mutex_owned((_q)->rxq_lock)

/* cfattach interface functions */
static int	vioif_match(device_t, cfdata_t, void *);
static void	vioif_attach(device_t, device_t, void *);
static void	vioif_deferred_init(device_t);

/* ifnet interface functions */
static int	vioif_init(struct ifnet *);
static void	vioif_stop(struct ifnet *, int);
static void	vioif_start(struct ifnet *);
static int	vioif_ioctl(struct ifnet *, u_long, void *);
static void	vioif_watchdog(struct ifnet *);

/* rx */
static int	vioif_add_rx_mbuf(struct vioif_softc *, int);
static void	vioif_free_rx_mbuf(struct vioif_softc *, int);
static void	vioif_populate_rx_mbufs(struct vioif_softc *);
static void	vioif_populate_rx_mbufs_locked(struct vioif_softc *);
static int	vioif_rx_deq(struct vioif_softc *);
static int	vioif_rx_deq_locked(struct vioif_softc *);
static int	vioif_rx_vq_done(struct virtqueue *);
static void	vioif_rx_softint(void *);
static void	vioif_rx_drain(struct vioif_softc *);

/* tx */
static int	vioif_tx_vq_done(struct virtqueue *);
static int	vioif_tx_vq_done_locked(struct virtqueue *);
static void	vioif_tx_drain(struct vioif_softc *);

/* other control */
static bool	vioif_is_link_up(struct vioif_softc *);
static void	vioif_update_link_status(struct vioif_softc *);
static int	vioif_ctrl_rx(struct vioif_softc *, int, bool);
static int	vioif_set_promisc(struct vioif_softc *, bool);
static int	vioif_set_allmulti(struct vioif_softc *, bool);
static int	vioif_set_rx_filter(struct vioif_softc *);
static int	vioif_rx_filter(struct vioif_softc *);
static int	vioif_ctrl_vq_done(struct virtqueue *);
static int	vioif_config_change(struct virtio_softc *);
static void	vioif_ctl_softint(void *);

CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
		  vioif_match, vioif_attach, NULL, NULL);

static int
vioif_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
		return 1;

	return 0;
}

/* allocate memory */
/*
 * dma memory is used for:
 *   rxq_hdrs[slot]:	 metadata array for received frames (READ)
 *   txq_hdrs[slot]:	 metadata array for frames to be sent (WRITE)
 *   ctrlq_cmd:		 command to be sent via ctrl vq (WRITE)
 *   ctrlq_status:	 return value for a command via ctrl vq (READ)
 *   ctrlq_rx:		 parameter for a VIRTIO_NET_CTRL_RX class command
 *			 (WRITE)
 *   ctrlq_mac_tbl_uc:	 unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 *   ctrlq_mac_tbl_mc:	 multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
 *			 class command (WRITE)
 * Only one instance of each ctrlq_* structure is allocated; they are
 * protected by the ctrlq_inuse variable and the ctrlq_wait condvar.
 */
/*
 * dynamically allocated memory is used for:
 *   rxq_hdr_dmamaps[slot]:	bus_dmamap_t array for sc_rx_hdrs[slot]
 *   txq_hdr_dmamaps[slot]:	bus_dmamap_t array for sc_tx_hdrs[slot]
 *   rxq_dmamaps[slot]:		bus_dmamap_t array for received payload
 *   txq_dmamaps[slot]:		bus_dmamap_t array for sent payload
 *   rxq_mbufs[slot]:		mbuf pointer array for received frames
 *   txq_mbufs[slot]:		mbuf pointer array for sent frames
 */
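/*
 * All of the fixed-size DMA structures above are carved out of a single
 * bus_dmamem allocation; vioif_alloc_mems() lays them out back to back
 * (rx headers, then tx headers, then the ctrlq_* structures) using the
 * P() cursor macro defined below.
 */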
static int
vioif_alloc_mems(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_txqueue *txq = &sc->sc_txq;
	struct vioif_rxqueue *rxq = &sc->sc_rxq;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	int allocsize, allocsize2, r, rsegs, i;
	void *vaddr;
	intptr_t p;
	int rxqsize, txqsize;

	rxqsize = rxq->rxq_vq->vq_num;
	txqsize = txq->txq_vq->vq_num;

	allocsize = sizeof(struct virtio_net_hdr) * rxqsize;
	allocsize += sizeof(struct virtio_net_hdr) * txqsize;
	if (sc->sc_has_ctrl) {
		allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
		allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
			+ sizeof(struct virtio_net_ctrl_mac_tbl)
			+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
	}
	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
	    &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA memory allocation failed, size %d, "
		    "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(virtio_dmat(vsc),
	    &sc->sc_hdr_segs[0], 1, allocsize,
	    &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA memory map failed, "
		    "error code %d\n", r);
		goto err_dmamem_alloc;
	}

#define P(p, p0, p0size)	do { p0 = (void *) p;	\
				     p += p0size; } while (0)
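/*
 * P() hands the next p0size bytes at cursor p to pointer p0 and advances
 * the cursor, so the arrays below end up packed contiguously; e.g. after
 * the first use, rxq_hdrs covers [vaddr, vaddr + sizeof(hdr) * rxqsize).
 */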
	memset(vaddr, 0, allocsize);
	sc->sc_dmamem = vaddr;
	p = (intptr_t) vaddr;

	P(p, rxq->rxq_hdrs, sizeof(rxq->rxq_hdrs[0]) * rxqsize);
	P(p, txq->txq_hdrs, sizeof(txq->txq_hdrs[0]) * txqsize);
	if (sc->sc_has_ctrl) {
		P(p, ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd));
		P(p, ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status));
		P(p, ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx));
		P(p, ctrlq->ctrlq_mac_tbl_uc, sizeof(*ctrlq->ctrlq_mac_tbl_uc) + 0);
		P(p, ctrlq->ctrlq_mac_tbl_mc,
		    (sizeof(*ctrlq->ctrlq_mac_tbl_mc)
		     + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES));
	}

	allocsize2 = sizeof(bus_dmamap_t) * (rxqsize + txqsize);
	allocsize2 += sizeof(bus_dmamap_t) * (rxqsize + txqsize);
	allocsize2 += sizeof(struct mbuf*) * (rxqsize + txqsize);
	vaddr = kmem_zalloc(allocsize2, KM_SLEEP);
	sc->sc_kmem = vaddr;
	p = (intptr_t) vaddr;

	P(p, rxq->rxq_hdr_dmamaps, sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize);
	P(p, txq->txq_hdr_dmamaps, sizeof(txq->txq_hdr_dmamaps[0]) * txqsize);
	P(p, rxq->rxq_dmamaps, sizeof(rxq->rxq_dmamaps[0]) * rxqsize);
	P(p, txq->txq_dmamaps, sizeof(txq->txq_dmamaps[0]) * txqsize);
	P(p, rxq->rxq_mbufs, sizeof(rxq->rxq_mbufs[0]) * rxqsize);
	P(p, txq->txq_mbufs, sizeof(txq->txq_mbufs[0]) * txqsize);
#undef P

#define C(map, size, nsegs, usage)					\
	do {								\
		r = bus_dmamap_create(virtio_dmat(vsc), size, nsegs, size, 0, \
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,			\
		    &map);						\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
			    "%s dmamap creation failed, "		\
			    "error code %d\n", usage, r);		\
			goto err_reqs;					\
		}							\
	} while (0)
#define C_L(map, buf, size, nsegs, rw, usage)				\
	C(map, size, nsegs, usage);					\
	do {								\
		r = bus_dmamap_load(virtio_dmat(vsc), map,		\
		    buf, size, NULL,					\
		    rw | BUS_DMA_NOWAIT);				\
		if (r != 0) {						\
			aprint_error_dev(sc->sc_dev,			\
			    usage " dmamap load failed, "		\
			    "error code %d\n", r);			\
			goto err_reqs;					\
		}							\
	} while (0)
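/*
 * C() only creates a dmamap; C_L() also loads it with a fixed buffer.
 * Maps whose buffers change per request (the payload mbufs and the
 * variable-length MAC tables) are created unloaded and loaded later.
 */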
	for (i = 0; i < rxqsize; i++) {
		C_L(rxq->rxq_hdr_dmamaps[i], &rxq->rxq_hdrs[i], sizeof(rxq->rxq_hdrs[0]), 1,
		    BUS_DMA_READ, "rx header");
		C(rxq->rxq_dmamaps[i], MCLBYTES, 1, "rx payload");
	}

	for (i = 0; i < txqsize; i++) {
		/* the device reads tx headers (memory -> device): WRITE */
		C_L(txq->txq_hdr_dmamaps[i], &txq->txq_hdrs[i], sizeof(txq->txq_hdrs[0]), 1,
		    BUS_DMA_WRITE, "tx header");
		C(txq->txq_dmamaps[i], ETHER_MAX_LEN, VIRTIO_NET_TX_MAXNSEGS, "tx payload");
	}

	if (sc->sc_has_ctrl) {
		/* control vq class & command */
		C_L(ctrlq->ctrlq_cmd_dmamap,
		    ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd), 1,
		    BUS_DMA_WRITE, "control command");
		C_L(ctrlq->ctrlq_status_dmamap,
		    ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status), 1,
		    BUS_DMA_READ, "control status");

		/* control vq rx mode command parameter */
		C_L(ctrlq->ctrlq_rx_dmamap,
		    ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx), 1,
		    BUS_DMA_WRITE, "rx mode control command");

		/* control vq MAC filter table for unicast */
		/* do not load now since its length is variable */
		C(ctrlq->ctrlq_tbl_uc_dmamap,
		    sizeof(*ctrlq->ctrlq_mac_tbl_uc) + 0, 1,
		    "unicast MAC address filter command");

		/* control vq MAC filter table for multicast */
		C(ctrlq->ctrlq_tbl_mc_dmamap,
		    sizeof(*ctrlq->ctrlq_mac_tbl_mc)
		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES, 1,
		    "multicast MAC address filter command");
	}
#undef C_L
#undef C

	return 0;

err_reqs:
#define D(map)								\
	do {								\
		if (map) {						\
			bus_dmamap_destroy(virtio_dmat(vsc), map);	\
			map = NULL;					\
		}							\
	} while (0)
	D(ctrlq->ctrlq_tbl_mc_dmamap);
	D(ctrlq->ctrlq_tbl_uc_dmamap);
	D(ctrlq->ctrlq_rx_dmamap);
	D(ctrlq->ctrlq_status_dmamap);
	D(ctrlq->ctrlq_cmd_dmamap);
	for (i = 0; i < txqsize; i++) {
		D(txq->txq_dmamaps[i]);
		D(txq->txq_hdr_dmamaps[i]);
	}
	for (i = 0; i < rxqsize; i++) {
		D(rxq->rxq_dmamaps[i]);
		D(rxq->rxq_hdr_dmamaps[i]);
	}
#undef D
	if (sc->sc_kmem) {
		kmem_free(sc->sc_kmem, allocsize2);
		sc->sc_kmem = NULL;
	}
	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1);
err_none:
	return -1;
}

static void
vioif_attach(device_t parent, device_t self, void *aux)
{
	struct vioif_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct vioif_txqueue *txq = &sc->sc_txq;
	struct vioif_rxqueue *rxq = &sc->sc_rxq;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	uint32_t features;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int softint_flags;
	int r, nvqs=0, req_flags;

	if (virtio_child(vsc) != NULL) {
		aprint_normal(": child already attached for %s; "
			      "something wrong...\n",
			      device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;
	sc->sc_link_active = false;

	req_flags = 0;

#ifdef VIOIF_MPSAFE
	req_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
#endif
#ifdef VIOIF_SOFTINT_INTR
	req_flags |= VIRTIO_F_PCI_INTR_SOFTINT;
#endif
	req_flags |= VIRTIO_F_PCI_INTR_MSIX;

	virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq,
	    vioif_config_change, virtio_vq_intr, req_flags,
	    (VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
	     VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY),
	    VIRTIO_NET_FLAG_BITS);

	features = virtio_features(vsc);

	if (features & VIRTIO_NET_F_MAC) {
		sc->sc_mac[0] = virtio_read_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+0);
		sc->sc_mac[1] = virtio_read_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+1);
		sc->sc_mac[2] = virtio_read_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+2);
		sc->sc_mac[3] = virtio_read_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+3);
		sc->sc_mac[4] = virtio_read_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+4);
		sc->sc_mac[5] = virtio_read_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+5);
	} else {
		/* code stolen from sys/net/if_tap.c */
		struct timeval tv;
		uint32_t ui;
		getmicrouptime(&tv);
		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
		virtio_write_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+0,
		    sc->sc_mac[0]);
		virtio_write_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+1,
		    sc->sc_mac[1]);
		virtio_write_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+2,
		    sc->sc_mac[2]);
		virtio_write_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+3,
		    sc->sc_mac[3]);
		virtio_write_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+4,
		    sc->sc_mac[4]);
		virtio_write_device_config_1(vsc,
		    VIRTIO_NET_CONFIG_MAC+5,
		    sc->sc_mac[5]);
	}
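	/*
	 * Note: the softc is zero-initialized by autoconf, so a generated
	 * address keeps 00:00:00 in its first three octets; only the low
	 * three octets come from the uptime-derived value above.
	 */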

	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(sc->sc_mac));

#ifdef VIOIF_MPSAFE
	softint_flags = SOFTINT_NET | SOFTINT_MPSAFE;
#else
	softint_flags = SOFTINT_NET;
#endif

	/*
	 * Allocating a virtqueue for Rx
	 */
	rxq->rxq_vq = &sc->sc_vq[VQ_RX];
	rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);

	rxq->rxq_softint = softint_establish(softint_flags, vioif_rx_softint, sc);
	if (rxq->rxq_softint == NULL) {
		aprint_error_dev(self, "cannot establish rx softint\n");
		goto err;
	}
	r = virtio_alloc_vq(vsc, rxq->rxq_vq, VQ_RX,
	    MCLBYTES+sizeof(struct virtio_net_hdr), 2, "rx");
	if (r != 0)
		goto err;
	nvqs = 1;
	rxq->rxq_vq->vq_done = vioif_rx_vq_done;
	rxq->rxq_stopping = true;
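	/*
	 * Each rx request takes two descriptors (the virtio_net_hdr plus a
	 * single MCLBYTES payload segment), hence the maxnsegs of 2 passed
	 * to virtio_alloc_vq() above.
	 */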

	/*
	 * Allocating a virtqueue for Tx
	 */
	txq->txq_vq = &sc->sc_vq[VQ_TX];
	txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	r = virtio_alloc_vq(vsc, txq->txq_vq, VQ_TX,
	    (sizeof(struct virtio_net_hdr) + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
	    VIRTIO_NET_TX_MAXNSEGS + 1, "tx");
	if (r != 0)
		goto err;
	nvqs = 2;
	txq->txq_vq->vq_done = vioif_tx_vq_done;
	txq->txq_link_active = sc->sc_link_active;
	txq->txq_stopping = false;
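	/*
	 * A tx packet may span up to VIRTIO_NET_TX_MAXNSEGS payload
	 * segments plus one descriptor for the header, hence the + 1 in
	 * the maxnsegs passed to virtio_alloc_vq() above.
	 */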

	virtio_start_vq_intr(vsc, rxq->rxq_vq);
	virtio_stop_vq_intr(vsc, txq->txq_vq); /* not urgent; do it later */

	ctrlq->ctrlq_vq = &sc->sc_vq[VQ_CTRL];
	if ((features & VIRTIO_NET_F_CTRL_VQ) &&
	    (features & VIRTIO_NET_F_CTRL_RX)) {
		/*
		 * Allocating a virtqueue for control channel
		 */
		r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, VQ_CTRL,
		    NBPG, 1, "control");
		if (r != 0) {
			aprint_error_dev(self, "failed to allocate "
			    "a virtqueue for control channel\n");
			goto skip;
		}

		ctrlq->ctrlq_vq->vq_done = vioif_ctrl_vq_done;
		cv_init(&ctrlq->ctrlq_wait, "ctrl_vq");
		mutex_init(&ctrlq->ctrlq_wait_lock, MUTEX_DEFAULT, IPL_NET);
		ctrlq->ctrlq_inuse = FREE;
		virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);
		sc->sc_has_ctrl = true;
		nvqs = 3;
	}
skip:


	sc->sc_ctl_softint = softint_establish(softint_flags, vioif_ctl_softint, sc);
	if (sc->sc_ctl_softint == NULL) {
		aprint_error_dev(self, "cannot establish ctl softint\n");
		goto err;
	}

	if (vioif_alloc_mems(sc) < 0)
		goto err;

	if (virtio_child_attach_finish(vsc) != 0)
		goto err;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = vioif_start;
	ifp->if_ioctl = vioif_ioctl;
	ifp->if_init = vioif_init;
	ifp->if_stop = vioif_stop;
	ifp->if_capabilities = 0;
	ifp->if_watchdog = vioif_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, MAX(txq->txq_vq->vq_num, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_mac);

	return;

err:
	if (rxq->rxq_lock) {
		mutex_obj_free(rxq->rxq_lock);
		rxq->rxq_lock = NULL;
	}

	if (rxq->rxq_softint) {
		softint_disestablish(rxq->rxq_softint);
		rxq->rxq_softint = NULL;
	}

	if (txq->txq_lock) {
		mutex_obj_free(txq->txq_lock);
		txq->txq_lock = NULL;
	}

	if (sc->sc_has_ctrl) {
		cv_destroy(&ctrlq->ctrlq_wait);
		mutex_destroy(&ctrlq->ctrlq_wait_lock);
	}

	while (nvqs > 0)
		virtio_free_vq(vsc, &sc->sc_vq[--nvqs]);

	virtio_child_attach_failed(vsc);
	return;
}
/* we need interrupts to turn promiscuous mode off */
static void
vioif_deferred_init(device_t self)
{
	struct vioif_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int r;

	if (ifp->if_flags & IFF_PROMISC)
		return;

	r = vioif_set_promisc(sc, false);
	if (r != 0)
		aprint_error_dev(self, "resetting promisc mode failed, "
		    "error code %d\n", r);
}

/*
 * Interface functions for ifnet
 */
static int
vioif_init(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_txqueue *txq = &sc->sc_txq;
	struct vioif_rxqueue *rxq = &sc->sc_rxq;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;

	vioif_stop(ifp, 0);

	virtio_reinit_start(vsc);
	virtio_negotiate_features(vsc, virtio_features(vsc));
	virtio_start_vq_intr(vsc, rxq->rxq_vq);
	virtio_stop_vq_intr(vsc, txq->txq_vq);
	if (sc->sc_has_ctrl)
		virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);
	virtio_reinit_end(vsc);

	if (!sc->sc_deferred_init_done) {
		sc->sc_deferred_init_done = 1;
		if (sc->sc_has_ctrl)
			vioif_deferred_init(sc->sc_dev);
	}

	/* Have to set false before vioif_populate_rx_mbufs */
	rxq->rxq_stopping = false;
	txq->txq_stopping = false;

	vioif_populate_rx_mbufs(sc);

	vioif_update_link_status(sc);
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	vioif_rx_filter(sc);

	return 0;
}

static void
vioif_stop(struct ifnet *ifp, int disable)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_txqueue *txq = &sc->sc_txq;
	struct vioif_rxqueue *rxq = &sc->sc_rxq;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;

	/* Take the locks to ensure that ongoing TX/RX finish */
	VIOIF_TXQ_LOCK(txq);
	txq->txq_stopping = true;
	VIOIF_TXQ_UNLOCK(txq);

	VIOIF_RXQ_LOCK(rxq);
	rxq->rxq_stopping = true;
	VIOIF_RXQ_UNLOCK(rxq);

	/* disable interrupts */
	virtio_stop_vq_intr(vsc, rxq->rxq_vq);
	virtio_stop_vq_intr(vsc, txq->txq_vq);
	if (sc->sc_has_ctrl)
		virtio_stop_vq_intr(vsc, ctrlq->ctrlq_vq);

	/* only way to stop I/O and DMA is resetting... */
	virtio_reset(vsc);
	vioif_rx_deq(sc);
	vioif_tx_drain(sc);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_link_active = false;
	txq->txq_link_active = false;

	if (disable)
		vioif_rx_drain(sc);
}

static void
vioif_start(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_txqueue *txq = &sc->sc_txq;
	struct virtqueue *vq = txq->txq_vq;
	struct mbuf *m;
	int queued = 0;

	VIOIF_TXQ_LOCK(txq);

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING ||
	    !txq->txq_link_active)
		goto out;

	if (txq->txq_stopping)
		goto out;

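	/*
	 * Per packet: reserve a slot, DMA-load the mbuf, enqueue the
	 * header descriptor followed by the payload descriptors, and
	 * commit; the host is kicked once for the whole batch (the
	 * commit with slot -1 after the loop).
	 */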
	for (;;) {
		int slot, r;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN) {
			ifp->if_flags |= IFF_OACTIVE;
			m_freem(m);
			break;
		}
		if (r != 0)
			panic("enqueue_prep for a tx buffer");

		r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
		    txq->txq_dmamaps[slot],
		    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (r != 0) {
			/* maybe just too fragmented */
			struct mbuf *newm;

			newm = m_defrag(m, M_NOWAIT);
			if (newm == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "m_defrag() failed\n");
				goto skip;
			}

			m = newm;
			r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
			    txq->txq_dmamaps[slot],
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (r != 0) {
				aprint_error_dev(sc->sc_dev,
				    "tx dmamap load failed, error code %d\n",
				    r);
skip:
				m_freem(m);
				virtio_enqueue_abort(vsc, vq, slot);
				continue;
			}
		}

		/* This should actually never fail */
		r = virtio_enqueue_reserve(vsc, vq, slot,
		    txq->txq_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "virtio_enqueue_reserve failed, error code %d\n",
			    r);
			bus_dmamap_unload(virtio_dmat(vsc),
			    txq->txq_dmamaps[slot]);
			/* slot already freed by virtio_enqueue_reserve */
			m_freem(m);
			continue;
		}

		txq->txq_mbufs[slot] = m;

		memset(&txq->txq_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
		bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot],
		    0, txq->txq_dmamaps[slot]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot],
		    0, txq->txq_hdr_dmamaps[slot]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		virtio_enqueue(vsc, vq, slot, txq->txq_hdr_dmamaps[slot], true);
		virtio_enqueue(vsc, vq, slot, txq->txq_dmamaps[slot], true);
		virtio_enqueue_commit(vsc, vq, slot, false);

		queued++;
		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	if (queued > 0) {
		virtio_enqueue_commit(vsc, vq, -1, true);
		ifp->if_timer = 5;
	}

out:
	VIOIF_TXQ_UNLOCK(txq);
}

static int
vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, r;

	s = splnet();

	r = ether_ioctl(ifp, cmd, data);
	if ((r == 0 && cmd == SIOCSIFFLAGS) ||
	    (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
		if (ifp->if_flags & IFF_RUNNING)
			r = vioif_rx_filter(ifp->if_softc);
		else
			r = 0;
	}

	splx(s);

	return r;
}

static void
vioif_watchdog(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct vioif_txqueue *txq = &sc->sc_txq;

	if (ifp->if_flags & IFF_RUNNING)
		vioif_tx_vq_done(txq->txq_vq);
}


/*
 * Receive implementation
 */
/* allocate and initialize a mbuf for receive */
static int
vioif_add_rx_mbuf(struct vioif_softc *sc, int i)
{
	struct vioif_rxqueue *rxq = &sc->sc_rxq;
	struct mbuf *m;
	int r;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}
	rxq->rxq_mbufs[i] = m;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	r = bus_dmamap_load_mbuf(virtio_dmat(sc->sc_virtio),
	    rxq->rxq_dmamaps[i],
	    m, BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (r) {
		m_freem(m);
		rxq->rxq_mbufs[i] = NULL;
		return r;
	}

	return 0;
}

/* free a mbuf for receive */
static void
vioif_free_rx_mbuf(struct vioif_softc *sc, int i)
{
	struct vioif_rxqueue *rxq = &sc->sc_rxq;

	bus_dmamap_unload(virtio_dmat(sc->sc_virtio), rxq->rxq_dmamaps[i]);
	m_freem(rxq->rxq_mbufs[i]);
	rxq->rxq_mbufs[i] = NULL;
}

/* add mbufs for all the empty receive slots */
static void
vioif_populate_rx_mbufs(struct vioif_softc *sc)
{
	struct vioif_rxqueue *rxq = &sc->sc_rxq;

	VIOIF_RXQ_LOCK(rxq);
	vioif_populate_rx_mbufs_locked(sc);
	VIOIF_RXQ_UNLOCK(rxq);
}

static void
vioif_populate_rx_mbufs_locked(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_rxqueue *rxq = &sc->sc_rxq;
	int i, r, ndone = 0;
	struct virtqueue *vq = rxq->rxq_vq;

	KASSERT(VIOIF_RXQ_LOCKED(rxq));

	if (rxq->rxq_stopping)
		return;

	for (i = 0; i < vq->vq_num; i++) {
		int slot;
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN)
			break;
		if (r != 0)
			panic("enqueue_prep for rx buffers");
		if (rxq->rxq_mbufs[slot] == NULL) {
			r = vioif_add_rx_mbuf(sc, slot);
			if (r != 0) {
				printf("%s: rx mbuf allocation failed, "
				       "error code %d\n",
				       device_xname(sc->sc_dev), r);
				break;
			}
		}
		r = virtio_enqueue_reserve(vsc, vq, slot,
		    rxq->rxq_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			vioif_free_rx_mbuf(sc, slot);
			break;
		}
		bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot],
		    0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot],
		    0, MCLBYTES, BUS_DMASYNC_PREREAD);
		virtio_enqueue(vsc, vq, slot, rxq->rxq_hdr_dmamaps[slot], false);
		virtio_enqueue(vsc, vq, slot, rxq->rxq_dmamaps[slot], false);
		virtio_enqueue_commit(vsc, vq, slot, false);
		ndone++;
	}
	if (ndone > 0)
		virtio_enqueue_commit(vsc, vq, -1, true);
}

/* dequeue received packets */
static int
vioif_rx_deq(struct vioif_softc *sc)
{
	struct vioif_rxqueue *rxq = &sc->sc_rxq;
	int r;

	KASSERT(rxq->rxq_stopping);

	VIOIF_RXQ_LOCK(rxq);
	r = vioif_rx_deq_locked(sc);
	VIOIF_RXQ_UNLOCK(rxq);

	return r;
}

/* dequeue received packets */
static int
vioif_rx_deq_locked(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_rxqueue *rxq = &sc->sc_rxq;
	struct virtqueue *vq = rxq->rxq_vq;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int r = 0;
	int slot, len;

	KASSERT(VIOIF_RXQ_LOCKED(rxq));

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		len -= sizeof(struct virtio_net_hdr);
		r = 1;
		bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot],
		    0, sizeof(struct virtio_net_hdr),
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot],
		    0, MCLBYTES,
		    BUS_DMASYNC_POSTREAD);
		m = rxq->rxq_mbufs[slot];
		KASSERT(m != NULL);
		bus_dmamap_unload(virtio_dmat(vsc), rxq->rxq_dmamaps[slot]);
		rxq->rxq_mbufs[slot] = NULL;
		virtio_dequeue_commit(vsc, vq, slot);
		m_set_rcvif(m, ifp);
		m->m_len = m->m_pkthdr.len = len;

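		/*
		 * rxq_lock (a spin mutex) is released around the hand-off,
		 * presumably so the packet is not passed to the network
		 * stack while the queue lock is held.
		 */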
		VIOIF_RXQ_UNLOCK(rxq);
		if_percpuq_enqueue(ifp->if_percpuq, m);
		VIOIF_RXQ_LOCK(rxq);

		if (rxq->rxq_stopping)
			break;
	}

	return r;
}

/* rx interrupt; call _dequeue above and schedule a softint */
static int
vioif_rx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	struct vioif_rxqueue *rxq = &sc->sc_rxq;
	int r = 0;

#ifdef VIOIF_SOFTINT_INTR
	KASSERT(!cpu_intr_p());
#endif

	VIOIF_RXQ_LOCK(rxq);

	if (rxq->rxq_stopping)
		goto out;

	r = vioif_rx_deq_locked(sc);
	if (r)
#ifdef VIOIF_SOFTINT_INTR
		vioif_populate_rx_mbufs_locked(sc);
#else
		softint_schedule(rxq->rxq_softint);
#endif

out:
	VIOIF_RXQ_UNLOCK(rxq);
	return r;
}

/* softint: enqueue receive requests for new incoming packets */
static void
vioif_rx_softint(void *arg)
{
	struct vioif_softc *sc = arg;

	vioif_populate_rx_mbufs(sc);
}

/* free all the mbufs; called from if_stop(disable) */
static void
vioif_rx_drain(struct vioif_softc *sc)
{
	struct vioif_rxqueue *rxq = &sc->sc_rxq;
	struct virtqueue *vq = rxq->rxq_vq;
	int i;

	for (i = 0; i < vq->vq_num; i++) {
		if (rxq->rxq_mbufs[i] == NULL)
			continue;
		vioif_free_rx_mbuf(sc, i);
	}
}

/*
 * Transmission implementation
 */
/* actual transmission is done in if_start */
/* tx interrupt; dequeue and free mbufs */
/*
 * the tx interrupt is normally disabled; this is called when the tx vq
 * fills up and from the watchdog
 */
static int
vioif_tx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct vioif_txqueue *txq = &sc->sc_txq;
	int r = 0;

	VIOIF_TXQ_LOCK(txq);

	if (txq->txq_stopping)
		goto out;

	r = vioif_tx_vq_done_locked(vq);

out:
	VIOIF_TXQ_UNLOCK(txq);
	if (r)
		if_schedule_deferred_start(ifp);
	return r;
}

static int
vioif_tx_vq_done_locked(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	struct vioif_txqueue *txq = &sc->sc_txq;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int r = 0;
	int slot, len;

	KASSERT(VIOIF_TXQ_LOCKED(txq));

	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
		r++;
		bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot],
		    0, sizeof(struct virtio_net_hdr),
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot],
		    0, txq->txq_dmamaps[slot]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m = txq->txq_mbufs[slot];
		bus_dmamap_unload(virtio_dmat(vsc), txq->txq_dmamaps[slot]);
		txq->txq_mbufs[slot] = NULL;
		virtio_dequeue_commit(vsc, vq, slot);
		ifp->if_opackets++;
		m_freem(m);
	}

	if (r)
		ifp->if_flags &= ~IFF_OACTIVE;
	return r;
}

/* free all the mbufs already put on vq; called from if_stop(disable) */
static void
vioif_tx_drain(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_txqueue *txq = &sc->sc_txq;
	struct virtqueue *vq = txq->txq_vq;
	int i;

	KASSERT(txq->txq_stopping);

	for (i = 0; i < vq->vq_num; i++) {
		if (txq->txq_mbufs[i] == NULL)
			continue;
		bus_dmamap_unload(virtio_dmat(vsc), txq->txq_dmamaps[i]);
		m_freem(txq->txq_mbufs[i]);
		txq->txq_mbufs[i] = NULL;
	}
}

/*
 * Control vq
 */
/* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
static int
vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	struct virtqueue *vq = ctrlq->ctrlq_vq;
	int r, slot;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

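	/*
	 * ctrlq_inuse serializes users of the single set of ctrlq_*
	 * buffers: FREE -> INUSE (claimed below) -> DONE (set by
	 * vioif_ctrl_vq_done()) -> FREE again once the result has been
	 * read back.
	 */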
	mutex_enter(&ctrlq->ctrlq_wait_lock);
	while (ctrlq->ctrlq_inuse != FREE)
		cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
	ctrlq->ctrlq_inuse = INUSE;
	mutex_exit(&ctrlq->ctrlq_wait_lock);

	ctrlq->ctrlq_cmd->class = VIRTIO_NET_CTRL_RX;
	ctrlq->ctrlq_cmd->command = cmd;
	ctrlq->ctrlq_rx->onoff = onoff;

	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap,
	    0, sizeof(struct virtio_net_ctrl_cmd),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_rx_dmamap,
	    0, sizeof(struct virtio_net_ctrl_rx),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap,
	    0, sizeof(struct virtio_net_ctrl_status),
	    BUS_DMASYNC_PREREAD);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 3);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_rx_dmamap, true);
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for done */
	mutex_enter(&ctrlq->ctrlq_wait_lock);
	while (ctrlq->ctrlq_inuse != DONE)
		cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
	mutex_exit(&ctrlq->ctrlq_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap, 0,
	    sizeof(struct virtio_net_ctrl_cmd),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_rx_dmamap, 0,
	    sizeof(struct virtio_net_ctrl_rx),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap, 0,
	    sizeof(struct virtio_net_ctrl_status),
	    BUS_DMASYNC_POSTREAD);

	if (ctrlq->ctrlq_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx mode\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

	mutex_enter(&ctrlq->ctrlq_wait_lock);
	ctrlq->ctrlq_inuse = FREE;
	cv_signal(&ctrlq->ctrlq_wait);
	mutex_exit(&ctrlq->ctrlq_wait_lock);

	return r;
}

static int
vioif_set_promisc(struct vioif_softc *sc, bool onoff)
{
	int r;

	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);

	return r;
}

static int
vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
{
	int r;

	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);

	return r;
}

/* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
static int
vioif_set_rx_filter(struct vioif_softc *sc)
{
	/* filter already set in ctrlq->ctrlq_mac_tbl */
	struct virtio_softc *vsc = sc->sc_virtio;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	struct virtqueue *vq = ctrlq->ctrlq_vq;
	int r, slot;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	mutex_enter(&ctrlq->ctrlq_wait_lock);
	while (ctrlq->ctrlq_inuse != FREE)
		cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
	ctrlq->ctrlq_inuse = INUSE;
	mutex_exit(&ctrlq->ctrlq_wait_lock);

	ctrlq->ctrlq_cmd->class = VIRTIO_NET_CTRL_MAC;
	ctrlq->ctrlq_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET;

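	/*
	 * The request is a chain of four descriptors: the class/command
	 * pair, the unicast table, the multicast table, and the writable
	 * status byte, hence virtio_enqueue_reserve(..., 4) below.  The
	 * two tables are loaded here because their length varies with
	 * nentries.
	 */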
	r = bus_dmamap_load(virtio_dmat(vsc), ctrlq->ctrlq_tbl_uc_dmamap,
	    ctrlq->ctrlq_mac_tbl_uc,
	    (sizeof(struct virtio_net_ctrl_mac_tbl)
	     + ETHER_ADDR_LEN * ctrlq->ctrlq_mac_tbl_uc->nentries),
	    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		goto out;
	}
	r = bus_dmamap_load(virtio_dmat(vsc), ctrlq->ctrlq_tbl_mc_dmamap,
	    ctrlq->ctrlq_mac_tbl_mc,
	    (sizeof(struct virtio_net_ctrl_mac_tbl)
	     + ETHER_ADDR_LEN * ctrlq->ctrlq_mac_tbl_mc->nentries),
	    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		bus_dmamap_unload(virtio_dmat(vsc), ctrlq->ctrlq_tbl_uc_dmamap);
		goto out;
	}

	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap,
	    0, sizeof(struct virtio_net_ctrl_cmd),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_tbl_uc_dmamap, 0,
	    (sizeof(struct virtio_net_ctrl_mac_tbl)
	     + ETHER_ADDR_LEN * ctrlq->ctrlq_mac_tbl_uc->nentries),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_tbl_mc_dmamap, 0,
	    (sizeof(struct virtio_net_ctrl_mac_tbl)
	     + ETHER_ADDR_LEN * ctrlq->ctrlq_mac_tbl_mc->nentries),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap,
	    0, sizeof(struct virtio_net_ctrl_status),
	    BUS_DMASYNC_PREREAD);

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 4);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_tbl_uc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_tbl_mc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for done */
	mutex_enter(&ctrlq->ctrlq_wait_lock);
	while (ctrlq->ctrlq_inuse != DONE)
		cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
	mutex_exit(&ctrlq->ctrlq_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap, 0,
	    sizeof(struct virtio_net_ctrl_cmd),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_tbl_uc_dmamap, 0,
	    (sizeof(struct virtio_net_ctrl_mac_tbl)
	     + ETHER_ADDR_LEN * ctrlq->ctrlq_mac_tbl_uc->nentries),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_tbl_mc_dmamap, 0,
	    (sizeof(struct virtio_net_ctrl_mac_tbl)
	     + ETHER_ADDR_LEN * ctrlq->ctrlq_mac_tbl_mc->nentries),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap, 0,
	    sizeof(struct virtio_net_ctrl_status),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(virtio_dmat(vsc), ctrlq->ctrlq_tbl_uc_dmamap);
	bus_dmamap_unload(virtio_dmat(vsc), ctrlq->ctrlq_tbl_mc_dmamap);

	if (ctrlq->ctrlq_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx filter\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

out:
	mutex_enter(&ctrlq->ctrlq_wait_lock);
	ctrlq->ctrlq_inuse = FREE;
	cv_signal(&ctrlq->ctrlq_wait);
	mutex_exit(&ctrlq->ctrlq_wait_lock);

	return r;
}

/* ctrl vq interrupt; wake up the command issuer */
static int
vioif_ctrl_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(virtio_child(vsc));
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	int r, slot;

	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r == ENOENT)
		return 0;
	virtio_dequeue_commit(vsc, vq, slot);

	mutex_enter(&ctrlq->ctrlq_wait_lock);
	ctrlq->ctrlq_inuse = DONE;
	cv_signal(&ctrlq->ctrlq_wait);
	mutex_exit(&ctrlq->ctrlq_wait_lock);

	return 1;
}

/*
 * If IFF_PROMISC is requested, set promiscuous mode.
 * If the multicast filter is small enough (<= MAXENTRIES), set the rx filter.
 * If the multicast filter is too large, use ALLMULTI instead.
 *
 * If setting the rx filter fails, fall back to ALLMULTI.
 * If ALLMULTI fails, fall back to PROMISC.
 */
static int
vioif_rx_filter(struct vioif_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	int nentries;
	int promisc = 0, allmulti = 0, rxfilter = 0;
	int r;

	if (!sc->sc_has_ctrl) {	/* no ctrl vq; always promisc */
		ifp->if_flags |= IFF_PROMISC;
		return 0;
	}

	if (ifp->if_flags & IFF_PROMISC) {
		promisc = 1;
		goto set;
	}

	nentries = -1;
	ETHER_LOCK(&sc->sc_ethercom);
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (nentries++, enm != NULL) {
		if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
			allmulti = 1;
			goto set_unlock;
		}
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			   ETHER_ADDR_LEN)) {
			allmulti = 1;
			goto set_unlock;
		}
		memcpy(ctrlq->ctrlq_mac_tbl_mc->macs[nentries],
		       enm->enm_addrlo, ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);
	}
	rxfilter = 1;

set_unlock:
	ETHER_UNLOCK(&sc->sc_ethercom);

set:
	if (rxfilter) {
		ctrlq->ctrlq_mac_tbl_uc->nentries = 0;
		ctrlq->ctrlq_mac_tbl_mc->nentries = nentries;
		r = vioif_set_rx_filter(sc);
		if (r != 0) {
			rxfilter = 0;
			allmulti = 1; /* fallback */
		}
	} else {
		/* remove rx filter */
		ctrlq->ctrlq_mac_tbl_uc->nentries = 0;
		ctrlq->ctrlq_mac_tbl_mc->nentries = 0;
		r = vioif_set_rx_filter(sc);
		/* what to do on failure? */
	}
	if (allmulti) {
		r = vioif_set_allmulti(sc, true);
		if (r != 0) {
			allmulti = 0;
			promisc = 1; /* fallback */
		}
	} else {
		r = vioif_set_allmulti(sc, false);
		/* what to do on failure? */
	}
	if (promisc) {
		r = vioif_set_promisc(sc, true);
	} else {
		r = vioif_set_promisc(sc, false);
	}

	return r;
}

static bool
vioif_is_link_up(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	uint16_t status;

	if (virtio_features(vsc) & VIRTIO_NET_F_STATUS)
		status = virtio_read_device_config_2(vsc,
		    VIRTIO_NET_CONFIG_STATUS);
	else
		status = VIRTIO_NET_S_LINK_UP;

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

/* change link status */
static void
vioif_update_link_status(struct vioif_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct vioif_txqueue *txq = &sc->sc_txq;
	bool active, changed;
	int link;

	active = vioif_is_link_up(sc);
	changed = false;

	if (active) {
		if (!sc->sc_link_active)
			changed = true;

		link = LINK_STATE_UP;
		sc->sc_link_active = true;
	} else {
		if (sc->sc_link_active)
			changed = true;

		link = LINK_STATE_DOWN;
		sc->sc_link_active = false;
	}

	if (changed) {
		VIOIF_TXQ_LOCK(txq);
		txq->txq_link_active = sc->sc_link_active;
		VIOIF_TXQ_UNLOCK(txq);

		if_link_state_change(ifp, link);
	}
}

static int
vioif_config_change(struct virtio_softc *vsc)
{
	struct vioif_softc *sc = device_private(virtio_child(vsc));

#ifdef VIOIF_SOFTINT_INTR
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif

#ifdef VIOIF_SOFTINT_INTR
	KASSERT(!cpu_intr_p());
	vioif_update_link_status(sc);
	vioif_start(ifp);
#else
	softint_schedule(sc->sc_ctl_softint);
#endif

	return 0;
}

static void
vioif_ctl_softint(void *arg)
{
	struct vioif_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	vioif_update_link_status(sc);
	vioif_start(ifp);
}

MODULE(MODULE_CLASS_DRIVER, if_vioif, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
if_vioif_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_if_vioif,
		    cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_if_vioif,
		    cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}