Home | History | Annotate | Line # | Download | only in pci
if_vioif.c revision 1.11.2.2
      1  1.11.2.2     skrll /*	$NetBSD: if_vioif.c,v 1.11.2.2 2015/06/06 14:40:09 skrll Exp $	*/
      2       1.1   hannken 
      3       1.1   hannken /*
      4       1.1   hannken  * Copyright (c) 2010 Minoura Makoto.
      5       1.1   hannken  * All rights reserved.
      6       1.1   hannken  *
      7       1.1   hannken  * Redistribution and use in source and binary forms, with or without
      8       1.1   hannken  * modification, are permitted provided that the following conditions
      9       1.1   hannken  * are met:
     10       1.1   hannken  * 1. Redistributions of source code must retain the above copyright
     11       1.1   hannken  *    notice, this list of conditions and the following disclaimer.
     12       1.1   hannken  * 2. Redistributions in binary form must reproduce the above copyright
     13       1.1   hannken  *    notice, this list of conditions and the following disclaimer in the
     14       1.1   hannken  *    documentation and/or other materials provided with the distribution.
     15       1.1   hannken  *
     16       1.1   hannken  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     17       1.1   hannken  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     18       1.1   hannken  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     19       1.1   hannken  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     20       1.1   hannken  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     21       1.1   hannken  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     22       1.1   hannken  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     23       1.1   hannken  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     24       1.1   hannken  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     25       1.1   hannken  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26       1.1   hannken  */
     27       1.1   hannken 
     28       1.1   hannken #include <sys/cdefs.h>
     29  1.11.2.2     skrll __KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.11.2.2 2015/06/06 14:40:09 skrll Exp $");
     30  1.11.2.1     skrll 
     31  1.11.2.1     skrll #ifdef _KERNEL_OPT
     32  1.11.2.1     skrll #include "opt_net_mpsafe.h"
     33  1.11.2.1     skrll #endif
     34       1.1   hannken 
     35       1.1   hannken #include <sys/param.h>
     36       1.1   hannken #include <sys/systm.h>
     37       1.1   hannken #include <sys/kernel.h>
     38       1.1   hannken #include <sys/bus.h>
     39       1.1   hannken #include <sys/condvar.h>
     40       1.1   hannken #include <sys/device.h>
     41       1.1   hannken #include <sys/intr.h>
     42       1.1   hannken #include <sys/kmem.h>
     43       1.1   hannken #include <sys/mbuf.h>
     44       1.1   hannken #include <sys/mutex.h>
     45       1.1   hannken #include <sys/sockio.h>
     46  1.11.2.1     skrll #include <sys/cpu.h>
     47       1.1   hannken 
     48       1.1   hannken #include <dev/pci/pcidevs.h>
     49       1.1   hannken #include <dev/pci/pcireg.h>
     50       1.1   hannken #include <dev/pci/pcivar.h>
     51       1.1   hannken #include <dev/pci/virtioreg.h>
     52       1.1   hannken #include <dev/pci/virtiovar.h>
     53       1.1   hannken 
     54       1.1   hannken #include <net/if.h>
     55       1.1   hannken #include <net/if_media.h>
     56       1.1   hannken #include <net/if_ether.h>
     57       1.1   hannken 
     58       1.1   hannken #include <net/bpf.h>
     59       1.1   hannken 
     60       1.1   hannken 
     61       1.7     ozaki #ifdef NET_MPSAFE
     62       1.7     ozaki #define VIOIF_MPSAFE	1
     63       1.7     ozaki #endif
     64       1.7     ozaki 
     65  1.11.2.1     skrll #ifdef SOFTINT_INTR
     66  1.11.2.1     skrll #define VIOIF_SOFTINT_INTR	1
     67  1.11.2.1     skrll #endif
     68  1.11.2.1     skrll 
/*
 * if_vioifreg.h:
 */
/* Configuration registers (offsets into the virtio device-specific config) */
#define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
#define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */

/* Feature bits, negotiated with the host via virtio_negotiate_features() */
#define VIRTIO_NET_F_CSUM	(1<<0)
#define VIRTIO_NET_F_GUEST_CSUM	(1<<1)
#define VIRTIO_NET_F_MAC	(1<<5)
#define VIRTIO_NET_F_GSO	(1<<6)
#define VIRTIO_NET_F_GUEST_TSO4	(1<<7)
#define VIRTIO_NET_F_GUEST_TSO6	(1<<8)
#define VIRTIO_NET_F_GUEST_ECN	(1<<9)
#define VIRTIO_NET_F_GUEST_UFO	(1<<10)
#define VIRTIO_NET_F_HOST_TSO4	(1<<11)
#define VIRTIO_NET_F_HOST_TSO6	(1<<12)
#define VIRTIO_NET_F_HOST_ECN	(1<<13)
#define VIRTIO_NET_F_HOST_UFO	(1<<14)
#define VIRTIO_NET_F_MRG_RXBUF	(1<<15)
#define VIRTIO_NET_F_STATUS	(1<<16)
#define VIRTIO_NET_F_CTRL_VQ	(1<<17)
#define VIRTIO_NET_F_CTRL_RX	(1<<18)
#define VIRTIO_NET_F_CTRL_VLAN	(1<<19)

/* Status */
#define VIRTIO_NET_S_LINK_UP	1

/*
 * Packet header structure, prepended to every frame exchanged on the
 * rx/tx virtqueues.  Layout is fixed by the virtio spec, hence __packed.
 */
struct virtio_net_hdr {
	uint8_t		flags;		/* VIRTIO_NET_HDR_F_* */
	uint8_t		gso_type;	/* VIRTIO_NET_HDR_GSO_* */
	uint16_t	hdr_len;
	uint16_t	gso_size;
	uint16_t	csum_start;
	uint16_t	csum_offset;
#if 0
	uint16_t	num_buffers; /* if VIRTIO_NET_F_MRG_RXBUF enabled */
#endif
} __packed;

#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1 /* flags */
#define VIRTIO_NET_HDR_GSO_NONE		0 /* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV4	1 /* gso_type */
#define VIRTIO_NET_HDR_GSO_UDP		3 /* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV6	4 /* gso_type */
#define VIRTIO_NET_HDR_GSO_ECN		0x80 /* gso_type, |'ed */

#define VIRTIO_NET_MAX_GSO_LEN		(65536+ETHER_HDR_LEN)

/* Control virtqueue: class/command pair sent at the head of each request */
struct virtio_net_ctrl_cmd {
	uint8_t	class;
	uint8_t	command;
} __packed;
#define VIRTIO_NET_CTRL_RX		0
# define VIRTIO_NET_CTRL_RX_PROMISC	0
# define VIRTIO_NET_CTRL_RX_ALLMULTI	1

#define VIRTIO_NET_CTRL_MAC		1
# define VIRTIO_NET_CTRL_MAC_TABLE_SET	0

#define VIRTIO_NET_CTRL_VLAN		2
# define VIRTIO_NET_CTRL_VLAN_ADD	0
# define VIRTIO_NET_CTRL_VLAN_DEL	1

/* Single-byte acknowledgement returned by the device for a control command */
struct virtio_net_ctrl_status {
	uint8_t	ack;		/* VIRTIO_NET_OK or VIRTIO_NET_ERR */
} __packed;
#define VIRTIO_NET_OK			0
#define VIRTIO_NET_ERR			1

/* Parameter for VIRTIO_NET_CTRL_RX_{PROMISC,ALLMULTI}: 1 = on, 0 = off */
struct virtio_net_ctrl_rx {
	uint8_t	onoff;
} __packed;

/* MAC filter table: nentries addresses follow the count (flexible array) */
struct virtio_net_ctrl_mac_tbl {
	uint32_t nentries;
	uint8_t macs[][ETHER_ADDR_LEN];
} __packed;

struct virtio_net_ctrl_vlan {
	uint16_t id;
} __packed;
    154       1.1   hannken 
    155       1.1   hannken 
/*
 * if_vioifvar.h:
 */
struct vioif_softc {
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;	/* parent virtio device */
	struct virtqueue	sc_vq[3];	/* [0] rx, [1] tx, [2] ctrl */

	uint8_t			sc_mac[ETHER_ADDR_LEN];
	struct ethercom		sc_ethercom;
	short			sc_deferred_init_done;

	/* bus_dmamem: one contiguous chunk carved up in vioif_alloc_mems() */
	bus_dma_segment_t	sc_hdr_segs[1];
	struct virtio_net_hdr	*sc_hdrs;	/* per-slot rx headers */
#define sc_rx_hdrs	sc_hdrs
	struct virtio_net_hdr	*sc_tx_hdrs;	/* per-slot tx headers */
	struct virtio_net_ctrl_cmd *sc_ctrl_cmd;
	struct virtio_net_ctrl_status *sc_ctrl_status;
	struct virtio_net_ctrl_rx *sc_ctrl_rx;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc;

	/* kmem: one allocation backs all the per-slot arrays below */
	bus_dmamap_t		*sc_arrays;
#define sc_rxhdr_dmamaps sc_arrays
	bus_dmamap_t		*sc_txhdr_dmamaps;
	bus_dmamap_t		*sc_rx_dmamaps;
	bus_dmamap_t		*sc_tx_dmamaps;
	struct mbuf		**sc_rx_mbufs;
	struct mbuf		**sc_tx_mbufs;

	bus_dmamap_t		sc_ctrl_cmd_dmamap;
	bus_dmamap_t		sc_ctrl_status_dmamap;
	bus_dmamap_t		sc_ctrl_rx_dmamap;
	bus_dmamap_t		sc_ctrl_tbl_uc_dmamap;
	bus_dmamap_t		sc_ctrl_tbl_mc_dmamap;

	void			*sc_rx_softint;

	/* ownership state of the single shared set of sc_ctrl_* buffers,
	 * protected by sc_ctrl_wait_lock / sc_ctrl_wait */
	enum {
		FREE, INUSE, DONE
	}			sc_ctrl_inuse;
	kcondvar_t		sc_ctrl_wait;
	kmutex_t		sc_ctrl_wait_lock;
	kmutex_t		*sc_tx_lock;	/* NULL unless VIOIF_MPSAFE */
	kmutex_t		*sc_rx_lock;	/* NULL unless VIOIF_MPSAFE */
	bool			sc_stopping;
};
#define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */
    208       1.1   hannken 
/*
 * Lock helpers.  The tx/rx mutexes exist only when VIOIF_MPSAFE, so each
 * macro is a no-op when the pointer is NULL.  The LOCK/UNLOCK statement
 * macros are wrapped in do { } while (0) so that a bare `if` inside them
 * cannot capture a caller's `else` (dangling-else hazard of the original
 * definitions).  The *_LOCKED macros stay expressions for use in KASSERT.
 */
#define VIOIF_TX_LOCK(_sc)	do { if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock); } while (0)
#define VIOIF_TX_UNLOCK(_sc)	do { if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock); } while (0)
#define VIOIF_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define VIOIF_RX_LOCK(_sc)	do { if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock); } while (0)
#define VIOIF_RX_UNLOCK(_sc)	do { if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock); } while (0)
#define VIOIF_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
    215       1.7     ozaki 
/* cfattach interface functions */
static int	vioif_match(device_t, cfdata_t, void *);
static void	vioif_attach(device_t, device_t, void *);
static void	vioif_deferred_init(device_t);

/* ifnet interface functions */
static int	vioif_init(struct ifnet *);
static void	vioif_stop(struct ifnet *, int);
static void	vioif_start(struct ifnet *);
static int	vioif_ioctl(struct ifnet *, u_long, void *);
static void	vioif_watchdog(struct ifnet *);

/* rx */
static int	vioif_add_rx_mbuf(struct vioif_softc *, int);
static void	vioif_free_rx_mbuf(struct vioif_softc *, int);
static void	vioif_populate_rx_mbufs(struct vioif_softc *);
static void	vioif_populate_rx_mbufs_locked(struct vioif_softc *);
static int	vioif_rx_deq(struct vioif_softc *);
static int	vioif_rx_deq_locked(struct vioif_softc *);
static int	vioif_rx_vq_done(struct virtqueue *);
static void	vioif_rx_softint(void *);
static void	vioif_rx_drain(struct vioif_softc *);

/* tx */
static int	vioif_tx_vq_done(struct virtqueue *);
static int	vioif_tx_vq_done_locked(struct virtqueue *);
static void	vioif_tx_drain(struct vioif_softc *);

/* other control */
static int	vioif_updown(struct vioif_softc *, bool);
static int	vioif_ctrl_rx(struct vioif_softc *, int, bool);
static int	vioif_set_promisc(struct vioif_softc *, bool);
static int	vioif_set_allmulti(struct vioif_softc *, bool);
static int	vioif_set_rx_filter(struct vioif_softc *);
static int	vioif_rx_filter(struct vioif_softc *);
static int	vioif_ctrl_vq_done(struct virtqueue *);

/* autoconf(9) attachment glue */
CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
		  vioif_match, vioif_attach, NULL, NULL);
    255       1.1   hannken 
    256       1.1   hannken static int
    257       1.1   hannken vioif_match(device_t parent, cfdata_t match, void *aux)
    258       1.1   hannken {
    259       1.1   hannken 	struct virtio_softc *va = aux;
    260       1.1   hannken 
    261       1.1   hannken 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
    262       1.1   hannken 		return 1;
    263       1.1   hannken 
    264       1.1   hannken 	return 0;
    265       1.1   hannken }
    266       1.1   hannken 
    267       1.1   hannken /* allocate memory */
    268       1.1   hannken /*
    269       1.1   hannken  * dma memory is used for:
 *   sc_rx_hdrs[slot]:	 metadata array for received frames (READ)
    271       1.1   hannken  *   sc_tx_hdrs[slot]:	 metadata array for frames to be sent (WRITE)
    272       1.1   hannken  *   sc_ctrl_cmd:	 command to be sent via ctrl vq (WRITE)
    273       1.1   hannken  *   sc_ctrl_status:	 return value for a command via ctrl vq (READ)
    274       1.1   hannken  *   sc_ctrl_rx:	 parameter for a VIRTIO_NET_CTRL_RX class command
    275       1.1   hannken  *			 (WRITE)
    276       1.1   hannken  *   sc_ctrl_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
    277       1.1   hannken  *			 class command (WRITE)
    278       1.1   hannken  *   sc_ctrl_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
    279       1.1   hannken  *			 class command (WRITE)
    280       1.1   hannken  * sc_ctrl_* structures are allocated only one each; they are protected by
    281       1.1   hannken  * sc_ctrl_inuse variable and sc_ctrl_wait condvar.
    282       1.1   hannken  */
    283       1.1   hannken /*
    284       1.1   hannken  * dynamically allocated memory is used for:
    285       1.1   hannken  *   sc_rxhdr_dmamaps[slot]:	bus_dmamap_t array for sc_rx_hdrs[slot]
    286       1.1   hannken  *   sc_txhdr_dmamaps[slot]:	bus_dmamap_t array for sc_tx_hdrs[slot]
 *   sc_rx_dmamaps[slot]:	bus_dmamap_t array for received payload
 *   sc_tx_dmamaps[slot]:	bus_dmamap_t array for sent payload
 *   sc_rx_mbufs[slot]:		mbuf pointer array for received frames
    290       1.1   hannken  *   sc_tx_mbufs[slot]:		mbuf pointer array for sent frames
    291       1.1   hannken  */
    292       1.1   hannken static int
    293       1.1   hannken vioif_alloc_mems(struct vioif_softc *sc)
    294       1.1   hannken {
    295       1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
    296       1.1   hannken 	int allocsize, allocsize2, r, rsegs, i;
    297       1.1   hannken 	void *vaddr;
    298       1.1   hannken 	intptr_t p;
    299       1.1   hannken 	int rxqsize, txqsize;
    300       1.1   hannken 
    301       1.1   hannken 	rxqsize = vsc->sc_vqs[0].vq_num;
    302       1.1   hannken 	txqsize = vsc->sc_vqs[1].vq_num;
    303       1.1   hannken 
    304       1.1   hannken 	allocsize = sizeof(struct virtio_net_hdr) * rxqsize;
    305       1.1   hannken 	allocsize += sizeof(struct virtio_net_hdr) * txqsize;
    306       1.1   hannken 	if (vsc->sc_nvqs == 3) {
    307       1.1   hannken 		allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
    308       1.1   hannken 		allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
    309       1.1   hannken 		allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
    310       1.1   hannken 		allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
    311       1.1   hannken 			+ sizeof(struct virtio_net_ctrl_mac_tbl)
    312       1.1   hannken 			+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
    313       1.1   hannken 	}
    314       1.1   hannken 	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
    315       1.1   hannken 			     &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
    316       1.1   hannken 	if (r != 0) {
    317       1.1   hannken 		aprint_error_dev(sc->sc_dev,
    318       1.1   hannken 				 "DMA memory allocation failed, size %d, "
    319       1.1   hannken 				 "error code %d\n", allocsize, r);
    320       1.1   hannken 		goto err_none;
    321       1.1   hannken 	}
    322       1.1   hannken 	r = bus_dmamem_map(vsc->sc_dmat,
    323       1.1   hannken 			   &sc->sc_hdr_segs[0], 1, allocsize,
    324       1.1   hannken 			   &vaddr, BUS_DMA_NOWAIT);
    325       1.1   hannken 	if (r != 0) {
    326       1.1   hannken 		aprint_error_dev(sc->sc_dev,
    327       1.1   hannken 				 "DMA memory map failed, "
    328       1.1   hannken 				 "error code %d\n", r);
    329       1.1   hannken 		goto err_dmamem_alloc;
    330       1.1   hannken 	}
    331       1.1   hannken 	sc->sc_hdrs = vaddr;
    332       1.1   hannken 	memset(vaddr, 0, allocsize);
    333       1.1   hannken 	p = (intptr_t) vaddr;
    334       1.1   hannken 	p += sizeof(struct virtio_net_hdr) * rxqsize;
    335       1.1   hannken #define P(name,size)	do { sc->sc_ ##name = (void*) p;	\
    336       1.1   hannken 			     p += size; } while (0)
    337       1.1   hannken 	P(tx_hdrs, sizeof(struct virtio_net_hdr) * txqsize);
    338       1.1   hannken 	if (vsc->sc_nvqs == 3) {
    339       1.1   hannken 		P(ctrl_cmd, sizeof(struct virtio_net_ctrl_cmd));
    340       1.1   hannken 		P(ctrl_status, sizeof(struct virtio_net_ctrl_status));
    341       1.1   hannken 		P(ctrl_rx, sizeof(struct virtio_net_ctrl_rx));
    342       1.1   hannken 		P(ctrl_mac_tbl_uc, sizeof(struct virtio_net_ctrl_mac_tbl));
    343       1.1   hannken 		P(ctrl_mac_tbl_mc,
    344       1.1   hannken 		  (sizeof(struct virtio_net_ctrl_mac_tbl)
    345       1.1   hannken 		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES));
    346       1.1   hannken 	}
    347       1.1   hannken #undef P
    348       1.1   hannken 
    349       1.1   hannken 	allocsize2 = sizeof(bus_dmamap_t) * (rxqsize + txqsize);
    350       1.1   hannken 	allocsize2 += sizeof(bus_dmamap_t) * (rxqsize + txqsize);
    351       1.1   hannken 	allocsize2 += sizeof(struct mbuf*) * (rxqsize + txqsize);
    352       1.1   hannken 	sc->sc_arrays = kmem_zalloc(allocsize2, KM_SLEEP);
    353       1.1   hannken 	if (sc->sc_arrays == NULL)
    354       1.1   hannken 		goto err_dmamem_map;
    355       1.1   hannken 	sc->sc_txhdr_dmamaps = sc->sc_arrays + rxqsize;
    356       1.1   hannken 	sc->sc_rx_dmamaps = sc->sc_txhdr_dmamaps + txqsize;
    357       1.1   hannken 	sc->sc_tx_dmamaps = sc->sc_rx_dmamaps + rxqsize;
    358       1.1   hannken 	sc->sc_rx_mbufs = (void*) (sc->sc_tx_dmamaps + txqsize);
    359       1.1   hannken 	sc->sc_tx_mbufs = sc->sc_rx_mbufs + rxqsize;
    360       1.1   hannken 
    361       1.1   hannken #define C(map, buf, size, nsegs, rw, usage)				\
    362       1.1   hannken 	do {								\
    363       1.1   hannken 		r = bus_dmamap_create(vsc->sc_dmat, size, nsegs, size, 0, \
    364       1.1   hannken 				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,	\
    365       1.1   hannken 				      &sc->sc_ ##map);			\
    366       1.1   hannken 		if (r != 0) {						\
    367       1.1   hannken 			aprint_error_dev(sc->sc_dev,			\
    368       1.1   hannken 					 usage " dmamap creation failed, " \
    369       1.1   hannken 					 "error code %d\n", r);		\
    370       1.1   hannken 					 goto err_reqs;			\
    371       1.1   hannken 		}							\
    372       1.1   hannken 	} while (0)
    373       1.1   hannken #define C_L1(map, buf, size, nsegs, rw, usage)				\
    374       1.1   hannken 	C(map, buf, size, nsegs, rw, usage);				\
    375       1.1   hannken 	do {								\
    376       1.1   hannken 		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
    377       1.1   hannken 				    &sc->sc_ ##buf, size, NULL,		\
    378       1.1   hannken 				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
    379       1.1   hannken 		if (r != 0) {						\
    380       1.1   hannken 			aprint_error_dev(sc->sc_dev,			\
    381       1.1   hannken 					 usage " dmamap load failed, "	\
    382       1.1   hannken 					 "error code %d\n", r);		\
    383       1.1   hannken 			goto err_reqs;					\
    384       1.1   hannken 		}							\
    385       1.1   hannken 	} while (0)
    386       1.1   hannken #define C_L2(map, buf, size, nsegs, rw, usage)				\
    387       1.1   hannken 	C(map, buf, size, nsegs, rw, usage);				\
    388       1.1   hannken 	do {								\
    389       1.1   hannken 		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
    390       1.1   hannken 				    sc->sc_ ##buf, size, NULL,		\
    391       1.1   hannken 				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
    392       1.1   hannken 		if (r != 0) {						\
    393       1.1   hannken 			aprint_error_dev(sc->sc_dev,			\
    394       1.1   hannken 					 usage " dmamap load failed, "	\
    395       1.1   hannken 					 "error code %d\n", r);		\
    396       1.1   hannken 			goto err_reqs;					\
    397       1.1   hannken 		}							\
    398       1.1   hannken 	} while (0)
    399       1.1   hannken 	for (i = 0; i < rxqsize; i++) {
    400       1.1   hannken 		C_L1(rxhdr_dmamaps[i], rx_hdrs[i],
    401       1.1   hannken 		    sizeof(struct virtio_net_hdr), 1,
    402       1.1   hannken 		    READ, "rx header");
    403       1.1   hannken 		C(rx_dmamaps[i], NULL, MCLBYTES, 1, 0, "rx payload");
    404       1.1   hannken 	}
    405       1.1   hannken 
    406       1.1   hannken 	for (i = 0; i < txqsize; i++) {
    407       1.1   hannken 		C_L1(txhdr_dmamaps[i], rx_hdrs[i],
    408       1.1   hannken 		    sizeof(struct virtio_net_hdr), 1,
    409       1.1   hannken 		    WRITE, "tx header");
    410       1.1   hannken 		C(tx_dmamaps[i], NULL, ETHER_MAX_LEN, 256 /* XXX */, 0,
    411       1.1   hannken 		  "tx payload");
    412       1.1   hannken 	}
    413       1.1   hannken 
    414       1.1   hannken 	if (vsc->sc_nvqs == 3) {
    415       1.1   hannken 		/* control vq class & command */
    416       1.1   hannken 		C_L2(ctrl_cmd_dmamap, ctrl_cmd,
    417       1.1   hannken 		    sizeof(struct virtio_net_ctrl_cmd), 1, WRITE,
    418       1.1   hannken 		    "control command");
    419  1.11.2.2     skrll 
    420       1.1   hannken 		/* control vq status */
    421       1.1   hannken 		C_L2(ctrl_status_dmamap, ctrl_status,
    422       1.1   hannken 		    sizeof(struct virtio_net_ctrl_status), 1, READ,
    423       1.1   hannken 		    "control status");
    424       1.1   hannken 
    425       1.1   hannken 		/* control vq rx mode command parameter */
    426       1.1   hannken 		C_L2(ctrl_rx_dmamap, ctrl_rx,
    427       1.1   hannken 		    sizeof(struct virtio_net_ctrl_rx), 1, WRITE,
    428       1.1   hannken 		    "rx mode control command");
    429       1.1   hannken 
    430       1.1   hannken 		/* control vq MAC filter table for unicast */
    431       1.1   hannken 		/* do not load now since its length is variable */
    432       1.1   hannken 		C(ctrl_tbl_uc_dmamap, NULL,
    433       1.1   hannken 		  sizeof(struct virtio_net_ctrl_mac_tbl) + 0, 1, WRITE,
    434       1.1   hannken 		  "unicast MAC address filter command");
    435       1.1   hannken 
    436       1.1   hannken 		/* control vq MAC filter table for multicast */
    437       1.1   hannken 		C(ctrl_tbl_mc_dmamap, NULL,
    438       1.1   hannken 		  (sizeof(struct virtio_net_ctrl_mac_tbl)
    439       1.1   hannken 		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES),
    440       1.1   hannken 		  1, WRITE, "multicast MAC address filter command");
    441       1.1   hannken 	}
    442       1.1   hannken #undef C_L2
    443       1.1   hannken #undef C_L1
    444       1.1   hannken #undef C
    445       1.1   hannken 
    446       1.1   hannken 	return 0;
    447       1.1   hannken 
    448       1.1   hannken err_reqs:
    449       1.1   hannken #define D(map)								\
    450       1.1   hannken 	do {								\
    451       1.1   hannken 		if (sc->sc_ ##map) {					\
    452       1.1   hannken 			bus_dmamap_destroy(vsc->sc_dmat, sc->sc_ ##map); \
    453       1.1   hannken 			sc->sc_ ##map = NULL;				\
    454       1.1   hannken 		}							\
    455       1.1   hannken 	} while (0)
    456       1.1   hannken 	D(ctrl_tbl_mc_dmamap);
    457       1.1   hannken 	D(ctrl_tbl_uc_dmamap);
    458       1.1   hannken 	D(ctrl_rx_dmamap);
    459       1.1   hannken 	D(ctrl_status_dmamap);
    460       1.1   hannken 	D(ctrl_cmd_dmamap);
    461       1.1   hannken 	for (i = 0; i < txqsize; i++) {
    462       1.1   hannken 		D(tx_dmamaps[i]);
    463       1.1   hannken 		D(txhdr_dmamaps[i]);
    464       1.1   hannken 	}
    465       1.1   hannken 	for (i = 0; i < rxqsize; i++) {
    466       1.1   hannken 		D(rx_dmamaps[i]);
    467       1.1   hannken 		D(rxhdr_dmamaps[i]);
    468       1.1   hannken 	}
    469       1.1   hannken #undef D
    470       1.1   hannken 	if (sc->sc_arrays) {
    471       1.1   hannken 		kmem_free(sc->sc_arrays, allocsize2);
    472       1.1   hannken 		sc->sc_arrays = 0;
    473       1.1   hannken 	}
    474       1.1   hannken err_dmamem_map:
    475       1.1   hannken 	bus_dmamem_unmap(vsc->sc_dmat, sc->sc_hdrs, allocsize);
    476       1.1   hannken err_dmamem_alloc:
    477       1.1   hannken 	bus_dmamem_free(vsc->sc_dmat, &sc->sc_hdr_segs[0], 1);
    478       1.1   hannken err_none:
    479       1.1   hannken 	return -1;
    480       1.1   hannken }
    481       1.1   hannken 
    482       1.1   hannken static void
    483       1.1   hannken vioif_attach(device_t parent, device_t self, void *aux)
    484       1.1   hannken {
    485       1.1   hannken 	struct vioif_softc *sc = device_private(self);
    486       1.1   hannken 	struct virtio_softc *vsc = device_private(parent);
    487       1.1   hannken 	uint32_t features;
    488       1.1   hannken 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    489       1.7     ozaki 	u_int flags;
    490       1.1   hannken 
    491       1.1   hannken 	if (vsc->sc_child != NULL) {
    492       1.1   hannken 		aprint_normal(": child already attached for %s; "
    493       1.1   hannken 			      "something wrong...\n",
    494       1.1   hannken 			      device_xname(parent));
    495       1.1   hannken 		return;
    496       1.1   hannken 	}
    497       1.1   hannken 
    498       1.1   hannken 	sc->sc_dev = self;
    499       1.1   hannken 	sc->sc_virtio = vsc;
    500       1.1   hannken 
    501       1.1   hannken 	vsc->sc_child = self;
    502       1.1   hannken 	vsc->sc_ipl = IPL_NET;
    503       1.1   hannken 	vsc->sc_vqs = &sc->sc_vq[0];
    504  1.11.2.2     skrll 	vsc->sc_config_change = NULL;
    505       1.1   hannken 	vsc->sc_intrhand = virtio_vq_intr;
    506       1.6     ozaki 	vsc->sc_flags = 0;
    507       1.1   hannken 
    508       1.7     ozaki #ifdef VIOIF_MPSAFE
    509       1.7     ozaki 	vsc->sc_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
    510       1.7     ozaki #endif
    511  1.11.2.1     skrll #ifdef VIOIF_SOFTINT_INTR
    512  1.11.2.1     skrll 	vsc->sc_flags |= VIRTIO_F_PCI_INTR_SOFTINT;
    513  1.11.2.1     skrll #endif
    514       1.7     ozaki 
    515       1.1   hannken 	features = virtio_negotiate_features(vsc,
    516       1.1   hannken 					     (VIRTIO_NET_F_MAC |
    517       1.1   hannken 					      VIRTIO_NET_F_STATUS |
    518       1.1   hannken 					      VIRTIO_NET_F_CTRL_VQ |
    519       1.1   hannken 					      VIRTIO_NET_F_CTRL_RX |
    520       1.1   hannken 					      VIRTIO_F_NOTIFY_ON_EMPTY));
    521       1.1   hannken 	if (features & VIRTIO_NET_F_MAC) {
    522       1.1   hannken 		sc->sc_mac[0] = virtio_read_device_config_1(vsc,
    523       1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+0);
    524       1.1   hannken 		sc->sc_mac[1] = virtio_read_device_config_1(vsc,
    525       1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+1);
    526       1.1   hannken 		sc->sc_mac[2] = virtio_read_device_config_1(vsc,
    527       1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+2);
    528       1.1   hannken 		sc->sc_mac[3] = virtio_read_device_config_1(vsc,
    529       1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+3);
    530       1.1   hannken 		sc->sc_mac[4] = virtio_read_device_config_1(vsc,
    531       1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+4);
    532       1.1   hannken 		sc->sc_mac[5] = virtio_read_device_config_1(vsc,
    533       1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+5);
    534       1.1   hannken 	} else {
    535       1.1   hannken 		/* code stolen from sys/net/if_tap.c */
    536       1.1   hannken 		struct timeval tv;
    537       1.1   hannken 		uint32_t ui;
    538       1.1   hannken 		getmicrouptime(&tv);
    539       1.1   hannken 		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
    540       1.1   hannken 		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
    541       1.1   hannken 		virtio_write_device_config_1(vsc,
    542       1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+0,
    543       1.1   hannken 					     sc->sc_mac[0]);
    544       1.1   hannken 		virtio_write_device_config_1(vsc,
    545       1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+1,
    546       1.1   hannken 					     sc->sc_mac[1]);
    547       1.1   hannken 		virtio_write_device_config_1(vsc,
    548       1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+2,
    549       1.1   hannken 					     sc->sc_mac[2]);
    550       1.1   hannken 		virtio_write_device_config_1(vsc,
    551       1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+3,
    552       1.1   hannken 					     sc->sc_mac[3]);
    553       1.1   hannken 		virtio_write_device_config_1(vsc,
    554       1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+4,
    555       1.1   hannken 					     sc->sc_mac[4]);
    556       1.1   hannken 		virtio_write_device_config_1(vsc,
    557       1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+5,
    558       1.1   hannken 					     sc->sc_mac[5]);
    559       1.1   hannken 	}
    560       1.1   hannken 	aprint_normal(": Ethernet address %s\n", ether_sprintf(sc->sc_mac));
    561       1.1   hannken 	aprint_naive("\n");
    562       1.1   hannken 
    563       1.1   hannken 	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
    564       1.1   hannken 			    MCLBYTES+sizeof(struct virtio_net_hdr), 2,
    565       1.1   hannken 			    "rx") != 0) {
    566       1.1   hannken 		goto err;
    567       1.1   hannken 	}
    568       1.1   hannken 	vsc->sc_nvqs = 1;
    569       1.1   hannken 	sc->sc_vq[0].vq_done = vioif_rx_vq_done;
    570       1.1   hannken 	if (virtio_alloc_vq(vsc, &sc->sc_vq[1], 1,
    571       1.1   hannken 			    (sizeof(struct virtio_net_hdr)
    572       1.1   hannken 			     + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
    573       1.1   hannken 			    VIRTIO_NET_TX_MAXNSEGS + 1,
    574       1.1   hannken 			    "tx") != 0) {
    575       1.1   hannken 		goto err;
    576       1.1   hannken 	}
    577       1.7     ozaki 
    578       1.7     ozaki #ifdef VIOIF_MPSAFE
    579       1.7     ozaki 	sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    580       1.7     ozaki 	sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    581       1.7     ozaki #else
    582       1.7     ozaki 	sc->sc_tx_lock = NULL;
    583       1.7     ozaki 	sc->sc_rx_lock = NULL;
    584       1.7     ozaki #endif
    585       1.7     ozaki 	sc->sc_stopping = false;
    586       1.7     ozaki 
    587       1.1   hannken 	vsc->sc_nvqs = 2;
    588       1.1   hannken 	sc->sc_vq[1].vq_done = vioif_tx_vq_done;
    589       1.1   hannken 	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
    590       1.1   hannken 	virtio_stop_vq_intr(vsc, &sc->sc_vq[1]); /* not urgent; do it later */
    591       1.1   hannken 	if ((features & VIRTIO_NET_F_CTRL_VQ)
    592       1.1   hannken 	    && (features & VIRTIO_NET_F_CTRL_RX)) {
    593       1.1   hannken 		if (virtio_alloc_vq(vsc, &sc->sc_vq[2], 2,
    594       1.1   hannken 				    NBPG, 1, "control") == 0) {
    595       1.1   hannken 			sc->sc_vq[2].vq_done = vioif_ctrl_vq_done;
    596       1.1   hannken 			cv_init(&sc->sc_ctrl_wait, "ctrl_vq");
    597       1.1   hannken 			mutex_init(&sc->sc_ctrl_wait_lock,
    598       1.1   hannken 				   MUTEX_DEFAULT, IPL_NET);
    599       1.1   hannken 			sc->sc_ctrl_inuse = FREE;
    600       1.1   hannken 			virtio_start_vq_intr(vsc, &sc->sc_vq[2]);
    601       1.1   hannken 			vsc->sc_nvqs = 3;
    602       1.1   hannken 		}
    603       1.1   hannken 	}
    604       1.1   hannken 
    605       1.7     ozaki #ifdef VIOIF_MPSAFE
    606       1.7     ozaki 	flags = SOFTINT_NET | SOFTINT_MPSAFE;
    607       1.7     ozaki #else
    608       1.7     ozaki 	flags = SOFTINT_NET;
    609       1.7     ozaki #endif
    610       1.7     ozaki 	sc->sc_rx_softint = softint_establish(flags, vioif_rx_softint, sc);
    611       1.1   hannken 	if (sc->sc_rx_softint == NULL) {
    612       1.1   hannken 		aprint_error_dev(self, "cannot establish softint\n");
    613       1.1   hannken 		goto err;
    614       1.1   hannken 	}
    615       1.1   hannken 
    616       1.1   hannken 	if (vioif_alloc_mems(sc) < 0)
    617       1.1   hannken 		goto err;
    618       1.1   hannken 
    619       1.1   hannken 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
    620       1.1   hannken 	ifp->if_softc = sc;
    621       1.1   hannken 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    622       1.1   hannken 	ifp->if_start = vioif_start;
    623       1.1   hannken 	ifp->if_ioctl = vioif_ioctl;
    624       1.1   hannken 	ifp->if_init = vioif_init;
    625       1.1   hannken 	ifp->if_stop = vioif_stop;
    626       1.1   hannken 	ifp->if_capabilities = 0;
    627       1.1   hannken 	ifp->if_watchdog = vioif_watchdog;
    628       1.1   hannken 
    629      1.11     ozaki 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
    630      1.11     ozaki 
    631       1.1   hannken 	if_attach(ifp);
    632       1.1   hannken 	ether_ifattach(ifp, sc->sc_mac);
    633       1.1   hannken 
    634       1.1   hannken 	return;
    635       1.1   hannken 
    636       1.1   hannken err:
    637       1.7     ozaki 	if (sc->sc_tx_lock)
    638       1.7     ozaki 		mutex_obj_free(sc->sc_tx_lock);
    639       1.7     ozaki 	if (sc->sc_rx_lock)
    640       1.7     ozaki 		mutex_obj_free(sc->sc_rx_lock);
    641       1.7     ozaki 
    642       1.1   hannken 	if (vsc->sc_nvqs == 3) {
    643       1.1   hannken 		virtio_free_vq(vsc, &sc->sc_vq[2]);
    644       1.1   hannken 		cv_destroy(&sc->sc_ctrl_wait);
    645       1.1   hannken 		mutex_destroy(&sc->sc_ctrl_wait_lock);
    646       1.1   hannken 		vsc->sc_nvqs = 2;
    647       1.1   hannken 	}
    648       1.1   hannken 	if (vsc->sc_nvqs == 2) {
    649       1.1   hannken 		virtio_free_vq(vsc, &sc->sc_vq[1]);
    650       1.1   hannken 		vsc->sc_nvqs = 1;
    651       1.1   hannken 	}
    652       1.1   hannken 	if (vsc->sc_nvqs == 1) {
    653       1.1   hannken 		virtio_free_vq(vsc, &sc->sc_vq[0]);
    654       1.1   hannken 		vsc->sc_nvqs = 0;
    655       1.1   hannken 	}
    656       1.1   hannken 	vsc->sc_child = (void*)1;
    657       1.1   hannken 	return;
    658       1.1   hannken }
    659       1.1   hannken 
    660       1.1   hannken /* we need interrupts to make promiscuous mode off */
    661       1.1   hannken static void
    662       1.1   hannken vioif_deferred_init(device_t self)
    663       1.1   hannken {
    664       1.1   hannken 	struct vioif_softc *sc = device_private(self);
    665       1.1   hannken 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    666       1.1   hannken 	int r;
    667       1.1   hannken 
    668       1.9     ozaki 	if (ifp->if_flags & IFF_PROMISC)
    669      1.10     ozaki 		return;
    670       1.9     ozaki 
    671       1.1   hannken 	r =  vioif_set_promisc(sc, false);
    672       1.1   hannken 	if (r != 0)
    673       1.1   hannken 		aprint_error_dev(self, "resetting promisc mode failed, "
    674       1.1   hannken 				 "errror code %d\n", r);
    675       1.1   hannken }
    676       1.1   hannken 
    677       1.1   hannken /*
    678       1.1   hannken  * Interface functions for ifnet
    679       1.1   hannken  */
static int
vioif_init(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;

	/* Reset the device to a known state before (re)initializing. */
	vioif_stop(ifp, 0);

	/*
	 * One-time deferred setup: clearing promiscuous mode needs the
	 * control vq (sc_nvqs == 3) and working interrupts, so it could
	 * not be done at attach time.
	 */
	if (!sc->sc_deferred_init_done) {
		struct virtio_softc *vsc = sc->sc_virtio;

		sc->sc_deferred_init_done = 1;
		if (vsc->sc_nvqs == 3)	/* control vq was allocated */
			vioif_deferred_init(sc->sc_dev);
	}

	/* Have to set false before vioif_populate_rx_mbufs */
	sc->sc_stopping = false;

	vioif_populate_rx_mbufs(sc);

	vioif_updown(sc, true);
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	vioif_rx_filter(sc);

	return 0;
}
    707       1.1   hannken 
/*
 * if_stop handler: halt all I/O by resetting the device, release
 * in-flight buffers, and leave the device ready for a later
 * vioif_init().
 */
static void
vioif_stop(struct ifnet *ifp, int disable)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;

	/* Take the locks to ensure that ongoing TX/RX finish */
	VIOIF_TX_LOCK(sc);
	VIOIF_RX_LOCK(sc);
	sc->sc_stopping = true;
	VIOIF_RX_UNLOCK(sc);
	VIOIF_TX_UNLOCK(sc);

	/* only way to stop I/O and DMA is resetting... */
	virtio_reset(vsc);
	vioif_rx_deq(sc);
	vioif_tx_drain(sc);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	if (disable)
		vioif_rx_drain(sc);	/* also release the RX mbufs */

	/*
	 * Reinitialize so a subsequent vioif_init() can bring the
	 * interface back up: renegotiate the previously accepted
	 * features and restore the normal vq interrupt settings.
	 */
	virtio_reinit_start(vsc);
	virtio_negotiate_features(vsc, vsc->sc_features);
	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[1]);	/* tx intr stays off */
	if (vsc->sc_nvqs >= 3)
		virtio_start_vq_intr(vsc, &sc->sc_vq[2]);
	virtio_reinit_end(vsc);
	vioif_updown(sc, false);
}
    739       1.1   hannken 
/*
 * if_start handler: drain the interface send queue into the TX
 * virtqueue.  TX completion interrupts are normally disabled, so
 * completed slots are reclaimed here (when the ring fills) and from
 * the watchdog.
 */
static void
vioif_start(struct ifnet *ifp)
{
	struct vioif_softc *sc = ifp->if_softc;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[1]; /* tx vq */
	struct mbuf *m;
	int queued = 0, retry = 0;

	VIOIF_TX_LOCK(sc);

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		goto out;

	if (sc->sc_stopping)
		goto out;

	for (;;) {
		int slot, r;

		IFQ_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

retry:
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN) {
			/* ring full: reclaim completed slots, retry once */
			ifp->if_flags |= IFF_OACTIVE;
			vioif_tx_vq_done_locked(vq);
			if (retry++ == 0)
				goto retry;
			else
				break;
		}
		if (r != 0)
			panic("enqueue_prep for a tx buffer");
		r = bus_dmamap_load_mbuf(vsc->sc_dmat,
					 sc->sc_tx_dmamaps[slot],
					 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (r != 0) {
			virtio_enqueue_abort(vsc, vq, slot);
			printf("%s: tx dmamap load failed, error code %d\n",
			       device_xname(sc->sc_dev), r);
			break;
		}
		r = virtio_enqueue_reserve(vsc, vq, slot,
					sc->sc_tx_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			/* not enough descriptors: back out, retry once */
			bus_dmamap_unload(vsc->sc_dmat,
					  sc->sc_tx_dmamaps[slot]);
			ifp->if_flags |= IFF_OACTIVE;
			vioif_tx_vq_done_locked(vq);
			if (retry++ == 0)
				goto retry;
			else
				break;
		}

		sc->sc_tx_mbufs[slot] = m;

		/* zeroed header (no offloads requested) + payload */
		memset(&sc->sc_tx_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
				0, sc->sc_txhdr_dmamaps[slot]->dm_mapsize,
				BUS_DMASYNC_PREWRITE);
		virtio_enqueue(vsc, vq, slot, sc->sc_txhdr_dmamaps[slot], true);
		virtio_enqueue(vsc, vq, slot, sc->sc_tx_dmamaps[slot], true);
		virtio_enqueue_commit(vsc, vq, slot, false);
		queued++;
		bpf_mtap(ifp, m);
	}

	if (m != NULL) {
		/* last dequeued packet could not be queued; drop it */
		ifp->if_flags |= IFF_OACTIVE;
		m_freem(m);
	}

	if (queued > 0) {
		/* notify the device once for the whole batch */
		virtio_enqueue_commit(vsc, vq, -1, true);
		ifp->if_timer = 5;
	}

out:
	VIOIF_TX_UNLOCK(sc);
}
    828       1.1   hannken 
    829       1.1   hannken static int
    830       1.1   hannken vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    831       1.1   hannken {
    832       1.1   hannken 	int s, r;
    833       1.1   hannken 
    834       1.1   hannken 	s = splnet();
    835       1.1   hannken 
    836       1.1   hannken 	r = ether_ioctl(ifp, cmd, data);
    837       1.1   hannken 	if ((r == 0 && cmd == SIOCSIFFLAGS) ||
    838       1.1   hannken 	    (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
    839       1.1   hannken 		if (ifp->if_flags & IFF_RUNNING)
    840       1.1   hannken 			r = vioif_rx_filter(ifp->if_softc);
    841       1.1   hannken 		else
    842       1.1   hannken 			r = 0;
    843       1.1   hannken 	}
    844       1.1   hannken 
    845       1.1   hannken 	splx(s);
    846       1.1   hannken 
    847       1.1   hannken 	return r;
    848       1.1   hannken }
    849       1.1   hannken 
    850       1.1   hannken void
    851       1.1   hannken vioif_watchdog(struct ifnet *ifp)
    852       1.1   hannken {
    853       1.1   hannken 	struct vioif_softc *sc = ifp->if_softc;
    854       1.1   hannken 
    855       1.1   hannken 	if (ifp->if_flags & IFF_RUNNING)
    856       1.1   hannken 		vioif_tx_vq_done(&sc->sc_vq[1]);
    857       1.1   hannken }
    858       1.1   hannken 
    859       1.1   hannken 
    860       1.1   hannken /*
 * Receive implementation
    862       1.1   hannken  */
/* allocate and initialize an mbuf for receive */
    864       1.1   hannken static int
    865       1.1   hannken vioif_add_rx_mbuf(struct vioif_softc *sc, int i)
    866       1.1   hannken {
    867       1.1   hannken 	struct mbuf *m;
    868       1.1   hannken 	int r;
    869       1.1   hannken 
    870       1.1   hannken 	MGETHDR(m, M_DONTWAIT, MT_DATA);
    871       1.1   hannken 	if (m == NULL)
    872       1.1   hannken 		return ENOBUFS;
    873       1.1   hannken 	MCLGET(m, M_DONTWAIT);
    874       1.1   hannken 	if ((m->m_flags & M_EXT) == 0) {
    875       1.1   hannken 		m_freem(m);
    876       1.1   hannken 		return ENOBUFS;
    877       1.1   hannken 	}
    878       1.1   hannken 	sc->sc_rx_mbufs[i] = m;
    879       1.1   hannken 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
    880       1.1   hannken 	r = bus_dmamap_load_mbuf(sc->sc_virtio->sc_dmat,
    881       1.1   hannken 				 sc->sc_rx_dmamaps[i],
    882       1.1   hannken 				 m, BUS_DMA_READ|BUS_DMA_NOWAIT);
    883       1.1   hannken 	if (r) {
    884       1.1   hannken 		m_freem(m);
    885       1.1   hannken 		sc->sc_rx_mbufs[i] = 0;
    886       1.1   hannken 		return r;
    887       1.1   hannken 	}
    888       1.1   hannken 
    889       1.1   hannken 	return 0;
    890       1.1   hannken }
    891       1.1   hannken 
/* free an mbuf for receive */
    893       1.1   hannken static void
    894       1.1   hannken vioif_free_rx_mbuf(struct vioif_softc *sc, int i)
    895       1.1   hannken {
    896       1.1   hannken 	bus_dmamap_unload(sc->sc_virtio->sc_dmat, sc->sc_rx_dmamaps[i]);
    897       1.1   hannken 	m_freem(sc->sc_rx_mbufs[i]);
    898       1.1   hannken 	sc->sc_rx_mbufs[i] = NULL;
    899       1.1   hannken }
    900       1.1   hannken 
/* add mbufs for all the empty receive slots */
static void
vioif_populate_rx_mbufs(struct vioif_softc *sc)
{
	/* Locked wrapper; the real work is in the _locked variant. */
	VIOIF_RX_LOCK(sc);
	vioif_populate_rx_mbufs_locked(sc);
	VIOIF_RX_UNLOCK(sc);
}
    909  1.11.2.1     skrll 
static void
vioif_populate_rx_mbufs_locked(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	int i, r, ndone = 0;
	struct virtqueue *vq = &sc->sc_vq[0]; /* rx vq */

	KASSERT(VIOIF_RX_LOCKED(sc));

	if (sc->sc_stopping)
		return;

	/* Fill every free slot with a header + cluster descriptor pair. */
	for (i = 0; i < vq->vq_num; i++) {
		int slot;
		r = virtio_enqueue_prep(vsc, vq, &slot);
		if (r == EAGAIN)
			break;	/* ring is full */
		if (r != 0)
			panic("enqueue_prep for rx buffers");
		if (sc->sc_rx_mbufs[slot] == NULL) {
			/* slot has no mbuf yet (first fill or after deq) */
			r = vioif_add_rx_mbuf(sc, slot);
			if (r != 0) {
				printf("%s: rx mbuf allocation failed, "
				       "error code %d\n",
				       device_xname(sc->sc_dev), r);
				break;
			}
		}
		r = virtio_enqueue_reserve(vsc, vq, slot,
					sc->sc_rx_dmamaps[slot]->dm_nsegs + 1);
		if (r != 0) {
			vioif_free_rx_mbuf(sc, slot);
			break;
		}
		/* device will write: virtio header first, then packet data */
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
			0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
			0, MCLBYTES, BUS_DMASYNC_PREREAD);
		virtio_enqueue(vsc, vq, slot, sc->sc_rxhdr_dmamaps[slot], false);
		virtio_enqueue(vsc, vq, slot, sc->sc_rx_dmamaps[slot], false);
		virtio_enqueue_commit(vsc, vq, slot, false);
		ndone++;
	}
	if (ndone > 0)
		/* notify the device once for the whole batch */
		virtio_enqueue_commit(vsc, vq, -1, true);
}
    956       1.1   hannken 
/* dequeue received packets */
static int
vioif_rx_deq(struct vioif_softc *sc)
{
	int r;

	/* Only used on the stop path, after sc_stopping has been set. */
	KASSERT(sc->sc_stopping);

	VIOIF_RX_LOCK(sc);
	r = vioif_rx_deq_locked(sc);
	VIOIF_RX_UNLOCK(sc);

	return r;
}
    971       1.7     ozaki 
/* dequeue received packets */
    973       1.7     ozaki static int
    974       1.7     ozaki vioif_rx_deq_locked(struct vioif_softc *sc)
    975       1.7     ozaki {
    976       1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
    977       1.1   hannken 	struct virtqueue *vq = &sc->sc_vq[0];
    978       1.1   hannken 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    979       1.1   hannken 	struct mbuf *m;
    980       1.1   hannken 	int r = 0;
    981       1.1   hannken 	int slot, len;
    982       1.1   hannken 
    983       1.7     ozaki 	KASSERT(VIOIF_RX_LOCKED(sc));
    984       1.7     ozaki 
    985       1.1   hannken 	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
    986       1.1   hannken 		len -= sizeof(struct virtio_net_hdr);
    987       1.1   hannken 		r = 1;
    988       1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
    989       1.1   hannken 				0, sizeof(struct virtio_net_hdr),
    990       1.1   hannken 				BUS_DMASYNC_POSTREAD);
    991       1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
    992       1.1   hannken 				0, MCLBYTES,
    993       1.1   hannken 				BUS_DMASYNC_POSTREAD);
    994       1.1   hannken 		m = sc->sc_rx_mbufs[slot];
    995       1.1   hannken 		KASSERT(m != NULL);
    996       1.1   hannken 		bus_dmamap_unload(vsc->sc_dmat, sc->sc_rx_dmamaps[slot]);
    997       1.1   hannken 		sc->sc_rx_mbufs[slot] = 0;
    998       1.1   hannken 		virtio_dequeue_commit(vsc, vq, slot);
    999       1.1   hannken 		m->m_pkthdr.rcvif = ifp;
   1000       1.1   hannken 		m->m_len = m->m_pkthdr.len = len;
   1001       1.1   hannken 		ifp->if_ipackets++;
   1002       1.1   hannken 		bpf_mtap(ifp, m);
   1003       1.7     ozaki 
   1004       1.7     ozaki 		VIOIF_RX_UNLOCK(sc);
   1005       1.1   hannken 		(*ifp->if_input)(ifp, m);
   1006       1.7     ozaki 		VIOIF_RX_LOCK(sc);
   1007       1.7     ozaki 
   1008       1.7     ozaki 		if (sc->sc_stopping)
   1009       1.7     ozaki 			break;
   1010       1.1   hannken 	}
   1011       1.3  christos 
   1012       1.1   hannken 	return r;
   1013       1.1   hannken }
   1014       1.1   hannken 
   1015       1.1   hannken /* rx interrupt; call _dequeue above and schedule a softint */
static int
vioif_rx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	int r = 0;

#ifdef VIOIF_SOFTINT_INTR
	/* interrupts are delivered via softint; must not be hard irq */
	KASSERT(!cpu_intr_p());
#endif

	VIOIF_RX_LOCK(sc);

	if (sc->sc_stopping)
		goto out;

	r = vioif_rx_deq_locked(sc);
	if (r)
#ifdef VIOIF_SOFTINT_INTR
		/* already in softint context: repost RX buffers directly */
		vioif_populate_rx_mbufs_locked(sc);
#else
		/* defer reposting RX buffers to the softint */
		softint_schedule(sc->sc_rx_softint);
#endif

out:
	VIOIF_RX_UNLOCK(sc);
	return r;
}
   1044       1.1   hannken 
/* softint: enqueue receive requests for new incoming packets */
static void
vioif_rx_softint(void *arg)
{
	struct vioif_softc *sc = arg;

	/* Repost RX buffers freed up by vioif_rx_vq_done(). */
	vioif_populate_rx_mbufs(sc);
}
   1053       1.1   hannken 
   1054       1.1   hannken /* free all the mbufs; called from if_stop(disable) */
   1055       1.1   hannken static void
   1056       1.1   hannken vioif_rx_drain(struct vioif_softc *sc)
   1057       1.1   hannken {
   1058       1.1   hannken 	struct virtqueue *vq = &sc->sc_vq[0];
   1059       1.1   hannken 	int i;
   1060       1.1   hannken 
   1061       1.1   hannken 	for (i = 0; i < vq->vq_num; i++) {
   1062       1.1   hannken 		if (sc->sc_rx_mbufs[i] == NULL)
   1063       1.1   hannken 			continue;
   1064       1.1   hannken 		vioif_free_rx_mbuf(sc, i);
   1065       1.1   hannken 	}
   1066       1.1   hannken }
   1067       1.1   hannken 
   1068       1.1   hannken 
   1069       1.1   hannken /*
 * Transmission implementation
   1071       1.1   hannken  */
   1072       1.1   hannken /* actual transmission is done in if_start */
   1073       1.1   hannken /* tx interrupt; dequeue and free mbufs */
   1074       1.1   hannken /*
   1075       1.1   hannken  * tx interrupt is actually disabled; this should be called upon
   1076       1.1   hannken  * tx vq full and watchdog
   1077       1.1   hannken  */
static int
vioif_tx_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioif_softc *sc = device_private(vsc->sc_child);
	int r = 0;

	VIOIF_TX_LOCK(sc);

	/* Nothing to reclaim while the interface is being stopped. */
	if (sc->sc_stopping)
		goto out;

	r = vioif_tx_vq_done_locked(vq);

out:
	VIOIF_TX_UNLOCK(sc);
	return r;
}
   1096       1.7     ozaki 
   1097       1.7     ozaki static int
   1098       1.7     ozaki vioif_tx_vq_done_locked(struct virtqueue *vq)
   1099       1.7     ozaki {
   1100       1.7     ozaki 	struct virtio_softc *vsc = vq->vq_owner;
   1101       1.7     ozaki 	struct vioif_softc *sc = device_private(vsc->sc_child);
   1102       1.1   hannken 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1103       1.1   hannken 	struct mbuf *m;
   1104       1.1   hannken 	int r = 0;
   1105       1.1   hannken 	int slot, len;
   1106       1.1   hannken 
   1107       1.7     ozaki 	KASSERT(VIOIF_TX_LOCKED(sc));
   1108       1.7     ozaki 
   1109       1.1   hannken 	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
   1110       1.1   hannken 		r++;
   1111       1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
   1112       1.1   hannken 				0, sizeof(struct virtio_net_hdr),
   1113       1.1   hannken 				BUS_DMASYNC_POSTWRITE);
   1114       1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
   1115       1.1   hannken 				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
   1116       1.1   hannken 				BUS_DMASYNC_POSTWRITE);
   1117       1.1   hannken 		m = sc->sc_tx_mbufs[slot];
   1118       1.1   hannken 		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[slot]);
   1119       1.1   hannken 		sc->sc_tx_mbufs[slot] = 0;
   1120       1.1   hannken 		virtio_dequeue_commit(vsc, vq, slot);
   1121       1.1   hannken 		ifp->if_opackets++;
   1122       1.1   hannken 		m_freem(m);
   1123       1.1   hannken 	}
   1124       1.1   hannken 
   1125       1.1   hannken 	if (r)
   1126       1.1   hannken 		ifp->if_flags &= ~IFF_OACTIVE;
   1127       1.1   hannken 	return r;
   1128       1.1   hannken }
   1129       1.1   hannken 
   1130       1.1   hannken /* free all the mbufs already put on vq; called from if_stop(disable) */
   1131       1.1   hannken static void
   1132       1.1   hannken vioif_tx_drain(struct vioif_softc *sc)
   1133       1.1   hannken {
   1134       1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
   1135       1.1   hannken 	struct virtqueue *vq = &sc->sc_vq[1];
   1136       1.1   hannken 	int i;
   1137       1.1   hannken 
   1138       1.7     ozaki 	KASSERT(sc->sc_stopping);
   1139       1.7     ozaki 
   1140       1.1   hannken 	for (i = 0; i < vq->vq_num; i++) {
   1141       1.1   hannken 		if (sc->sc_tx_mbufs[i] == NULL)
   1142       1.1   hannken 			continue;
   1143       1.1   hannken 		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[i]);
   1144       1.1   hannken 		m_freem(sc->sc_tx_mbufs[i]);
   1145       1.1   hannken 		sc->sc_tx_mbufs[i] = NULL;
   1146       1.1   hannken 	}
   1147       1.1   hannken }
   1148       1.1   hannken 
   1149       1.1   hannken /*
   1150       1.1   hannken  * Control vq
   1151       1.1   hannken  */
   1152       1.1   hannken /* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
/*
 * Issue a VIRTIO_NET_CTRL_RX class command (e.g. PROMISC/ALLMULTI
 * on/off) on the control virtqueue and sleep until the device acks it.
 *
 * Returns 0 on success, ENOTSUP if there is no control vq, or EIO if
 * the device reports failure.  May sleep; must be called from thread
 * context.
 */
static int
vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[2];	/* vq[2] is the control vq */
	int r, slot;

	/* No control virtqueue negotiated: command cannot be issued. */
	if (vsc->sc_nvqs < 3)
		return ENOTSUP;

	/*
	 * There is a single set of control-command DMA buffers;
	 * serialize their use with the FREE/INUSE/DONE state machine.
	 */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != FREE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = INUSE;
	mutex_exit(&sc->sc_ctrl_wait_lock);

	/* Fill in the command header and its on/off argument. */
	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_RX;
	sc->sc_ctrl_cmd->command = cmd;
	sc->sc_ctrl_rx->onoff = onoff;

	/* Push command + argument to the device; prepare to read status. */
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
			0, sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap,
			0, sizeof(struct virtio_net_ctrl_rx),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
			0, sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_PREREAD);

	/*
	 * Only one control command is ever in flight (guarded above),
	 * so the ring can never be busy here; treat it as fatal.
	 */
	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 3);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	/* Descriptor chain: cmd (RO), rx arg (RO), status (device-writable). */
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_rx_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* Sleep until vioif_ctrl_vq_done() marks the command DONE. */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != DONE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	mutex_exit(&sc->sc_ctrl_wait_lock);
	/* already dequeued by vioif_ctrl_vq_done() */

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
			sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap, 0,
			sizeof(struct virtio_net_ctrl_rx),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
			sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_POSTREAD);

	/* Device writes VIRTIO_NET_OK into the status buffer on success. */
	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx mode\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

	/* Release the control buffers and wake any waiter. */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = FREE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return r;
}
   1226       1.1   hannken 
   1227       1.1   hannken static int
   1228       1.1   hannken vioif_set_promisc(struct vioif_softc *sc, bool onoff)
   1229       1.1   hannken {
   1230       1.1   hannken 	int r;
   1231       1.1   hannken 
   1232       1.1   hannken 	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);
   1233       1.1   hannken 
   1234       1.1   hannken 	return r;
   1235       1.1   hannken }
   1236       1.1   hannken 
   1237       1.1   hannken static int
   1238       1.1   hannken vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
   1239       1.1   hannken {
   1240       1.1   hannken 	int r;
   1241       1.1   hannken 
   1242       1.1   hannken 	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);
   1243       1.1   hannken 
   1244       1.1   hannken 	return r;
   1245       1.1   hannken }
   1246       1.1   hannken 
   1247       1.1   hannken /* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
static int
vioif_set_rx_filter(struct vioif_softc *sc)
{
	/* filter already set in sc_ctrl_mac_tbl */
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[2];	/* vq[2] is the control vq */
	int r, slot;

	/*
	 * Push the unicast and multicast MAC tables (prepared by the
	 * caller in sc_ctrl_mac_tbl_uc/_mc) to the device with a
	 * VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for the ack.
	 *
	 * Returns 0 on success, ENOTSUP without a control vq, EIO if the
	 * device rejects the command, or a bus_dma error code.
	 */
	if (vsc->sc_nvqs < 3)
		return ENOTSUP;

	/* Claim exclusive use of the shared control-command buffers. */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != FREE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = INUSE;
	mutex_exit(&sc->sc_ctrl_wait_lock);

	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_MAC;
	sc->sc_ctrl_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	/*
	 * The MAC tables are variable-length (header + nentries
	 * addresses), so their dmamaps are loaded per command and
	 * unloaded again after completion.
	 */
	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap,
			    sc->sc_ctrl_mac_tbl_uc,
			    (sizeof(struct virtio_net_ctrl_mac_tbl)
			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		goto out;
	}
	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap,
			    sc->sc_ctrl_mac_tbl_mc,
			    (sizeof(struct virtio_net_ctrl_mac_tbl)
			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r) {
		printf("%s: control command dmamap load failed, "
		       "error code %d\n", device_xname(sc->sc_dev), r);
		/* Unwind the unicast map loaded just above. */
		bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
		goto out;
	}

	/* Flush command + both tables to the device; prepare for status. */
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
			0, sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
			0, sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_PREREAD);

	/* Only one control command in flight; a busy vq is a driver bug. */
	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, 4);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	/* Chain: cmd (RO), uc table (RO), mc table (RO), status (writable). */
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_uc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_mc_dmamap, true);
	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* Sleep until vioif_ctrl_vq_done() marks the command DONE. */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	while (sc->sc_ctrl_inuse != DONE)
		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
	mutex_exit(&sc->sc_ctrl_wait_lock);
	/* already dequeued by vioif_ctrl_vq_done() */

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
			sizeof(struct virtio_net_ctrl_cmd),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
			(sizeof(struct virtio_net_ctrl_mac_tbl)
			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
			sizeof(struct virtio_net_ctrl_status),
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap);

	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		printf("%s: failed setting rx filter\n",
		       device_xname(sc->sc_dev));
		r = EIO;
	}

out:
	/* Release the control buffers and wake any waiter. */
	mutex_enter(&sc->sc_ctrl_wait_lock);
	sc->sc_ctrl_inuse = FREE;
	cv_signal(&sc->sc_ctrl_wait);
	mutex_exit(&sc->sc_ctrl_wait_lock);

	return r;
}
   1357       1.1   hannken 
   1358       1.1   hannken /* ctrl vq interrupt; wake up the command issuer */
   1359       1.1   hannken static int
   1360       1.1   hannken vioif_ctrl_vq_done(struct virtqueue *vq)
   1361       1.1   hannken {
   1362       1.1   hannken 	struct virtio_softc *vsc = vq->vq_owner;
   1363       1.1   hannken 	struct vioif_softc *sc = device_private(vsc->sc_child);
   1364       1.1   hannken 	int r, slot;
   1365       1.1   hannken 
   1366       1.1   hannken 	r = virtio_dequeue(vsc, vq, &slot, NULL);
   1367       1.1   hannken 	if (r == ENOENT)
   1368       1.1   hannken 		return 0;
   1369       1.1   hannken 	virtio_dequeue_commit(vsc, vq, slot);
   1370       1.1   hannken 
   1371       1.1   hannken 	mutex_enter(&sc->sc_ctrl_wait_lock);
   1372       1.1   hannken 	sc->sc_ctrl_inuse = DONE;
   1373       1.1   hannken 	cv_signal(&sc->sc_ctrl_wait);
   1374       1.1   hannken 	mutex_exit(&sc->sc_ctrl_wait_lock);
   1375       1.1   hannken 
   1376       1.1   hannken 	return 1;
   1377       1.1   hannken }
   1378       1.1   hannken 
/*
 * Program the receive filter:
 *  - if IFF_PROMISC is requested, set promiscuous mode;
 *  - if the multicast list is small enough (<= MAXENTRIES), program
 *    the exact rx filter;
 *  - if the multicast list is too large, use ALLMULTI.
 * Fallbacks: if setting the rx filter fails, fall back to ALLMULTI;
 * if ALLMULTI fails, fall back to PROMISC.
 */
static int
vioif_rx_filter(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int nentries;
	int promisc = 0, allmulti = 0, rxfilter = 0;
	int r;

	/* No control vq: the device cannot filter, so run promiscuous. */
	if (vsc->sc_nvqs < 3) {	/* no ctrl vq; always promisc */
		ifp->if_flags |= IFF_PROMISC;
		return 0;
	}

	if (ifp->if_flags & IFF_PROMISC) {
		promisc = 1;
		goto set;
	}

	/*
	 * Walk the multicast list, copying addresses into the mc table.
	 * Note the comma operator: nentries is incremented before each
	 * membership test, so it starts at -1; after the loop (or at the
	 * overflow check) it holds the number of entries copied so far.
	 */
	nentries = -1;
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (nentries++, enm != NULL) {
		if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
			/* too many entries for the table */
			allmulti = 1;
			goto set;
		}
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			   ETHER_ADDR_LEN)) {
			/* an address range cannot be expressed exactly */
			allmulti = 1;
			goto set;
		}
		memcpy(sc->sc_ctrl_mac_tbl_mc->macs[nentries],
		       enm->enm_addrlo, ETHER_ADDR_LEN);
		ETHER_NEXT_MULTI(step, enm);
	}
	rxfilter = 1;

set:
	if (rxfilter) {
		/* Program the exact multicast filter built above. */
		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
		sc->sc_ctrl_mac_tbl_mc->nentries = nentries;
		r = vioif_set_rx_filter(sc);
		if (r != 0) {
			rxfilter = 0;
			allmulti = 1; /* fallback */
		}
	} else {
		/* remove rx filter */
		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
		sc->sc_ctrl_mac_tbl_mc->nentries = 0;
		r = vioif_set_rx_filter(sc);
		/* what to do on failure? */
	}
	if (allmulti) {
		r = vioif_set_allmulti(sc, true);
		if (r != 0) {
			allmulti = 0;
			promisc = 1; /* fallback */
		}
	} else {
		r = vioif_set_allmulti(sc, false);
		/* what to do on failure? */
	}
	if (promisc) {
		r = vioif_set_promisc(sc, true);
	} else {
		r = vioif_set_promisc(sc, false);
	}

	return r;
}
   1461       1.1   hannken 
   1462       1.1   hannken /* change link status */
   1463       1.1   hannken static int
   1464       1.1   hannken vioif_updown(struct vioif_softc *sc, bool isup)
   1465       1.1   hannken {
   1466       1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
   1467       1.1   hannken 
   1468       1.1   hannken 	if (!(vsc->sc_features & VIRTIO_NET_F_STATUS))
   1469       1.1   hannken 		return ENODEV;
   1470       1.1   hannken 	virtio_write_device_config_1(vsc,
   1471       1.1   hannken 				     VIRTIO_NET_CONFIG_STATUS,
   1472       1.1   hannken 				     isup?VIRTIO_NET_S_LINK_UP:0);
   1473       1.1   hannken 	return 0;
   1474       1.1   hannken }
   1475