if_vioif.c revision 1.3
      1  1.3  christos /*	$NetBSD: if_vioif.c,v 1.3 2013/03/30 03:21:08 christos Exp $	*/
      2  1.1   hannken 
      3  1.1   hannken /*
      4  1.1   hannken  * Copyright (c) 2010 Minoura Makoto.
      5  1.1   hannken  * All rights reserved.
      6  1.1   hannken  *
      7  1.1   hannken  * Redistribution and use in source and binary forms, with or without
      8  1.1   hannken  * modification, are permitted provided that the following conditions
      9  1.1   hannken  * are met:
     10  1.1   hannken  * 1. Redistributions of source code must retain the above copyright
     11  1.1   hannken  *    notice, this list of conditions and the following disclaimer.
     12  1.1   hannken  * 2. Redistributions in binary form must reproduce the above copyright
     13  1.1   hannken  *    notice, this list of conditions and the following disclaimer in the
     14  1.1   hannken  *    documentation and/or other materials provided with the distribution.
     15  1.1   hannken  *
     16  1.1   hannken  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     17  1.1   hannken  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     18  1.1   hannken  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     19  1.1   hannken  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  1.1   hannken  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     21  1.1   hannken  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     22  1.1   hannken  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     23  1.1   hannken  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     24  1.1   hannken  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     25  1.1   hannken  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26  1.1   hannken  */
     27  1.1   hannken 
     28  1.1   hannken #include <sys/cdefs.h>
     29  1.3  christos __KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.3 2013/03/30 03:21:08 christos Exp $");
     30  1.1   hannken 
     31  1.1   hannken #include <sys/param.h>
     32  1.1   hannken #include <sys/systm.h>
     33  1.1   hannken #include <sys/kernel.h>
     34  1.1   hannken #include <sys/bus.h>
     35  1.1   hannken #include <sys/condvar.h>
     36  1.1   hannken #include <sys/device.h>
     37  1.1   hannken #include <sys/intr.h>
     38  1.1   hannken #include <sys/kmem.h>
     39  1.1   hannken #include <sys/mbuf.h>
     40  1.1   hannken #include <sys/mutex.h>
     41  1.1   hannken #include <sys/sockio.h>
     42  1.1   hannken 
     43  1.1   hannken #include <dev/pci/pcidevs.h>
     44  1.1   hannken #include <dev/pci/pcireg.h>
     45  1.1   hannken #include <dev/pci/pcivar.h>
     46  1.1   hannken #include <dev/pci/virtioreg.h>
     47  1.1   hannken #include <dev/pci/virtiovar.h>
     48  1.1   hannken 
     49  1.1   hannken #include <net/if.h>
     50  1.1   hannken #include <net/if_media.h>
     51  1.1   hannken #include <net/if_ether.h>
     52  1.1   hannken 
     53  1.1   hannken #include <net/bpf.h>
     54  1.1   hannken 
     55  1.1   hannken 
     56  1.1   hannken /*
     57  1.1   hannken  * if_vioifreg.h:
     58  1.1   hannken  */
     59  1.1   hannken /* Configuration registers */
     60  1.1   hannken #define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
     61  1.1   hannken #define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */
     62  1.1   hannken 
     63  1.1   hannken /* Feature bits */
     64  1.1   hannken #define VIRTIO_NET_F_CSUM	(1<<0)
     65  1.1   hannken #define VIRTIO_NET_F_GUEST_CSUM	(1<<1)
     66  1.1   hannken #define VIRTIO_NET_F_MAC	(1<<5)
     67  1.1   hannken #define VIRTIO_NET_F_GSO	(1<<6)
     68  1.1   hannken #define VIRTIO_NET_F_GUEST_TSO4	(1<<7)
     69  1.1   hannken #define VIRTIO_NET_F_GUEST_TSO6	(1<<8)
     70  1.1   hannken #define VIRTIO_NET_F_GUEST_ECN	(1<<9)
     71  1.1   hannken #define VIRTIO_NET_F_GUEST_UFO	(1<<10)
     72  1.1   hannken #define VIRTIO_NET_F_HOST_TSO4	(1<<11)
     73  1.1   hannken #define VIRTIO_NET_F_HOST_TSO6	(1<<12)
     74  1.1   hannken #define VIRTIO_NET_F_HOST_ECN	(1<<13)
     75  1.1   hannken #define VIRTIO_NET_F_HOST_UFO	(1<<14)
     76  1.1   hannken #define VIRTIO_NET_F_MRG_RXBUF	(1<<15)
     77  1.1   hannken #define VIRTIO_NET_F_STATUS	(1<<16)
     78  1.1   hannken #define VIRTIO_NET_F_CTRL_VQ	(1<<17)
     79  1.1   hannken #define VIRTIO_NET_F_CTRL_RX	(1<<18)
     80  1.1   hannken #define VIRTIO_NET_F_CTRL_VLAN	(1<<19)
     81  1.1   hannken 
     82  1.1   hannken /* Status */
     83  1.1   hannken #define VIRTIO_NET_S_LINK_UP	1
     84  1.1   hannken 
     85  1.1   hannken /* Packet header structure */
     86  1.1   hannken struct virtio_net_hdr {
     87  1.1   hannken 	uint8_t		flags;
     88  1.1   hannken 	uint8_t		gso_type;
     89  1.1   hannken 	uint16_t	hdr_len;
     90  1.1   hannken 	uint16_t	gso_size;
     91  1.1   hannken 	uint16_t	csum_start;
     92  1.1   hannken 	uint16_t	csum_offset;
     93  1.1   hannken #if 0
     94  1.1   hannken 	uint16_t	num_buffers; /* if VIRTIO_NET_F_MRG_RXBUF enabled */
     95  1.1   hannken #endif
     96  1.1   hannken } __packed;
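/*
 * Note: every frame on the rx/tx virtqueues is preceded by one of these
 * headers in its own descriptor.  This driver negotiates no checksum or
 * GSO offload, so vioif_start() just zeroes the header (flags = 0,
 * gso_type = VIRTIO_NET_HDR_GSO_NONE) for each transmitted packet, and
 * the headers live in a dedicated DMA region (sc_hdrs) rather than being
 * prepended to the mbufs themselves.
 */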
     97  1.1   hannken 
     98  1.1   hannken #define VIRTIO_NET_HDR_F_NEEDS_CSUM	1 /* flags */
     99  1.1   hannken #define VIRTIO_NET_HDR_GSO_NONE		0 /* gso_type */
    100  1.1   hannken #define VIRTIO_NET_HDR_GSO_TCPV4	1 /* gso_type */
    101  1.1   hannken #define VIRTIO_NET_HDR_GSO_UDP		3 /* gso_type */
    102  1.1   hannken #define VIRTIO_NET_HDR_GSO_TCPV6	4 /* gso_type */
    103  1.1   hannken #define VIRTIO_NET_HDR_GSO_ECN		0x80 /* gso_type, |'ed */
    104  1.1   hannken 
    105  1.1   hannken #define VIRTIO_NET_MAX_GSO_LEN		(65536+ETHER_HDR_LEN)
    106  1.1   hannken 
    107  1.1   hannken /* Control virtqueue */
    108  1.1   hannken struct virtio_net_ctrl_cmd {
    109  1.1   hannken 	uint8_t	class;
    110  1.1   hannken 	uint8_t	command;
    111  1.1   hannken } __packed;
    112  1.1   hannken #define VIRTIO_NET_CTRL_RX		0
    113  1.1   hannken # define VIRTIO_NET_CTRL_RX_PROMISC	0
    114  1.1   hannken # define VIRTIO_NET_CTRL_RX_ALLMULTI	1
    115  1.1   hannken 
    116  1.1   hannken #define VIRTIO_NET_CTRL_MAC		1
    117  1.1   hannken # define VIRTIO_NET_CTRL_MAC_TABLE_SET	0
    118  1.1   hannken 
    119  1.1   hannken #define VIRTIO_NET_CTRL_VLAN		2
    120  1.1   hannken # define VIRTIO_NET_CTRL_VLAN_ADD	0
    121  1.1   hannken # define VIRTIO_NET_CTRL_VLAN_DEL	1
    122  1.1   hannken 
    123  1.1   hannken struct virtio_net_ctrl_status {
    124  1.1   hannken 	uint8_t	ack;
    125  1.1   hannken } __packed;
    126  1.1   hannken #define VIRTIO_NET_OK			0
    127  1.1   hannken #define VIRTIO_NET_ERR			1
    128  1.1   hannken 
    129  1.1   hannken struct virtio_net_ctrl_rx {
    130  1.1   hannken 	uint8_t	onoff;
    131  1.1   hannken } __packed;
    132  1.1   hannken 
    133  1.1   hannken struct virtio_net_ctrl_mac_tbl {
    134  1.1   hannken 	uint32_t nentries;
    135  1.1   hannken 	uint8_t macs[][ETHER_ADDR_LEN];
    136  1.1   hannken } __packed;
    137  1.1   hannken 
    138  1.1   hannken struct virtio_net_ctrl_vlan {
    139  1.1   hannken 	uint16_t id;
    140  1.1   hannken } __packed;
    141  1.1   hannken 
    142  1.1   hannken 
    143  1.1   hannken /*
    144  1.1   hannken  * if_vioifvar.h:
    145  1.1   hannken  */
    146  1.1   hannken struct vioif_softc {
    147  1.1   hannken 	device_t		sc_dev;
    148  1.1   hannken 
    149  1.1   hannken 	struct virtio_softc	*sc_virtio;
    150  1.1   hannken 	struct virtqueue	sc_vq[3];
    151  1.1   hannken 
    152  1.1   hannken 	uint8_t			sc_mac[ETHER_ADDR_LEN];
    153  1.1   hannken 	struct ethercom		sc_ethercom;
    154  1.1   hannken 	uint32_t		sc_features;
    155  1.1   hannken 	short			sc_ifflags;
    156  1.1   hannken 
    157  1.1   hannken 	/* bus_dmamem */
    158  1.1   hannken 	bus_dma_segment_t	sc_hdr_segs[1];
    159  1.1   hannken 	struct virtio_net_hdr	*sc_hdrs;
    160  1.1   hannken #define sc_rx_hdrs	sc_hdrs
    161  1.1   hannken 	struct virtio_net_hdr	*sc_tx_hdrs;
    162  1.1   hannken 	struct virtio_net_ctrl_cmd *sc_ctrl_cmd;
    163  1.1   hannken 	struct virtio_net_ctrl_status *sc_ctrl_status;
    164  1.1   hannken 	struct virtio_net_ctrl_rx *sc_ctrl_rx;
    165  1.1   hannken 	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc;
    166  1.1   hannken 	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc;
    167  1.1   hannken 
    168  1.1   hannken 	/* kmem */
    169  1.1   hannken 	bus_dmamap_t		*sc_arrays;
    170  1.1   hannken #define sc_rxhdr_dmamaps sc_arrays
    171  1.1   hannken 	bus_dmamap_t		*sc_txhdr_dmamaps;
    172  1.1   hannken 	bus_dmamap_t		*sc_rx_dmamaps;
    173  1.1   hannken 	bus_dmamap_t		*sc_tx_dmamaps;
    174  1.1   hannken 	struct mbuf		**sc_rx_mbufs;
    175  1.1   hannken 	struct mbuf		**sc_tx_mbufs;
    176  1.1   hannken 
    177  1.1   hannken 	bus_dmamap_t		sc_ctrl_cmd_dmamap;
    178  1.1   hannken 	bus_dmamap_t		sc_ctrl_status_dmamap;
    179  1.1   hannken 	bus_dmamap_t		sc_ctrl_rx_dmamap;
    180  1.1   hannken 	bus_dmamap_t		sc_ctrl_tbl_uc_dmamap;
    181  1.1   hannken 	bus_dmamap_t		sc_ctrl_tbl_mc_dmamap;
    182  1.1   hannken 
    183  1.1   hannken 	void			*sc_rx_softint;
    184  1.1   hannken 
    185  1.1   hannken 	enum {
    186  1.1   hannken 		FREE, INUSE, DONE
    187  1.1   hannken 	}			sc_ctrl_inuse;
    188  1.1   hannken 	kcondvar_t		sc_ctrl_wait;
    189  1.1   hannken 	kmutex_t		sc_ctrl_wait_lock;
    190  1.1   hannken };
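/*
 * Virtqueue layout used throughout the driver (set up in vioif_attach):
 *   sc_vq[0] - receive queue
 *   sc_vq[1] - transmit queue
 *   sc_vq[2] - control queue, present only when VIRTIO_NET_F_CTRL_VQ and
 *              VIRTIO_NET_F_CTRL_RX were negotiated (vsc->sc_nvqs == 3)
 */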
    191  1.1   hannken #define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
    192  1.1   hannken #define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */
    193  1.1   hannken 
    194  1.1   hannken /* cfattach interface functions */
    195  1.1   hannken static int	vioif_match(device_t, cfdata_t, void *);
    196  1.1   hannken static void	vioif_attach(device_t, device_t, void *);
    197  1.1   hannken static void	vioif_deferred_init(device_t);
    198  1.1   hannken 
    199  1.1   hannken /* ifnet interface functions */
    200  1.1   hannken static int	vioif_init(struct ifnet *);
    201  1.1   hannken static void	vioif_stop(struct ifnet *, int);
    202  1.1   hannken static void	vioif_start(struct ifnet *);
    203  1.1   hannken static int	vioif_ioctl(struct ifnet *, u_long, void *);
    204  1.1   hannken static void	vioif_watchdog(struct ifnet *);
    205  1.1   hannken 
    206  1.1   hannken /* rx */
    207  1.1   hannken static int	vioif_add_rx_mbuf(struct vioif_softc *, int);
    208  1.1   hannken static void	vioif_free_rx_mbuf(struct vioif_softc *, int);
    209  1.1   hannken static void	vioif_populate_rx_mbufs(struct vioif_softc *);
    210  1.1   hannken static int	vioif_rx_deq(struct vioif_softc *);
    211  1.1   hannken static int	vioif_rx_vq_done(struct virtqueue *);
    212  1.1   hannken static void	vioif_rx_softint(void *);
    213  1.1   hannken static void	vioif_rx_drain(struct vioif_softc *);
    214  1.1   hannken 
    215  1.1   hannken /* tx */
    216  1.1   hannken static int	vioif_tx_vq_done(struct virtqueue *);
    217  1.1   hannken static void	vioif_tx_drain(struct vioif_softc *);
    218  1.1   hannken 
    219  1.1   hannken /* other control */
    220  1.1   hannken static int	vioif_updown(struct vioif_softc *, bool);
    221  1.1   hannken static int	vioif_ctrl_rx(struct vioif_softc *, int, bool);
    222  1.1   hannken static int	vioif_set_promisc(struct vioif_softc *, bool);
    223  1.1   hannken static int	vioif_set_allmulti(struct vioif_softc *, bool);
    224  1.1   hannken static int	vioif_set_rx_filter(struct vioif_softc *);
    225  1.1   hannken static int	vioif_rx_filter(struct vioif_softc *);
    226  1.1   hannken static int	vioif_ctrl_vq_done(struct virtqueue *);
    227  1.1   hannken 
    228  1.1   hannken CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
    229  1.1   hannken 		  vioif_match, vioif_attach, NULL, NULL);
    230  1.1   hannken 
    231  1.1   hannken static int
    232  1.1   hannken vioif_match(device_t parent, cfdata_t match, void *aux)
    233  1.1   hannken {
    234  1.1   hannken 	struct virtio_softc *va = aux;
    235  1.1   hannken 
    236  1.1   hannken 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
    237  1.1   hannken 		return 1;
    238  1.1   hannken 
    239  1.1   hannken 	return 0;
    240  1.1   hannken }
    241  1.1   hannken 
    242  1.1   hannken /* allocate memory */
    243  1.1   hannken /*
    244  1.1   hannken  * dma memory is used for:
     245  1.1   hannken  *   sc_rx_hdrs[slot]:	 metadata array for received frames (READ)
    246  1.1   hannken  *   sc_tx_hdrs[slot]:	 metadata array for frames to be sent (WRITE)
    247  1.1   hannken  *   sc_ctrl_cmd:	 command to be sent via ctrl vq (WRITE)
    248  1.1   hannken  *   sc_ctrl_status:	 return value for a command via ctrl vq (READ)
    249  1.1   hannken  *   sc_ctrl_rx:	 parameter for a VIRTIO_NET_CTRL_RX class command
    250  1.1   hannken  *			 (WRITE)
    251  1.1   hannken  *   sc_ctrl_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
    252  1.1   hannken  *			 class command (WRITE)
    253  1.1   hannken  *   sc_ctrl_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
    254  1.1   hannken  *			 class command (WRITE)
    255  1.1   hannken  * sc_ctrl_* structures are allocated only one each; they are protected by
    256  1.1   hannken  * sc_ctrl_inuse variable and sc_ctrl_wait condvar.
    257  1.1   hannken  */
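/*
 * The single bus_dmamem allocation made in vioif_alloc_mems() is carved
 * up in this order (see the P() macro below):
 *   rx headers (rxqsize) | tx headers (txqsize) | ctrl_cmd | ctrl_status |
 *   ctrl_rx | unicast MAC table (header only) | multicast MAC table
 *   (header + up to VIRTIO_NET_CTRL_MAC_MAXENTRIES entries)
 */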
    258  1.1   hannken /*
    259  1.1   hannken  * dynamically allocated memory is used for:
    260  1.1   hannken  *   sc_rxhdr_dmamaps[slot]:	bus_dmamap_t array for sc_rx_hdrs[slot]
    261  1.1   hannken  *   sc_txhdr_dmamaps[slot]:	bus_dmamap_t array for sc_tx_hdrs[slot]
     262  1.1   hannken  *   sc_rx_dmamaps[slot]:	bus_dmamap_t array for received payload
     263  1.1   hannken  *   sc_tx_dmamaps[slot]:	bus_dmamap_t array for sent payload
     264  1.1   hannken  *   sc_rx_mbufs[slot]:		mbuf pointer array for received frames
    265  1.1   hannken  *   sc_tx_mbufs[slot]:		mbuf pointer array for sent frames
    266  1.1   hannken  */
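/*
 * These arrays all come from one kmem_zalloc() (sc_arrays), sliced as:
 *   rx header dmamaps | tx header dmamaps | rx payload dmamaps |
 *   tx payload dmamaps | rx mbuf pointers | tx mbuf pointers
 */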
    267  1.1   hannken static int
    268  1.1   hannken vioif_alloc_mems(struct vioif_softc *sc)
    269  1.1   hannken {
    270  1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
    271  1.1   hannken 	int allocsize, allocsize2, r, rsegs, i;
    272  1.1   hannken 	void *vaddr;
    273  1.1   hannken 	intptr_t p;
    274  1.1   hannken 	int rxqsize, txqsize;
    275  1.1   hannken 
    276  1.1   hannken 	rxqsize = vsc->sc_vqs[0].vq_num;
    277  1.1   hannken 	txqsize = vsc->sc_vqs[1].vq_num;
    278  1.1   hannken 
    279  1.1   hannken 	allocsize = sizeof(struct virtio_net_hdr) * rxqsize;
    280  1.1   hannken 	allocsize += sizeof(struct virtio_net_hdr) * txqsize;
    281  1.1   hannken 	if (vsc->sc_nvqs == 3) {
    282  1.1   hannken 		allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
    283  1.1   hannken 		allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
    284  1.1   hannken 		allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
    285  1.1   hannken 		allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
    286  1.1   hannken 			+ sizeof(struct virtio_net_ctrl_mac_tbl)
    287  1.1   hannken 			+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
    288  1.1   hannken 	}
    289  1.1   hannken 	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
    290  1.1   hannken 			     &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
    291  1.1   hannken 	if (r != 0) {
    292  1.1   hannken 		aprint_error_dev(sc->sc_dev,
    293  1.1   hannken 				 "DMA memory allocation failed, size %d, "
    294  1.1   hannken 				 "error code %d\n", allocsize, r);
    295  1.1   hannken 		goto err_none;
    296  1.1   hannken 	}
    297  1.1   hannken 	r = bus_dmamem_map(vsc->sc_dmat,
    298  1.1   hannken 			   &sc->sc_hdr_segs[0], 1, allocsize,
    299  1.1   hannken 			   &vaddr, BUS_DMA_NOWAIT);
    300  1.1   hannken 	if (r != 0) {
    301  1.1   hannken 		aprint_error_dev(sc->sc_dev,
    302  1.1   hannken 				 "DMA memory map failed, "
    303  1.1   hannken 				 "error code %d\n", r);
    304  1.1   hannken 		goto err_dmamem_alloc;
    305  1.1   hannken 	}
    306  1.1   hannken 	sc->sc_hdrs = vaddr;
    307  1.1   hannken 	memset(vaddr, 0, allocsize);
    308  1.1   hannken 	p = (intptr_t) vaddr;
    309  1.1   hannken 	p += sizeof(struct virtio_net_hdr) * rxqsize;
    310  1.1   hannken #define P(name,size)	do { sc->sc_ ##name = (void*) p;	\
    311  1.1   hannken 			     p += size; } while (0)
    312  1.1   hannken 	P(tx_hdrs, sizeof(struct virtio_net_hdr) * txqsize);
    313  1.1   hannken 	if (vsc->sc_nvqs == 3) {
    314  1.1   hannken 		P(ctrl_cmd, sizeof(struct virtio_net_ctrl_cmd));
    315  1.1   hannken 		P(ctrl_status, sizeof(struct virtio_net_ctrl_status));
    316  1.1   hannken 		P(ctrl_rx, sizeof(struct virtio_net_ctrl_rx));
    317  1.1   hannken 		P(ctrl_mac_tbl_uc, sizeof(struct virtio_net_ctrl_mac_tbl));
    318  1.1   hannken 		P(ctrl_mac_tbl_mc,
    319  1.1   hannken 		  (sizeof(struct virtio_net_ctrl_mac_tbl)
    320  1.1   hannken 		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES));
    321  1.1   hannken 	}
    322  1.1   hannken #undef P
    323  1.1   hannken 
    324  1.1   hannken 	allocsize2 = sizeof(bus_dmamap_t) * (rxqsize + txqsize);
    325  1.1   hannken 	allocsize2 += sizeof(bus_dmamap_t) * (rxqsize + txqsize);
    326  1.1   hannken 	allocsize2 += sizeof(struct mbuf*) * (rxqsize + txqsize);
    327  1.1   hannken 	sc->sc_arrays = kmem_zalloc(allocsize2, KM_SLEEP);
    328  1.1   hannken 	if (sc->sc_arrays == NULL)
    329  1.1   hannken 		goto err_dmamem_map;
    330  1.1   hannken 	sc->sc_txhdr_dmamaps = sc->sc_arrays + rxqsize;
    331  1.1   hannken 	sc->sc_rx_dmamaps = sc->sc_txhdr_dmamaps + txqsize;
    332  1.1   hannken 	sc->sc_tx_dmamaps = sc->sc_rx_dmamaps + rxqsize;
    333  1.1   hannken 	sc->sc_rx_mbufs = (void*) (sc->sc_tx_dmamaps + txqsize);
    334  1.1   hannken 	sc->sc_tx_mbufs = sc->sc_rx_mbufs + rxqsize;
    335  1.1   hannken 
    336  1.1   hannken #define C(map, buf, size, nsegs, rw, usage)				\
    337  1.1   hannken 	do {								\
    338  1.1   hannken 		r = bus_dmamap_create(vsc->sc_dmat, size, nsegs, size, 0, \
    339  1.1   hannken 				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,	\
    340  1.1   hannken 				      &sc->sc_ ##map);			\
    341  1.1   hannken 		if (r != 0) {						\
    342  1.1   hannken 			aprint_error_dev(sc->sc_dev,			\
    343  1.1   hannken 					 usage " dmamap creation failed, " \
    344  1.1   hannken 					 "error code %d\n", r);		\
    345  1.1   hannken 					 goto err_reqs;			\
    346  1.1   hannken 		}							\
    347  1.1   hannken 	} while (0)
    348  1.1   hannken #define C_L1(map, buf, size, nsegs, rw, usage)				\
    349  1.1   hannken 	C(map, buf, size, nsegs, rw, usage);				\
    350  1.1   hannken 	do {								\
    351  1.1   hannken 		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
    352  1.1   hannken 				    &sc->sc_ ##buf, size, NULL,		\
    353  1.1   hannken 				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
    354  1.1   hannken 		if (r != 0) {						\
    355  1.1   hannken 			aprint_error_dev(sc->sc_dev,			\
    356  1.1   hannken 					 usage " dmamap load failed, "	\
    357  1.1   hannken 					 "error code %d\n", r);		\
    358  1.1   hannken 			goto err_reqs;					\
    359  1.1   hannken 		}							\
    360  1.1   hannken 	} while (0)
    361  1.1   hannken #define C_L2(map, buf, size, nsegs, rw, usage)				\
    362  1.1   hannken 	C(map, buf, size, nsegs, rw, usage);				\
    363  1.1   hannken 	do {								\
    364  1.1   hannken 		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
    365  1.1   hannken 				    sc->sc_ ##buf, size, NULL,		\
    366  1.1   hannken 				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
    367  1.1   hannken 		if (r != 0) {						\
    368  1.1   hannken 			aprint_error_dev(sc->sc_dev,			\
    369  1.1   hannken 					 usage " dmamap load failed, "	\
    370  1.1   hannken 					 "error code %d\n", r);		\
    371  1.1   hannken 			goto err_reqs;					\
    372  1.1   hannken 		}							\
    373  1.1   hannken 	} while (0)
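	/*
	 * C() only creates a dmamap.  C_L1() additionally loads it from the
	 * address of a softc array element (&sc->sc_<buf>), while C_L2()
	 * loads it from a pointer member (sc->sc_<buf>).  Maps created with
	 * C() alone (payloads, MAC tables) are loaded later, once their
	 * contents or sizes are known.
	 */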
    374  1.1   hannken 	for (i = 0; i < rxqsize; i++) {
    375  1.1   hannken 		C_L1(rxhdr_dmamaps[i], rx_hdrs[i],
    376  1.1   hannken 		    sizeof(struct virtio_net_hdr), 1,
    377  1.1   hannken 		    READ, "rx header");
    378  1.1   hannken 		C(rx_dmamaps[i], NULL, MCLBYTES, 1, 0, "rx payload");
    379  1.1   hannken 	}
    380  1.1   hannken 
    381  1.1   hannken 	for (i = 0; i < txqsize; i++) {
     382  1.1   hannken 		C_L1(txhdr_dmamaps[i], tx_hdrs[i],
    383  1.1   hannken 		    sizeof(struct virtio_net_hdr), 1,
    384  1.1   hannken 		    WRITE, "tx header");
    385  1.1   hannken 		C(tx_dmamaps[i], NULL, ETHER_MAX_LEN, 256 /* XXX */, 0,
    386  1.1   hannken 		  "tx payload");
    387  1.1   hannken 	}
    388  1.1   hannken 
    389  1.1   hannken 	if (vsc->sc_nvqs == 3) {
    390  1.1   hannken 		/* control vq class & command */
    391  1.1   hannken 		C_L2(ctrl_cmd_dmamap, ctrl_cmd,
    392  1.1   hannken 		    sizeof(struct virtio_net_ctrl_cmd), 1, WRITE,
    393  1.1   hannken 		    "control command");
    394  1.3  christos 
    395  1.1   hannken 		/* control vq status */
    396  1.1   hannken 		C_L2(ctrl_status_dmamap, ctrl_status,
    397  1.1   hannken 		    sizeof(struct virtio_net_ctrl_status), 1, READ,
    398  1.1   hannken 		    "control status");
    399  1.1   hannken 
    400  1.1   hannken 		/* control vq rx mode command parameter */
    401  1.1   hannken 		C_L2(ctrl_rx_dmamap, ctrl_rx,
    402  1.1   hannken 		    sizeof(struct virtio_net_ctrl_rx), 1, WRITE,
    403  1.1   hannken 		    "rx mode control command");
    404  1.1   hannken 
    405  1.1   hannken 		/* control vq MAC filter table for unicast */
    406  1.1   hannken 		/* do not load now since its length is variable */
    407  1.1   hannken 		C(ctrl_tbl_uc_dmamap, NULL,
    408  1.1   hannken 		  sizeof(struct virtio_net_ctrl_mac_tbl) + 0, 1, WRITE,
    409  1.1   hannken 		  "unicast MAC address filter command");
    410  1.1   hannken 
    411  1.1   hannken 		/* control vq MAC filter table for multicast */
    412  1.1   hannken 		C(ctrl_tbl_mc_dmamap, NULL,
    413  1.1   hannken 		  (sizeof(struct virtio_net_ctrl_mac_tbl)
    414  1.1   hannken 		   + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES),
    415  1.1   hannken 		  1, WRITE, "multicast MAC address filter command");
    416  1.1   hannken 	}
    417  1.1   hannken #undef C_L2
    418  1.1   hannken #undef C_L1
    419  1.1   hannken #undef C
    420  1.1   hannken 
    421  1.1   hannken 	return 0;
    422  1.1   hannken 
    423  1.1   hannken err_reqs:
    424  1.1   hannken #define D(map)								\
    425  1.1   hannken 	do {								\
    426  1.1   hannken 		if (sc->sc_ ##map) {					\
    427  1.1   hannken 			bus_dmamap_destroy(vsc->sc_dmat, sc->sc_ ##map); \
    428  1.1   hannken 			sc->sc_ ##map = NULL;				\
    429  1.1   hannken 		}							\
    430  1.1   hannken 	} while (0)
    431  1.1   hannken 	D(ctrl_tbl_mc_dmamap);
    432  1.1   hannken 	D(ctrl_tbl_uc_dmamap);
    433  1.1   hannken 	D(ctrl_rx_dmamap);
    434  1.1   hannken 	D(ctrl_status_dmamap);
    435  1.1   hannken 	D(ctrl_cmd_dmamap);
    436  1.1   hannken 	for (i = 0; i < txqsize; i++) {
    437  1.1   hannken 		D(tx_dmamaps[i]);
    438  1.1   hannken 		D(txhdr_dmamaps[i]);
    439  1.1   hannken 	}
    440  1.1   hannken 	for (i = 0; i < rxqsize; i++) {
    441  1.1   hannken 		D(rx_dmamaps[i]);
    442  1.1   hannken 		D(rxhdr_dmamaps[i]);
    443  1.1   hannken 	}
    444  1.1   hannken #undef D
    445  1.1   hannken 	if (sc->sc_arrays) {
    446  1.1   hannken 		kmem_free(sc->sc_arrays, allocsize2);
    447  1.1   hannken 		sc->sc_arrays = 0;
    448  1.1   hannken 	}
    449  1.1   hannken err_dmamem_map:
    450  1.1   hannken 	bus_dmamem_unmap(vsc->sc_dmat, sc->sc_hdrs, allocsize);
    451  1.1   hannken err_dmamem_alloc:
    452  1.1   hannken 	bus_dmamem_free(vsc->sc_dmat, &sc->sc_hdr_segs[0], 1);
    453  1.1   hannken err_none:
    454  1.1   hannken 	return -1;
    455  1.1   hannken }
    456  1.1   hannken 
    457  1.1   hannken static void
    458  1.1   hannken vioif_attach(device_t parent, device_t self, void *aux)
    459  1.1   hannken {
    460  1.1   hannken 	struct vioif_softc *sc = device_private(self);
    461  1.1   hannken 	struct virtio_softc *vsc = device_private(parent);
    462  1.1   hannken 	uint32_t features;
    463  1.1   hannken 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    464  1.1   hannken 
    465  1.1   hannken 	if (vsc->sc_child != NULL) {
    466  1.1   hannken 		aprint_normal(": child already attached for %s; "
    467  1.1   hannken 			      "something wrong...\n",
    468  1.1   hannken 			      device_xname(parent));
    469  1.1   hannken 		return;
    470  1.1   hannken 	}
    471  1.1   hannken 
    472  1.1   hannken 	sc->sc_dev = self;
    473  1.1   hannken 	sc->sc_virtio = vsc;
    474  1.1   hannken 
    475  1.1   hannken 	vsc->sc_child = self;
    476  1.1   hannken 	vsc->sc_ipl = IPL_NET;
    477  1.1   hannken 	vsc->sc_vqs = &sc->sc_vq[0];
    478  1.1   hannken 	vsc->sc_config_change = 0;
    479  1.1   hannken 	vsc->sc_intrhand = virtio_vq_intr;
    480  1.1   hannken 
    481  1.1   hannken 	features = virtio_negotiate_features(vsc,
    482  1.1   hannken 					     (VIRTIO_NET_F_MAC |
    483  1.1   hannken 					      VIRTIO_NET_F_STATUS |
    484  1.1   hannken 					      VIRTIO_NET_F_CTRL_VQ |
    485  1.1   hannken 					      VIRTIO_NET_F_CTRL_RX |
    486  1.1   hannken 					      VIRTIO_F_NOTIFY_ON_EMPTY));
	sc->sc_features = features;	/* remembered for re-negotiation in vioif_stop() */
     487  1.1   hannken 	if (features & VIRTIO_NET_F_MAC) {
    488  1.1   hannken 		sc->sc_mac[0] = virtio_read_device_config_1(vsc,
    489  1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+0);
    490  1.1   hannken 		sc->sc_mac[1] = virtio_read_device_config_1(vsc,
    491  1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+1);
    492  1.1   hannken 		sc->sc_mac[2] = virtio_read_device_config_1(vsc,
    493  1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+2);
    494  1.1   hannken 		sc->sc_mac[3] = virtio_read_device_config_1(vsc,
    495  1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+3);
    496  1.1   hannken 		sc->sc_mac[4] = virtio_read_device_config_1(vsc,
    497  1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+4);
    498  1.1   hannken 		sc->sc_mac[5] = virtio_read_device_config_1(vsc,
    499  1.1   hannken 						    VIRTIO_NET_CONFIG_MAC+5);
    500  1.1   hannken 	} else {
    501  1.1   hannken 		/* code stolen from sys/net/if_tap.c */
    502  1.1   hannken 		struct timeval tv;
    503  1.1   hannken 		uint32_t ui;
    504  1.1   hannken 		getmicrouptime(&tv);
    505  1.1   hannken 		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
    506  1.1   hannken 		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
    507  1.1   hannken 		virtio_write_device_config_1(vsc,
    508  1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+0,
    509  1.1   hannken 					     sc->sc_mac[0]);
    510  1.1   hannken 		virtio_write_device_config_1(vsc,
    511  1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+1,
    512  1.1   hannken 					     sc->sc_mac[1]);
    513  1.1   hannken 		virtio_write_device_config_1(vsc,
    514  1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+2,
    515  1.1   hannken 					     sc->sc_mac[2]);
    516  1.1   hannken 		virtio_write_device_config_1(vsc,
    517  1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+3,
    518  1.1   hannken 					     sc->sc_mac[3]);
    519  1.1   hannken 		virtio_write_device_config_1(vsc,
    520  1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+4,
    521  1.1   hannken 					     sc->sc_mac[4]);
    522  1.1   hannken 		virtio_write_device_config_1(vsc,
    523  1.1   hannken 					     VIRTIO_NET_CONFIG_MAC+5,
    524  1.1   hannken 					     sc->sc_mac[5]);
    525  1.1   hannken 	}
    526  1.1   hannken 	aprint_normal(": Ethernet address %s\n", ether_sprintf(sc->sc_mac));
    527  1.1   hannken 	aprint_naive("\n");
    528  1.1   hannken 
    529  1.1   hannken 	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
    530  1.1   hannken 			    MCLBYTES+sizeof(struct virtio_net_hdr), 2,
    531  1.1   hannken 			    "rx") != 0) {
    532  1.1   hannken 		goto err;
    533  1.1   hannken 	}
    534  1.1   hannken 	vsc->sc_nvqs = 1;
    535  1.1   hannken 	sc->sc_vq[0].vq_done = vioif_rx_vq_done;
    536  1.1   hannken 	if (virtio_alloc_vq(vsc, &sc->sc_vq[1], 1,
    537  1.1   hannken 			    (sizeof(struct virtio_net_hdr)
    538  1.1   hannken 			     + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
    539  1.1   hannken 			    VIRTIO_NET_TX_MAXNSEGS + 1,
    540  1.1   hannken 			    "tx") != 0) {
    541  1.1   hannken 		goto err;
    542  1.1   hannken 	}
    543  1.1   hannken 	vsc->sc_nvqs = 2;
    544  1.1   hannken 	sc->sc_vq[1].vq_done = vioif_tx_vq_done;
    545  1.1   hannken 	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
    546  1.1   hannken 	virtio_stop_vq_intr(vsc, &sc->sc_vq[1]); /* not urgent; do it later */
    547  1.1   hannken 	if ((features & VIRTIO_NET_F_CTRL_VQ)
    548  1.1   hannken 	    && (features & VIRTIO_NET_F_CTRL_RX)) {
    549  1.1   hannken 		if (virtio_alloc_vq(vsc, &sc->sc_vq[2], 2,
    550  1.1   hannken 				    NBPG, 1, "control") == 0) {
    551  1.1   hannken 			sc->sc_vq[2].vq_done = vioif_ctrl_vq_done;
    552  1.1   hannken 			cv_init(&sc->sc_ctrl_wait, "ctrl_vq");
    553  1.1   hannken 			mutex_init(&sc->sc_ctrl_wait_lock,
    554  1.1   hannken 				   MUTEX_DEFAULT, IPL_NET);
    555  1.1   hannken 			sc->sc_ctrl_inuse = FREE;
    556  1.1   hannken 			virtio_start_vq_intr(vsc, &sc->sc_vq[2]);
    557  1.1   hannken 			vsc->sc_nvqs = 3;
    558  1.1   hannken 		}
    559  1.1   hannken 	}
    560  1.1   hannken 
    561  1.1   hannken 	sc->sc_rx_softint = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
    562  1.1   hannken 					      vioif_rx_softint, sc);
    563  1.1   hannken 	if (sc->sc_rx_softint == NULL) {
    564  1.1   hannken 		aprint_error_dev(self, "cannot establish softint\n");
    565  1.1   hannken 		goto err;
    566  1.1   hannken 	}
    567  1.1   hannken 
    568  1.1   hannken 	if (vioif_alloc_mems(sc) < 0)
    569  1.1   hannken 		goto err;
    570  1.1   hannken 	if (vsc->sc_nvqs == 3)
    571  1.1   hannken 		config_interrupts(self, vioif_deferred_init);
    572  1.1   hannken 
    573  1.1   hannken 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
    574  1.1   hannken 	ifp->if_softc = sc;
    575  1.1   hannken 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    576  1.1   hannken 	ifp->if_start = vioif_start;
    577  1.1   hannken 	ifp->if_ioctl = vioif_ioctl;
    578  1.1   hannken 	ifp->if_init = vioif_init;
    579  1.1   hannken 	ifp->if_stop = vioif_stop;
    580  1.1   hannken 	ifp->if_capabilities = 0;
    581  1.1   hannken 	ifp->if_watchdog = vioif_watchdog;
    582  1.1   hannken 
    583  1.1   hannken 	if_attach(ifp);
    584  1.1   hannken 	ether_ifattach(ifp, sc->sc_mac);
    585  1.1   hannken 
    586  1.1   hannken 	return;
    587  1.1   hannken 
    588  1.1   hannken err:
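	/*
	 * Tear down in the reverse order of setup; vsc->sc_nvqs records how
	 * far attachment got, so each step frees exactly what was created.
	 */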
    589  1.1   hannken 	if (vsc->sc_nvqs == 3) {
    590  1.1   hannken 		virtio_free_vq(vsc, &sc->sc_vq[2]);
    591  1.1   hannken 		cv_destroy(&sc->sc_ctrl_wait);
    592  1.1   hannken 		mutex_destroy(&sc->sc_ctrl_wait_lock);
    593  1.1   hannken 		vsc->sc_nvqs = 2;
    594  1.1   hannken 	}
    595  1.1   hannken 	if (vsc->sc_nvqs == 2) {
    596  1.1   hannken 		virtio_free_vq(vsc, &sc->sc_vq[1]);
    597  1.1   hannken 		vsc->sc_nvqs = 1;
    598  1.1   hannken 	}
    599  1.1   hannken 	if (vsc->sc_nvqs == 1) {
    600  1.1   hannken 		virtio_free_vq(vsc, &sc->sc_vq[0]);
    601  1.1   hannken 		vsc->sc_nvqs = 0;
    602  1.1   hannken 	}
    603  1.1   hannken 	vsc->sc_child = (void*)1;
    604  1.1   hannken 	return;
    605  1.1   hannken }
    606  1.1   hannken 
     607  1.1   hannken /* we need interrupts to turn promiscuous mode off */
    608  1.1   hannken static void
    609  1.1   hannken vioif_deferred_init(device_t self)
    610  1.1   hannken {
    611  1.1   hannken 	struct vioif_softc *sc = device_private(self);
    612  1.1   hannken 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    613  1.1   hannken 	int r;
    614  1.1   hannken 
    615  1.1   hannken 	r =  vioif_set_promisc(sc, false);
    616  1.1   hannken 	if (r != 0)
    617  1.1   hannken 		aprint_error_dev(self, "resetting promisc mode failed, "
     618  1.1   hannken 				 "error code %d\n", r);
    619  1.1   hannken 	else
    620  1.1   hannken 		ifp->if_flags &= ~IFF_PROMISC;
    621  1.1   hannken }
    622  1.1   hannken 
    623  1.1   hannken /*
    624  1.1   hannken  * Interface functions for ifnet
    625  1.1   hannken  */
    626  1.1   hannken static int
    627  1.1   hannken vioif_init(struct ifnet *ifp)
    628  1.1   hannken {
    629  1.1   hannken 	struct vioif_softc *sc = ifp->if_softc;
    630  1.1   hannken 
    631  1.1   hannken 	vioif_stop(ifp, 0);
    632  1.1   hannken 	vioif_populate_rx_mbufs(sc);
    633  1.1   hannken 	vioif_updown(sc, true);
    634  1.1   hannken 	ifp->if_flags |= IFF_RUNNING;
    635  1.1   hannken 	ifp->if_flags &= ~IFF_OACTIVE;
    636  1.1   hannken 	vioif_rx_filter(sc);
    637  1.1   hannken 
    638  1.1   hannken 	return 0;
    639  1.1   hannken }
    640  1.1   hannken 
    641  1.1   hannken static void
    642  1.1   hannken vioif_stop(struct ifnet *ifp, int disable)
    643  1.1   hannken {
    644  1.1   hannken 	struct vioif_softc *sc = ifp->if_softc;
    645  1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
    646  1.1   hannken 
    647  1.1   hannken 	/* only way to stop I/O and DMA is resetting... */
    648  1.1   hannken 	virtio_reset(vsc);
    649  1.1   hannken 	vioif_rx_deq(sc);
    650  1.1   hannken 	vioif_tx_drain(sc);
    651  1.1   hannken 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    652  1.1   hannken 
    653  1.1   hannken 	if (disable)
    654  1.1   hannken 		vioif_rx_drain(sc);
    655  1.3  christos 
    656  1.1   hannken 	virtio_reinit_start(vsc);
    657  1.1   hannken 	virtio_negotiate_features(vsc, sc->sc_features);
    658  1.1   hannken 	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
    659  1.1   hannken 	virtio_stop_vq_intr(vsc, &sc->sc_vq[1]);
    660  1.1   hannken 	if (vsc->sc_nvqs >= 3)
    661  1.1   hannken 		virtio_start_vq_intr(vsc, &sc->sc_vq[2]);
    662  1.1   hannken 	virtio_reinit_end(vsc);
    663  1.1   hannken 	vioif_updown(sc, false);
    664  1.1   hannken }
    665  1.1   hannken 
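/*
 * Transmit: each packet goes out as a two-part descriptor chain, the
 * per-slot virtio_net_hdr dmamap followed by the payload dmamap.  Commits
 * are batched and the host is notified once after the loop.  If the queue
 * is full (EAGAIN from enqueue_prep, or a failed reserve), completed slots
 * are reclaimed via vioif_tx_vq_done() and the packet is retried once.
 */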
    666  1.1   hannken static void
    667  1.1   hannken vioif_start(struct ifnet *ifp)
    668  1.1   hannken {
    669  1.1   hannken 	struct vioif_softc *sc = ifp->if_softc;
    670  1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
    671  1.1   hannken 	struct virtqueue *vq = &sc->sc_vq[1]; /* tx vq */
    672  1.1   hannken 	struct mbuf *m;
    673  1.1   hannken 	int queued = 0, retry = 0;
    674  1.1   hannken 
    675  1.1   hannken 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
    676  1.1   hannken 		return;
    677  1.1   hannken 
    678  1.2  jmcneill 	for (;;) {
    679  1.1   hannken 		int slot, r;
    680  1.1   hannken 
    681  1.2  jmcneill 		IFQ_POLL(&ifp->if_snd, m);
    682  1.2  jmcneill 		if (m == NULL)
    683  1.2  jmcneill 			break;
    684  1.2  jmcneill 
    685  1.1   hannken 		r = virtio_enqueue_prep(vsc, vq, &slot);
    686  1.1   hannken 		if (r == EAGAIN) {
    687  1.1   hannken 			ifp->if_flags |= IFF_OACTIVE;
    688  1.1   hannken 			vioif_tx_vq_done(vq);
    689  1.1   hannken 			if (retry++ == 0)
    690  1.1   hannken 				continue;
    691  1.1   hannken 			else
    692  1.1   hannken 				break;
    693  1.1   hannken 		}
    694  1.1   hannken 		if (r != 0)
    695  1.1   hannken 			panic("enqueue_prep for a tx buffer");
    696  1.1   hannken 		r = bus_dmamap_load_mbuf(vsc->sc_dmat,
    697  1.1   hannken 					 sc->sc_tx_dmamaps[slot],
    698  1.1   hannken 					 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
    699  1.1   hannken 		if (r != 0) {
    700  1.1   hannken 			virtio_enqueue_abort(vsc, vq, slot);
    701  1.1   hannken 			printf("%s: tx dmamap load failed, error code %d\n",
    702  1.1   hannken 			       device_xname(sc->sc_dev), r);
    703  1.1   hannken 			break;
    704  1.1   hannken 		}
    705  1.1   hannken 		r = virtio_enqueue_reserve(vsc, vq, slot,
    706  1.1   hannken 					sc->sc_tx_dmamaps[slot]->dm_nsegs + 1);
    707  1.1   hannken 		if (r != 0) {
    708  1.1   hannken 			bus_dmamap_unload(vsc->sc_dmat,
    709  1.1   hannken 					  sc->sc_tx_dmamaps[slot]);
    710  1.1   hannken 			ifp->if_flags |= IFF_OACTIVE;
    711  1.1   hannken 			vioif_tx_vq_done(vq);
    712  1.1   hannken 			if (retry++ == 0)
    713  1.1   hannken 				continue;
    714  1.1   hannken 			else
    715  1.1   hannken 				break;
    716  1.1   hannken 		}
    717  1.1   hannken 		IFQ_DEQUEUE(&ifp->if_snd, m);
    718  1.1   hannken 		sc->sc_tx_mbufs[slot] = m;
    719  1.1   hannken 
    720  1.1   hannken 		memset(&sc->sc_tx_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
    721  1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
    722  1.1   hannken 				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
    723  1.1   hannken 				BUS_DMASYNC_PREWRITE);
    724  1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
    725  1.1   hannken 				0, sc->sc_txhdr_dmamaps[slot]->dm_mapsize,
    726  1.1   hannken 				BUS_DMASYNC_PREWRITE);
    727  1.1   hannken 		virtio_enqueue(vsc, vq, slot, sc->sc_txhdr_dmamaps[slot], true);
    728  1.1   hannken 		virtio_enqueue(vsc, vq, slot, sc->sc_tx_dmamaps[slot], true);
    729  1.1   hannken 		virtio_enqueue_commit(vsc, vq, slot, false);
    730  1.1   hannken 		queued++;
    731  1.1   hannken 		bpf_mtap(ifp, m);
    732  1.1   hannken 	}
    733  1.1   hannken 
    734  1.1   hannken 	if (queued > 0) {
    735  1.1   hannken 		virtio_enqueue_commit(vsc, vq, -1, true);
    736  1.1   hannken 		ifp->if_timer = 5;
    737  1.1   hannken 	}
    738  1.1   hannken }
    739  1.1   hannken 
    740  1.1   hannken static int
    741  1.1   hannken vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    742  1.1   hannken {
    743  1.1   hannken 	int s, r;
    744  1.1   hannken 
    745  1.1   hannken 	s = splnet();
    746  1.1   hannken 
    747  1.1   hannken 	r = ether_ioctl(ifp, cmd, data);
    748  1.1   hannken 	if ((r == 0 && cmd == SIOCSIFFLAGS) ||
    749  1.1   hannken 	    (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI))) {
    750  1.1   hannken 		if (ifp->if_flags & IFF_RUNNING)
    751  1.1   hannken 			r = vioif_rx_filter(ifp->if_softc);
    752  1.1   hannken 		else
    753  1.1   hannken 			r = 0;
    754  1.1   hannken 	}
    755  1.1   hannken 
    756  1.1   hannken 	splx(s);
    757  1.1   hannken 
    758  1.1   hannken 	return r;
    759  1.1   hannken }
    760  1.1   hannken 
     761  1.1   hannken static void
    762  1.1   hannken vioif_watchdog(struct ifnet *ifp)
    763  1.1   hannken {
    764  1.1   hannken 	struct vioif_softc *sc = ifp->if_softc;
    765  1.1   hannken 
    766  1.1   hannken 	if (ifp->if_flags & IFF_RUNNING)
    767  1.1   hannken 		vioif_tx_vq_done(&sc->sc_vq[1]);
    768  1.1   hannken }
    769  1.1   hannken 
    770  1.1   hannken 
    771  1.1   hannken /*
     772  1.1   hannken  * Receive implementation
    773  1.1   hannken  */
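/*
 * Receive flow: vioif_populate_rx_mbufs() gives every free slot an mbuf
 * cluster plus a header descriptor.  The hard interrupt
 * (vioif_rx_vq_done -> vioif_rx_deq) hands completed packets to the
 * network stack and schedules a softint, which refills the ring.
 */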
     774  1.1   hannken /* allocate and initialize an mbuf for receive */
    775  1.1   hannken static int
    776  1.1   hannken vioif_add_rx_mbuf(struct vioif_softc *sc, int i)
    777  1.1   hannken {
    778  1.1   hannken 	struct mbuf *m;
    779  1.1   hannken 	int r;
    780  1.1   hannken 
    781  1.1   hannken 	MGETHDR(m, M_DONTWAIT, MT_DATA);
    782  1.1   hannken 	if (m == NULL)
    783  1.1   hannken 		return ENOBUFS;
    784  1.1   hannken 	MCLGET(m, M_DONTWAIT);
    785  1.1   hannken 	if ((m->m_flags & M_EXT) == 0) {
    786  1.1   hannken 		m_freem(m);
    787  1.1   hannken 		return ENOBUFS;
    788  1.1   hannken 	}
    789  1.1   hannken 	sc->sc_rx_mbufs[i] = m;
    790  1.1   hannken 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
    791  1.1   hannken 	r = bus_dmamap_load_mbuf(sc->sc_virtio->sc_dmat,
    792  1.1   hannken 				 sc->sc_rx_dmamaps[i],
    793  1.1   hannken 				 m, BUS_DMA_READ|BUS_DMA_NOWAIT);
    794  1.1   hannken 	if (r) {
    795  1.1   hannken 		m_freem(m);
    796  1.1   hannken 		sc->sc_rx_mbufs[i] = 0;
    797  1.1   hannken 		return r;
    798  1.1   hannken 	}
    799  1.1   hannken 
    800  1.1   hannken 	return 0;
    801  1.1   hannken }
    802  1.1   hannken 
     803  1.1   hannken /* free an mbuf for receive */
    804  1.1   hannken static void
    805  1.1   hannken vioif_free_rx_mbuf(struct vioif_softc *sc, int i)
    806  1.1   hannken {
    807  1.1   hannken 	bus_dmamap_unload(sc->sc_virtio->sc_dmat, sc->sc_rx_dmamaps[i]);
    808  1.1   hannken 	m_freem(sc->sc_rx_mbufs[i]);
    809  1.1   hannken 	sc->sc_rx_mbufs[i] = NULL;
    810  1.1   hannken }
    811  1.1   hannken 
     812  1.1   hannken /* add mbufs for all the empty receive slots */
    813  1.1   hannken static void
    814  1.1   hannken vioif_populate_rx_mbufs(struct vioif_softc *sc)
    815  1.1   hannken {
    816  1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
    817  1.1   hannken 	int i, r, ndone = 0;
    818  1.1   hannken 	struct virtqueue *vq = &sc->sc_vq[0]; /* rx vq */
    819  1.1   hannken 
    820  1.1   hannken 	for (i = 0; i < vq->vq_num; i++) {
    821  1.1   hannken 		int slot;
    822  1.1   hannken 		r = virtio_enqueue_prep(vsc, vq, &slot);
    823  1.1   hannken 		if (r == EAGAIN)
    824  1.1   hannken 			break;
    825  1.1   hannken 		if (r != 0)
    826  1.1   hannken 			panic("enqueue_prep for rx buffers");
    827  1.1   hannken 		if (sc->sc_rx_mbufs[slot] == NULL) {
    828  1.1   hannken 			r = vioif_add_rx_mbuf(sc, slot);
    829  1.1   hannken 			if (r != 0) {
    830  1.1   hannken 				printf("%s: rx mbuf allocation failed, "
    831  1.1   hannken 				       "error code %d\n",
    832  1.1   hannken 				       device_xname(sc->sc_dev), r);
    833  1.1   hannken 				break;
    834  1.1   hannken 			}
    835  1.1   hannken 		}
    836  1.1   hannken 		r = virtio_enqueue_reserve(vsc, vq, slot,
    837  1.1   hannken 					sc->sc_rx_dmamaps[slot]->dm_nsegs + 1);
    838  1.1   hannken 		if (r != 0) {
    839  1.1   hannken 			vioif_free_rx_mbuf(sc, slot);
    840  1.1   hannken 			break;
    841  1.1   hannken 		}
    842  1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
    843  1.1   hannken 			0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
    844  1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
    845  1.1   hannken 			0, MCLBYTES, BUS_DMASYNC_PREREAD);
    846  1.1   hannken 		virtio_enqueue(vsc, vq, slot, sc->sc_rxhdr_dmamaps[slot], false);
    847  1.1   hannken 		virtio_enqueue(vsc, vq, slot, sc->sc_rx_dmamaps[slot], false);
    848  1.1   hannken 		virtio_enqueue_commit(vsc, vq, slot, false);
    849  1.1   hannken 		ndone++;
    850  1.1   hannken 	}
    851  1.1   hannken 	if (ndone > 0)
    852  1.1   hannken 		virtio_enqueue_commit(vsc, vq, -1, true);
    853  1.1   hannken }
    854  1.1   hannken 
     855  1.1   hannken /* dequeue received packets */
    856  1.1   hannken static int
    857  1.1   hannken vioif_rx_deq(struct vioif_softc *sc)
    858  1.1   hannken {
    859  1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
    860  1.1   hannken 	struct virtqueue *vq = &sc->sc_vq[0];
    861  1.1   hannken 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    862  1.1   hannken 	struct mbuf *m;
    863  1.1   hannken 	int r = 0;
    864  1.1   hannken 	int slot, len;
    865  1.1   hannken 
    866  1.1   hannken 	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
    867  1.1   hannken 		len -= sizeof(struct virtio_net_hdr);
    868  1.1   hannken 		r = 1;
    869  1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
    870  1.1   hannken 				0, sizeof(struct virtio_net_hdr),
    871  1.1   hannken 				BUS_DMASYNC_POSTREAD);
    872  1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
    873  1.1   hannken 				0, MCLBYTES,
    874  1.1   hannken 				BUS_DMASYNC_POSTREAD);
    875  1.1   hannken 		m = sc->sc_rx_mbufs[slot];
    876  1.1   hannken 		KASSERT(m != NULL);
    877  1.1   hannken 		bus_dmamap_unload(vsc->sc_dmat, sc->sc_rx_dmamaps[slot]);
    878  1.1   hannken 		sc->sc_rx_mbufs[slot] = 0;
    879  1.1   hannken 		virtio_dequeue_commit(vsc, vq, slot);
    880  1.1   hannken 		m->m_pkthdr.rcvif = ifp;
    881  1.1   hannken 		m->m_len = m->m_pkthdr.len = len;
    882  1.1   hannken 		ifp->if_ipackets++;
    883  1.1   hannken 		bpf_mtap(ifp, m);
    884  1.1   hannken 		(*ifp->if_input)(ifp, m);
    885  1.1   hannken 	}
    886  1.3  christos 
    887  1.1   hannken 	return r;
    888  1.1   hannken }
    889  1.1   hannken 
    890  1.1   hannken /* rx interrupt; call _dequeue above and schedule a softint */
    891  1.1   hannken static int
    892  1.1   hannken vioif_rx_vq_done(struct virtqueue *vq)
    893  1.1   hannken {
    894  1.1   hannken 	struct virtio_softc *vsc = vq->vq_owner;
    895  1.1   hannken 	struct vioif_softc *sc = device_private(vsc->sc_child);
    896  1.1   hannken 	int r;
    897  1.1   hannken 
    898  1.1   hannken 	r = vioif_rx_deq(sc);
    899  1.1   hannken 	if (r)
    900  1.1   hannken 		softint_schedule(sc->sc_rx_softint);
    901  1.1   hannken 
    902  1.1   hannken 	return r;
    903  1.1   hannken }
    904  1.1   hannken 
     905  1.1   hannken /* softint: enqueue receive requests for new incoming packets */
    906  1.1   hannken static void
    907  1.1   hannken vioif_rx_softint(void *arg)
    908  1.1   hannken {
    909  1.1   hannken 	struct vioif_softc *sc = arg;
    910  1.1   hannken 
    911  1.1   hannken 	vioif_populate_rx_mbufs(sc);
    912  1.1   hannken }
    913  1.1   hannken 
    914  1.1   hannken /* free all the mbufs; called from if_stop(disable) */
    915  1.1   hannken static void
    916  1.1   hannken vioif_rx_drain(struct vioif_softc *sc)
    917  1.1   hannken {
    918  1.1   hannken 	struct virtqueue *vq = &sc->sc_vq[0];
    919  1.1   hannken 	int i;
    920  1.1   hannken 
    921  1.1   hannken 	for (i = 0; i < vq->vq_num; i++) {
    922  1.1   hannken 		if (sc->sc_rx_mbufs[i] == NULL)
    923  1.1   hannken 			continue;
    924  1.1   hannken 		vioif_free_rx_mbuf(sc, i);
    925  1.1   hannken 	}
    926  1.1   hannken }
    927  1.1   hannken 
    928  1.1   hannken 
    929  1.1   hannken /*
     930  1.1   hannken  * Transmission implementation
    931  1.1   hannken  */
    932  1.1   hannken /* actual transmission is done in if_start */
    933  1.1   hannken /* tx interrupt; dequeue and free mbufs */
    934  1.1   hannken /*
    935  1.1   hannken  * tx interrupt is actually disabled; this should be called upon
    936  1.1   hannken  * tx vq full and watchdog
    937  1.1   hannken  */
    938  1.1   hannken static int
    939  1.1   hannken vioif_tx_vq_done(struct virtqueue *vq)
    940  1.1   hannken {
    941  1.1   hannken 	struct virtio_softc *vsc = vq->vq_owner;
    942  1.1   hannken 	struct vioif_softc *sc = device_private(vsc->sc_child);
    943  1.1   hannken 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    944  1.1   hannken 	struct mbuf *m;
    945  1.1   hannken 	int r = 0;
    946  1.1   hannken 	int slot, len;
    947  1.1   hannken 
    948  1.1   hannken 	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
    949  1.1   hannken 		r++;
    950  1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
    951  1.1   hannken 				0, sizeof(struct virtio_net_hdr),
    952  1.1   hannken 				BUS_DMASYNC_POSTWRITE);
    953  1.1   hannken 		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
    954  1.1   hannken 				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
    955  1.1   hannken 				BUS_DMASYNC_POSTWRITE);
    956  1.1   hannken 		m = sc->sc_tx_mbufs[slot];
    957  1.1   hannken 		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[slot]);
    958  1.1   hannken 		sc->sc_tx_mbufs[slot] = 0;
    959  1.1   hannken 		virtio_dequeue_commit(vsc, vq, slot);
    960  1.1   hannken 		ifp->if_opackets++;
    961  1.1   hannken 		m_freem(m);
    962  1.1   hannken 	}
    963  1.1   hannken 
    964  1.1   hannken 	if (r)
    965  1.1   hannken 		ifp->if_flags &= ~IFF_OACTIVE;
    966  1.1   hannken 	return r;
    967  1.1   hannken }
    968  1.1   hannken 
    969  1.1   hannken /* free all the mbufs already put on vq; called from if_stop(disable) */
    970  1.1   hannken static void
    971  1.1   hannken vioif_tx_drain(struct vioif_softc *sc)
    972  1.1   hannken {
    973  1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
    974  1.1   hannken 	struct virtqueue *vq = &sc->sc_vq[1];
    975  1.1   hannken 	int i;
    976  1.1   hannken 
    977  1.1   hannken 	for (i = 0; i < vq->vq_num; i++) {
    978  1.1   hannken 		if (sc->sc_tx_mbufs[i] == NULL)
    979  1.1   hannken 			continue;
    980  1.1   hannken 		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[i]);
    981  1.1   hannken 		m_freem(sc->sc_tx_mbufs[i]);
    982  1.1   hannken 		sc->sc_tx_mbufs[i] = NULL;
    983  1.1   hannken 	}
    984  1.1   hannken }
    985  1.1   hannken 
    986  1.1   hannken /*
    987  1.1   hannken  * Control vq
    988  1.1   hannken  */
    989  1.1   hannken /* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */
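/*
 * Only one control request can be outstanding: the shared sc_ctrl_*
 * buffers are guarded by sc_ctrl_inuse (FREE -> INUSE -> DONE) under
 * sc_ctrl_wait_lock.  The request is a three-descriptor chain: command
 * and rx parameter (device-readable) followed by the status byte
 * (device-writable); vioif_ctrl_vq_done() marks the state DONE and wakes
 * the waiter.
 */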
    990  1.1   hannken static int
    991  1.1   hannken vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
    992  1.1   hannken {
    993  1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
    994  1.1   hannken 	struct virtqueue *vq = &sc->sc_vq[2];
    995  1.1   hannken 	int r, slot;
    996  1.1   hannken 
    997  1.1   hannken 	if (vsc->sc_nvqs < 3)
    998  1.1   hannken 		return ENOTSUP;
    999  1.1   hannken 
   1000  1.1   hannken 	mutex_enter(&sc->sc_ctrl_wait_lock);
   1001  1.1   hannken 	while (sc->sc_ctrl_inuse != FREE)
   1002  1.1   hannken 		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
   1003  1.1   hannken 	sc->sc_ctrl_inuse = INUSE;
   1004  1.1   hannken 	mutex_exit(&sc->sc_ctrl_wait_lock);
   1005  1.1   hannken 
   1006  1.1   hannken 	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_RX;
   1007  1.1   hannken 	sc->sc_ctrl_cmd->command = cmd;
   1008  1.1   hannken 	sc->sc_ctrl_rx->onoff = onoff;
   1009  1.1   hannken 
   1010  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
   1011  1.1   hannken 			0, sizeof(struct virtio_net_ctrl_cmd),
   1012  1.1   hannken 			BUS_DMASYNC_PREWRITE);
   1013  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap,
   1014  1.1   hannken 			0, sizeof(struct virtio_net_ctrl_rx),
   1015  1.1   hannken 			BUS_DMASYNC_PREWRITE);
   1016  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
   1017  1.1   hannken 			0, sizeof(struct virtio_net_ctrl_status),
   1018  1.1   hannken 			BUS_DMASYNC_PREREAD);
   1019  1.1   hannken 
   1020  1.1   hannken 	r = virtio_enqueue_prep(vsc, vq, &slot);
   1021  1.1   hannken 	if (r != 0)
   1022  1.1   hannken 		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
   1023  1.1   hannken 	r = virtio_enqueue_reserve(vsc, vq, slot, 3);
   1024  1.1   hannken 	if (r != 0)
   1025  1.1   hannken 		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
   1026  1.1   hannken 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
   1027  1.1   hannken 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_rx_dmamap, true);
   1028  1.1   hannken 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
   1029  1.1   hannken 	virtio_enqueue_commit(vsc, vq, slot, true);
   1030  1.1   hannken 
   1031  1.1   hannken 	/* wait for done */
   1032  1.1   hannken 	mutex_enter(&sc->sc_ctrl_wait_lock);
   1033  1.1   hannken 	while (sc->sc_ctrl_inuse != DONE)
   1034  1.1   hannken 		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
   1035  1.1   hannken 	mutex_exit(&sc->sc_ctrl_wait_lock);
    1036  1.1   hannken 	/* already dequeued */
   1037  1.1   hannken 
   1038  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
   1039  1.1   hannken 			sizeof(struct virtio_net_ctrl_cmd),
   1040  1.1   hannken 			BUS_DMASYNC_POSTWRITE);
   1041  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap, 0,
   1042  1.1   hannken 			sizeof(struct virtio_net_ctrl_rx),
   1043  1.1   hannken 			BUS_DMASYNC_POSTWRITE);
   1044  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
   1045  1.1   hannken 			sizeof(struct virtio_net_ctrl_status),
   1046  1.1   hannken 			BUS_DMASYNC_POSTREAD);
   1047  1.1   hannken 
   1048  1.1   hannken 	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
   1049  1.1   hannken 		r = 0;
   1050  1.1   hannken 	else {
   1051  1.1   hannken 		printf("%s: failed setting rx mode\n",
   1052  1.1   hannken 		       device_xname(sc->sc_dev));
   1053  1.1   hannken 		r = EIO;
   1054  1.1   hannken 	}
   1055  1.1   hannken 
   1056  1.1   hannken 	mutex_enter(&sc->sc_ctrl_wait_lock);
   1057  1.1   hannken 	sc->sc_ctrl_inuse = FREE;
   1058  1.1   hannken 	cv_signal(&sc->sc_ctrl_wait);
   1059  1.1   hannken 	mutex_exit(&sc->sc_ctrl_wait_lock);
   1060  1.3  christos 
   1061  1.1   hannken 	return r;
   1062  1.1   hannken }
   1063  1.1   hannken 
   1064  1.1   hannken static int
   1065  1.1   hannken vioif_set_promisc(struct vioif_softc *sc, bool onoff)
   1066  1.1   hannken {
   1067  1.1   hannken 	int r;
   1068  1.1   hannken 
   1069  1.1   hannken 	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);
   1070  1.1   hannken 
   1071  1.1   hannken 	return r;
   1072  1.1   hannken }
   1073  1.1   hannken 
   1074  1.1   hannken static int
   1075  1.1   hannken vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
   1076  1.1   hannken {
   1077  1.1   hannken 	int r;
   1078  1.1   hannken 
   1079  1.1   hannken 	r = vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);
   1080  1.1   hannken 
   1081  1.1   hannken 	return r;
   1082  1.1   hannken }
   1083  1.1   hannken 
   1084  1.1   hannken /* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
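/*
 * Same handshake as vioif_ctrl_rx(), but with a four-descriptor chain:
 * command, unicast MAC table, multicast MAC table (all device-readable)
 * and the status byte.  The two table dmamaps are loaded here on demand
 * because their lengths depend on the current nentries counts.
 */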
   1085  1.1   hannken static int
   1086  1.1   hannken vioif_set_rx_filter(struct vioif_softc *sc)
   1087  1.1   hannken {
   1088  1.1   hannken 	/* filter already set in sc_ctrl_mac_tbl */
   1089  1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
   1090  1.1   hannken 	struct virtqueue *vq = &sc->sc_vq[2];
   1091  1.1   hannken 	int r, slot;
   1092  1.1   hannken 
   1093  1.1   hannken 	if (vsc->sc_nvqs < 3)
   1094  1.1   hannken 		return ENOTSUP;
   1095  1.1   hannken 
   1096  1.1   hannken 	mutex_enter(&sc->sc_ctrl_wait_lock);
   1097  1.1   hannken 	while (sc->sc_ctrl_inuse != FREE)
   1098  1.1   hannken 		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
   1099  1.1   hannken 	sc->sc_ctrl_inuse = INUSE;
   1100  1.1   hannken 	mutex_exit(&sc->sc_ctrl_wait_lock);
   1101  1.1   hannken 
   1102  1.1   hannken 	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_MAC;
   1103  1.1   hannken 	sc->sc_ctrl_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET;
   1104  1.1   hannken 
   1105  1.1   hannken 	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap,
   1106  1.1   hannken 			    sc->sc_ctrl_mac_tbl_uc,
   1107  1.1   hannken 			    (sizeof(struct virtio_net_ctrl_mac_tbl)
   1108  1.1   hannken 			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
   1109  1.1   hannken 			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   1110  1.1   hannken 	if (r) {
   1111  1.1   hannken 		printf("%s: control command dmamap load failed, "
   1112  1.1   hannken 		       "error code %d\n", device_xname(sc->sc_dev), r);
   1113  1.1   hannken 		goto out;
   1114  1.1   hannken 	}
   1115  1.1   hannken 	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap,
   1116  1.1   hannken 			    sc->sc_ctrl_mac_tbl_mc,
   1117  1.1   hannken 			    (sizeof(struct virtio_net_ctrl_mac_tbl)
   1118  1.1   hannken 			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
   1119  1.1   hannken 			    NULL, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   1120  1.1   hannken 	if (r) {
   1121  1.1   hannken 		printf("%s: multicast MAC table dmamap load failed, "
   1122  1.1   hannken 		       "error code %d\n", device_xname(sc->sc_dev), r);
   1123  1.1   hannken 		bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
   1124  1.1   hannken 		goto out;
   1125  1.1   hannken 	}
   1126  1.1   hannken 
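                       	/* sync the command and both MAC tables for the device to read,
                       	 * and the status buffer for the device to write */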
   1127  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
   1128  1.1   hannken 			0, sizeof(struct virtio_net_ctrl_cmd),
   1129  1.1   hannken 			BUS_DMASYNC_PREWRITE);
   1130  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
   1131  1.1   hannken 			(sizeof(struct virtio_net_ctrl_mac_tbl)
   1132  1.1   hannken 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
   1133  1.1   hannken 			BUS_DMASYNC_PREWRITE);
   1134  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
   1135  1.1   hannken 			(sizeof(struct virtio_net_ctrl_mac_tbl)
   1136  1.1   hannken 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
   1137  1.1   hannken 			BUS_DMASYNC_PREWRITE);
   1138  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
   1139  1.1   hannken 			0, sizeof(struct virtio_net_ctrl_status),
   1140  1.1   hannken 			BUS_DMASYNC_PREREAD);
   1141  1.1   hannken 
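                       	/*
                       	 * Queue the request as a four-descriptor chain: the command
                       	 * header and both MAC tables readable by the device, and the
                       	 * status buffer writable by the device.
                       	 */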
   1142  1.1   hannken 	r = virtio_enqueue_prep(vsc, vq, &slot);
   1143  1.1   hannken 	if (r != 0)
   1144  1.1   hannken 		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
   1145  1.1   hannken 	r = virtio_enqueue_reserve(vsc, vq, slot, 4);
   1146  1.1   hannken 	if (r != 0)
   1147  1.1   hannken 		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
   1148  1.1   hannken 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_cmd_dmamap, true);
   1149  1.1   hannken 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_uc_dmamap, true);
   1150  1.1   hannken 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_tbl_mc_dmamap, true);
   1151  1.1   hannken 	virtio_enqueue(vsc, vq, slot, sc->sc_ctrl_status_dmamap, false);
   1152  1.1   hannken 	virtio_enqueue_commit(vsc, vq, slot, true);
   1153  1.1   hannken 
   1154  1.1   hannken 	/* wait for done */
   1155  1.1   hannken 	mutex_enter(&sc->sc_ctrl_wait_lock);
   1156  1.1   hannken 	while (sc->sc_ctrl_inuse != DONE)
   1157  1.1   hannken 		cv_wait(&sc->sc_ctrl_wait, &sc->sc_ctrl_wait_lock);
   1158  1.1   hannken 	mutex_exit(&sc->sc_ctrl_wait_lock);
   1159  1.1   hannken 	/* already dequeued */
   1160  1.1   hannken 
   1161  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
   1162  1.1   hannken 			sizeof(struct virtio_net_ctrl_cmd),
   1163  1.1   hannken 			BUS_DMASYNC_POSTWRITE);
   1164  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
   1165  1.1   hannken 			(sizeof(struct virtio_net_ctrl_mac_tbl)
   1166  1.1   hannken 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
   1167  1.1   hannken 			BUS_DMASYNC_POSTWRITE);
   1168  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
   1169  1.1   hannken 			(sizeof(struct virtio_net_ctrl_mac_tbl)
   1170  1.1   hannken 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
   1171  1.1   hannken 			BUS_DMASYNC_POSTWRITE);
   1172  1.1   hannken 	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
   1173  1.1   hannken 			sizeof(struct virtio_net_ctrl_status),
   1174  1.1   hannken 			BUS_DMASYNC_POSTREAD);
   1175  1.1   hannken 	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
   1176  1.1   hannken 	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap);
   1177  1.1   hannken 
   1178  1.1   hannken 	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
   1179  1.1   hannken 		r = 0;
   1180  1.1   hannken 	else {
   1181  1.1   hannken 		printf("%s: failed setting rx filter\n",
   1182  1.1   hannken 		       device_xname(sc->sc_dev));
   1183  1.1   hannken 		r = EIO;
   1184  1.1   hannken 	}
   1185  1.1   hannken 
   1186  1.1   hannken out:
   1187  1.1   hannken 	mutex_enter(&sc->sc_ctrl_wait_lock);
   1188  1.1   hannken 	sc->sc_ctrl_inuse = FREE;
   1189  1.1   hannken 	cv_signal(&sc->sc_ctrl_wait);
   1190  1.1   hannken 	mutex_exit(&sc->sc_ctrl_wait_lock);
   1191  1.3  christos 
   1192  1.1   hannken 	return r;
   1193  1.1   hannken }
   1194  1.1   hannken 
   1195  1.1   hannken /* ctrl vq interrupt; wake up the command issuer */
   1196  1.1   hannken static int
   1197  1.1   hannken vioif_ctrl_vq_done(struct virtqueue *vq)
   1198  1.1   hannken {
   1199  1.1   hannken 	struct virtio_softc *vsc = vq->vq_owner;
   1200  1.1   hannken 	struct vioif_softc *sc = device_private(vsc->sc_child);
   1201  1.1   hannken 	int r, slot;
   1202  1.1   hannken 
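                       	/* dequeue the completed control request; ENOENT means nothing
                       	 * was pending, so the interrupt was not ours */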
   1203  1.1   hannken 	r = virtio_dequeue(vsc, vq, &slot, NULL);
   1204  1.1   hannken 	if (r == ENOENT)
   1205  1.1   hannken 		return 0;
   1206  1.1   hannken 	virtio_dequeue_commit(vsc, vq, slot);
   1207  1.1   hannken 
   1208  1.1   hannken 	mutex_enter(&sc->sc_ctrl_wait_lock);
   1209  1.1   hannken 	sc->sc_ctrl_inuse = DONE;
   1210  1.1   hannken 	cv_signal(&sc->sc_ctrl_wait);
   1211  1.1   hannken 	mutex_exit(&sc->sc_ctrl_wait_lock);
   1212  1.1   hannken 
   1213  1.1   hannken 	return 1;
   1214  1.1   hannken }
   1215  1.1   hannken 
   1216  1.1   hannken /*
   1217  1.1   hannken  * If IFF_PROMISC is requested, enable promiscuous mode.
   1218  1.1   hannken  * If the multicast filter is small enough (<= MAXENTRIES), set the rx filter.
   1219  1.1   hannken  * If a large multicast filter exists, use ALLMULTI.
   1220  1.1   hannken  */
   1221  1.1   hannken /*
   1222  1.1   hannken  * If setting the rx filter fails, fall back to ALLMULTI.
   1223  1.1   hannken  * If ALLMULTI fails, fall back to PROMISC.
   1224  1.1   hannken  */
   1225  1.1   hannken static int
   1226  1.1   hannken vioif_rx_filter(struct vioif_softc *sc)
   1227  1.1   hannken {
   1228  1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
   1229  1.1   hannken 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1230  1.1   hannken 	struct ether_multi *enm;
   1231  1.1   hannken 	struct ether_multistep step;
   1232  1.1   hannken 	int nentries;
   1233  1.1   hannken 	int promisc = 0, allmulti = 0, rxfilter = 0;
   1234  1.1   hannken 	int r;
   1235  1.1   hannken 
   1236  1.1   hannken 	if (vsc->sc_nvqs < 3) {	/* no ctrl vq; always promisc */
   1237  1.1   hannken 		ifp->if_flags |= IFF_PROMISC;
   1238  1.1   hannken 		return 0;
   1239  1.1   hannken 	}
   1240  1.1   hannken 
   1241  1.1   hannken 	if (ifp->if_flags & IFF_PROMISC) {
   1242  1.1   hannken 		promisc = 1;
   1243  1.1   hannken 		goto set;
   1244  1.1   hannken 	}
   1245  1.1   hannken 
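                       	/*
                       	 * Walk the multicast list.  nentries starts at -1 and is
                       	 * pre-incremented in the loop condition, so it indexes the entry
                       	 * being copied and finally holds the total count.  Fall back to
                       	 * ALLMULTI if the list overflows MAXENTRIES or contains a range.
                       	 */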
   1246  1.1   hannken 	nentries = -1;
   1247  1.1   hannken 	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
   1248  1.1   hannken 	while (nentries++, enm != NULL) {
   1249  1.1   hannken 		if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
   1250  1.1   hannken 			allmulti = 1;
   1251  1.1   hannken 			goto set;
   1252  1.1   hannken 		}
   1253  1.1   hannken 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1254  1.1   hannken 			   ETHER_ADDR_LEN)) {
   1255  1.1   hannken 			allmulti = 1;
   1256  1.1   hannken 			goto set;
   1257  1.1   hannken 		}
   1258  1.1   hannken 		memcpy(sc->sc_ctrl_mac_tbl_mc->macs[nentries],
   1259  1.1   hannken 		       enm->enm_addrlo, ETHER_ADDR_LEN);
   1260  1.1   hannken 		ETHER_NEXT_MULTI(step, enm);
   1261  1.1   hannken 	}
   1262  1.1   hannken 	rxfilter = 1;
   1263  1.1   hannken 
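                       	/* apply the chosen mode; each step falls back to a more
                       	 * permissive one on failure (rx filter -> ALLMULTI -> PROMISC) */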
   1264  1.1   hannken set:
   1265  1.1   hannken 	if (rxfilter) {
   1266  1.1   hannken 		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
   1267  1.1   hannken 		sc->sc_ctrl_mac_tbl_mc->nentries = nentries;
   1268  1.1   hannken 		r = vioif_set_rx_filter(sc);
   1269  1.1   hannken 		if (r != 0) {
   1270  1.1   hannken 			rxfilter = 0;
   1271  1.1   hannken 			allmulti = 1; /* fallback */
   1272  1.1   hannken 		}
   1273  1.1   hannken 	} else {
   1274  1.1   hannken 		/* remove rx filter */
   1275  1.1   hannken 		sc->sc_ctrl_mac_tbl_uc->nentries = 0;
   1276  1.1   hannken 		sc->sc_ctrl_mac_tbl_mc->nentries = 0;
   1277  1.1   hannken 		r = vioif_set_rx_filter(sc);
   1278  1.1   hannken 		/* what to do on failure? */
   1279  1.1   hannken 	}
   1280  1.1   hannken 	if (allmulti) {
   1281  1.1   hannken 		r = vioif_set_allmulti(sc, true);
   1282  1.1   hannken 		if (r != 0) {
   1283  1.1   hannken 			allmulti = 0;
   1284  1.1   hannken 			promisc = 1; /* fallback */
   1285  1.1   hannken 		}
   1286  1.1   hannken 	} else {
   1287  1.1   hannken 		r = vioif_set_allmulti(sc, false);
   1288  1.1   hannken 		/* what to do on failure? */
   1289  1.1   hannken 	}
   1290  1.1   hannken 	if (promisc) {
   1291  1.1   hannken 		r = vioif_set_promisc(sc, true);
   1292  1.1   hannken 	} else {
   1293  1.1   hannken 		r = vioif_set_promisc(sc, false);
   1294  1.1   hannken 	}
   1295  1.1   hannken 
   1296  1.1   hannken 	return r;
   1297  1.1   hannken }
   1298  1.1   hannken 
   1299  1.1   hannken /* change link status */
   1300  1.1   hannken static int
   1301  1.1   hannken vioif_updown(struct vioif_softc *sc, bool isup)
   1302  1.1   hannken {
   1303  1.1   hannken 	struct virtio_softc *vsc = sc->sc_virtio;
   1304  1.1   hannken 
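                       	/* the link status byte is present only when VIRTIO_NET_F_STATUS
                       	 * has been negotiated */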
   1305  1.1   hannken 	if (!(vsc->sc_features & VIRTIO_NET_F_STATUS))
   1306  1.1   hannken 		return ENODEV;
   1307  1.1   hannken 	virtio_write_device_config_1(vsc,
   1308  1.1   hannken 				     VIRTIO_NET_CONFIG_STATUS,
   1309  1.1   hannken 				     isup ? VIRTIO_NET_S_LINK_UP : 0);
   1310  1.1   hannken 	return 0;
   1311  1.1   hannken }
   1312